diff --git a/app_animals.py b/app_animals.py
index 361044a..ea7326d 100644
--- a/app_animals.py
+++ b/app_animals.py
@@ -96,6 +96,7 @@ with gr.Blocks(theme=gr.themes.Soft(font=[gr.themes.GoogleFont("Plus Jakarta San
[osp.join(example_portrait_dir, "s30.jpg")],
[osp.join(example_portrait_dir, "s31.jpg")],
[osp.join(example_portrait_dir, "s32.jpg")],
+ [osp.join(example_portrait_dir, "s33.jpg")],
[osp.join(example_portrait_dir, "s39.jpg")],
[osp.join(example_portrait_dir, "s40.jpg")],
[osp.join(example_portrait_dir, "s41.jpg")],
diff --git a/assets/docs/changelog/2025-01-01.md b/assets/docs/changelog/2025-01-01.md
new file mode 100644
index 0000000..ad5263d
--- /dev/null
+++ b/assets/docs/changelog/2025-01-01.md
@@ -0,0 +1,36 @@
+## 2025/01/01
+
+**We're thrilled that cats 🐱 are now speaking and singing across the internet!** 🎶
+
+In this update, we've improved the [Animals model](https://huggingface.co/KwaiVGI/LivePortrait/tree/main/liveportrait_animals/base_models_v1.1) with more data. While you might notice only a slight improvement for cats (if at all 😼), dogs have received a slightly larger upgrade. For example, the model is now better at recognizing their mouths instead of mistaking them for noses. 🐶
+
+
+*Before vs. After (v1.1)*
+
+The new (v1.1) Animals model has been uploaded to [HuggingFace](https://huggingface.co/KwaiVGI/LivePortrait/tree/main/liveportrait_animals/base_models_v1.1) and is enabled by default.
+
+> [!IMPORTANT]
+> Make sure to update your weights to use the new version.
+
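+One way to refresh your local weights is with `huggingface_hub` (a minimal sketch, assuming you originally downloaded them into `pretrained_weights/`):
+```python
+from huggingface_hub import snapshot_download
+
+# re-download the LivePortrait snapshot; this pulls in the new base_models_v1.1 folder
+snapshot_download(repo_id="KwaiVGI/LivePortrait", local_dir="pretrained_weights")
+```
+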
+If you prefer to use the original version, modify the configuration in [inference_config.py](../../../src/config/inference_config.py#L29):
+```python
+version_animals = "" # old version
+# version_animals = "_v1.1" # new (v1.1) version
+```
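+
+As a quick sanity check (a hypothetical snippet, assuming it is run from the repository root), you can print a resolved checkpoint path to confirm which version will be loaded:
+```python
+from src.config.inference_config import InferenceConfig
+
+cfg = InferenceConfig()
+# by default this ends in .../base_models_v1.1/appearance_feature_extractor.pth
+print(cfg.checkpoint_F_animal)
+```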
diff --git a/assets/examples/source/s33.jpg b/assets/examples/source/s33.jpg
new file mode 100644
index 0000000..c76b18e
Binary files /dev/null and b/assets/examples/source/s33.jpg differ
diff --git a/inference.py b/inference.py
index 5c80818..97c1436 100644
--- a/inference.py
+++ b/inference.py
@@ -1,6 +1,7 @@
# coding: utf-8
+
"""
-for human
+The entry point for humans
"""
import os
diff --git a/inference_animals.py b/inference_animals.py
index 8fddf7b..20f8452 100644
--- a/inference_animals.py
+++ b/inference_animals.py
@@ -1,6 +1,7 @@
# coding: utf-8
+
"""
-for animal
+The entry point for animals
"""
import os
diff --git a/readme.md b/readme.md
index 0347dba..ba1f690 100644
--- a/readme.md
+++ b/readme.md
@@ -41,6 +41,7 @@
## ๐ฅ Updates
+- **`2025/01/01`**: ๐ถ We updated a new version of the Animals model with more data, see [**here**](./assets/docs/changelog/2025-01-01.md).
- **`2024/10/18`**: โ We have updated the versions of the `transformers` and `gradio` libraries to avoid security vulnerabilities. Details [here](https://github.com/KwaiVGI/LivePortrait/pull/421/files).
- **`2024/08/29`**: ๐ฆ We update the Windows [one-click installer](https://huggingface.co/cleardusk/LivePortrait-Windows/blob/main/LivePortrait-Windows-v20240829.zip) and support auto-updates, see [changelog](https://huggingface.co/cleardusk/LivePortrait-Windows#20240829).
- **`2024/08/19`**: ๐ผ๏ธ We support **image driven mode** and **regional control**. For details, see [**here**](./assets/docs/changelog/2024-08-19.md).
@@ -247,7 +248,7 @@ Discover the invaluable resources contributed by our community to enhance your L
| Repo (sorted by created timestamp) | Description | Author |
|------|------|--------|
-| [**AdvancedLivePortrait-WebUI**](https://github.com/jhj0517/AdvancedLivePortrait-WebUI) | Dedicated gradio based WebUI started from [ComfyUI-AdvancedLivePortrait](https://github.com/PowerHouseMan/ComfyUI-AdvancedLivePortrait) | [@jhj0517](https://github.com/jhj0517) |
+| [**AdvancedLivePortrait-WebUI**](https://github.com/jhj0517/AdvancedLivePortrait-WebUI) | Dedicated Gradio-based WebUI, started from [ComfyUI-AdvancedLivePortrait](https://github.com/PowerHouseMan/ComfyUI-AdvancedLivePortrait) | [@jhj0517](https://github.com/jhj0517) |
| [**FacePoke**](https://github.com/jbilcke-hf/FacePoke) | A real-time head transformation app, controlled by your mouse! | [@jbilcke-hf](https://github.com/jbilcke-hf) |
| [**FaceFusion**](https://github.com/facefusion/facefusion) | FaceFusion 3.0 integrates LivePortrait as `expression_restorer` and `face_editor` processors. | [@henryruhs](https://github.com/henryruhs) |
| [**sd-webui-live-portrait**](https://github.com/dimitribarbot/sd-webui-live-portrait) | WebUI extension of LivePortrait, adding a tab to the original Stable Diffusion WebUI to benefit from LivePortrait features. | [@dimitribarbot](https://github.com/dimitribarbot) |
diff --git a/src/config/inference_config.py b/src/config/inference_config.py
index c9ed197..2d9f365 100644
--- a/src/config/inference_config.py
+++ b/src/config/inference_config.py
@@ -26,10 +26,12 @@ class InferenceConfig(PrintableConfig):
checkpoint_S: str = make_abs_path('../../pretrained_weights/liveportrait/retargeting_models/stitching_retargeting_module.pth') # path to checkpoint to S and R_eyes, R_lip
# ANIMAL MODEL CONFIG, NOT EXPORTED PARAMS
- checkpoint_F_animal: str = make_abs_path('../../pretrained_weights/liveportrait_animals/base_models/appearance_feature_extractor.pth') # path to checkpoint of F
- checkpoint_M_animal: str = make_abs_path('../../pretrained_weights/liveportrait_animals/base_models/motion_extractor.pth') # path to checkpoint pf M
- checkpoint_G_animal: str = make_abs_path('../../pretrained_weights/liveportrait_animals/base_models/spade_generator.pth') # path to checkpoint of G
- checkpoint_W_animal: str = make_abs_path('../../pretrained_weights/liveportrait_animals/base_models/warping_module.pth') # path to checkpoint of W
+ # version_animals = "" # old version
+ version_animals = "_v1.1" # new (v1.1) version
+ checkpoint_F_animal: str = make_abs_path(f'../../pretrained_weights/liveportrait_animals/base_models{version_animals}/appearance_feature_extractor.pth') # path to checkpoint of F
+ checkpoint_M_animal: str = make_abs_path(f'../../pretrained_weights/liveportrait_animals/base_models{version_animals}/motion_extractor.pth') # path to checkpoint of M
+ checkpoint_G_animal: str = make_abs_path(f'../../pretrained_weights/liveportrait_animals/base_models{version_animals}/spade_generator.pth') # path to checkpoint of G
+ checkpoint_W_animal: str = make_abs_path(f'../../pretrained_weights/liveportrait_animals/base_models{version_animals}/warping_module.pth') # path to checkpoint of W
checkpoint_S_animal: str = make_abs_path('../../pretrained_weights/liveportrait/retargeting_models/stitching_retargeting_module.pth') # path to checkpoint to S and R_eyes, R_lip, NOTE: use human temporarily!
# EXPORTED PARAMS
diff --git a/src/utils/animal_landmark_runner.py b/src/utils/animal_landmark_runner.py
index c66efe4..dd91aa5 100644
--- a/src/utils/animal_landmark_runner.py
+++ b/src/utils/animal_landmark_runner.py
@@ -60,7 +60,7 @@ class XPoseRunner(object):
def get_unipose_output(self, image, instance_text_prompt, keypoint_text_prompt, box_threshold, IoU_threshold):
instance_list = instance_text_prompt.split(',')
-
+
if len(keypoint_text_prompt) == 9:
# torch.Size([1, 512]) torch.Size([9, 512])
ins_text_embeddings, kpt_text_embeddings = self.ins_text_embeddings_9, self.kpt_text_embeddings_9
diff --git a/src/utils/cropper.py b/src/utils/cropper.py
index 97e26c7..64fd75e 100644
--- a/src/utils/cropper.py
+++ b/src/utils/cropper.py
@@ -207,12 +207,10 @@ class Cropper(object):
vy_ratio=crop_cfg.vy_ratio,
flag_do_rot=crop_cfg.flag_do_rot,
)
- lmk = self.human_landmark_runner.run(frame_rgb, lmk)
- ret_dct["lmk_crop"] = lmk
# update a 256x256 version for network input
ret_dct["img_crop_256x256"] = cv2.resize(ret_dct["img_crop"], (256, 256), interpolation=cv2.INTER_AREA)
- ret_dct["lmk_crop_256x256"] = ret_dct["lmk_crop"] * 256 / crop_cfg.dsize
+ ret_dct["lmk_crop_256x256"] = ret_dct["pt_crop"] * 256 / crop_cfg.dsize
trajectory.frame_rgb_crop_lst.append(ret_dct["img_crop_256x256"])
trajectory.lmk_crop_lst.append(ret_dct["lmk_crop_256x256"])