diff --git a/app.py b/app.py
index cef413b..22b2f19 100644
--- a/app.py
+++ b/app.py
@@ -65,6 +65,10 @@ def gpu_wrapped_execute_image_retargeting(*args, **kwargs):
 def gpu_wrapped_execute_video_retargeting(*args, **kwargs):
     return gradio_pipeline.execute_video_retargeting(*args, **kwargs)
 
+def reset_sliders(*args, **kwargs):
+    return 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.5, True, True
+
+
 # assets
 title_md = "assets/gradio/gradio_title.md"
 example_portrait_dir = "assets/examples/source"
@@ -101,14 +105,14 @@ mov_x = gr.Slider(minimum=-0.19, maximum=0.19, value=0.0, step=0.01, label="x-ax
 mov_y = gr.Slider(minimum=-0.19, maximum=0.19, value=0.0, step=0.01, label="y-axis movement")
 mov_z = gr.Slider(minimum=0.9, maximum=1.2, value=1.0, step=0.01, label="z-axis movement")
 lip_variation_zero = gr.Slider(minimum=-0.09, maximum=0.09, value=0, step=0.01, label="pouting")
-lip_variation_one = gr.Slider(minimum=-20.0, maximum=15.0, value=0, step=0.01, label="lip compressed<->pursing")
-lip_variation_two = gr.Slider(minimum=0.0, maximum=15.0, value=0, step=0.01, label="grin")
-lip_variation_three = gr.Slider(minimum=-90.0, maximum=120.0, value=0, step=1.0, label="lip close <-> lip open")
-smile = gr.Slider(minimum=-0.3, maximum=1.3, value=0, step=0.01, label="smile")
-wink = gr.Slider(minimum=0, maximum=39, value=0, step=0.01, label="wink")
-eyebrow = gr.Slider(minimum=-30, maximum=30, value=0, step=0.01, label="eyebrow")
-eyeball_direction_x = gr.Slider(minimum=-30.0, maximum=30.0, value=0, step=0.01, label="eye gaze (horizontal)")
-eyeball_direction_y = gr.Slider(minimum=-63.0, maximum=63.0, value=0, step=0.01, label="eye gaze (vertical)")
+lip_variation_one = gr.Slider(minimum=-20.0, maximum=15.0, value=0, step=0.01, label="pursing 😐")
+lip_variation_two = gr.Slider(minimum=0.0, maximum=15.0, value=0, step=0.01, label="grin 😁")
+lip_variation_three = gr.Slider(minimum=-90.0, maximum=120.0, value=0, step=1.0, label="lip close <-> open")
+smile = gr.Slider(minimum=-0.3, maximum=1.3, value=0, step=0.01, label="smile 😄")
+wink = gr.Slider(minimum=0, maximum=39, value=0, step=0.01, label="wink 😉")
+eyebrow = gr.Slider(minimum=-30, maximum=30, value=0, step=0.01, label="eyebrow 🤨")
+eyeball_direction_x = gr.Slider(minimum=-30.0, maximum=30.0, value=0, step=0.01, label="eye gaze (horizontal) 👀")
+eyeball_direction_y = gr.Slider(minimum=-63.0, maximum=63.0, value=0, step=0.01, label="eye gaze (vertical) 🙄")
 retargeting_input_image = gr.Image(type="filepath")
 retargeting_input_video = gr.Video()
 output_image = gr.Image(type="numpy")
@@ -293,6 +297,17 @@ with gr.Blocks(theme=gr.themes.Soft(font=[gr.themes.GoogleFont("Plus Jakarta San
                     eyebrow.render()
                     eyeball_direction_x.render()
                     eyeball_direction_y.render()
+            with gr.Row(visible=True):
+                reset_button = gr.Button("🔄 Reset")
+                reset_button.click(
+                    fn=reset_sliders,
+                    inputs=None,
+                    outputs=[
+                        head_pitch_slider, head_yaw_slider, head_roll_slider, mov_x, mov_y, mov_z,
+                        lip_variation_zero, lip_variation_one, lip_variation_two, lip_variation_three, smile, wink, eyebrow, eyeball_direction_x, eyeball_direction_y,
+                        retargeting_source_scale, flag_stitching_retargeting_input, flag_do_crop_input_retargeting_image
+                    ]
+                )
         with gr.Row(visible=True):
             with gr.Column():
                 with gr.Accordion(open=True, label="Retargeting Image Input"):
@@ -306,7 +321,8 @@ with gr.Blocks(theme=gr.themes.Soft(font=[gr.themes.GoogleFont("Plus Jakarta San
                         [osp.join(example_portrait_dir, "s7.jpg")],
                         [osp.join(example_portrait_dir, "s12.jpg")],
                         [osp.join(example_portrait_dir, "s22.jpg")],
-                        [osp.join(example_portrait_dir, "s23.jpg")],
+                        # [osp.join(example_portrait_dir, "s23.jpg")],
+                        [osp.join(example_portrait_dir, "s42.jpg")],
                     ],
                     inputs=[retargeting_input_image],
                     cache_examples=False,
@@ -322,7 +338,7 @@ with gr.Blocks(theme=gr.themes.Soft(font=[gr.themes.GoogleFont("Plus Jakarta San
                     [
                         retargeting_input_image,
                         retargeting_output_image,
-                        retargeting_output_image_paste_back
+                        retargeting_output_image_paste_back,
                     ],
                     value="🧹 Clear"
                 )
diff --git a/assets/docs/changelog/2024-08-06.md b/assets/docs/changelog/2024-08-06.md
new file mode 100644
index 0000000..4d49e0a
--- /dev/null
+++ b/assets/docs/changelog/2024-08-06.md
@@ -0,0 +1,7 @@
+## Portrait Editing
+
+<p align="center">
+  <img src="../editing-portrait-2024-08-06.jpg" alt="LivePortrait">
+  <br>
+  Portrait Editing in the Gradio Interface
+</p>
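Note on the reset wiring in the app.py hunk above: `Button.click` assigns the values returned by `fn` to the `outputs` components positionally, so `reset_sliders` returns exactly 18 values for the 18 listed components (the non-zero defaults being `mov_z` at 1.0 and `retargeting_source_scale` at 2.5, plus the two `True` flags). The snippet below is a minimal, self-contained sketch of that pattern with hypothetical component names and defaults, not the actual ones from app.py:

```python
import gradio as gr

# Minimal sketch of the reset pattern used in the app.py hunk above:
# Button.click maps the tuple returned by `fn` onto the `outputs`
# components positionally, one value per component.
# Component names and default values here are illustrative.
def reset_demo_controls():
    return 0.0, 1.0, True  # smile, movement scale, do-crop flag (hypothetical defaults)

with gr.Blocks() as demo:
    smile = gr.Slider(minimum=-0.3, maximum=1.3, value=0.0, step=0.01, label="smile")
    scale = gr.Slider(minimum=0.9, maximum=1.2, value=1.0, step=0.01, label="movement scale")
    do_crop = gr.Checkbox(value=True, label="do crop")
    reset_button = gr.Button("Reset")
    reset_button.click(fn=reset_demo_controls, inputs=None, outputs=[smile, scale, do_crop])

if __name__ == "__main__":
    demo.launch()
```

If the return tuple and the `outputs` list fall out of sync, values land on the wrong components (or Gradio reports a count mismatch), which is why the patch keeps the two lists in the same order.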
diff --git a/assets/docs/editing-portrait-2024-08-06.jpg b/assets/docs/editing-portrait-2024-08-06.jpg
new file mode 100644
index 0000000..0b09f18
Binary files /dev/null and b/assets/docs/editing-portrait-2024-08-06.jpg differ
diff --git a/assets/examples/source/s42.jpg b/assets/examples/source/s42.jpg
new file mode 100644
index 0000000..15b9deb
Binary files /dev/null and b/assets/examples/source/s42.jpg differ
diff --git a/assets/gradio/gradio_description_retargeting.md b/assets/gradio/gradio_description_retargeting.md
index 14fc0c4..e483ec6 100644
--- a/assets/gradio/gradio_description_retargeting.md
+++ b/assets/gradio/gradio_description_retargeting.md
@@ -6,9 +6,8 @@
-Retargeting Image
-
-Upload a Source Portrait as Retargeting Input, wait for the target eyes-open ratio and target lip-open ratio to be calculated, and then drag the sliders. You can try running it multiple times.
+Retargeting and Editing Portraits
+
+Upload a source portrait, and the eyes-open ratio and lip-open ratio will be auto-calculated. Adjust the sliders to see instant edits. Feel free to experiment! 🎨
 
 😊 Set both target eyes-open and lip-open ratios to 0.8 to see what's going on!
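The description above covers the hosted demo, but the ratio-driven interaction is easy to prototype. Below is a minimal, hypothetical Gradio sketch of target eyes-open and lip-open ratio sliders wired to a placeholder callback; the real interface instead calls into the LivePortrait pipeline (e.g. `gpu_wrapped_execute_image_retargeting` in app.py), whose actual argument list is not reproduced here:

```python
import gradio as gr

# Placeholder for the real retargeting callback: the actual pipeline
# re-renders the portrait at the requested ratios; here we only echo them.
def fake_retargeting(image_path, eyes_ratio, lip_ratio):
    return f"would retarget {image_path} with eyes-open={eyes_ratio:.2f}, lip-open={lip_ratio:.2f}"

with gr.Blocks() as demo:
    src = gr.Image(type="filepath", label="Source Portrait")
    # 0.8 is the instructive target suggested in the description above.
    eyes = gr.Slider(minimum=0.0, maximum=0.8, value=0.0, step=0.01, label="target eyes-open ratio")
    lip = gr.Slider(minimum=0.0, maximum=0.8, value=0.0, step=0.01, label="target lip-open ratio")
    out = gr.Textbox(label="result (placeholder)")
    run = gr.Button("Retargeting")
    run.click(fn=fake_retargeting, inputs=[src, eyes, lip], outputs=[out])

if __name__ == "__main__":
    demo.launch()
```

Dragging either slider to 0.8 in this sketch only changes the echoed text; in the real app the same gesture re-renders the portrait with the requested eyes-open and lip-open ratios.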
diff --git a/readme.md b/readme.md
index 8848e5b..983b973 100644
--- a/readme.md
+++ b/readme.md
@@ -38,7 +38,8 @@
 ## 🔥 Updates
-- **`2024/08/05`**: 📦 Windows users download the [one-click installer](https://huggingface.co/cleardusk/LivePortrait-Windows/blob/main/LivePortrait-Windows-v20240805.zip) for Humans mode and **Animals mode** now! For details, see [**here**](./assets/docs/changelog/2024-08-05.md).
+- **`2024/08/06`**: 🎨 We support precise static portrait editing in the Gradio interface, inspired by [ComfyUI-AdvancedLivePortrait](https://github.com/PowerHouseMan/ComfyUI-AdvancedLivePortrait). See [here](./assets/docs/changelog/2024-08-06.md).
+- **`2024/08/05`**: 📦 Windows users can now download the [one-click installer](https://huggingface.co/cleardusk/LivePortrait-Windows/blob/main/LivePortrait-Windows-v20240805.zip) for Humans mode and **Animals mode**! For details, see [**here**](./assets/docs/changelog/2024-08-05.md).
 - **`2024/08/02`**: 😸 We released a version of the **Animals model**, along with several other updates and improvements. Check out the details [**here**](./assets/docs/changelog/2024-08-02.md)!
 - **`2024/07/25`**: 📦 Windows users can now download the package from [HuggingFace](https://huggingface.co/cleardusk/LivePortrait-Windows/tree/main) or [BaiduYun](https://pan.baidu.com/s/1FWsWqKe0eNfXrwjEhhCqlw?pwd=86q2). Simply unzip and double-click `run_windows.bat` to enjoy!
 - **`2024/07/24`**: 🎨 We support pose editing for source portraits in the Gradio interface. We've also lowered the default detection threshold to increase recall. [Have fun](assets/docs/changelog/2024-07-24.md)!