This commit is contained in:
Ishimwe Prince 2025-03-04 10:39:56 +08:00 committed by GitHub
commit 7a94fdfd00
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 92 additions and 0 deletions

17
.dockerignore Normal file
View File

@@ -0,0 +1,17 @@
# The .dockerignore file excludes files from the container build process.
#
# https://docs.docker.com/engine/reference/builder/#dockerignore-file
# Exclude Git files
.git
.github
.gitignore
# Exclude Python cache files
__pycache__
.mypy_cache
.pytest_cache
.ruff_cache
# Exclude Python virtual environment
/venv

33
cog.yaml Normal file
View File

@@ -0,0 +1,33 @@
# Configuration for Cog ⚙️
# Reference: https://cog.run/yaml
build:
gpu: true
python_version: "3.10"
system_packages:
- "ffmpeg"
- "libsm6"
- "libxext6"
- "libgl1"
python_packages:
- "torch==2.3.0"
- "torchvision==0.18.0"
- "torchaudio==2.3.0"
- "numpy==1.26.4"
- "pyyaml==6.0.1"
- "opencv-python==4.10.0.84"
- "scipy==1.13.1"
- "imageio==2.34.2"
- "lmdb==1.4.1"
- "tqdm==4.66.4"
- "rich==13.7.1"
- "ffmpeg==1.4"
- "onnxruntime-gpu==1.18.0"
- "onnx==1.16.1"
- "scikit-image==0.24.0"
- "albumentations==1.4.10"
- "matplotlib==3.9.0"
- "imageio-ffmpeg==0.5.1"
- "tyro==0.8.5"
- "gradio==3.48.0"
predict: "predict.py:Predictor"

42
predict.py Normal file
View File

@@ -0,0 +1,42 @@
from cog import BasePredictor, Input, Path, File
from src.config.argument_config import ArgumentConfig
from src.config.inference_config import InferenceConfig
from src.config.crop_config import CropConfig
from src.live_portrait_pipeline import LivePortraitPipeline
import requests
class Predictor(BasePredictor):
    """Cog predictor that animates a portrait image with a driving video
    using the LivePortrait pipeline.

    ``setup`` loads the model once per container; ``predict`` runs one
    image+video animation and returns the path of the rendered video.
    """

    def setup(self) -> None:
        """Load the model into memory to make running multiple predictions efficient."""
        self.live_portrait_pipeline = LivePortraitPipeline(
            inference_cfg=InferenceConfig(),
            crop_cfg=CropConfig(),
        )

    def predict(
        self,
        input_image_path: Path = Input(description="Portrait image"),
        input_video_path: Path = Input(description="Driving video"),
        flag_relative_input: bool = Input(description="relative motion", default=True),
        flag_do_crop_input: bool = Input(description="We recommend checking the do crop option when facial areas occupy a relatively small portion of your image.", default=True),
        flag_pasteback: bool = Input(description="paste-back", default=True),
    ) -> Path:
        """Run a single prediction on the model.

        Args:
            input_image_path: Source portrait image to animate.
            input_video_path: Driving video providing the motion.
            flag_relative_input: Use relative motion transfer.
            flag_do_crop_input: Crop the face region before animating.
            flag_pasteback: Paste the animated face back onto the full image.

        Returns:
            Path to the generated video file.
        """
        user_args = ArgumentConfig(
            flag_relative=flag_relative_input,
            flag_do_crop=flag_do_crop_input,
            flag_pasteback=flag_pasteback,
            # Bug fix: pass both paths as plain strings. driving_info was
            # already converted with str(), but source_image was passed as a
            # cog Path object — convert it the same way for consistency
            # (presumably ArgumentConfig expects string paths; TODO confirm
            # against src/config/argument_config.py).
            source_image=str(input_image_path),
            driving_info=str(input_video_path),
            output_dir="/tmp/",
        )
        # Propagate the per-request flags into both pipeline sub-components
        # before executing, since they were configured once in setup().
        self.live_portrait_pipeline.cropper.update_config(user_args.__dict__)
        self.live_portrait_pipeline.live_portrait_wrapper.update_config(user_args.__dict__)
        video_path, _ = self.live_portrait_pipeline.execute(user_args)
        return Path(video_path)