Mirror of https://github.com/KwaiVGI/LivePortrait.git, synced 2024-12-23 13:04:23 +00:00
Generate output video directly to disk
parent 4c643a86b6
commit 41b4430b5e
@@ -220,7 +220,7 @@ class LivePortraitPipeline(object):
         # driving frame | source image | generation, or source image | generation
         frames_concatenated = concat_frames(driving_rgb_crop_256x256_lst, img_crop_256x256, I_p_lst)
         wfp_concat = osp.join(args.output_dir, f'{basename(args.source_image)}--{basename(args.driving_info)}_concat.mp4')
-        images2video(frames_concatenated, wfp=wfp_concat, fps=output_fps)
+        images2video(frames_concatenated, wfp=wfp_concat, fps=output_fps, frames_count=len(driving_rgb_crop_256x256_lst))
 
         if flag_has_audio:
             # final result with concact
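Note: concat_frames now yields frames lazily (see the hunks below), so the caller can no longer take len() of its result; the expected number of frames is handed over through the new frames_count keyword instead. A minimal sketch of that pattern with made-up names, not the repository's code:

def frame_source(num_frames):
    # Stands in for a lazy frame producer; a generator has no __len__.
    for i in range(num_frames):
        yield i  # placeholder for an np.ndarray frame

frames = frame_source(120)
# Same fallback the patched images2video uses: prefer len(), else the explicit count.
n = len(frames) if hasattr(frames, '__len__') else 120
print(n)  # 120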
@@ -37,12 +37,18 @@ def images2video(images, wfp, **kwargs):
         codec=codec, quality=quality, ffmpeg_params=ffmpeg_params, pixelformat=pixelformat, macro_block_size=macro_block_size
     )
 
-    n = len(images)
+    n = len(images) if hasattr(images, '__len__') else kwargs.get('frames_count')
+    img_it = iter(images)
     for i in track(range(n), description='Writing', transient=True):
+        try:
+            img = next(img_it)
+        except StopIteration:
+            break
+
         if image_mode.lower() == 'bgr':
-            writer.append_data(images[i][..., ::-1])
+            writer.append_data(img[..., ::-1])
         else:
-            writer.append_data(images[i])
+            writer.append_data(img)
 
     writer.close()
 
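With this hunk, images2video consumes images through an iterator, so a generator works as well as a list and only one frame needs to be held in memory at a time; the frames_count keyword supplies the length a generator cannot report. A rough standalone sketch of the same write loop, assuming imageio with its ffmpeg backend and synthetic frames (hypothetical names, not the project's code):

import numpy as np
import imageio

def fake_frames(num, h=256, w=256):
    # Generator standing in for the lazy concat_frames output: one RGB frame at a time.
    for i in range(num):
        yield np.full((h, w, 3), i % 256, dtype=np.uint8)

num = 30                           # known up front, like frames_count in the patch
writer = imageio.get_writer('demo.mp4', fps=25)
it = iter(fake_frames(num))
for _ in range(num):
    try:
        frame = next(it)
    except StopIteration:          # stop cleanly if the source yields fewer frames than promised
        break
    writer.append_data(frame)
writer.close()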
@@ -82,10 +88,9 @@ def blend(img: np.ndarray, mask: np.ndarray, background_color=(255, 255, 255)):
 
 def concat_frames(driving_image_lst, source_image, I_p_lst):
     # TODO: add more concat style, e.g., left-down corner driving
-    out_lst = []
     h, w, _ = I_p_lst[0].shape
 
-    for idx, _ in track(enumerate(I_p_lst), total=len(I_p_lst), description='Concatenating result...'):
+    for idx, _ in enumerate(I_p_lst):
         I_p = I_p_lst[idx]
         source_image_resized = cv2.resize(source_image, (w, h))
 
@@ -96,8 +101,7 @@ def concat_frames(driving_image_lst, source_image, I_p_lst):
         driving_image_resized = cv2.resize(driving_image, (w, h))
         out = np.hstack((driving_image_resized, source_image_resized, I_p))
 
-        out_lst.append(out)
-    return out_lst
+        yield out
 
 
 class VideoWriter:
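Turning concat_frames into a generator means the hstacked frames are produced on demand and never collected into out_lst, which is what lets the pipeline write the concatenated video straight to disk; the trade-offs are that the result is single-pass and has no length (hence frames_count above), and the progress bar in concat_frames is dropped (the writer loop in images2video already shows one). A reduced sketch of the lazy-concat idea with dummy data (hypothetical helper, not the repository's function):

import numpy as np

def lazy_concat(driving_lst, source_img, generated_lst):
    # Yields each horizontally stacked frame instead of accumulating a list.
    for drv, gen in zip(driving_lst, generated_lst):
        yield np.hstack((drv, source_img, gen))

drv = [np.ones((8, 8, 3), dtype=np.uint8)] * 3
gen = [np.zeros((8, 8, 3), dtype=np.uint8)] * 3
src = np.zeros((8, 8, 3), dtype=np.uint8)

for frame in lazy_concat(drv, src, gen):
    print(frame.shape)  # (8, 24, 3): driving | source | generation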