diff --git a/src/bilogger.py b/src/bilogger.py index a97ceb7..bbc73d1 100755 --- a/src/bilogger.py +++ b/src/bilogger.py @@ -12,14 +12,14 @@ import io class BiLogger(io.TextIOWrapper): - """ A bidirectional logger that both prints the output + """A bidirectional logger that both prints the output and log all output to file. Original code from: https://stackoverflow.com/a/14906787 """ def __init__(self, terminal: io.TextIOWrapper, log_file: io.BufferedRandom): - """ initialize BiLogger + """initialize BiLogger Args: terminal (_io.TextIOWrapper): original terminal IO wrapper @@ -30,7 +30,7 @@ class BiLogger(io.TextIOWrapper): self.fileno = self.log_file.fileno def write(self, message: str): - """ write message to original terminal output and log file + """write message to original terminal output and log file Args: message (str): message to write @@ -41,6 +41,5 @@ class BiLogger(io.TextIOWrapper): self.log_file.flush() def flush(self): - """ flush logger (for compability only) - """ + """flush logger (for compability only)""" pass diff --git a/src/image_cleaner.py b/src/image_cleaner.py index afec7f9..087ce4f 100755 --- a/src/image_cleaner.py +++ b/src/image_cleaner.py @@ -22,7 +22,7 @@ import time class ImageCleaner(threading.Thread): - """ Video2X Image Cleaner + """Video2X Image Cleaner This class creates an object that keeps track of extracted frames that has already been upscaled and are not needed @@ -40,8 +40,7 @@ class ImageCleaner(threading.Thread): self.running = False def run(self): - """ Run image cleaner - """ + """Run image cleaner""" self.running = True while self.running: @@ -49,13 +48,12 @@ class ImageCleaner(threading.Thread): time.sleep(1) def stop(self): - """ Stop the image cleaner - """ + """Stop the image cleaner""" self.running = False self.join() def remove_upscaled_frames(self): - """ remove frames that have already been upscaled + """remove frames that have already been upscaled This method compares the files in the extracted frames directory with the upscaled frames directory, and removes diff --git a/src/progress_monitor.py b/src/progress_monitor.py index 1c8e7f2..2a8f0a0 100755 --- a/src/progress_monitor.py +++ b/src/progress_monitor.py @@ -17,7 +17,7 @@ from tqdm import tqdm class ProgressMonitor(threading.Thread): - """ progress monitor + """progress monitor This class provides progress monitoring functionalities by keeping track of the amount of frames in the input @@ -34,7 +34,15 @@ class ProgressMonitor(threading.Thread): def run(self): self.running = True - with tqdm(total=self.upscaler.total_frames, ascii=True, desc=_('Processing: {} (pass {}/{})').format(self.upscaler.current_input_file.name, self.upscaler.current_pass, len(self.upscaler.scaling_jobs))) as progress_bar: + with tqdm( + total=self.upscaler.total_frames, + ascii=True, + desc=_("Processing: {} (pass {}/{})").format( + self.upscaler.current_input_file.name, + self.upscaler.current_pass, + len(self.upscaler.scaling_jobs), + ), + ) as progress_bar: # tqdm update method adds the value to the progress # bar instead of setting the value. Therefore, a delta # needs to be calculated. 
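Note on the hunk above: tqdm's update() method is incremental, adding its argument to the bar rather than setting an absolute position, which is why the monitor has to track a delta between polls. A minimal, self-contained sketch of that pattern (hypothetical function and variable names, not the actual ProgressMonitor code):

    # sketch: poll a completed-frame count and feed tqdm the delta
    import time
    from tqdm import tqdm

    def monitor_progress(get_completed_count, total_frames):
        previous_count = 0
        with tqdm(total=total_frames, ascii=True) as progress_bar:
            while previous_count < total_frames:
                current_count = get_completed_count()
                # update() adds to the bar, so pass the change since the last poll
                progress_bar.update(current_count - previous_count)
                previous_count = current_count
                time.sleep(1)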
@@ -42,7 +50,13 @@ class ProgressMonitor(threading.Thread): while self.running: with contextlib.suppress(FileNotFoundError): - upscaled_frames = [f for f in self.upscaler.upscaled_frames.iterdir() if str(f).lower().endswith(self.upscaler.extracted_frame_format.lower())] + upscaled_frames = [ + f + for f in self.upscaler.upscaled_frames.iterdir() + if str(f) + .lower() + .endswith(self.upscaler.extracted_frame_format.lower()) + ] if len(upscaled_frames) >= 1: self.upscaler.last_frame_upscaled = sorted(upscaled_frames)[-1] self.upscaler.total_frames_upscaled = len(upscaled_frames) diff --git a/src/upscaler.py b/src/upscaler.py index 306b9bc..9688a2b 100755 --- a/src/upscaler.py +++ b/src/upscaler.py @@ -43,40 +43,44 @@ from tqdm import tqdm import magic # internationalization constants -DOMAIN = 'video2x' -LOCALE_DIRECTORY = pathlib.Path(__file__).parent.absolute() / 'locale' +DOMAIN = "video2x" +LOCALE_DIRECTORY = pathlib.Path(__file__).parent.absolute() / "locale" # getting default locale settings default_locale, encoding = locale.getdefaultlocale() -language = gettext.translation(DOMAIN, LOCALE_DIRECTORY, [default_locale], fallback=True) +language = gettext.translation( + DOMAIN, LOCALE_DIRECTORY, [default_locale], fallback=True +) language.install() _ = language.gettext # version information -UPSCALER_VERSION = '4.4.1' +UPSCALER_VERSION = "4.4.1" # these names are consistent for # - driver selection in command line # - driver wrapper file names # - config file keys -AVAILABLE_DRIVERS = ['waifu2x_caffe', - 'waifu2x_converter_cpp', - 'waifu2x_ncnn_vulkan', - 'srmd_ncnn_vulkan', - 'realsr_ncnn_vulkan', - 'anime4kcpp'] +AVAILABLE_DRIVERS = [ + "waifu2x_caffe", + "waifu2x_converter_cpp", + "waifu2x_ncnn_vulkan", + "srmd_ncnn_vulkan", + "realsr_ncnn_vulkan", + "anime4kcpp", +] # fixed scaling ratios supported by the drivers # that only support certain fixed scale ratios DRIVER_FIXED_SCALING_RATIOS = { - 'waifu2x_ncnn_vulkan': [1, 2], - 'srmd_ncnn_vulkan': [2, 3, 4], - 'realsr_ncnn_vulkan': [4], + "waifu2x_ncnn_vulkan": [1, 2], + "srmd_ncnn_vulkan": [2, 3, 4], + "realsr_ncnn_vulkan": [4], } class Upscaler: - """ An instance of this class is a upscaler that will + """An instance of this class is a upscaler that will upscale all images in the given directory. 
Raises: @@ -91,17 +95,18 @@ class Upscaler: driver_settings: dict, ffmpeg_settings: dict, gifski_settings: dict, - driver: str = 'waifu2x_caffe', + driver: str = "waifu2x_caffe", scale_ratio: float = None, scale_width: int = None, scale_height: int = None, processes: int = 1, - video2x_cache_directory: pathlib.Path = pathlib.Path(tempfile.gettempdir()) / 'video2x', - extracted_frame_format: str = 'png', - output_file_name_format_string: str = '{original_file_name}_output{extension}', - image_output_extension: str = '.png', - video_output_extension: str = '.mp4', - preserve_frames: bool = False + video2x_cache_directory: pathlib.Path = pathlib.Path(tempfile.gettempdir()) + / "video2x", + extracted_frame_format: str = "png", + output_file_name_format_string: str = "{original_file_name}_output{extension}", + image_output_extension: str = ".png", + video_output_extension: str = ".mp4", + preserve_frames: bool = False, ): # required parameters @@ -137,109 +142,155 @@ class Upscaler: self.last_frame_upscaled = pathlib.Path() def create_temp_directories(self): - """create temporary directories - """ + """create temporary directories""" # if cache directory unspecified, use %TEMP%\video2x if self.video2x_cache_directory is None: - self.video2x_cache_directory = pathlib.Path(tempfile.gettempdir()) / 'video2x' + self.video2x_cache_directory = ( + pathlib.Path(tempfile.gettempdir()) / "video2x" + ) # if specified cache path exists and isn't a directory - if self.video2x_cache_directory.exists() and not self.video2x_cache_directory.is_dir(): - Avalon.error(_('Specified or default cache directory is a file/link')) - raise FileExistsError('Specified or default cache directory is a file/link') + if ( + self.video2x_cache_directory.exists() + and not self.video2x_cache_directory.is_dir() + ): + Avalon.error(_("Specified or default cache directory is a file/link")) + raise FileExistsError("Specified or default cache directory is a file/link") # if cache directory doesn't exist, try creating it if not self.video2x_cache_directory.exists(): try: - Avalon.debug_info(_('Creating cache directory {}').format(self.video2x_cache_directory)) + Avalon.debug_info( + _("Creating cache directory {}").format( + self.video2x_cache_directory + ) + ) self.video2x_cache_directory.mkdir(parents=True, exist_ok=True) except Exception as exception: - Avalon.error(_('Unable to create {}').format(self.video2x_cache_directory)) + Avalon.error( + _("Unable to create {}").format(self.video2x_cache_directory) + ) raise exception # create temp directories for extracted frames and upscaled frames - self.extracted_frames = pathlib.Path(tempfile.mkdtemp(dir=self.video2x_cache_directory)) - Avalon.debug_info(_('Extracted frames are being saved to: {}').format(self.extracted_frames)) - self.upscaled_frames = pathlib.Path(tempfile.mkdtemp(dir=self.video2x_cache_directory)) - Avalon.debug_info(_('Upscaled frames are being saved to: {}').format(self.upscaled_frames)) + self.extracted_frames = pathlib.Path( + tempfile.mkdtemp(dir=self.video2x_cache_directory) + ) + Avalon.debug_info( + _("Extracted frames are being saved to: {}").format(self.extracted_frames) + ) + self.upscaled_frames = pathlib.Path( + tempfile.mkdtemp(dir=self.video2x_cache_directory) + ) + Avalon.debug_info( + _("Upscaled frames are being saved to: {}").format(self.upscaled_frames) + ) def cleanup_temp_directories(self): - """delete temp directories when done - """ + """delete temp directories when done""" if not self.preserve_frames: - for directory in 
[self.extracted_frames, self.upscaled_frames, self.video2x_cache_directory]: + for directory in [ + self.extracted_frames, + self.upscaled_frames, + self.video2x_cache_directory, + ]: try: # avalon framework cannot be used if python is shutting down # therefore, plain print is used - print(_('Cleaning up cache directory: {}').format(directory)) + print(_("Cleaning up cache directory: {}").format(directory)) shutil.rmtree(directory) except FileNotFoundError: pass except OSError: - print(_('Unable to delete: {}').format(directory)) + print(_("Unable to delete: {}").format(directory)) traceback.print_exc() def _check_arguments(self): if isinstance(self.input, list): if self.output.exists() and not self.output.is_dir(): - Avalon.error(_('Input and output path type mismatch')) - Avalon.error(_('Input is multiple files but output is not directory')) - raise ArgumentError('input output path type mismatch') + Avalon.error(_("Input and output path type mismatch")) + Avalon.error(_("Input is multiple files but output is not directory")) + raise ArgumentError("input output path type mismatch") for input_path in self.input: if not input_path.is_file() and not input_path.is_dir(): - Avalon.error(_('Input path {} is neither a file nor a directory').format(input_path)) - raise FileNotFoundError(f'{input_path} is neither file nor directory') + Avalon.error( + _("Input path {} is neither a file nor a directory").format( + input_path + ) + ) + raise FileNotFoundError( + f"{input_path} is neither file nor directory" + ) with contextlib.suppress(FileNotFoundError): if input_path.samefile(self.output): - Avalon.error(_('Input directory and output directory cannot be the same')) - raise FileExistsError('input directory and output directory are the same') + Avalon.error( + _("Input directory and output directory cannot be the same") + ) + raise FileExistsError( + "input directory and output directory are the same" + ) # if input is a file elif self.input.is_file(): if self.output.is_dir(): - Avalon.error(_('Input and output path type mismatch')) - Avalon.error(_('Input is single file but output is directory')) - raise ArgumentError('input output path type mismatch') - if self.output.suffix == '': - Avalon.error(_('No suffix found in output file path')) - Avalon.error(_('Suffix must be specified')) - raise ArgumentError('no output file suffix specified') + Avalon.error(_("Input and output path type mismatch")) + Avalon.error(_("Input is single file but output is directory")) + raise ArgumentError("input output path type mismatch") + if self.output.suffix == "": + Avalon.error(_("No suffix found in output file path")) + Avalon.error(_("Suffix must be specified")) + raise ArgumentError("no output file suffix specified") # if input is a directory elif self.input.is_dir(): if self.output.is_file(): - Avalon.error(_('Input and output path type mismatch')) - Avalon.error(_('Input is directory but output is existing single file')) - raise ArgumentError('input output path type mismatch') + Avalon.error(_("Input and output path type mismatch")) + Avalon.error(_("Input is directory but output is existing single file")) + raise ArgumentError("input output path type mismatch") with contextlib.suppress(FileNotFoundError): if self.input.samefile(self.output): - Avalon.error(_('Input directory and output directory cannot be the same')) - raise FileExistsError('input directory and output directory are the same') + Avalon.error( + _("Input directory and output directory cannot be the same") + ) + raise FileExistsError( + "input 
directory and output directory are the same" + ) # if input is neither else: - Avalon.error(_('Input path is neither a file nor a directory')) - raise FileNotFoundError(f'{self.input} is neither file nor directory') + Avalon.error(_("Input path is neither a file nor a directory")) + raise FileNotFoundError(f"{self.input} is neither file nor directory") # check FFmpeg settings - ffmpeg_path = pathlib.Path(self.ffmpeg_settings['ffmpeg_path']) - if not ((pathlib.Path(ffmpeg_path / 'ffmpeg.exe').is_file() and - pathlib.Path(ffmpeg_path / 'ffprobe.exe').is_file()) or - (pathlib.Path(ffmpeg_path / 'ffmpeg').is_file() and - pathlib.Path(ffmpeg_path / 'ffprobe').is_file())): - Avalon.error(_('FFmpeg or FFprobe cannot be found under the specified path')) - Avalon.error(_('Please check the configuration file settings')) - raise FileNotFoundError(self.ffmpeg_settings['ffmpeg_path']) + ffmpeg_path = pathlib.Path(self.ffmpeg_settings["ffmpeg_path"]) + if not ( + ( + pathlib.Path(ffmpeg_path / "ffmpeg.exe").is_file() + and pathlib.Path(ffmpeg_path / "ffprobe.exe").is_file() + ) + or ( + pathlib.Path(ffmpeg_path / "ffmpeg").is_file() + and pathlib.Path(ffmpeg_path / "ffprobe").is_file() + ) + ): + Avalon.error( + _("FFmpeg or FFprobe cannot be found under the specified path") + ) + Avalon.error(_("Please check the configuration file settings")) + raise FileNotFoundError(self.ffmpeg_settings["ffmpeg_path"]) # check if driver settings driver_settings = copy.deepcopy(self.driver_settings) - driver_path = driver_settings.pop('path') + driver_path = driver_settings.pop("path") # check if driver path exists - if not (pathlib.Path(driver_path).is_file() or pathlib.Path(f'{driver_path}.exe').is_file()): - Avalon.error(_('Specified driver executable directory doesn\'t exist')) - Avalon.error(_('Please check the configuration file settings')) + if not ( + pathlib.Path(driver_path).is_file() + or pathlib.Path(f"{driver_path}.exe").is_file() + ): + Avalon.error(_("Specified driver executable directory doesn't exist")) + Avalon.error(_("Please check the configuration file settings")) raise FileNotFoundError(driver_path) # parse driver arguments using driver's parser @@ -255,21 +306,25 @@ class Upscaler: else: if len(key) == 1: - driver_arguments.append(f'-{key}') + driver_arguments.append(f"-{key}") else: - driver_arguments.append(f'--{key}') + driver_arguments.append(f"--{key}") # true means key is an option if value is not True: driver_arguments.append(str(value)) - DriverWrapperMain = getattr(importlib.import_module(f'wrappers.{self.driver}'), 'WrapperMain') + DriverWrapperMain = getattr( + importlib.import_module(f"wrappers.{self.driver}"), "WrapperMain" + ) DriverWrapperMain.parse_arguments(driver_arguments) except AttributeError as e: - Avalon.error(_('Failed to parse driver argument: {}').format(e.args[0])) + Avalon.error(_("Failed to parse driver argument: {}").format(e.args[0])) raise e - def _upscale_frames(self, input_directory: pathlib.Path, output_directory: pathlib.Path): - """ Upscale video frames with waifu2x-caffe + def _upscale_frames( + self, input_directory: pathlib.Path, output_directory: pathlib.Path + ): + """Upscale video frames with waifu2x-caffe This function upscales all the frames extracted by ffmpeg using the waifu2x-caffe binary. 
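A note on the driver-argument construction reformatted in the _check_arguments hunk above: the settings dictionary is flattened into an argv-style list where single-character keys become short flags, longer keys become long flags, and a value of True marks a bare switch with no operand. A small standalone illustration under those assumptions (the settings keys here are hypothetical, and unset values are assumed to be skipped by the branch not shown in this hunk):

    # hypothetical driver settings -> command-line argument list
    driver_settings = {"n": 2, "scale_ratio": 2.0, "tta": True, "model_dir": None}

    driver_arguments = []
    for key, value in driver_settings.items():
        if value is None or value is False:
            continue  # assumed: unset options are skipped
        if len(key) == 1:
            driver_arguments.append(f"-{key}")
        else:
            driver_arguments.append(f"--{key}")
        if value is not True:  # True means the key is a valueless switch
            driver_arguments.append(str(value))

    # driver_arguments == ["-n", "2", "--scale_ratio", "2.0", "--tta"]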
@@ -285,7 +340,9 @@ class Upscaler: # initialize waifu2x driver if self.driver not in AVAILABLE_DRIVERS: - raise UnrecognizedDriverError(_('Unrecognized driver: {}').format(self.driver)) + raise UnrecognizedDriverError( + _("Unrecognized driver: {}").format(self.driver) + ) # list all images in the extracted frames frames = [(input_directory / f) for f in input_directory.iterdir() if f.is_file] @@ -308,7 +365,13 @@ class Upscaler: process_directory.mkdir(parents=True, exist_ok=True) # waifu2x-converter-cpp will perform multi-threading within its own process - if self.driver in ['waifu2x_converter_cpp', 'waifu2x_ncnn_vulkan', 'srmd_ncnn_vulkan', 'realsr_ncnn_vulkan', 'anime4kcpp']: + if self.driver in [ + "waifu2x_converter_cpp", + "waifu2x_ncnn_vulkan", + "srmd_ncnn_vulkan", + "realsr_ncnn_vulkan", + "anime4kcpp", + ]: process_directories = [input_directory] else: @@ -318,20 +381,26 @@ class Upscaler: # move image image.rename(process_directories[0] / image.name) # rotate list - process_directories = process_directories[-1:] + process_directories[:-1] + process_directories = ( + process_directories[-1:] + process_directories[:-1] + ) # create driver processes and start them for process_directory in process_directories: - self.process_pool.append(self.driver_object.upscale(process_directory, output_directory)) + self.process_pool.append( + self.driver_object.upscale(process_directory, output_directory) + ) # start progress bar in a different thread - Avalon.debug_info(_('Starting progress monitor')) + Avalon.debug_info(_("Starting progress monitor")) self.progress_monitor = ProgressMonitor(self, process_directories) self.progress_monitor.start() # create the clearer and start it - Avalon.debug_info(_('Starting upscaled image cleaner')) - self.image_cleaner = ImageCleaner(input_directory, output_directory, len(self.process_pool)) + Avalon.debug_info(_("Starting upscaled image cleaner")) + self.image_cleaner = ImageCleaner( + input_directory, output_directory, len(self.process_pool) + ) self.image_cleaner.start() # wait for all process to exit @@ -339,38 +408,39 @@ class Upscaler: self._wait() except (Exception, KeyboardInterrupt, SystemExit) as e: # cleanup - Avalon.debug_info(_('Killing progress monitor')) + Avalon.debug_info(_("Killing progress monitor")) self.progress_monitor.stop() - Avalon.debug_info(_('Killing upscaled image cleaner')) + Avalon.debug_info(_("Killing upscaled image cleaner")) self.image_cleaner.stop() raise e # if the driver is waifu2x-converter-cpp # images need to be renamed to be recognizable for FFmpeg - if self.driver == 'waifu2x_converter_cpp': + if self.driver == "waifu2x_converter_cpp": for image in [f for f in output_directory.iterdir() if f.is_file()]: - renamed = re.sub(f'_\\[.*\\]\\[x(\\d+(\\.\\d+)?)\\]\\.{self.extracted_frame_format}', - f'.{self.extracted_frame_format}', - str(image.name)) + renamed = re.sub( + f"_\\[.*\\]\\[x(\\d+(\\.\\d+)?)\\]\\.{self.extracted_frame_format}", + f".{self.extracted_frame_format}", + str(image.name), + ) (output_directory / image).rename(output_directory / renamed) # upscaling done, kill helper threads - Avalon.debug_info(_('Killing progress monitor')) + Avalon.debug_info(_("Killing progress monitor")) self.progress_monitor.stop() - Avalon.debug_info(_('Killing upscaled image cleaner')) + Avalon.debug_info(_("Killing upscaled image cleaner")) self.image_cleaner.stop() def _terminate_subprocesses(self): - Avalon.warning(_('Terminating all processes')) + Avalon.warning(_("Terminating all processes")) for process in 
self.process_pool: process.terminate() def _wait(self): - """ wait for subprocesses in process pool to complete - """ - Avalon.debug_info(_('Main process waiting for subprocesses to exit')) + """wait for subprocesses in process pool to complete""" + Avalon.debug_info(_("Main process waiting for subprocesses to exit")) try: # while process pool not empty @@ -389,27 +459,37 @@ class Upscaler: # if return code is not 0 elif process_status != 0: - Avalon.error(_('Subprocess {} exited with code {}').format(process.pid, process_status)) - raise subprocess.CalledProcessError(process_status, process.args) + Avalon.error( + _("Subprocess {} exited with code {}").format( + process.pid, process_status + ) + ) + raise subprocess.CalledProcessError( + process_status, process.args + ) else: - Avalon.debug_info(_('Subprocess {} exited with code {}').format(process.pid, process_status)) + Avalon.debug_info( + _("Subprocess {} exited with code {}").format( + process.pid, process_status + ) + ) self.process_pool.remove(process) time.sleep(0.1) except (KeyboardInterrupt, SystemExit) as e: - Avalon.warning(_('Stop signal received')) + Avalon.warning(_("Stop signal received")) self._terminate_subprocesses() raise e except (Exception, subprocess.CalledProcessError) as e: - Avalon.error(_('Subprocess execution ran into an error')) + Avalon.error(_("Subprocess execution ran into an error")) self._terminate_subprocesses() raise e def run(self): - """ Main controller for Video2X + """Main controller for Video2X This function controls the flow of video conversion and handles all necessary functions. @@ -422,20 +502,24 @@ class Upscaler: self.process_pool = [] # load driver modules - DriverWrapperMain = getattr(importlib.import_module(f'wrappers.{self.driver}'), 'WrapperMain') + DriverWrapperMain = getattr( + importlib.import_module(f"wrappers.{self.driver}"), "WrapperMain" + ) self.driver_object = DriverWrapperMain(self.driver_settings) # load options from upscaler class into driver settings self.driver_object.load_configurations(self) # initialize FFmpeg object - self.ffmpeg_object = Ffmpeg(self.ffmpeg_settings, extracted_frame_format=self.extracted_frame_format) + self.ffmpeg_object = Ffmpeg( + self.ffmpeg_settings, extracted_frame_format=self.extracted_frame_format + ) # define processing queue self.processing_queue = queue.Queue() - Avalon.info(_('Loading files into processing queue')) - Avalon.debug_info(_('Input path(s): {}').format(self.input)) + Avalon.info(_("Loading files into processing queue")) + Avalon.debug_info(_("Input path(s): {}").format(self.input)) # make output directory if the input is a list or a directory if isinstance(self.input, list) or self.input.is_dir(): @@ -470,39 +554,53 @@ class Upscaler: # get file type # try python-magic if it's available try: - input_file_mime_type = magic.from_file(str(input_path.absolute()), mime=True) - input_file_type = input_file_mime_type.split('/')[0] - input_file_subtype = input_file_mime_type.split('/')[1] + input_file_mime_type = magic.from_file( + str(input_path.absolute()), mime=True + ) + input_file_type = input_file_mime_type.split("/")[0] + input_file_subtype = input_file_mime_type.split("/")[1] except Exception: - input_file_mime_type = input_file_type = input_file_subtype = '' + input_file_mime_type = input_file_type = input_file_subtype = "" # if python-magic doesn't determine the file to be an image/video file # fall back to mimetypes to guess the file type based on the extension - if input_file_type not in ['image', 'video']: + if 
input_file_type not in ["image", "video"]: # in case python-magic fails to detect file type # try guessing file mime type with mimetypes input_file_mime_type = mimetypes.guess_type(input_path.name)[0] - input_file_type = input_file_mime_type.split('/')[0] - input_file_subtype = input_file_mime_type.split('/')[1] + input_file_type = input_file_mime_type.split("/")[0] + input_file_subtype = input_file_mime_type.split("/")[1] - Avalon.debug_info(_('File MIME type: {}').format(input_file_mime_type)) + Avalon.debug_info(_("File MIME type: {}").format(input_file_mime_type)) # set default output file suffixes # if image type is GIF, default output suffix is also .gif - if input_file_mime_type == 'image/gif': - output_path = self.output / self.output_file_name_format_string.format(original_file_name=input_path.stem, extension='.gif') + if input_file_mime_type == "image/gif": + output_path = self.output / self.output_file_name_format_string.format( + original_file_name=input_path.stem, extension=".gif" + ) - elif input_file_type == 'image': - output_path = self.output / self.output_file_name_format_string.format(original_file_name=input_path.stem, extension=self.image_output_extension) + elif input_file_type == "image": + output_path = self.output / self.output_file_name_format_string.format( + original_file_name=input_path.stem, + extension=self.image_output_extension, + ) - elif input_file_type == 'video': - output_path = self.output / self.output_file_name_format_string.format(original_file_name=input_path.stem, extension=self.video_output_extension) + elif input_file_type == "video": + output_path = self.output / self.output_file_name_format_string.format( + original_file_name=input_path.stem, + extension=self.video_output_extension, + ) # if file is none of: image, image/gif, video # skip to the next task else: - Avalon.error(_('File {} ({}) neither an image nor a video').format(input_path, input_file_mime_type)) - Avalon.warning(_('Skipping this file')) + Avalon.error( + _("File {} ({}) neither an image nor a video").format( + input_path, input_file_mime_type + ) + ) + Avalon.warning(_("Skipping this file")) continue # if there is only one input file @@ -512,14 +610,24 @@ class Upscaler: output_path_id = 0 while str(output_path) in output_paths: - output_path = output_path.parent / pathlib.Path(f'{output_path.stem}_{output_path_id}{output_path.suffix}') + output_path = output_path.parent / pathlib.Path( + f"{output_path.stem}_{output_path_id}{output_path.suffix}" + ) output_path_id += 1 # record output path output_paths.append(str(output_path)) # push file information into processing queue - self.processing_queue.put((input_path.absolute(), output_path.absolute(), input_file_mime_type, input_file_type, input_file_subtype)) + self.processing_queue.put( + ( + input_path.absolute(), + output_path.absolute(), + input_file_mime_type, + input_file_type, + input_file_subtype, + ) + ) # check argument sanity before running self._check_arguments() @@ -527,22 +635,28 @@ class Upscaler: # record file count for external calls self.total_files = self.processing_queue.qsize() - Avalon.info(_('Loaded files into processing queue')) + Avalon.info(_("Loaded files into processing queue")) # print all files in queue for debugging for job in self.processing_queue.queue: - Avalon.debug_info(_('Input file: {}').format(job[0].absolute())) + Avalon.debug_info(_("Input file: {}").format(job[0].absolute())) try: while not self.processing_queue.empty(): # get new job from queue - self.current_input_file, output_path, 
input_file_mime_type, input_file_type, input_file_subtype = self.processing_queue.get() + ( + self.current_input_file, + output_path, + input_file_mime_type, + input_file_type, + input_file_subtype, + ) = self.processing_queue.get() # get current job starting time for GUI calculations self.current_processing_starting_time = time.time() # get video information JSON using FFprobe - Avalon.info(_('Reading file information')) + Avalon.info(_("Reading file information")) file_info = self.ffmpeg_object.probe_file_info(self.current_input_file) # create temporary directories for storing frames @@ -550,50 +664,61 @@ class Upscaler: # start handling input # if input file is a static image - if input_file_type == 'image' and input_file_subtype != 'gif': - Avalon.info(_('Starting upscaling image')) + if input_file_type == "image" and input_file_subtype != "gif": + Avalon.info(_("Starting upscaling image")) # copy original file into the pre-processing directory - shutil.copy(self.current_input_file, self.extracted_frames / self.current_input_file.name) + shutil.copy( + self.current_input_file, + self.extracted_frames / self.current_input_file.name, + ) - width = int(file_info['streams'][0]['width']) - height = int(file_info['streams'][0]['height']) + width = int(file_info["streams"][0]["width"]) + height = int(file_info["streams"][0]["height"]) framerate = self.total_frames = 1 # elif input_file_mime_type == 'image/gif' or input_file_type == 'video': else: - Avalon.info(_('Starting upscaling video/GIF')) + Avalon.info(_("Starting upscaling video/GIF")) # find index of video stream video_stream_index = None - for stream in file_info['streams']: - if stream['codec_type'] == 'video': - video_stream_index = stream['index'] + for stream in file_info["streams"]: + if stream["codec_type"] == "video": + video_stream_index = stream["index"] break # exit if no video stream found if video_stream_index is None: - Avalon.error(_('Aborting: No video stream found')) - raise StreamNotFoundError('no video stream found') + Avalon.error(_("Aborting: No video stream found")) + raise StreamNotFoundError("no video stream found") # get average frame rate of video stream - framerate = float(Fraction(file_info['streams'][video_stream_index]['r_frame_rate'])) - width = int(file_info['streams'][video_stream_index]['width']) - height = int(file_info['streams'][video_stream_index]['height']) + framerate = float( + Fraction( + file_info["streams"][video_stream_index]["r_frame_rate"] + ) + ) + width = int(file_info["streams"][video_stream_index]["width"]) + height = int(file_info["streams"][video_stream_index]["height"]) # get total number of frames - Avalon.info(_('Getting total number of frames in the file')) + Avalon.info(_("Getting total number of frames in the file")) # if container stores total number of frames in nb_frames, fetch it directly - if 'nb_frames' in file_info['streams'][video_stream_index]: - self.total_frames = int(file_info['streams'][video_stream_index]['nb_frames']) + if "nb_frames" in file_info["streams"][video_stream_index]: + self.total_frames = int( + file_info["streams"][video_stream_index]["nb_frames"] + ) # otherwise call FFprobe to count the total number of frames else: - self.total_frames = self.ffmpeg_object.get_number_of_frames(self.current_input_file, video_stream_index) + self.total_frames = self.ffmpeg_object.get_number_of_frames( + self.current_input_file, video_stream_index + ) # calculate scale width/height/ratio and scaling jobs if required - Avalon.info(_('Calculating scaling parameters')) + 
Avalon.info(_("Calculating scaling parameters")) # create a local copy of the global output settings output_scale = self.scale_ratio @@ -624,7 +749,9 @@ class Upscaler: if self.driver in DRIVER_FIXED_SCALING_RATIOS: # select the optimal driver scaling ratio to use - supported_scaling_ratios = sorted(DRIVER_FIXED_SCALING_RATIOS[self.driver]) + supported_scaling_ratios = sorted( + DRIVER_FIXED_SCALING_RATIOS[self.driver] + ) remaining_scaling_ratio = math.ceil(output_scale) self.scaling_jobs = [] @@ -654,46 +781,68 @@ class Upscaler: break if found is False: - self.scaling_jobs.append(supported_scaling_ratios[-1]) - remaining_scaling_ratio /= supported_scaling_ratios[-1] + self.scaling_jobs.append( + supported_scaling_ratios[-1] + ) + remaining_scaling_ratio /= supported_scaling_ratios[ + -1 + ] else: self.scaling_jobs = [output_scale] # print file information - Avalon.debug_info(_('Framerate: {}').format(framerate)) - Avalon.debug_info(_('Width: {}').format(width)) - Avalon.debug_info(_('Height: {}').format(height)) - Avalon.debug_info(_('Total number of frames: {}').format(self.total_frames)) - Avalon.debug_info(_('Output width: {}').format(output_width)) - Avalon.debug_info(_('Output height: {}').format(output_height)) - Avalon.debug_info(_('Required scale ratio: {}').format(output_scale)) - Avalon.debug_info(_('Upscaling jobs queue: {}').format(self.scaling_jobs)) + Avalon.debug_info(_("Framerate: {}").format(framerate)) + Avalon.debug_info(_("Width: {}").format(width)) + Avalon.debug_info(_("Height: {}").format(height)) + Avalon.debug_info( + _("Total number of frames: {}").format(self.total_frames) + ) + Avalon.debug_info(_("Output width: {}").format(output_width)) + Avalon.debug_info(_("Output height: {}").format(output_height)) + Avalon.debug_info(_("Required scale ratio: {}").format(output_scale)) + Avalon.debug_info( + _("Upscaling jobs queue: {}").format(self.scaling_jobs) + ) # extract frames from video - if input_file_mime_type == 'image/gif' or input_file_type == 'video': - self.process_pool.append((self.ffmpeg_object.extract_frames(self.current_input_file, self.extracted_frames))) + if input_file_mime_type == "image/gif" or input_file_type == "video": + self.process_pool.append( + ( + self.ffmpeg_object.extract_frames( + self.current_input_file, self.extracted_frames + ) + ) + ) self._wait() # if driver is waifu2x-caffe # pass pixel format output depth information - if self.driver == 'waifu2x_caffe': + if self.driver == "waifu2x_caffe": # get a dict of all pixel formats and corresponding bit depth pixel_formats = self.ffmpeg_object.get_pixel_formats() # try getting pixel format's corresponding bti depth try: - self.driver_settings['output_depth'] = pixel_formats[self.ffmpeg_object.pixel_format] + self.driver_settings["output_depth"] = pixel_formats[ + self.ffmpeg_object.pixel_format + ] except KeyError: - Avalon.error(_('Unsupported pixel format: {}').format(self.ffmpeg_object.pixel_format)) - raise UnsupportedPixelError(f'unsupported pixel format {self.ffmpeg_object.pixel_format}') + Avalon.error( + _("Unsupported pixel format: {}").format( + self.ffmpeg_object.pixel_format + ) + ) + raise UnsupportedPixelError( + f"unsupported pixel format {self.ffmpeg_object.pixel_format}" + ) # upscale images one by one using waifu2x - Avalon.info(_('Starting to upscale extracted frames')) + Avalon.info(_("Starting to upscale extracted frames")) upscale_begin_time = time.time() self.current_pass = 1 - if self.driver == 'waifu2x_caffe': + if self.driver == "waifu2x_caffe": 
self.driver_object.set_scale_resolution(output_width, output_height) else: self.driver_object.set_scale_ratio(self.scaling_jobs[0]) @@ -706,22 +855,39 @@ class Upscaler: self.upscaled_frames.mkdir(parents=True, exist_ok=True) self._upscale_frames(self.extracted_frames, self.upscaled_frames) - Avalon.info(_('Upscaling completed')) - Avalon.info(_('Average processing speed: {} seconds per frame').format(self.total_frames / (time.time() - upscale_begin_time))) + Avalon.info(_("Upscaling completed")) + Avalon.info( + _("Average processing speed: {} seconds per frame").format( + self.total_frames / (time.time() - upscale_begin_time) + ) + ) # downscale frames with Lanczos - Avalon.info(_('Lanczos downscaling frames')) + Avalon.info(_("Lanczos downscaling frames")) shutil.rmtree(self.extracted_frames) shutil.move(self.upscaled_frames, self.extracted_frames) self.upscaled_frames.mkdir(parents=True, exist_ok=True) - for image in tqdm([i for i in self.extracted_frames.iterdir() if i.is_file() and i.name.endswith(self.extracted_frame_format)], ascii=True, desc=_('Downscaling')): + for image in tqdm( + [ + i + for i in self.extracted_frames.iterdir() + if i.is_file() and i.name.endswith(self.extracted_frame_format) + ], + ascii=True, + desc=_("Downscaling"), + ): image_object = Image.open(image) # if the image dimensions are not equal to the output size # resize the image using Lanczos - if (image_object.width, image_object.height) != (output_width, output_height): - image_object.resize((output_width, output_height), Image.LANCZOS).save(self.upscaled_frames / image.name) + if (image_object.width, image_object.height) != ( + output_width, + output_height, + ): + image_object.resize( + (output_width, output_height), Image.LANCZOS + ).save(self.upscaled_frames / image.name) image_object.close() # if the image's dimensions are already equal to the output size @@ -732,71 +898,117 @@ class Upscaler: # start handling output # output can be either GIF or video - if input_file_type == 'image' and input_file_subtype != 'gif': + if input_file_type == "image" and input_file_subtype != "gif": - Avalon.info(_('Exporting image')) + Avalon.info(_("Exporting image")) # there should be only one image in the directory - shutil.move([f for f in self.upscaled_frames.iterdir() if f.is_file()][0], output_path) + shutil.move( + [f for f in self.upscaled_frames.iterdir() if f.is_file()][0], + output_path, + ) # elif input_file_mime_type == 'image/gif' or input_file_type == 'video': else: # if the desired output is gif file - if output_path.suffix.lower() == '.gif': - Avalon.info(_('Converting extracted frames into GIF image')) + if output_path.suffix.lower() == ".gif": + Avalon.info(_("Converting extracted frames into GIF image")) gifski_object = Gifski(self.gifski_settings) - self.process_pool.append(gifski_object.make_gif(self.upscaled_frames, output_path, framerate, self.extracted_frame_format, output_width, output_height)) + self.process_pool.append( + gifski_object.make_gif( + self.upscaled_frames, + output_path, + framerate, + self.extracted_frame_format, + output_width, + output_height, + ) + ) self._wait() - Avalon.info(_('Conversion completed')) + Avalon.info(_("Conversion completed")) # if the desired output is video else: # frames to video - Avalon.info(_('Converting extracted frames into video')) - self.process_pool.append(self.ffmpeg_object.assemble_video(framerate, self.upscaled_frames)) + Avalon.info(_("Converting extracted frames into video")) + self.process_pool.append( + self.ffmpeg_object.assemble_video( + 
framerate, self.upscaled_frames + ) + ) # f'{scale_width}x{scale_height}' self._wait() - Avalon.info(_('Conversion completed')) + Avalon.info(_("Conversion completed")) try: # migrate audio tracks and subtitles - Avalon.info(_('Migrating audio, subtitles and other streams to upscaled video')) - self.process_pool.append(self.ffmpeg_object.migrate_streams(self.current_input_file, - output_path, - self.upscaled_frames)) + Avalon.info( + _( + "Migrating audio, subtitles and other streams to upscaled video" + ) + ) + self.process_pool.append( + self.ffmpeg_object.migrate_streams( + self.current_input_file, + output_path, + self.upscaled_frames, + ) + ) self._wait() # if failed to copy streams # use file with only video stream except subprocess.CalledProcessError: traceback.print_exc() - Avalon.error(_('Failed to migrate streams')) - Avalon.warning(_('Trying to output video without additional streams')) + Avalon.error(_("Failed to migrate streams")) + Avalon.warning( + _("Trying to output video without additional streams") + ) - if input_file_mime_type == 'image/gif': + if input_file_mime_type == "image/gif": # copy will overwrite destination content if exists - shutil.copy(self.upscaled_frames / self.ffmpeg_object.intermediate_file_name, output_path) + shutil.copy( + self.upscaled_frames + / self.ffmpeg_object.intermediate_file_name, + output_path, + ) else: # construct output file path - output_file_name = f'{output_path.stem}{self.ffmpeg_object.intermediate_file_name.suffix}' - output_video_path = output_path.parent / output_file_name + output_file_name = f"{output_path.stem}{self.ffmpeg_object.intermediate_file_name.suffix}" + output_video_path = ( + output_path.parent / output_file_name + ) # if output file already exists # create temporary directory in output folder # temporary directories generated by tempfile are guaranteed to be unique # and won't conflict with other files if output_video_path.exists(): - Avalon.error(_('Output video file exists')) + Avalon.error(_("Output video file exists")) - temporary_directory = pathlib.Path(tempfile.mkdtemp(dir=output_path.parent)) - output_video_path = temporary_directory / output_file_name - Avalon.info(_('Created temporary directory to contain file')) + temporary_directory = pathlib.Path( + tempfile.mkdtemp(dir=output_path.parent) + ) + output_video_path = ( + temporary_directory / output_file_name + ) + Avalon.info( + _("Created temporary directory to contain file") + ) # move file to new destination - Avalon.info(_('Writing intermediate file to: {}').format(output_video_path.absolute())) - shutil.move(self.upscaled_frames / self.ffmpeg_object.intermediate_file_name, output_video_path) + Avalon.info( + _("Writing intermediate file to: {}").format( + output_video_path.absolute() + ) + ) + shutil.move( + self.upscaled_frames + / self.ffmpeg_object.intermediate_file_name, + output_video_path, + ) # increment total number of files processed self.cleanup_temp_directories() diff --git a/src/video2x.py b/src/video2x.py index c2e6ed9..cd1a597 100755 --- a/src/video2x.py +++ b/src/video2x.py @@ -71,75 +71,143 @@ import yaml from avalon_framework import Avalon # internationalization constants -DOMAIN = 'video2x' -LOCALE_DIRECTORY = pathlib.Path(__file__).parent.absolute() / 'locale' +DOMAIN = "video2x" +LOCALE_DIRECTORY = pathlib.Path(__file__).parent.absolute() / "locale" # getting default locale settings default_locale, encoding = locale.getdefaultlocale() -language = gettext.translation(DOMAIN, LOCALE_DIRECTORY, [default_locale], fallback=True) 
+language = gettext.translation( + DOMAIN, LOCALE_DIRECTORY, [default_locale], fallback=True +) language.install() _ = language.gettext -CLI_VERSION = '4.3.1' +CLI_VERSION = "4.3.1" -LEGAL_INFO = _('''Video2X CLI Version: {} +LEGAL_INFO = _( + """Video2X CLI Version: {} Upscaler Version: {} Author: K4YT3X License: GNU GPL v3 Github Page: https://github.com/k4yt3x/video2x -Contact: k4yt3x@k4yt3x.com''').format(CLI_VERSION, UPSCALER_VERSION) +Contact: k4yt3x@k4yt3x.com""" +).format(CLI_VERSION, UPSCALER_VERSION) -LOGO = r''' +LOGO = r""" __ __ _ _ ___ __ __ \ \ / / (_) | | |__ \ \ \ / / \ \ / / _ __| | ___ ___ ) | \ V / \ \/ / | | / _` | / _ \ / _ \ / / > < \ / | | | (_| | | __/ | (_) | / /_ / . \ \/ |_| \__,_| \___| \___/ |____| /_/ \_\ -''' +""" def parse_arguments(): - """ parse CLI arguments - """ - parser = argparse.ArgumentParser(prog='video2x', formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False) + """parse CLI arguments""" + parser = argparse.ArgumentParser( + prog="video2x", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + add_help=False, + ) # video options - video2x_options = parser.add_argument_group(_('Video2X Options')) - video2x_options.add_argument('--help', action='help', help=_('show this help message and exit')) + video2x_options = parser.add_argument_group(_("Video2X Options")) + + video2x_options.add_argument( + "--help", action="help", help=_("show this help message and exit") + ) # if help is in arguments list # do not require input and output path to be specified require_input_output = True - if '-h' in sys.argv or '--help' in sys.argv: + if "-h" in sys.argv or "--help" in sys.argv: require_input_output = False - video2x_options.add_argument('-i', '--input', type=pathlib.Path, help=_('source video file/directory'), required=require_input_output) - video2x_options.add_argument('-o', '--output', type=pathlib.Path, help=_('output video file/directory'), required=require_input_output) - video2x_options.add_argument('-c', '--config', type=pathlib.Path, help=_('Video2X config file path'), action='store', - default=pathlib.Path(__file__).parent.absolute() / 'video2x.yaml') - video2x_options.add_argument('--log', type=pathlib.Path, help=_('log file path')) - video2x_options.add_argument('-v', '--version', help=_('display version, lawful information and exit'), action='store_true') + video2x_options.add_argument( + "-i", + "--input", + type=pathlib.Path, + help=_("source video file/directory"), + required=require_input_output, + ) + + video2x_options.add_argument( + "-o", + "--output", + type=pathlib.Path, + help=_("output video file/directory"), + required=require_input_output, + ) + + video2x_options.add_argument( + "-c", + "--config", + type=pathlib.Path, + help=_("Video2X config file path"), + action="store", + default=pathlib.Path(__file__).parent.absolute() / "video2x.yaml", + ) + + video2x_options.add_argument("--log", type=pathlib.Path, help=_("log file path")) + + video2x_options.add_argument( + "-v", + "--version", + help=_("display version, lawful information and exit"), + action="store_true", + ) # scaling options - upscaling_options = parser.add_argument_group(_('Upscaling Options')) - upscaling_options.add_argument('-r', '--ratio', help=_('scaling ratio'), action='store', type=float) - upscaling_options.add_argument('-w', '--width', help=_('output width'), action='store', type=float) - upscaling_options.add_argument('-h', '--height', help=_('output height'), action='store', type=float) - upscaling_options.add_argument('-d', 
'--driver', help=_('upscaling driver'), choices=AVAILABLE_DRIVERS, default='waifu2x_ncnn_vulkan') - upscaling_options.add_argument('-p', '--processes', help=_('number of processes to use for upscaling'), action='store', type=int, default=1) - upscaling_options.add_argument('--preserve_frames', help=_('preserve extracted and upscaled frames'), action='store_true') + upscaling_options = parser.add_argument_group(_("Upscaling Options")) + + upscaling_options.add_argument( + "-r", "--ratio", help=_("scaling ratio"), action="store", type=float + ) + + upscaling_options.add_argument( + "-w", "--width", help=_("output width"), action="store", type=float + ) + + upscaling_options.add_argument( + "-h", "--height", help=_("output height"), action="store", type=float + ) + + upscaling_options.add_argument( + "-d", + "--driver", + help=_("upscaling driver"), + choices=AVAILABLE_DRIVERS, + default="waifu2x_ncnn_vulkan", + ) + + upscaling_options.add_argument( + "-p", + "--processes", + help=_("number of processes to use for upscaling"), + action="store", + type=int, + default=1, + ) + + upscaling_options.add_argument( + "--preserve_frames", + help=_("preserve extracted and upscaled frames"), + action="store_true", + ) # if no driver arguments are specified - if '--' not in sys.argv: + if "--" not in sys.argv: video2x_args = parser.parse_args() return video2x_args, None # if driver arguments are specified else: - video2x_args = parser.parse_args(sys.argv[1:sys.argv.index('--')]) - wrapper = getattr(importlib.import_module(f'wrappers.{video2x_args.driver}'), 'WrapperMain') - driver_args = wrapper.parse_arguments(sys.argv[sys.argv.index('--') + 1:]) + video2x_args = parser.parse_args(sys.argv[1 : sys.argv.index("--")]) + wrapper = getattr( + importlib.import_module(f"wrappers.{video2x_args.driver}"), "WrapperMain" + ) + driver_args = wrapper.parse_arguments(sys.argv[sys.argv.index("--") + 1 :]) return video2x_args, driver_args @@ -151,7 +219,7 @@ def print_logo(): def read_config(config_file: pathlib.Path) -> dict: - """ read video2x configurations from config file + """read video2x configurations from config file Arguments: config_file {pathlib.Path} -- video2x configuration file pathlib.Path @@ -160,16 +228,16 @@ def read_config(config_file: pathlib.Path) -> dict: dict -- dictionary of video2x configuration """ - with open(config_file, 'r') as config: + with open(config_file, "r") as config: return yaml.load(config, Loader=yaml.FullLoader) # /////////////////// Execution /////////////////// # # this is not a library -if __name__ != '__main__': - Avalon.error(_('This file cannot be imported')) - raise ImportError(f'{__file__} cannot be imported') +if __name__ != "__main__": + Avalon.error(_("This file cannot be imported")) + raise ImportError(f"{__file__} cannot be imported") # print video2x logo print_logo() @@ -183,15 +251,19 @@ if video2x_args.version: sys.exit(0) # additional checks on upscaling arguments -if video2x_args.ratio is not None and (video2x_args.width is not None or video2x_args.height is not None): - Avalon.error(_('Specify either scaling ratio or scaling resolution, not both')) +if video2x_args.ratio is not None and ( + video2x_args.width is not None or video2x_args.height is not None +): + Avalon.error(_("Specify either scaling ratio or scaling resolution, not both")) sys.exit(1) # redirect output to both terminal and log file if video2x_args.log is not None: - log_file = video2x_args.log.open(mode='a+', encoding='utf-8') + log_file = video2x_args.log.open(mode="a+", encoding="utf-8") 
else: - log_file = tempfile.TemporaryFile(mode='a+', suffix='.log', prefix='video2x_', encoding='utf-8') + log_file = tempfile.TemporaryFile( + mode="a+", suffix=".log", prefix="video2x_", encoding="utf-8" + ) original_stdout = sys.stdout original_stderr = sys.stderr @@ -203,22 +275,22 @@ config = read_config(video2x_args.config) # load waifu2x configuration driver_settings = config[video2x_args.driver] -driver_settings['path'] = os.path.expandvars(driver_settings['path']) +driver_settings["path"] = os.path.expandvars(driver_settings["path"]) # read FFmpeg configuration -ffmpeg_settings = config['ffmpeg'] -ffmpeg_settings['ffmpeg_path'] = os.path.expandvars(ffmpeg_settings['ffmpeg_path']) +ffmpeg_settings = config["ffmpeg"] +ffmpeg_settings["ffmpeg_path"] = os.path.expandvars(ffmpeg_settings["ffmpeg_path"]) # read Gifski configuration -gifski_settings = config['gifski'] -gifski_settings['gifski_path'] = os.path.expandvars(gifski_settings['gifski_path']) +gifski_settings = config["gifski"] +gifski_settings["gifski_path"] = os.path.expandvars(gifski_settings["gifski_path"]) # load video2x settings -extracted_frame_format = config['video2x']['extracted_frame_format'].lower() -output_file_name_format_string = config['video2x']['output_file_name_format_string'] -image_output_extension = config['video2x']['image_output_extension'] -video_output_extension = config['video2x']['video_output_extension'] -preserve_frames = config['video2x']['preserve_frames'] +extracted_frame_format = config["video2x"]["extracted_frame_format"].lower() +output_file_name_format_string = config["video2x"]["output_file_name_format_string"] +image_output_extension = config["video2x"]["image_output_extension"] +video_output_extension = config["video2x"]["video_output_extension"] +preserve_frames = config["video2x"]["preserve_frames"] # if preserve frames specified in command line # overwrite config file options @@ -227,10 +299,10 @@ if video2x_args.preserve_frames is True: # if cache directory not specified # use default path: %TEMP%\video2x -if config['video2x']['video2x_cache_directory'] is None: - video2x_cache_directory = (pathlib.Path(tempfile.gettempdir()) / 'video2x') +if config["video2x"]["video2x_cache_directory"] is None: + video2x_cache_directory = pathlib.Path(tempfile.gettempdir()) / "video2x" else: - video2x_cache_directory = pathlib.Path(config['video2x']['video2x_cache_directory']) + video2x_cache_directory = pathlib.Path(config["video2x"]["video2x_cache_directory"]) # overwrite driver_settings with driver_args if driver_args is not None: @@ -252,7 +324,6 @@ try: driver_settings=driver_settings, ffmpeg_settings=ffmpeg_settings, gifski_settings=gifski_settings, - # optional parameters driver=video2x_args.driver, scale_ratio=video2x_args.ratio, @@ -264,17 +335,21 @@ try: output_file_name_format_string=output_file_name_format_string, image_output_extension=image_output_extension, video_output_extension=video_output_extension, - preserve_frames=preserve_frames + preserve_frames=preserve_frames, ) # run upscaler upscaler.run() - Avalon.info(_('Program completed, taking {} seconds').format(round((time.time() - begin_time), 5))) + Avalon.info( + _("Program completed, taking {} seconds").format( + round((time.time() - begin_time), 5) + ) + ) except Exception: - Avalon.error(_('An exception has occurred')) + Avalon.error(_("An exception has occurred")) traceback.print_exc() if video2x_args.log is not None: @@ -284,12 +359,12 @@ except Exception: # tempfile.TempFile does not have a name attribute and is not 
guaranteed to have # a visible name on the file system else: - log_file_path = tempfile.mkstemp(suffix='.log', prefix='video2x_')[1] - with open(log_file_path, 'w', encoding='utf-8') as permanent_log_file: + log_file_path = tempfile.mkstemp(suffix=".log", prefix="video2x_")[1] + with open(log_file_path, "w", encoding="utf-8") as permanent_log_file: log_file.seek(0) permanent_log_file.write(log_file.read()) - Avalon.error(_('The error log file can be found at: {}').format(log_file_path)) + Avalon.error(_("The error log file can be found at: {}").format(log_file_path)) finally: sys.stdout = original_stdout diff --git a/src/video2x_gui.py b/src/video2x_gui.py index 3c570d6..d97fa7c 100755 --- a/src/video2x_gui.py +++ b/src/video2x_gui.py @@ -33,22 +33,22 @@ from PyQt5.QtGui import * from PyQt5.QtWidgets import * import magic -GUI_VERSION = '2.8.1' +GUI_VERSION = "2.8.1" -LEGAL_INFO = f'''Video2X GUI Version: {GUI_VERSION}\\ +LEGAL_INFO = f"""Video2X GUI Version: {GUI_VERSION}\\ Upscaler Version: {UPSCALER_VERSION}\\ Author: K4YT3X\\ License: GNU GPL v3\\ Github Page: [https://github.com/k4yt3x/video2x](https://github.com/k4yt3x/video2x)\\ -Contact: [k4yt3x@k4yt3x.com](mailto:k4yt3x@k4yt3x.com)''' +Contact: [k4yt3x@k4yt3x.com](mailto:k4yt3x@k4yt3x.com)""" AVAILABLE_DRIVERS = { - 'Waifu2X Caffe': 'waifu2x_caffe', - 'Waifu2X Converter CPP': 'waifu2x_converter_cpp', - 'Waifu2X NCNN Vulkan': 'waifu2x_ncnn_vulkan', - 'SRMD NCNN Vulkan': 'srmd_ncnn_vulkan', - 'RealSR NCNN Vulkan': 'realsr_ncnn_vulkan', - 'Anime4KCPP': 'anime4kcpp' + "Waifu2X Caffe": "waifu2x_caffe", + "Waifu2X Converter CPP": "waifu2x_converter_cpp", + "Waifu2X NCNN Vulkan": "waifu2x_ncnn_vulkan", + "SRMD NCNN Vulkan": "srmd_ncnn_vulkan", + "RealSR NCNN Vulkan": "realsr_ncnn_vulkan", + "Anime4KCPP": "anime4kcpp", } # get current working directory before it is changed by drivers @@ -77,7 +77,7 @@ class ProgressMonitorWorkder(QRunnable): self.args = args self.kwargs = kwargs self.signals = WorkerSignals() - self.kwargs['progress_callback'] = self.signals.progress + self.kwargs["progress_callback"] = self.signals.progress @pyqtSlot() def run(self): @@ -88,7 +88,6 @@ class ProgressMonitorWorkder(QRunnable): class UpscalerWorker(QRunnable): - def __init__(self, fn, *args, **kwargs): super(UpscalerWorker, self).__init__() @@ -130,38 +129,40 @@ class InputTableModel(QAbstractTableModel): # determine file type # if path is a folder if file_path.is_dir(): - return 'Folder' + return "Folder" # if path is single file # determine file type elif file_path.is_file(): try: - input_file_mime_type = magic.from_file(str(file_path.absolute()), mime=True) - input_file_type = input_file_mime_type.split('/')[0] - input_file_subtype = input_file_mime_type.split('/')[1] + input_file_mime_type = magic.from_file( + str(file_path.absolute()), mime=True + ) + input_file_type = input_file_mime_type.split("/")[0] + input_file_subtype = input_file_mime_type.split("/")[1] except Exception: input_file_type = input_file_subtype = None # in case python-magic fails to detect file type # try guessing file mime type with mimetypes - if input_file_type not in ['image', 'video']: + if input_file_type not in ["image", "video"]: input_file_mime_type = mimetypes.guess_type(file_path.name)[0] - input_file_type = input_file_mime_type.split('/')[0] - input_file_subtype = input_file_mime_type.split('/')[1] + input_file_type = input_file_mime_type.split("/")[0] + input_file_subtype = input_file_mime_type.split("/")[1] - if input_file_type == 'image': - if input_file_subtype == 
'gif': - return 'GIF' - return 'Image' + if input_file_type == "image": + if input_file_subtype == "gif": + return "GIF" + return "Image" - elif input_file_type == 'video': - return 'Video' + elif input_file_type == "video": + return "Video" else: - return 'Unknown' + return "Unknown" else: - return 'Unknown' + return "Unknown" def rowCount(self, index): return len(self._data) @@ -177,7 +178,7 @@ class InputTableModel(QAbstractTableModel): if role != Qt.DisplayRole: return None - horizontal_headers = ['File Path', 'Type'] + horizontal_headers = ["File Path", "Type"] # return the correspondign header if orientation == Qt.Horizontal: @@ -189,13 +190,14 @@ class InputTableModel(QAbstractTableModel): class Video2XMainWindow(QMainWindow): - def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - uic.loadUi(str(resource_path('video2x_gui.ui')), self) + uic.loadUi(str(resource_path("video2x_gui.ui")), self) # redirect output to both terminal and log file - self.log_file = tempfile.TemporaryFile(mode='a+', suffix='.log', prefix='video2x_', encoding='utf-8') + self.log_file = tempfile.TemporaryFile( + mode="a+", suffix=".log", prefix="video2x_", encoding="utf-8" + ) sys.stdout = BiLogger(sys.stdout, self.log_file) sys.stderr = BiLogger(sys.stderr, self.log_file) @@ -203,8 +205,8 @@ class Video2XMainWindow(QMainWindow): self.threadpool = QThreadPool() # set window title and icon - self.video2x_icon_path = str(resource_path('images/video2x.png')) - self.setWindowTitle(f'Video2X GUI {GUI_VERSION}') + self.video2x_icon_path = str(resource_path("images/video2x.png")) + self.setWindowTitle(f"Video2X GUI {GUI_VERSION}") self.setWindowIcon(QIcon(self.video2x_icon_path)) # register shortcut keys @@ -212,22 +214,26 @@ class Video2XMainWindow(QMainWindow): QShortcut(QKeySequence(Qt.CTRL + Qt.Key_Q), self, self.close) QShortcut(QKeySequence(Qt.CTRL + Qt.Key_I), self, self.select_input_file) QShortcut(QKeySequence(Qt.CTRL + Qt.Key_O), self, self.select_output_file) - QShortcut(QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_I), self, self.select_input_folder) - QShortcut(QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_O), self, self.select_output_folder) + QShortcut( + QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_I), self, self.select_input_folder + ) + QShortcut( + QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_O), self, self.select_output_folder + ) # menu bar - self.action_exit = self.findChild(QAction, 'actionExit') + self.action_exit = self.findChild(QAction, "actionExit") self.action_exit.triggered.connect(self.close) - self.action_shortcuts = self.findChild(QAction, 'actionShortcuts') + self.action_shortcuts = self.findChild(QAction, "actionShortcuts") self.action_shortcuts.triggered.connect(self.show_shortcuts) - self.action_about = self.findChild(QAction, 'actionAbout') + self.action_about = self.findChild(QAction, "actionAbout") self.action_about.triggered.connect(self.show_about) # main tab # select input file/folder - self.input_table_view = self.findChild(QTableView, 'inputTableView') + self.input_table_view = self.findChild(QTableView, "inputTableView") self.input_table_view.dragEnterEvent = self.dragEnterEvent self.input_table_view.dropEvent = self.dropEvent @@ -236,223 +242,502 @@ class Video2XMainWindow(QMainWindow): self.input_table_model = InputTableModel(self.input_table_data) self.input_table_view.setModel(self.input_table_model) # stretch file path and fill columns horizontally - self.input_table_view.horizontalHeader().setSectionResizeMode(0, QHeaderView.Stretch) + 
self.input_table_view.horizontalHeader().setSectionResizeMode( + 0, QHeaderView.Stretch + ) # input table buttons - self.input_select_file_button = self.findChild(QPushButton, 'inputSelectFileButton') + self.input_select_file_button = self.findChild( + QPushButton, "inputSelectFileButton" + ) self.input_select_file_button.clicked.connect(self.select_input_file) - self.input_select_folder_button = self.findChild(QPushButton, 'inputSelectFolderButton') + self.input_select_folder_button = self.findChild( + QPushButton, "inputSelectFolderButton" + ) self.input_select_folder_button.clicked.connect(self.select_input_folder) - self.input_delete_selected_button = self.findChild(QPushButton, 'inputDeleteSelectedButton') - self.input_delete_selected_button.clicked.connect(self.input_table_delete_selected) - self.input_clear_all_button = self.findChild(QPushButton, 'inputClearAllButton') + self.input_delete_selected_button = self.findChild( + QPushButton, "inputDeleteSelectedButton" + ) + self.input_delete_selected_button.clicked.connect( + self.input_table_delete_selected + ) + self.input_clear_all_button = self.findChild(QPushButton, "inputClearAllButton") self.input_clear_all_button.clicked.connect(self.input_table_clear_all) # other paths selection # select output file/folder - self.output_line_edit = self.findChild(QLineEdit, 'outputLineEdit') + self.output_line_edit = self.findChild(QLineEdit, "outputLineEdit") self.enable_line_edit_file_drop(self.output_line_edit) - self.output_line_edit.setText(str((CWD / 'output').absolute())) - self.output_select_file_button = self.findChild(QPushButton, 'outputSelectFileButton') + self.output_line_edit.setText(str((CWD / "output").absolute())) + self.output_select_file_button = self.findChild( + QPushButton, "outputSelectFileButton" + ) self.output_select_file_button.clicked.connect(self.select_output_file) - self.output_select_folder_button = self.findChild(QPushButton, 'outputSelectFolderButton') + self.output_select_folder_button = self.findChild( + QPushButton, "outputSelectFolderButton" + ) self.output_select_folder_button.clicked.connect(self.select_output_folder) # config file - self.config_line_edit = self.findChild(QLineEdit, 'configLineEdit') + self.config_line_edit = self.findChild(QLineEdit, "configLineEdit") self.enable_line_edit_file_drop(self.config_line_edit) - if getattr(sys, 'frozen', False): - self.config_line_edit.setText(str((pathlib.Path(sys.executable).parent / 'video2x.yaml').absolute())) + if getattr(sys, "frozen", False): + self.config_line_edit.setText( + str((pathlib.Path(sys.executable).parent / "video2x.yaml").absolute()) + ) elif __file__: - self.config_line_edit.setText(str((pathlib.Path(__file__).parent / 'video2x.yaml').absolute())) + self.config_line_edit.setText( + str((pathlib.Path(__file__).parent / "video2x.yaml").absolute()) + ) - self.config_select_file_button = self.findChild(QPushButton, 'configSelectButton') + self.config_select_file_button = self.findChild( + QPushButton, "configSelectButton" + ) self.config_select_file_button.clicked.connect(self.select_config_file) # cache directory - self.cache_line_edit = self.findChild(QLineEdit, 'cacheLineEdit') + self.cache_line_edit = self.findChild(QLineEdit, "cacheLineEdit") self.enable_line_edit_file_drop(self.cache_line_edit) - self.cache_select_folder_button = self.findChild(QPushButton, 'cacheSelectFolderButton') + self.cache_select_folder_button = self.findChild( + QPushButton, "cacheSelectFolderButton" + ) 
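The hunks above bind widgets that uic.loadUi() instantiated from video2x_gui.ui by looking them up with QObject.findChild(type, objectName) and then connecting their signals. A minimal, self-contained sketch of that same pattern follows; the example.ui file name and the widget object names in it are illustrative assumptions, not values taken from this repository.

import sys

from PyQt5 import uic
from PyQt5.QtWidgets import QApplication, QLineEdit, QMainWindow, QPushButton


class ExampleWindow(QMainWindow):
    def __init__(self):
        super().__init__()
        # instantiate the widgets declared in the Qt Designer file
        uic.loadUi("example.ui", self)
        # look widgets up by the objectName assigned in Qt Designer
        self.path_line_edit = self.findChild(QLineEdit, "pathLineEdit")
        self.browse_button = self.findChild(QPushButton, "browseButton")
        # wire a signal to a handler, as the GUI does for its buttons
        self.browse_button.clicked.connect(self.on_browse)

    def on_browse(self):
        self.path_line_edit.setText("selected/path")


if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = ExampleWindow()
    window.show()
    sys.exit(app.exec_())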
self.cache_select_folder_button.clicked.connect(self.select_cache_folder) # express settings - self.driver_combo_box = self.findChild(QComboBox, 'driverComboBox') + self.driver_combo_box = self.findChild(QComboBox, "driverComboBox") self.driver_combo_box.currentTextChanged.connect(self.update_gui_for_driver) - self.processes_spin_box = self.findChild(QSpinBox, 'processesSpinBox') - self.scale_ratio_double_spin_box = self.findChild(QDoubleSpinBox, 'scaleRatioDoubleSpinBox') - self.output_width_spin_box = self.findChild(QSpinBox, 'outputWidthSpinBox') - self.output_width_spin_box.valueChanged.connect(self.mutually_exclude_scale_ratio_resolution) - self.output_height_spin_box = self.findChild(QSpinBox, 'outputHeightSpinBox') - self.output_height_spin_box.valueChanged.connect(self.mutually_exclude_scale_ratio_resolution) - self.output_file_name_format_string_line_edit = self.findChild(QLineEdit, 'outputFileNameFormatStringLineEdit') - self.image_output_extension_line_edit = self.findChild(QLineEdit, 'imageOutputExtensionLineEdit') - self.video_output_extension_line_edit = self.findChild(QLineEdit, 'videoOutputExtensionLineEdit') - self.preserve_frames_check_box = self.findChild(QCheckBox, 'preserveFramesCheckBox') + self.processes_spin_box = self.findChild(QSpinBox, "processesSpinBox") + self.scale_ratio_double_spin_box = self.findChild( + QDoubleSpinBox, "scaleRatioDoubleSpinBox" + ) + self.output_width_spin_box = self.findChild(QSpinBox, "outputWidthSpinBox") + self.output_width_spin_box.valueChanged.connect( + self.mutually_exclude_scale_ratio_resolution + ) + self.output_height_spin_box = self.findChild(QSpinBox, "outputHeightSpinBox") + self.output_height_spin_box.valueChanged.connect( + self.mutually_exclude_scale_ratio_resolution + ) + self.output_file_name_format_string_line_edit = self.findChild( + QLineEdit, "outputFileNameFormatStringLineEdit" + ) + self.image_output_extension_line_edit = self.findChild( + QLineEdit, "imageOutputExtensionLineEdit" + ) + self.video_output_extension_line_edit = self.findChild( + QLineEdit, "videoOutputExtensionLineEdit" + ) + self.preserve_frames_check_box = self.findChild( + QCheckBox, "preserveFramesCheckBox" + ) # frame preview - self.frame_preview_show_preview_check_box = self.findChild(QCheckBox, 'framePreviewShowPreviewCheckBox') - self.frame_preview_keep_aspect_ratio_check_box = self.findChild(QCheckBox, 'framePreviewKeepAspectRatioCheckBox') - self.frame_preview_label = self.findChild(QLabel, 'framePreviewLabel') + self.frame_preview_show_preview_check_box = self.findChild( + QCheckBox, "framePreviewShowPreviewCheckBox" + ) + self.frame_preview_keep_aspect_ratio_check_box = self.findChild( + QCheckBox, "framePreviewKeepAspectRatioCheckBox" + ) + self.frame_preview_label = self.findChild(QLabel, "framePreviewLabel") # currently processing - self.currently_processing_label = self.findChild(QLabel, 'currentlyProcessingLabel') - self.current_progress_bar = self.findChild(QProgressBar, 'currentProgressBar') - self.time_elapsed_label = self.findChild(QLabel, 'timeElapsedLabel') - self.time_remaining_label = self.findChild(QLabel, 'timeRemainingLabel') - self.rate_label = self.findChild(QLabel, 'rateLabel') - self.frames_label = self.findChild(QLabel, 'framesLabel') + self.currently_processing_label = self.findChild( + QLabel, "currentlyProcessingLabel" + ) + self.current_progress_bar = self.findChild(QProgressBar, "currentProgressBar") + self.time_elapsed_label = self.findChild(QLabel, "timeElapsedLabel") + self.time_remaining_label = 
self.findChild(QLabel, "timeRemainingLabel") + self.rate_label = self.findChild(QLabel, "rateLabel") + self.frames_label = self.findChild(QLabel, "framesLabel") # overall progress - self.overall_progress_bar = self.findChild(QProgressBar, 'overallProgressBar') - self.overall_progress_label = self.findChild(QLabel, 'overallProgressLabel') - self.start_button = self.findChild(QPushButton, 'startButton') + self.overall_progress_bar = self.findChild(QProgressBar, "overallProgressBar") + self.overall_progress_label = self.findChild(QLabel, "overallProgressLabel") + self.start_button = self.findChild(QPushButton, "startButton") self.start_button.clicked.connect(self.start) - self.stop_button = self.findChild(QPushButton, 'stopButton') + self.stop_button = self.findChild(QPushButton, "stopButton") self.stop_button.clicked.connect(self.stop) # driver settings # waifu2x-caffe - self.waifu2x_caffe_path_line_edit = self.findChild(QLineEdit, 'waifu2xCaffePathLineEdit') + self.waifu2x_caffe_path_line_edit = self.findChild( + QLineEdit, "waifu2xCaffePathLineEdit" + ) self.enable_line_edit_file_drop(self.waifu2x_caffe_path_line_edit) - self.waifu2x_caffe_path_select_button = self.findChild(QPushButton, 'waifu2xCaffePathSelectButton') - self.waifu2x_caffe_path_select_button.clicked.connect(lambda: self.select_driver_binary_path(self.waifu2x_caffe_path_line_edit)) - self.waifu2x_caffe_mode_combo_box = self.findChild(QComboBox, 'waifu2xCaffeModeComboBox') - self.waifu2x_caffe_noise_level_spin_box = self.findChild(QSpinBox, 'waifu2xCaffeNoiseLevelSpinBox') - self.waifu2x_caffe_process_combo_box = self.findChild(QComboBox, 'waifu2xCaffeProcessComboBox') - self.waifu2x_caffe_model_combobox = self.findChild(QComboBox, 'waifu2xCaffeModelComboBox') - self.waifu2x_caffe_crop_size_spin_box = self.findChild(QSpinBox, 'waifu2xCaffeCropSizeSpinBox') - self.waifu2x_caffe_output_quality_spin_box = self.findChild(QSpinBox, 'waifu2xCaffeOutputQualitySpinBox') - self.waifu2x_caffe_output_depth_spin_box = self.findChild(QSpinBox, 'waifu2xCaffeOutputDepthSpinBox') - self.waifu2x_caffe_batch_size_spin_box = self.findChild(QSpinBox, 'waifu2xCaffeBatchSizeSpinBox') - self.waifu2x_caffe_gpu_spin_box = self.findChild(QSpinBox, 'waifu2xCaffeGpuSpinBox') - self.waifu2x_caffe_tta_check_box = self.findChild(QCheckBox, 'waifu2xCaffeTtaCheckBox') + self.waifu2x_caffe_path_select_button = self.findChild( + QPushButton, "waifu2xCaffePathSelectButton" + ) + self.waifu2x_caffe_path_select_button.clicked.connect( + lambda: self.select_driver_binary_path(self.waifu2x_caffe_path_line_edit) + ) + self.waifu2x_caffe_mode_combo_box = self.findChild( + QComboBox, "waifu2xCaffeModeComboBox" + ) + self.waifu2x_caffe_noise_level_spin_box = self.findChild( + QSpinBox, "waifu2xCaffeNoiseLevelSpinBox" + ) + self.waifu2x_caffe_process_combo_box = self.findChild( + QComboBox, "waifu2xCaffeProcessComboBox" + ) + self.waifu2x_caffe_model_combobox = self.findChild( + QComboBox, "waifu2xCaffeModelComboBox" + ) + self.waifu2x_caffe_crop_size_spin_box = self.findChild( + QSpinBox, "waifu2xCaffeCropSizeSpinBox" + ) + self.waifu2x_caffe_output_quality_spin_box = self.findChild( + QSpinBox, "waifu2xCaffeOutputQualitySpinBox" + ) + self.waifu2x_caffe_output_depth_spin_box = self.findChild( + QSpinBox, "waifu2xCaffeOutputDepthSpinBox" + ) + self.waifu2x_caffe_batch_size_spin_box = self.findChild( + QSpinBox, "waifu2xCaffeBatchSizeSpinBox" + ) + self.waifu2x_caffe_gpu_spin_box = self.findChild( + QSpinBox, "waifu2xCaffeGpuSpinBox" + ) + 
self.waifu2x_caffe_tta_check_box = self.findChild( + QCheckBox, "waifu2xCaffeTtaCheckBox" + ) # waifu2x-converter-cpp - self.waifu2x_converter_cpp_path_line_edit = self.findChild(QLineEdit, 'waifu2xConverterCppPathLineEdit') + self.waifu2x_converter_cpp_path_line_edit = self.findChild( + QLineEdit, "waifu2xConverterCppPathLineEdit" + ) self.enable_line_edit_file_drop(self.waifu2x_converter_cpp_path_line_edit) - self.waifu2x_converter_cpp_path_edit_button = self.findChild(QPushButton, 'waifu2xConverterCppPathSelectButton') - self.waifu2x_converter_cpp_path_edit_button.clicked.connect(lambda: self.select_driver_binary_path(self.waifu2x_converter_cpp_path_line_edit)) - self.waifu2x_converter_cpp_png_compression_spin_box = self.findChild(QSpinBox, 'waifu2xConverterCppPngCompressionSpinBox') - self.waifu2x_converter_cpp_image_quality_spin_box = self.findChild(QSpinBox, 'waifu2xConverterCppImageQualitySpinBox') - self.waifu2x_converter_cpp_block_size_spin_box = self.findChild(QSpinBox, 'waifu2xConverterCppBlockSizeSpinBox') - self.waifu2x_converter_cpp_processor_spin_box = self.findChild(QSpinBox, 'waifu2xConverterCppProcessorSpinBox') - self.waifu2x_converter_cpp_model_combo_box = self.findChild(QComboBox, 'waifu2xConverterCppModelComboBox') - self.waifu2x_converter_cpp_noise_level_spin_box = self.findChild(QSpinBox, 'waifu2xConverterCppNoiseLevelSpinBox') - self.waifu2x_converter_cpp_mode_combo_box = self.findChild(QComboBox, 'waifu2xConverterCppModeComboBox') - self.waifu2x_converter_cpp_log_level_spin_box = self.findChild(QSpinBox, 'waifu2xConverterCppLogLevelSpinBox') - self.waifu2x_converter_cpp_disable_gpu_check_box = self.findChild(QCheckBox, 'waifu2xConverterCppDisableGpuCheckBox') - self.waifu2x_converter_cpp_force_opencl_check_box = self.findChild(QCheckBox, 'waifu2xConverterCppForceOpenclCheckBox') - self.waifu2x_converter_cpp_tta_check_box = self.findChild(QCheckBox, 'waifu2xConverterCppTtaCheckBox') + self.waifu2x_converter_cpp_path_edit_button = self.findChild( + QPushButton, "waifu2xConverterCppPathSelectButton" + ) + self.waifu2x_converter_cpp_path_edit_button.clicked.connect( + lambda: self.select_driver_binary_path( + self.waifu2x_converter_cpp_path_line_edit + ) + ) + self.waifu2x_converter_cpp_png_compression_spin_box = self.findChild( + QSpinBox, "waifu2xConverterCppPngCompressionSpinBox" + ) + self.waifu2x_converter_cpp_image_quality_spin_box = self.findChild( + QSpinBox, "waifu2xConverterCppImageQualitySpinBox" + ) + self.waifu2x_converter_cpp_block_size_spin_box = self.findChild( + QSpinBox, "waifu2xConverterCppBlockSizeSpinBox" + ) + self.waifu2x_converter_cpp_processor_spin_box = self.findChild( + QSpinBox, "waifu2xConverterCppProcessorSpinBox" + ) + self.waifu2x_converter_cpp_model_combo_box = self.findChild( + QComboBox, "waifu2xConverterCppModelComboBox" + ) + self.waifu2x_converter_cpp_noise_level_spin_box = self.findChild( + QSpinBox, "waifu2xConverterCppNoiseLevelSpinBox" + ) + self.waifu2x_converter_cpp_mode_combo_box = self.findChild( + QComboBox, "waifu2xConverterCppModeComboBox" + ) + self.waifu2x_converter_cpp_log_level_spin_box = self.findChild( + QSpinBox, "waifu2xConverterCppLogLevelSpinBox" + ) + self.waifu2x_converter_cpp_disable_gpu_check_box = self.findChild( + QCheckBox, "waifu2xConverterCppDisableGpuCheckBox" + ) + self.waifu2x_converter_cpp_force_opencl_check_box = self.findChild( + QCheckBox, "waifu2xConverterCppForceOpenclCheckBox" + ) + self.waifu2x_converter_cpp_tta_check_box = self.findChild( + QCheckBox, "waifu2xConverterCppTtaCheckBox" + ) # 
waifu2x-ncnn-vulkan - self.waifu2x_ncnn_vulkan_path_line_edit = self.findChild(QLineEdit, 'waifu2xNcnnVulkanPathLineEdit') + self.waifu2x_ncnn_vulkan_path_line_edit = self.findChild( + QLineEdit, "waifu2xNcnnVulkanPathLineEdit" + ) self.enable_line_edit_file_drop(self.waifu2x_ncnn_vulkan_path_line_edit) - self.waifu2x_ncnn_vulkan_path_select_button = self.findChild(QPushButton, 'waifu2xNcnnVulkanPathSelectButton') - self.waifu2x_ncnn_vulkan_path_select_button.clicked.connect(lambda: self.select_driver_binary_path(self.waifu2x_ncnn_vulkan_path_line_edit)) - self.waifu2x_ncnn_vulkan_noise_level_spin_box = self.findChild(QSpinBox, 'waifu2xNcnnVulkanNoiseLevelSpinBox') - self.waifu2x_ncnn_vulkan_tile_size_spin_box = self.findChild(QSpinBox, 'waifu2xNcnnVulkanTileSizeSpinBox') - self.waifu2x_ncnn_vulkan_model_combo_box = self.findChild(QComboBox, 'waifu2xNcnnVulkanModelComboBox') - self.waifu2x_ncnn_vulkan_gpu_id_spin_box = self.findChild(QSpinBox, 'waifu2xNcnnVulkanGpuIdSpinBox') - self.waifu2x_ncnn_vulkan_jobs_line_edit = self.findChild(QLineEdit, 'waifu2xNcnnVulkanJobsLineEdit') - self.waifu2x_ncnn_vulkan_tta_check_box = self.findChild(QCheckBox, 'waifu2xNcnnVulkanTtaCheckBox') + self.waifu2x_ncnn_vulkan_path_select_button = self.findChild( + QPushButton, "waifu2xNcnnVulkanPathSelectButton" + ) + self.waifu2x_ncnn_vulkan_path_select_button.clicked.connect( + lambda: self.select_driver_binary_path( + self.waifu2x_ncnn_vulkan_path_line_edit + ) + ) + self.waifu2x_ncnn_vulkan_noise_level_spin_box = self.findChild( + QSpinBox, "waifu2xNcnnVulkanNoiseLevelSpinBox" + ) + self.waifu2x_ncnn_vulkan_tile_size_spin_box = self.findChild( + QSpinBox, "waifu2xNcnnVulkanTileSizeSpinBox" + ) + self.waifu2x_ncnn_vulkan_model_combo_box = self.findChild( + QComboBox, "waifu2xNcnnVulkanModelComboBox" + ) + self.waifu2x_ncnn_vulkan_gpu_id_spin_box = self.findChild( + QSpinBox, "waifu2xNcnnVulkanGpuIdSpinBox" + ) + self.waifu2x_ncnn_vulkan_jobs_line_edit = self.findChild( + QLineEdit, "waifu2xNcnnVulkanJobsLineEdit" + ) + self.waifu2x_ncnn_vulkan_tta_check_box = self.findChild( + QCheckBox, "waifu2xNcnnVulkanTtaCheckBox" + ) # srmd-ncnn-vulkan - self.srmd_ncnn_vulkan_path_line_edit = self.findChild(QLineEdit, 'srmdNcnnVulkanPathLineEdit') + self.srmd_ncnn_vulkan_path_line_edit = self.findChild( + QLineEdit, "srmdNcnnVulkanPathLineEdit" + ) self.enable_line_edit_file_drop(self.srmd_ncnn_vulkan_path_line_edit) - self.srmd_ncnn_vulkan_path_select_button = self.findChild(QPushButton, 'srmdNcnnVulkanPathSelectButton') - self.srmd_ncnn_vulkan_path_select_button.clicked.connect(lambda: self.select_driver_binary_path(self.srmd_ncnn_vulkan_path_line_edit)) - self.srmd_ncnn_vulkan_noise_level_spin_box = self.findChild(QSpinBox, 'srmdNcnnVulkanNoiseLevelSpinBox') - self.srmd_ncnn_vulkan_tile_size_spin_box = self.findChild(QSpinBox, 'srmdNcnnVulkanTileSizeSpinBox') - self.srmd_ncnn_vulkan_model_combo_box = self.findChild(QComboBox, 'srmdNcnnVulkanModelComboBox') - self.srmd_ncnn_vulkan_gpu_id_spin_box = self.findChild(QSpinBox, 'srmdNcnnVulkanGpuIdSpinBox') - self.srmd_ncnn_vulkan_jobs_line_edit = self.findChild(QLineEdit, 'srmdNcnnVulkanJobsLineEdit') - self.srmd_ncnn_vulkan_tta_check_box = self.findChild(QCheckBox, 'srmdNcnnVulkanTtaCheckBox') + self.srmd_ncnn_vulkan_path_select_button = self.findChild( + QPushButton, "srmdNcnnVulkanPathSelectButton" + ) + self.srmd_ncnn_vulkan_path_select_button.clicked.connect( + lambda: self.select_driver_binary_path(self.srmd_ncnn_vulkan_path_line_edit) + ) + 
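Each of the ...PathSelectButton widgets above is wired to the shared select_driver_binary_path handler through a lambda that captures the matching QLineEdit, so one handler can serve every driver tab. The sketch below illustrates that idea with a stand-in handler; the QFileDialog-based body is an assumption, since select_driver_binary_path's implementation is not part of this diff.

import sys

from PyQt5.QtWidgets import (
    QApplication,
    QFileDialog,
    QLineEdit,
    QPushButton,
    QVBoxLayout,
    QWidget,
)


def select_binary_path(parent: QWidget, line_edit: QLineEdit):
    # assumed stand-in: ask for a file and write the choice into the line edit
    path, _ = QFileDialog.getOpenFileName(parent, "Select Driver Binary")
    if path:
        line_edit.setText(path)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = QWidget()
    layout = QVBoxLayout(window)
    path_line_edit = QLineEdit()
    select_button = QPushButton("Select...")
    layout.addWidget(path_line_edit)
    layout.addWidget(select_button)
    # the lambda captures this specific line edit, so a single handler serves many buttons
    select_button.clicked.connect(lambda: select_binary_path(window, path_line_edit))
    window.show()
    sys.exit(app.exec_())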
self.srmd_ncnn_vulkan_noise_level_spin_box = self.findChild( + QSpinBox, "srmdNcnnVulkanNoiseLevelSpinBox" + ) + self.srmd_ncnn_vulkan_tile_size_spin_box = self.findChild( + QSpinBox, "srmdNcnnVulkanTileSizeSpinBox" + ) + self.srmd_ncnn_vulkan_model_combo_box = self.findChild( + QComboBox, "srmdNcnnVulkanModelComboBox" + ) + self.srmd_ncnn_vulkan_gpu_id_spin_box = self.findChild( + QSpinBox, "srmdNcnnVulkanGpuIdSpinBox" + ) + self.srmd_ncnn_vulkan_jobs_line_edit = self.findChild( + QLineEdit, "srmdNcnnVulkanJobsLineEdit" + ) + self.srmd_ncnn_vulkan_tta_check_box = self.findChild( + QCheckBox, "srmdNcnnVulkanTtaCheckBox" + ) # realsr-ncnn-vulkan - self.realsr_ncnn_vulkan_path_line_edit = self.findChild(QLineEdit, 'realsrNcnnVulkanPathLineEdit') + self.realsr_ncnn_vulkan_path_line_edit = self.findChild( + QLineEdit, "realsrNcnnVulkanPathLineEdit" + ) self.enable_line_edit_file_drop(self.realsr_ncnn_vulkan_path_line_edit) - self.realsr_ncnn_vulkan_path_select_button = self.findChild(QPushButton, 'realsrNcnnVulkanPathSelectButton') - self.realsr_ncnn_vulkan_path_select_button.clicked.connect(lambda: self.select_driver_binary_path(self.realsr_ncnn_vulkan_path_line_edit)) - self.realsr_ncnn_vulkan_tile_size_spin_box = self.findChild(QSpinBox, 'realsrNcnnVulkanTileSizeSpinBox') - self.realsr_ncnn_vulkan_model_combo_box = self.findChild(QComboBox, 'realsrNcnnVulkanModelComboBox') - self.realsr_ncnn_vulkan_gpu_id_spin_box = self.findChild(QSpinBox, 'realsrNcnnVulkanGpuIdSpinBox') - self.realsr_ncnn_vulkan_jobs_line_edit = self.findChild(QLineEdit, 'realsrNcnnVulkanJobsLineEdit') - self.realsr_ncnn_vulkan_tta_check_box = self.findChild(QCheckBox, 'realsrNcnnVulkanTtaCheckBox') + self.realsr_ncnn_vulkan_path_select_button = self.findChild( + QPushButton, "realsrNcnnVulkanPathSelectButton" + ) + self.realsr_ncnn_vulkan_path_select_button.clicked.connect( + lambda: self.select_driver_binary_path( + self.realsr_ncnn_vulkan_path_line_edit + ) + ) + self.realsr_ncnn_vulkan_tile_size_spin_box = self.findChild( + QSpinBox, "realsrNcnnVulkanTileSizeSpinBox" + ) + self.realsr_ncnn_vulkan_model_combo_box = self.findChild( + QComboBox, "realsrNcnnVulkanModelComboBox" + ) + self.realsr_ncnn_vulkan_gpu_id_spin_box = self.findChild( + QSpinBox, "realsrNcnnVulkanGpuIdSpinBox" + ) + self.realsr_ncnn_vulkan_jobs_line_edit = self.findChild( + QLineEdit, "realsrNcnnVulkanJobsLineEdit" + ) + self.realsr_ncnn_vulkan_tta_check_box = self.findChild( + QCheckBox, "realsrNcnnVulkanTtaCheckBox" + ) # anime4k - self.anime4kcpp_path_line_edit = self.findChild(QLineEdit, 'anime4kCppPathLineEdit') + self.anime4kcpp_path_line_edit = self.findChild( + QLineEdit, "anime4kCppPathLineEdit" + ) self.enable_line_edit_file_drop(self.anime4kcpp_path_line_edit) - self.anime4kcpp_path_select_button = self.findChild(QPushButton, 'anime4kCppPathSelectButton') - self.anime4kcpp_path_select_button.clicked.connect(lambda: self.select_driver_binary_path(self.anime4kcpp_path_line_edit)) - self.anime4kcpp_passes_spin_box = self.findChild(QSpinBox, 'anime4kCppPassesSpinBox') - self.anime4kcpp_push_color_count_spin_box = self.findChild(QSpinBox, 'anime4kCppPushColorCountSpinBox') - self.anime4kcpp_strength_color_spin_box = self.findChild(QDoubleSpinBox, 'anime4kCppStrengthColorSpinBox') - self.anime4kcpp_strength_gradient_spin_box = self.findChild(QDoubleSpinBox, 'anime4kCppStrengthGradientSpinBox') - self.anime4kcpp_threads_spin_box = self.findChild(QSpinBox, 'anime4kCppThreadsSpinBox') - self.anime4kcpp_pre_filters_spin_box = 
self.findChild(QSpinBox, 'anime4kCppPreFiltersSpinBox') - self.anime4kcpp_post_filters_spin_box = self.findChild(QSpinBox, 'anime4kCppPostFiltersSpinBox') - self.anime4kcpp_platform_id_spin_box = self.findChild(QSpinBox, 'anime4kCppPlatformIdSpinBox') - self.anime4kcpp_device_id_spin_box = self.findChild(QSpinBox, 'anime4kCppDeviceIdSpinBox') - self.anime4kcpp_codec_combo_box = self.findChild(QComboBox, 'anime4kCppCodecComboBox') - self.anime4kcpp_fast_mode_check_box = self.findChild(QCheckBox, 'anime4kCppFastModeCheckBox') - self.anime4kcpp_pre_processing_check_box = self.findChild(QCheckBox, 'anime4kCppPreProcessingCheckBox') - self.anime4kcpp_post_processing_check_box = self.findChild(QCheckBox, 'anime4kCppPostProcessingCheckBox') - self.anime4kcpp_gpu_mode_check_box = self.findChild(QCheckBox, 'anime4kCppGpuModeCheckBox') - self.anime4kcpp_cnn_mode_check_box = self.findChild(QCheckBox, 'anime4kCppCnnModeCheckBox') - self.anime4kcpp_hdn_check_box = self.findChild(QCheckBox, 'anime4kCppHdnCheckBox') - self.anime4kcpp_hdn_level_spin_box = self.findChild(QSpinBox, 'anime4kCppHdnLevelSpinBox') - self.anime4kcpp_force_fps_double_spin_box = self.findChild(QDoubleSpinBox, 'anime4kCppForceFpsDoubleSpinBox') - self.anime4kcpp_disable_progress_check_box = self.findChild(QCheckBox, 'anime4kCppDisableProgressCheckBox') - self.anime4kcpp_alpha_check_box = self.findChild(QCheckBox, 'anime4kCppAlphaCheckBox') + self.anime4kcpp_path_select_button = self.findChild( + QPushButton, "anime4kCppPathSelectButton" + ) + self.anime4kcpp_path_select_button.clicked.connect( + lambda: self.select_driver_binary_path(self.anime4kcpp_path_line_edit) + ) + self.anime4kcpp_passes_spin_box = self.findChild( + QSpinBox, "anime4kCppPassesSpinBox" + ) + self.anime4kcpp_push_color_count_spin_box = self.findChild( + QSpinBox, "anime4kCppPushColorCountSpinBox" + ) + self.anime4kcpp_strength_color_spin_box = self.findChild( + QDoubleSpinBox, "anime4kCppStrengthColorSpinBox" + ) + self.anime4kcpp_strength_gradient_spin_box = self.findChild( + QDoubleSpinBox, "anime4kCppStrengthGradientSpinBox" + ) + self.anime4kcpp_threads_spin_box = self.findChild( + QSpinBox, "anime4kCppThreadsSpinBox" + ) + self.anime4kcpp_pre_filters_spin_box = self.findChild( + QSpinBox, "anime4kCppPreFiltersSpinBox" + ) + self.anime4kcpp_post_filters_spin_box = self.findChild( + QSpinBox, "anime4kCppPostFiltersSpinBox" + ) + self.anime4kcpp_platform_id_spin_box = self.findChild( + QSpinBox, "anime4kCppPlatformIdSpinBox" + ) + self.anime4kcpp_device_id_spin_box = self.findChild( + QSpinBox, "anime4kCppDeviceIdSpinBox" + ) + self.anime4kcpp_codec_combo_box = self.findChild( + QComboBox, "anime4kCppCodecComboBox" + ) + self.anime4kcpp_fast_mode_check_box = self.findChild( + QCheckBox, "anime4kCppFastModeCheckBox" + ) + self.anime4kcpp_pre_processing_check_box = self.findChild( + QCheckBox, "anime4kCppPreProcessingCheckBox" + ) + self.anime4kcpp_post_processing_check_box = self.findChild( + QCheckBox, "anime4kCppPostProcessingCheckBox" + ) + self.anime4kcpp_gpu_mode_check_box = self.findChild( + QCheckBox, "anime4kCppGpuModeCheckBox" + ) + self.anime4kcpp_cnn_mode_check_box = self.findChild( + QCheckBox, "anime4kCppCnnModeCheckBox" + ) + self.anime4kcpp_hdn_check_box = self.findChild( + QCheckBox, "anime4kCppHdnCheckBox" + ) + self.anime4kcpp_hdn_level_spin_box = self.findChild( + QSpinBox, "anime4kCppHdnLevelSpinBox" + ) + self.anime4kcpp_force_fps_double_spin_box = self.findChild( + QDoubleSpinBox, "anime4kCppForceFpsDoubleSpinBox" + ) + 
self.anime4kcpp_disable_progress_check_box = self.findChild( + QCheckBox, "anime4kCppDisableProgressCheckBox" + ) + self.anime4kcpp_alpha_check_box = self.findChild( + QCheckBox, "anime4kCppAlphaCheckBox" + ) # FFmpeg settings # global options - self.ffmpeg_path_line_edit = self.findChild(QLineEdit, 'ffmpegPathLineEdit') + self.ffmpeg_path_line_edit = self.findChild(QLineEdit, "ffmpegPathLineEdit") self.enable_line_edit_file_drop(self.ffmpeg_path_line_edit) - self.ffmpeg_path_select_button = self.findChild(QPushButton, 'ffmpegPathSelectButton') - self.ffmpeg_path_select_button.clicked.connect(lambda: self.select_driver_binary_path(self.ffmpeg_path_line_edit)) - self.ffmpeg_intermediate_file_name_line_edit = self.findChild(QLineEdit, 'ffmpegIntermediateFileNameLineEdit') + self.ffmpeg_path_select_button = self.findChild( + QPushButton, "ffmpegPathSelectButton" + ) + self.ffmpeg_path_select_button.clicked.connect( + lambda: self.select_driver_binary_path(self.ffmpeg_path_line_edit) + ) + self.ffmpeg_intermediate_file_name_line_edit = self.findChild( + QLineEdit, "ffmpegIntermediateFileNameLineEdit" + ) # extract frames - self.ffmpeg_extract_frames_output_options_pixel_format_line_edit = self.findChild(QLineEdit, 'ffmpegExtractFramesOutputOptionsPixelFormatLineEdit') - self.ffmpeg_extract_frames_hardware_acceleration_check_box = self.findChild(QCheckBox, 'ffmpegExtractFramesHardwareAccelerationCheckBox') + self.ffmpeg_extract_frames_output_options_pixel_format_line_edit = ( + self.findChild( + QLineEdit, "ffmpegExtractFramesOutputOptionsPixelFormatLineEdit" + ) + ) + self.ffmpeg_extract_frames_hardware_acceleration_check_box = self.findChild( + QCheckBox, "ffmpegExtractFramesHardwareAccelerationCheckBox" + ) # assemble video - self.ffmpeg_assemble_video_input_options_force_format_line_edit = self.findChild(QLineEdit, 'ffmpegAssembleVideoInputOptionsForceFormatLineEdit') - self.ffmpeg_assemble_video_output_options_video_codec_line_edit = self.findChild(QLineEdit, 'ffmpegAssembleVideoOutputOptionsVideoCodecLineEdit') - self.ffmpeg_assemble_video_output_options_pixel_format_line_edit = self.findChild(QLineEdit, 'ffmpegAssembleVideoOutputOptionsPixelFormatLineEdit') - self.ffmpeg_assemble_video_output_options_crf_spin_box = self.findChild(QSpinBox, 'ffmpegAssembleVideoOutputOptionsCrfSpinBox') - self.ffmpeg_assemble_video_output_options_tune_combo_box = self.findChild(QComboBox, 'ffmpegAssembleVideoOutputOptionsTuneComboBox') - self.ffmpeg_assemble_video_output_options_bitrate_line_edit = self.findChild(QLineEdit, 'ffmpegAssembleVideoOutputOptionsBitrateLineEdit') - self.ffmpeg_assemble_video_output_options_ensure_divisible_check_box = self.findChild(QCheckBox, 'ffmpegAssembleVideoOutputOptionsEnsureDivisibleCheckBox') - self.ffmpeg_assemble_video_hardware_acceleration_check_box = self.findChild(QCheckBox, 'ffmpegAssembleVideoHardwareAccelerationCheckBox') + self.ffmpeg_assemble_video_input_options_force_format_line_edit = ( + self.findChild( + QLineEdit, "ffmpegAssembleVideoInputOptionsForceFormatLineEdit" + ) + ) + self.ffmpeg_assemble_video_output_options_video_codec_line_edit = ( + self.findChild( + QLineEdit, "ffmpegAssembleVideoOutputOptionsVideoCodecLineEdit" + ) + ) + self.ffmpeg_assemble_video_output_options_pixel_format_line_edit = ( + self.findChild( + QLineEdit, "ffmpegAssembleVideoOutputOptionsPixelFormatLineEdit" + ) + ) + self.ffmpeg_assemble_video_output_options_crf_spin_box = self.findChild( + QSpinBox, "ffmpegAssembleVideoOutputOptionsCrfSpinBox" + ) + 
self.ffmpeg_assemble_video_output_options_tune_combo_box = self.findChild( + QComboBox, "ffmpegAssembleVideoOutputOptionsTuneComboBox" + ) + self.ffmpeg_assemble_video_output_options_bitrate_line_edit = self.findChild( + QLineEdit, "ffmpegAssembleVideoOutputOptionsBitrateLineEdit" + ) + self.ffmpeg_assemble_video_output_options_ensure_divisible_check_box = ( + self.findChild( + QCheckBox, "ffmpegAssembleVideoOutputOptionsEnsureDivisibleCheckBox" + ) + ) + self.ffmpeg_assemble_video_hardware_acceleration_check_box = self.findChild( + QCheckBox, "ffmpegAssembleVideoHardwareAccelerationCheckBox" + ) # migrate_streams - self.ffmpeg_migrate_streams_output_options_mapping_video_check_box_check_box = self.findChild(QCheckBox, 'ffmpegMigrateStreamsOutputOptionsMappingVideoCheckBox') - self.ffmpeg_migrate_streams_output_options_mapping_audio_check_box_check_box = self.findChild(QCheckBox, 'ffmpegMigrateStreamsOutputOptionsMappingAudioCheckBox') - self.ffmpeg_migrate_streams_output_options_mapping_subtitle_check_box_check_box = self.findChild(QCheckBox, 'ffmpegMigrateStreamsOutputOptionsMappingSubtitleCheckBox') - self.ffmpeg_migrate_streams_output_options_mapping_data_check_box_check_box = self.findChild(QCheckBox, 'ffmpegMigrateStreamsOutputOptionsMappingDataCheckBox') - self.ffmpeg_migrate_streams_output_options_mapping_font_check_box_check_box = self.findChild(QCheckBox, 'ffmpegMigrateStreamsOutputOptionsMappingFontCheckBox') - self.ffmpeg_migrate_streams_output_options_pixel_format_line_edit = self.findChild(QLineEdit, 'ffmpegMigrateStreamsOutputOptionsPixelFormatLineEdit') - self.ffmpeg_migrate_streams_output_options_frame_interpolation_spin_box = self.findChild(QSpinBox, 'ffmpegMigrateStreamsOutputOptionsFrameInterpolationSpinBox') - self.ffmpeg_migrate_streams_output_options_frame_interpolation_spin_box.valueChanged.connect(self.mutually_exclude_frame_interpolation_stream_copy) - self.ffmpeg_migrate_streams_output_options_frame_interpolation_spin_box.textChanged.connect(self.mutually_exclude_frame_interpolation_stream_copy) - self.ffmpeg_migrate_streams_output_options_copy_streams_check_box = self.findChild(QCheckBox, 'ffmpegMigrateStreamsOutputOptionsCopyStreamsCheckBox') - self.ffmpeg_migrate_streams_output_options_copy_known_metadata_tags_check_box = self.findChild(QCheckBox, 'ffmpegMigrateStreamsOutputOptionsCopyKnownMetadataTagsCheckBox') - self.ffmpeg_migrate_streams_output_options_copy_arbitrary_metadata_tags_check_box = self.findChild(QCheckBox, 'ffmpegMigrateStreamsOutputOptionsCopyArbitraryMetadataTagsCheckBox') - self.ffmpeg_migrate_streams_hardware_acceleration_check_box = self.findChild(QCheckBox, 'ffmpegMigrateStreamsHardwareAccelerationCheckBox') + self.ffmpeg_migrate_streams_output_options_mapping_video_check_box_check_box = ( + self.findChild( + QCheckBox, "ffmpegMigrateStreamsOutputOptionsMappingVideoCheckBox" + ) + ) + self.ffmpeg_migrate_streams_output_options_mapping_audio_check_box_check_box = ( + self.findChild( + QCheckBox, "ffmpegMigrateStreamsOutputOptionsMappingAudioCheckBox" + ) + ) + self.ffmpeg_migrate_streams_output_options_mapping_subtitle_check_box_check_box = self.findChild( + QCheckBox, "ffmpegMigrateStreamsOutputOptionsMappingSubtitleCheckBox" + ) + self.ffmpeg_migrate_streams_output_options_mapping_data_check_box_check_box = ( + self.findChild( + QCheckBox, "ffmpegMigrateStreamsOutputOptionsMappingDataCheckBox" + ) + ) + self.ffmpeg_migrate_streams_output_options_mapping_font_check_box_check_box = ( + self.findChild( + QCheckBox, 
"ffmpegMigrateStreamsOutputOptionsMappingFontCheckBox" + ) + ) + self.ffmpeg_migrate_streams_output_options_pixel_format_line_edit = ( + self.findChild( + QLineEdit, "ffmpegMigrateStreamsOutputOptionsPixelFormatLineEdit" + ) + ) + self.ffmpeg_migrate_streams_output_options_frame_interpolation_spin_box = ( + self.findChild( + QSpinBox, "ffmpegMigrateStreamsOutputOptionsFrameInterpolationSpinBox" + ) + ) + self.ffmpeg_migrate_streams_output_options_frame_interpolation_spin_box.valueChanged.connect( + self.mutually_exclude_frame_interpolation_stream_copy + ) + self.ffmpeg_migrate_streams_output_options_frame_interpolation_spin_box.textChanged.connect( + self.mutually_exclude_frame_interpolation_stream_copy + ) + self.ffmpeg_migrate_streams_output_options_copy_streams_check_box = ( + self.findChild( + QCheckBox, "ffmpegMigrateStreamsOutputOptionsCopyStreamsCheckBox" + ) + ) + self.ffmpeg_migrate_streams_output_options_copy_known_metadata_tags_check_box = self.findChild( + QCheckBox, "ffmpegMigrateStreamsOutputOptionsCopyKnownMetadataTagsCheckBox" + ) + self.ffmpeg_migrate_streams_output_options_copy_arbitrary_metadata_tags_check_box = self.findChild( + QCheckBox, + "ffmpegMigrateStreamsOutputOptionsCopyArbitraryMetadataTagsCheckBox", + ) + self.ffmpeg_migrate_streams_hardware_acceleration_check_box = self.findChild( + QCheckBox, "ffmpegMigrateStreamsHardwareAccelerationCheckBox" + ) # Gifski settings - self.gifski_path_line_edit = self.findChild(QLineEdit, 'gifskiPathLineEdit') + self.gifski_path_line_edit = self.findChild(QLineEdit, "gifskiPathLineEdit") self.enable_line_edit_file_drop(self.gifski_path_line_edit) - self.gifski_quality_spin_box = self.findChild(QSpinBox, 'gifskiQualitySpinBox') - self.gifski_fast_check_box = self.findChild(QCheckBox, 'gifskiFastCheckBox') - self.gifski_once_check_box = self.findChild(QCheckBox, 'gifskiOnceCheckBox') - self.gifski_quiet_check_box = self.findChild(QCheckBox, 'gifskiQuietCheckBox') + self.gifski_quality_spin_box = self.findChild(QSpinBox, "gifskiQualitySpinBox") + self.gifski_fast_check_box = self.findChild(QCheckBox, "gifskiFastCheckBox") + self.gifski_once_check_box = self.findChild(QCheckBox, "gifskiOnceCheckBox") + self.gifski_quiet_check_box = self.findChild(QCheckBox, "gifskiQuietCheckBox") # Tools - self.ffprobe_plain_text_edit = self.findChild(QPlainTextEdit, 'ffprobePlainTextEdit') + self.ffprobe_plain_text_edit = self.findChild( + QPlainTextEdit, "ffprobePlainTextEdit" + ) self.ffprobe_plain_text_edit.dropEvent = self.show_ffprobe_output # load configurations after GUI initialization @@ -461,321 +746,612 @@ class Video2XMainWindow(QMainWindow): def load_configurations(self): # get config file path from line edit - config_file_path = pathlib.Path(os.path.expandvars(self.config_line_edit.text())) + config_file_path = pathlib.Path( + os.path.expandvars(self.config_line_edit.text()) + ) # if file doesn't exist, return if not config_file_path.is_file(): - QErrorMessage(self).showMessage('Video2X configuration file not found, please specify manually.') + QErrorMessage(self).showMessage( + "Video2X configuration file not found, please specify manually." 
+ ) return # read configuration dict from config file self.config = self.read_config(config_file_path) # load FFmpeg settings - self.ffmpeg_settings = self.config['ffmpeg'] - self.ffmpeg_settings['ffmpeg_path'] = str(pathlib.Path(os.path.expandvars(self.ffmpeg_settings['ffmpeg_path'])).absolute()) + self.ffmpeg_settings = self.config["ffmpeg"] + self.ffmpeg_settings["ffmpeg_path"] = str( + pathlib.Path( + os.path.expandvars(self.ffmpeg_settings["ffmpeg_path"]) + ).absolute() + ) # read Gifski configuration - self.gifski_settings = self.config['gifski'] - self.gifski_settings['gifski_path'] = str(pathlib.Path(os.path.expandvars(self.gifski_settings['gifski_path'])).absolute()) + self.gifski_settings = self.config["gifski"] + self.gifski_settings["gifski_path"] = str( + pathlib.Path( + os.path.expandvars(self.gifski_settings["gifski_path"]) + ).absolute() + ) # set cache directory path - if self.config['video2x']['video2x_cache_directory'] is None: - self.config['video2x']['video2x_cache_directory'] = str((pathlib.Path(tempfile.gettempdir()) / 'video2x').absolute()) - self.cache_line_edit.setText(self.config['video2x']['video2x_cache_directory']) + if self.config["video2x"]["video2x_cache_directory"] is None: + self.config["video2x"]["video2x_cache_directory"] = str( + (pathlib.Path(tempfile.gettempdir()) / "video2x").absolute() + ) + self.cache_line_edit.setText(self.config["video2x"]["video2x_cache_directory"]) - self.output_file_name_format_string_line_edit.setText(self.config['video2x']['output_file_name_format_string']) - self.image_output_extension_line_edit.setText(self.config['video2x']['image_output_extension']) - self.video_output_extension_line_edit.setText(self.config['video2x']['video_output_extension']) + self.output_file_name_format_string_line_edit.setText( + self.config["video2x"]["output_file_name_format_string"] + ) + self.image_output_extension_line_edit.setText( + self.config["video2x"]["image_output_extension"] + ) + self.video_output_extension_line_edit.setText( + self.config["video2x"]["video_output_extension"] + ) # load preserve frames settings - self.preserve_frames_check_box.setChecked(self.config['video2x']['preserve_frames']) + self.preserve_frames_check_box.setChecked( + self.config["video2x"]["preserve_frames"] + ) self.start_button.setEnabled(True) # waifu2x-caffe - settings = self.config['waifu2x_caffe'] - self.waifu2x_caffe_path_line_edit.setText(str(pathlib.Path(os.path.expandvars(settings['path'])).absolute())) - self.waifu2x_caffe_mode_combo_box.setCurrentText(settings['mode']) - self.waifu2x_caffe_noise_level_spin_box.setValue(settings['noise_level']) - self.waifu2x_caffe_process_combo_box.setCurrentText(settings['process']) - self.waifu2x_caffe_crop_size_spin_box.setValue(settings['crop_size']) - self.waifu2x_caffe_output_quality_spin_box.setValue(settings['output_quality']) - self.waifu2x_caffe_output_depth_spin_box.setValue(settings['output_depth']) - self.waifu2x_caffe_batch_size_spin_box.setValue(settings['batch_size']) - self.waifu2x_caffe_gpu_spin_box.setValue(settings['gpu']) - self.waifu2x_caffe_tta_check_box.setChecked(bool(settings['tta'])) + settings = self.config["waifu2x_caffe"] + self.waifu2x_caffe_path_line_edit.setText( + str(pathlib.Path(os.path.expandvars(settings["path"])).absolute()) + ) + self.waifu2x_caffe_mode_combo_box.setCurrentText(settings["mode"]) + self.waifu2x_caffe_noise_level_spin_box.setValue(settings["noise_level"]) + self.waifu2x_caffe_process_combo_box.setCurrentText(settings["process"]) + 
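load_configurations normalizes every configured binary path with str(pathlib.Path(os.path.expandvars(settings["path"])).absolute()). A minimal sketch of what that expression does is shown below; the %LOCALAPPDATA% example value is an assumption for illustration only, not a value taken from video2x.yaml.

import os
import pathlib

# illustrative value only; the real path comes from video2x.yaml
configured_path = "%LOCALAPPDATA%/video2x/waifu2x-caffe/waifu2x-caffe-cui.exe"

# expand environment variables (%VAR% on Windows, $VAR on POSIX),
# then resolve to an absolute path and pass it around as a plain string
expanded = os.path.expandvars(configured_path)
absolute = str(pathlib.Path(expanded).absolute())
print(absolute)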
self.waifu2x_caffe_crop_size_spin_box.setValue(settings["crop_size"]) + self.waifu2x_caffe_output_quality_spin_box.setValue(settings["output_quality"]) + self.waifu2x_caffe_output_depth_spin_box.setValue(settings["output_depth"]) + self.waifu2x_caffe_batch_size_spin_box.setValue(settings["batch_size"]) + self.waifu2x_caffe_gpu_spin_box.setValue(settings["gpu"]) + self.waifu2x_caffe_tta_check_box.setChecked(bool(settings["tta"])) # waifu2x-converter-cpp - settings = self.config['waifu2x_converter_cpp'] - self.waifu2x_converter_cpp_path_line_edit.setText(str(pathlib.Path(os.path.expandvars(settings['path'])).absolute())) - self.waifu2x_converter_cpp_png_compression_spin_box.setValue(settings['png-compression']) - self.waifu2x_converter_cpp_image_quality_spin_box.setValue(settings['image-quality']) - self.waifu2x_converter_cpp_block_size_spin_box.setValue(settings['block-size']) - self.waifu2x_converter_cpp_processor_spin_box.setValue(settings['processor']) - self.waifu2x_converter_cpp_noise_level_spin_box.setValue(settings['noise-level']) - self.waifu2x_converter_cpp_mode_combo_box.setCurrentText(settings['mode']) - self.waifu2x_converter_cpp_log_level_spin_box.setValue(settings['log-level']) - self.waifu2x_converter_cpp_disable_gpu_check_box.setChecked(settings['disable-gpu']) - self.waifu2x_converter_cpp_force_opencl_check_box.setChecked(settings['force-OpenCL']) - self.waifu2x_converter_cpp_tta_check_box.setChecked(bool(settings['tta'])) + settings = self.config["waifu2x_converter_cpp"] + self.waifu2x_converter_cpp_path_line_edit.setText( + str(pathlib.Path(os.path.expandvars(settings["path"])).absolute()) + ) + self.waifu2x_converter_cpp_png_compression_spin_box.setValue( + settings["png-compression"] + ) + self.waifu2x_converter_cpp_image_quality_spin_box.setValue( + settings["image-quality"] + ) + self.waifu2x_converter_cpp_block_size_spin_box.setValue(settings["block-size"]) + self.waifu2x_converter_cpp_processor_spin_box.setValue(settings["processor"]) + self.waifu2x_converter_cpp_noise_level_spin_box.setValue( + settings["noise-level"] + ) + self.waifu2x_converter_cpp_mode_combo_box.setCurrentText(settings["mode"]) + self.waifu2x_converter_cpp_log_level_spin_box.setValue(settings["log-level"]) + self.waifu2x_converter_cpp_disable_gpu_check_box.setChecked( + settings["disable-gpu"] + ) + self.waifu2x_converter_cpp_force_opencl_check_box.setChecked( + settings["force-OpenCL"] + ) + self.waifu2x_converter_cpp_tta_check_box.setChecked(bool(settings["tta"])) # waifu2x-ncnn-vulkan - settings = self.config['waifu2x_ncnn_vulkan'] - self.waifu2x_ncnn_vulkan_path_line_edit.setText(str(pathlib.Path(os.path.expandvars(settings['path'])).absolute())) - self.waifu2x_ncnn_vulkan_noise_level_spin_box.setValue(settings['n']) - self.waifu2x_ncnn_vulkan_tile_size_spin_box.setValue(settings['t']) - self.waifu2x_ncnn_vulkan_gpu_id_spin_box.setValue(settings['g']) - self.waifu2x_ncnn_vulkan_jobs_line_edit.setText(settings['j']) - self.waifu2x_ncnn_vulkan_tta_check_box.setChecked(settings['x']) + settings = self.config["waifu2x_ncnn_vulkan"] + self.waifu2x_ncnn_vulkan_path_line_edit.setText( + str(pathlib.Path(os.path.expandvars(settings["path"])).absolute()) + ) + self.waifu2x_ncnn_vulkan_noise_level_spin_box.setValue(settings["n"]) + self.waifu2x_ncnn_vulkan_tile_size_spin_box.setValue(settings["t"]) + self.waifu2x_ncnn_vulkan_gpu_id_spin_box.setValue(settings["g"]) + self.waifu2x_ncnn_vulkan_jobs_line_edit.setText(settings["j"]) + self.waifu2x_ncnn_vulkan_tta_check_box.setChecked(settings["x"]) # 
srmd-ncnn-vulkan - settings = self.config['srmd_ncnn_vulkan'] - self.srmd_ncnn_vulkan_path_line_edit.setText(str(pathlib.Path(os.path.expandvars(settings['path'])).absolute())) - self.srmd_ncnn_vulkan_noise_level_spin_box.setValue(settings['n']) - self.srmd_ncnn_vulkan_tile_size_spin_box.setValue(settings['t']) - self.srmd_ncnn_vulkan_gpu_id_spin_box.setValue(settings['g']) - self.srmd_ncnn_vulkan_jobs_line_edit.setText(settings['j']) - self.srmd_ncnn_vulkan_tta_check_box.setChecked(settings['x']) + settings = self.config["srmd_ncnn_vulkan"] + self.srmd_ncnn_vulkan_path_line_edit.setText( + str(pathlib.Path(os.path.expandvars(settings["path"])).absolute()) + ) + self.srmd_ncnn_vulkan_noise_level_spin_box.setValue(settings["n"]) + self.srmd_ncnn_vulkan_tile_size_spin_box.setValue(settings["t"]) + self.srmd_ncnn_vulkan_gpu_id_spin_box.setValue(settings["g"]) + self.srmd_ncnn_vulkan_jobs_line_edit.setText(settings["j"]) + self.srmd_ncnn_vulkan_tta_check_box.setChecked(settings["x"]) # realsr-ncnn-vulkan - settings = self.config['realsr_ncnn_vulkan'] - self.realsr_ncnn_vulkan_path_line_edit.setText(str(pathlib.Path(os.path.expandvars(settings['path'])).absolute())) - self.realsr_ncnn_vulkan_tile_size_spin_box.setValue(settings['t']) - self.realsr_ncnn_vulkan_gpu_id_spin_box.setValue(settings['g']) - self.realsr_ncnn_vulkan_jobs_line_edit.setText(settings['j']) - self.realsr_ncnn_vulkan_tta_check_box.setChecked(settings['x']) + settings = self.config["realsr_ncnn_vulkan"] + self.realsr_ncnn_vulkan_path_line_edit.setText( + str(pathlib.Path(os.path.expandvars(settings["path"])).absolute()) + ) + self.realsr_ncnn_vulkan_tile_size_spin_box.setValue(settings["t"]) + self.realsr_ncnn_vulkan_gpu_id_spin_box.setValue(settings["g"]) + self.realsr_ncnn_vulkan_jobs_line_edit.setText(settings["j"]) + self.realsr_ncnn_vulkan_tta_check_box.setChecked(settings["x"]) # anime4k - settings = self.config['anime4kcpp'] - self.anime4kcpp_path_line_edit.setText(str(pathlib.Path(os.path.expandvars(settings['path'])).absolute())) - self.anime4kcpp_passes_spin_box.setValue(settings['passes']) - self.anime4kcpp_push_color_count_spin_box.setValue(settings['pushColorCount']) - self.anime4kcpp_strength_color_spin_box.setValue(settings['strengthColor']) - self.anime4kcpp_strength_gradient_spin_box.setValue(settings['strengthGradient']) - self.anime4kcpp_threads_spin_box.setValue(settings['threads']) - self.anime4kcpp_pre_filters_spin_box.setValue(settings['preFilters']) - self.anime4kcpp_post_filters_spin_box.setValue(settings['postFilters']) - self.anime4kcpp_platform_id_spin_box.setValue(settings['platformID']) - self.anime4kcpp_device_id_spin_box.setValue(settings['deviceID']) - self.anime4kcpp_codec_combo_box.setCurrentText(settings['codec']) - self.anime4kcpp_fast_mode_check_box.setChecked(settings['fastMode']) - self.anime4kcpp_pre_processing_check_box.setChecked(settings['preprocessing']) - self.anime4kcpp_post_processing_check_box.setChecked(settings['postprocessing']) - self.anime4kcpp_gpu_mode_check_box.setChecked(settings['GPUMode']) - self.anime4kcpp_cnn_mode_check_box.setChecked(settings['CNNMode']) - self.anime4kcpp_hdn_check_box.setChecked(settings['HDN']) - self.anime4kcpp_hdn_level_spin_box.setValue(settings['HDNLevel']) - self.anime4kcpp_force_fps_double_spin_box.setValue(settings['forceFps']) - self.anime4kcpp_disable_progress_check_box.setChecked(settings['disableProgress']) - self.anime4kcpp_alpha_check_box.setChecked(settings['alpha']) + settings = self.config["anime4kcpp"] + 
self.anime4kcpp_path_line_edit.setText( + str(pathlib.Path(os.path.expandvars(settings["path"])).absolute()) + ) + self.anime4kcpp_passes_spin_box.setValue(settings["passes"]) + self.anime4kcpp_push_color_count_spin_box.setValue(settings["pushColorCount"]) + self.anime4kcpp_strength_color_spin_box.setValue(settings["strengthColor"]) + self.anime4kcpp_strength_gradient_spin_box.setValue( + settings["strengthGradient"] + ) + self.anime4kcpp_threads_spin_box.setValue(settings["threads"]) + self.anime4kcpp_pre_filters_spin_box.setValue(settings["preFilters"]) + self.anime4kcpp_post_filters_spin_box.setValue(settings["postFilters"]) + self.anime4kcpp_platform_id_spin_box.setValue(settings["platformID"]) + self.anime4kcpp_device_id_spin_box.setValue(settings["deviceID"]) + self.anime4kcpp_codec_combo_box.setCurrentText(settings["codec"]) + self.anime4kcpp_fast_mode_check_box.setChecked(settings["fastMode"]) + self.anime4kcpp_pre_processing_check_box.setChecked(settings["preprocessing"]) + self.anime4kcpp_post_processing_check_box.setChecked(settings["postprocessing"]) + self.anime4kcpp_gpu_mode_check_box.setChecked(settings["GPUMode"]) + self.anime4kcpp_cnn_mode_check_box.setChecked(settings["CNNMode"]) + self.anime4kcpp_hdn_check_box.setChecked(settings["HDN"]) + self.anime4kcpp_hdn_level_spin_box.setValue(settings["HDNLevel"]) + self.anime4kcpp_force_fps_double_spin_box.setValue(settings["forceFps"]) + self.anime4kcpp_disable_progress_check_box.setChecked( + settings["disableProgress"] + ) + self.anime4kcpp_alpha_check_box.setChecked(settings["alpha"]) # ffmpeg # global options - settings = self.config['ffmpeg'] - self.ffmpeg_path_line_edit.setText(str(pathlib.Path(os.path.expandvars(settings['ffmpeg_path'])).absolute())) - self.ffmpeg_intermediate_file_name_line_edit.setText(settings['intermediate_file_name']) + settings = self.config["ffmpeg"] + self.ffmpeg_path_line_edit.setText( + str(pathlib.Path(os.path.expandvars(settings["ffmpeg_path"])).absolute()) + ) + self.ffmpeg_intermediate_file_name_line_edit.setText( + settings["intermediate_file_name"] + ) # extract frames - settings = self.config['ffmpeg']['extract_frames'] - self.ffmpeg_extract_frames_output_options_pixel_format_line_edit.setText(settings['output_options']['-pix_fmt']) + settings = self.config["ffmpeg"]["extract_frames"] + self.ffmpeg_extract_frames_output_options_pixel_format_line_edit.setText( + settings["output_options"]["-pix_fmt"] + ) # assemble video - settings = self.config['ffmpeg']['assemble_video'] - self.ffmpeg_assemble_video_input_options_force_format_line_edit.setText(settings['input_options']['-f']) - self.ffmpeg_assemble_video_output_options_video_codec_line_edit.setText(settings['output_options']['-vcodec']) - self.ffmpeg_assemble_video_output_options_pixel_format_line_edit.setText(settings['output_options']['-pix_fmt']) - self.ffmpeg_assemble_video_output_options_crf_spin_box.setValue(settings['output_options']['-crf']) - self.ffmpeg_assemble_video_output_options_tune_combo_box.setCurrentText(settings['output_options']['-tune']) - self.ffmpeg_assemble_video_output_options_bitrate_line_edit.setText(settings['output_options']['-b:v']) + settings = self.config["ffmpeg"]["assemble_video"] + self.ffmpeg_assemble_video_input_options_force_format_line_edit.setText( + settings["input_options"]["-f"] + ) + self.ffmpeg_assemble_video_output_options_video_codec_line_edit.setText( + settings["output_options"]["-vcodec"] + ) + self.ffmpeg_assemble_video_output_options_pixel_format_line_edit.setText( + 
settings["output_options"]["-pix_fmt"] + ) + self.ffmpeg_assemble_video_output_options_crf_spin_box.setValue( + settings["output_options"]["-crf"] + ) + self.ffmpeg_assemble_video_output_options_tune_combo_box.setCurrentText( + settings["output_options"]["-tune"] + ) + self.ffmpeg_assemble_video_output_options_bitrate_line_edit.setText( + settings["output_options"]["-b:v"] + ) # migrate streams - settings = self.config['ffmpeg']['migrate_streams'] - self.ffmpeg_migrate_streams_output_options_pixel_format_line_edit.setText(settings['output_options']['-pix_fmt']) + settings = self.config["ffmpeg"]["migrate_streams"] + self.ffmpeg_migrate_streams_output_options_pixel_format_line_edit.setText( + settings["output_options"]["-pix_fmt"] + ) # Gifski - settings = self.config['gifski'] - self.gifski_path_line_edit.setText(str(pathlib.Path(os.path.expandvars(settings['gifski_path'])).absolute())) - self.gifski_quality_spin_box.setValue(settings['quality']) - self.gifski_fast_check_box.setChecked(settings['fast']) - self.gifski_once_check_box.setChecked(settings['once']) - self.gifski_quiet_check_box.setChecked(settings['quiet']) + settings = self.config["gifski"] + self.gifski_path_line_edit.setText( + str(pathlib.Path(os.path.expandvars(settings["gifski_path"])).absolute()) + ) + self.gifski_quality_spin_box.setValue(settings["quality"]) + self.gifski_fast_check_box.setChecked(settings["fast"]) + self.gifski_once_check_box.setChecked(settings["once"]) + self.gifski_quiet_check_box.setChecked(settings["quiet"]) def resolve_driver_settings(self): # waifu2x-caffe - self.config['waifu2x_caffe']['path'] = os.path.expandvars(self.waifu2x_caffe_path_line_edit.text()) - self.config['waifu2x_caffe']['mode'] = self.waifu2x_caffe_mode_combo_box.currentText() - self.config['waifu2x_caffe']['noise_level'] = self.waifu2x_caffe_noise_level_spin_box.value() - self.config['waifu2x_caffe']['process'] = self.waifu2x_caffe_process_combo_box.currentText() - self.config['waifu2x_caffe']['model_dir'] = str((pathlib.Path(self.config['waifu2x_caffe']['path']).parent / 'models' / self.waifu2x_caffe_model_combobox.currentText()).absolute()) - self.config['waifu2x_caffe']['crop_size'] = self.waifu2x_caffe_crop_size_spin_box.value() - self.config['waifu2x_caffe']['output_quality'] = self.waifu2x_caffe_output_quality_spin_box.value() - self.config['waifu2x_caffe']['output_depth'] = self.waifu2x_caffe_output_depth_spin_box.value() - self.config['waifu2x_caffe']['batch_size'] = self.waifu2x_caffe_batch_size_spin_box.value() - self.config['waifu2x_caffe']['gpu'] = self.waifu2x_caffe_gpu_spin_box.value() - self.config['waifu2x_caffe']['tta'] = int(self.waifu2x_caffe_tta_check_box.isChecked()) + self.config["waifu2x_caffe"]["path"] = os.path.expandvars( + self.waifu2x_caffe_path_line_edit.text() + ) + self.config["waifu2x_caffe"][ + "mode" + ] = self.waifu2x_caffe_mode_combo_box.currentText() + self.config["waifu2x_caffe"][ + "noise_level" + ] = self.waifu2x_caffe_noise_level_spin_box.value() + self.config["waifu2x_caffe"][ + "process" + ] = self.waifu2x_caffe_process_combo_box.currentText() + self.config["waifu2x_caffe"]["model_dir"] = str( + ( + pathlib.Path(self.config["waifu2x_caffe"]["path"]).parent + / "models" + / self.waifu2x_caffe_model_combobox.currentText() + ).absolute() + ) + self.config["waifu2x_caffe"][ + "crop_size" + ] = self.waifu2x_caffe_crop_size_spin_box.value() + self.config["waifu2x_caffe"][ + "output_quality" + ] = self.waifu2x_caffe_output_quality_spin_box.value() + self.config["waifu2x_caffe"][ + 
"output_depth" + ] = self.waifu2x_caffe_output_depth_spin_box.value() + self.config["waifu2x_caffe"][ + "batch_size" + ] = self.waifu2x_caffe_batch_size_spin_box.value() + self.config["waifu2x_caffe"]["gpu"] = self.waifu2x_caffe_gpu_spin_box.value() + self.config["waifu2x_caffe"]["tta"] = int( + self.waifu2x_caffe_tta_check_box.isChecked() + ) # waifu2x-converter-cpp - self.config['waifu2x_converter_cpp']['path'] = os.path.expandvars(self.waifu2x_converter_cpp_path_line_edit.text()) - self.config['waifu2x_converter_cpp']['png-compression'] = self.waifu2x_converter_cpp_png_compression_spin_box.value() - self.config['waifu2x_converter_cpp']['image-quality'] = self.waifu2x_converter_cpp_image_quality_spin_box.value() - self.config['waifu2x_converter_cpp']['block-size'] = self.waifu2x_converter_cpp_block_size_spin_box.value() - self.config['waifu2x_converter_cpp']['processor'] = self.waifu2x_converter_cpp_processor_spin_box.value() - self.config['waifu2x_converter_cpp']['model-dir'] = str((pathlib.Path(self.config['waifu2x_converter_cpp']['path']).parent / self.waifu2x_converter_cpp_model_combo_box.currentText()).absolute()) - self.config['waifu2x_converter_cpp']['noise-level'] = self.waifu2x_converter_cpp_noise_level_spin_box.value() - self.config['waifu2x_converter_cpp']['mode'] = self.waifu2x_converter_cpp_mode_combo_box.currentText() - self.config['waifu2x_converter_cpp']['log-level'] = self.waifu2x_converter_cpp_log_level_spin_box.value() - self.config['waifu2x_converter_cpp']['disable-gpu'] = bool(self.waifu2x_converter_cpp_disable_gpu_check_box.isChecked()) - self.config['waifu2x_converter_cpp']['force-OpenCL'] = bool(self.waifu2x_converter_cpp_force_opencl_check_box.isChecked()) - self.config['waifu2x_converter_cpp']['tta'] = int(self.waifu2x_converter_cpp_tta_check_box.isChecked()) + self.config["waifu2x_converter_cpp"]["path"] = os.path.expandvars( + self.waifu2x_converter_cpp_path_line_edit.text() + ) + self.config["waifu2x_converter_cpp"][ + "png-compression" + ] = self.waifu2x_converter_cpp_png_compression_spin_box.value() + self.config["waifu2x_converter_cpp"][ + "image-quality" + ] = self.waifu2x_converter_cpp_image_quality_spin_box.value() + self.config["waifu2x_converter_cpp"][ + "block-size" + ] = self.waifu2x_converter_cpp_block_size_spin_box.value() + self.config["waifu2x_converter_cpp"][ + "processor" + ] = self.waifu2x_converter_cpp_processor_spin_box.value() + self.config["waifu2x_converter_cpp"]["model-dir"] = str( + ( + pathlib.Path(self.config["waifu2x_converter_cpp"]["path"]).parent + / self.waifu2x_converter_cpp_model_combo_box.currentText() + ).absolute() + ) + self.config["waifu2x_converter_cpp"][ + "noise-level" + ] = self.waifu2x_converter_cpp_noise_level_spin_box.value() + self.config["waifu2x_converter_cpp"][ + "mode" + ] = self.waifu2x_converter_cpp_mode_combo_box.currentText() + self.config["waifu2x_converter_cpp"][ + "log-level" + ] = self.waifu2x_converter_cpp_log_level_spin_box.value() + self.config["waifu2x_converter_cpp"]["disable-gpu"] = bool( + self.waifu2x_converter_cpp_disable_gpu_check_box.isChecked() + ) + self.config["waifu2x_converter_cpp"]["force-OpenCL"] = bool( + self.waifu2x_converter_cpp_force_opencl_check_box.isChecked() + ) + self.config["waifu2x_converter_cpp"]["tta"] = int( + self.waifu2x_converter_cpp_tta_check_box.isChecked() + ) # waifu2x-ncnn-vulkan - self.config['waifu2x_ncnn_vulkan']['path'] = os.path.expandvars(self.waifu2x_ncnn_vulkan_path_line_edit.text()) - self.config['waifu2x_ncnn_vulkan']['n'] = 
self.waifu2x_ncnn_vulkan_noise_level_spin_box.value() - self.config['waifu2x_ncnn_vulkan']['t'] = self.waifu2x_ncnn_vulkan_tile_size_spin_box.value() - self.config['waifu2x_ncnn_vulkan']['m'] = str((pathlib.Path(self.config['waifu2x_ncnn_vulkan']['path']).parent / self.waifu2x_ncnn_vulkan_model_combo_box.currentText()).absolute()) - self.config['waifu2x_ncnn_vulkan']['g'] = self.waifu2x_ncnn_vulkan_gpu_id_spin_box.value() - self.config['waifu2x_ncnn_vulkan']['j'] = self.waifu2x_ncnn_vulkan_jobs_line_edit.text() - self.config['waifu2x_ncnn_vulkan']['x'] = self.waifu2x_ncnn_vulkan_tta_check_box.isChecked() + self.config["waifu2x_ncnn_vulkan"]["path"] = os.path.expandvars( + self.waifu2x_ncnn_vulkan_path_line_edit.text() + ) + self.config["waifu2x_ncnn_vulkan"][ + "n" + ] = self.waifu2x_ncnn_vulkan_noise_level_spin_box.value() + self.config["waifu2x_ncnn_vulkan"][ + "t" + ] = self.waifu2x_ncnn_vulkan_tile_size_spin_box.value() + self.config["waifu2x_ncnn_vulkan"]["m"] = str( + ( + pathlib.Path(self.config["waifu2x_ncnn_vulkan"]["path"]).parent + / self.waifu2x_ncnn_vulkan_model_combo_box.currentText() + ).absolute() + ) + self.config["waifu2x_ncnn_vulkan"][ + "g" + ] = self.waifu2x_ncnn_vulkan_gpu_id_spin_box.value() + self.config["waifu2x_ncnn_vulkan"][ + "j" + ] = self.waifu2x_ncnn_vulkan_jobs_line_edit.text() + self.config["waifu2x_ncnn_vulkan"][ + "x" + ] = self.waifu2x_ncnn_vulkan_tta_check_box.isChecked() # srmd-ncnn-vulkan - self.config['srmd_ncnn_vulkan']['path'] = os.path.expandvars(self.srmd_ncnn_vulkan_path_line_edit.text()) - self.config['srmd_ncnn_vulkan']['n'] = self.srmd_ncnn_vulkan_noise_level_spin_box.value() - self.config['srmd_ncnn_vulkan']['t'] = self.srmd_ncnn_vulkan_tile_size_spin_box.value() - self.config['srmd_ncnn_vulkan']['m'] = str((pathlib.Path(self.config['srmd_ncnn_vulkan']['path']).parent / self.srmd_ncnn_vulkan_model_combo_box.currentText()).absolute()) - self.config['srmd_ncnn_vulkan']['g'] = self.srmd_ncnn_vulkan_gpu_id_spin_box.value() - self.config['srmd_ncnn_vulkan']['j'] = self.srmd_ncnn_vulkan_jobs_line_edit.text() - self.config['srmd_ncnn_vulkan']['x'] = self.srmd_ncnn_vulkan_tta_check_box.isChecked() + self.config["srmd_ncnn_vulkan"]["path"] = os.path.expandvars( + self.srmd_ncnn_vulkan_path_line_edit.text() + ) + self.config["srmd_ncnn_vulkan"][ + "n" + ] = self.srmd_ncnn_vulkan_noise_level_spin_box.value() + self.config["srmd_ncnn_vulkan"][ + "t" + ] = self.srmd_ncnn_vulkan_tile_size_spin_box.value() + self.config["srmd_ncnn_vulkan"]["m"] = str( + ( + pathlib.Path(self.config["srmd_ncnn_vulkan"]["path"]).parent + / self.srmd_ncnn_vulkan_model_combo_box.currentText() + ).absolute() + ) + self.config["srmd_ncnn_vulkan"][ + "g" + ] = self.srmd_ncnn_vulkan_gpu_id_spin_box.value() + self.config["srmd_ncnn_vulkan"][ + "j" + ] = self.srmd_ncnn_vulkan_jobs_line_edit.text() + self.config["srmd_ncnn_vulkan"][ + "x" + ] = self.srmd_ncnn_vulkan_tta_check_box.isChecked() # realsr-ncnn-vulkan - self.config['realsr_ncnn_vulkan']['path'] = os.path.expandvars(self.realsr_ncnn_vulkan_path_line_edit.text()) - self.config['realsr_ncnn_vulkan']['t'] = self.realsr_ncnn_vulkan_tile_size_spin_box.value() - self.config['realsr_ncnn_vulkan']['m'] = str((pathlib.Path(self.config['realsr_ncnn_vulkan']['path']).parent / self.realsr_ncnn_vulkan_model_combo_box.currentText()).absolute()) - self.config['realsr_ncnn_vulkan']['g'] = self.realsr_ncnn_vulkan_gpu_id_spin_box.value() - self.config['realsr_ncnn_vulkan']['j'] = self.realsr_ncnn_vulkan_jobs_line_edit.text() - 
self.config['realsr_ncnn_vulkan']['x'] = self.realsr_ncnn_vulkan_tta_check_box.isChecked() + self.config["realsr_ncnn_vulkan"]["path"] = os.path.expandvars( + self.realsr_ncnn_vulkan_path_line_edit.text() + ) + self.config["realsr_ncnn_vulkan"][ + "t" + ] = self.realsr_ncnn_vulkan_tile_size_spin_box.value() + self.config["realsr_ncnn_vulkan"]["m"] = str( + ( + pathlib.Path(self.config["realsr_ncnn_vulkan"]["path"]).parent + / self.realsr_ncnn_vulkan_model_combo_box.currentText() + ).absolute() + ) + self.config["realsr_ncnn_vulkan"][ + "g" + ] = self.realsr_ncnn_vulkan_gpu_id_spin_box.value() + self.config["realsr_ncnn_vulkan"][ + "j" + ] = self.realsr_ncnn_vulkan_jobs_line_edit.text() + self.config["realsr_ncnn_vulkan"][ + "x" + ] = self.realsr_ncnn_vulkan_tta_check_box.isChecked() # anime4k - self.config['anime4kcpp']['path'] = os.path.expandvars(self.anime4kcpp_path_line_edit.text()) - self.config['anime4kcpp']['passes'] = self.anime4kcpp_passes_spin_box.value() - self.config['anime4kcpp']['pushColorCount'] = self.anime4kcpp_push_color_count_spin_box.value() - self.config['anime4kcpp']['strengthColor'] = self.anime4kcpp_strength_color_spin_box.value() - self.config['anime4kcpp']['strengthGradient'] = self.anime4kcpp_strength_gradient_spin_box.value() - self.config['anime4kcpp']['threads'] = self.anime4kcpp_threads_spin_box.value() - self.config['anime4kcpp']['preFilters'] = self.anime4kcpp_pre_filters_spin_box.value() - self.config['anime4kcpp']['postFilters'] = self.anime4kcpp_post_filters_spin_box.value() - self.config['anime4kcpp']['platformID'] = self.anime4kcpp_platform_id_spin_box.value() - self.config['anime4kcpp']['deviceID'] = self.anime4kcpp_device_id_spin_box.value() - self.config['anime4kcpp']['codec'] = self.anime4kcpp_codec_combo_box.currentText() - self.config['anime4kcpp']['fastMode'] = bool(self.anime4kcpp_fast_mode_check_box.isChecked()) - self.config['anime4kcpp']['preprocessing'] = bool(self.anime4kcpp_pre_processing_check_box.isChecked()) - self.config['anime4kcpp']['postprocessing'] = bool(self.anime4kcpp_post_processing_check_box.isChecked()) - self.config['anime4kcpp']['GPUMode'] = bool(self.anime4kcpp_gpu_mode_check_box.isChecked()) - self.config['anime4kcpp']['CNNMode'] = bool(self.anime4kcpp_cnn_mode_check_box.isChecked()) - self.config['anime4kcpp']['HDN'] = bool(self.anime4kcpp_hdn_check_box.isChecked()) - self.config['anime4kcpp']['HDNLevel'] = self.anime4kcpp_hdn_level_spin_box.value() - self.config['anime4kcpp']['forceFps'] = self.anime4kcpp_force_fps_double_spin_box.value() - self.config['anime4kcpp']['disableProgress'] = bool(self.anime4kcpp_disable_progress_check_box.isChecked()) - self.config['anime4kcpp']['alpha'] = bool(self.anime4kcpp_alpha_check_box.isChecked()) + self.config["anime4kcpp"]["path"] = os.path.expandvars( + self.anime4kcpp_path_line_edit.text() + ) + self.config["anime4kcpp"]["passes"] = self.anime4kcpp_passes_spin_box.value() + self.config["anime4kcpp"][ + "pushColorCount" + ] = self.anime4kcpp_push_color_count_spin_box.value() + self.config["anime4kcpp"][ + "strengthColor" + ] = self.anime4kcpp_strength_color_spin_box.value() + self.config["anime4kcpp"][ + "strengthGradient" + ] = self.anime4kcpp_strength_gradient_spin_box.value() + self.config["anime4kcpp"]["threads"] = self.anime4kcpp_threads_spin_box.value() + self.config["anime4kcpp"][ + "preFilters" + ] = self.anime4kcpp_pre_filters_spin_box.value() + self.config["anime4kcpp"][ + "postFilters" + ] = self.anime4kcpp_post_filters_spin_box.value() + self.config["anime4kcpp"][ + 
"platformID" + ] = self.anime4kcpp_platform_id_spin_box.value() + self.config["anime4kcpp"][ + "deviceID" + ] = self.anime4kcpp_device_id_spin_box.value() + self.config["anime4kcpp"][ + "codec" + ] = self.anime4kcpp_codec_combo_box.currentText() + self.config["anime4kcpp"]["fastMode"] = bool( + self.anime4kcpp_fast_mode_check_box.isChecked() + ) + self.config["anime4kcpp"]["preprocessing"] = bool( + self.anime4kcpp_pre_processing_check_box.isChecked() + ) + self.config["anime4kcpp"]["postprocessing"] = bool( + self.anime4kcpp_post_processing_check_box.isChecked() + ) + self.config["anime4kcpp"]["GPUMode"] = bool( + self.anime4kcpp_gpu_mode_check_box.isChecked() + ) + self.config["anime4kcpp"]["CNNMode"] = bool( + self.anime4kcpp_cnn_mode_check_box.isChecked() + ) + self.config["anime4kcpp"]["HDN"] = bool( + self.anime4kcpp_hdn_check_box.isChecked() + ) + self.config["anime4kcpp"][ + "HDNLevel" + ] = self.anime4kcpp_hdn_level_spin_box.value() + self.config["anime4kcpp"][ + "forceFps" + ] = self.anime4kcpp_force_fps_double_spin_box.value() + self.config["anime4kcpp"]["disableProgress"] = bool( + self.anime4kcpp_disable_progress_check_box.isChecked() + ) + self.config["anime4kcpp"]["alpha"] = bool( + self.anime4kcpp_alpha_check_box.isChecked() + ) # ffmpeg - self.config['ffmpeg']['ffmpeg_path'] = os.path.expandvars(self.ffmpeg_path_line_edit.text()) - self.config['ffmpeg']['intermediate_file_name'] = self.ffmpeg_intermediate_file_name_line_edit.text() + self.config["ffmpeg"]["ffmpeg_path"] = os.path.expandvars( + self.ffmpeg_path_line_edit.text() + ) + self.config["ffmpeg"][ + "intermediate_file_name" + ] = self.ffmpeg_intermediate_file_name_line_edit.text() # extract frames - self.config['ffmpeg']['extract_frames']['output_options']['-pix_fmt'] = self.ffmpeg_extract_frames_output_options_pixel_format_line_edit.text() + self.config["ffmpeg"]["extract_frames"]["output_options"][ + "-pix_fmt" + ] = self.ffmpeg_extract_frames_output_options_pixel_format_line_edit.text() if not self.ffmpeg_extract_frames_hardware_acceleration_check_box.isChecked(): - self.config['ffmpeg']['extract_frames'].pop('-hwaccel', None) + self.config["ffmpeg"]["extract_frames"].pop("-hwaccel", None) # assemble video - self.config['ffmpeg']['assemble_video']['input_options']['-f'] = self.ffmpeg_assemble_video_input_options_force_format_line_edit.text() - self.config['ffmpeg']['assemble_video']['output_options']['-vcodec'] = self.ffmpeg_assemble_video_output_options_video_codec_line_edit.text() - self.config['ffmpeg']['assemble_video']['output_options']['-pix_fmt'] = self.ffmpeg_assemble_video_output_options_pixel_format_line_edit.text() - self.config['ffmpeg']['assemble_video']['output_options']['-crf'] = self.ffmpeg_assemble_video_output_options_crf_spin_box.value() - if self.ffmpeg_assemble_video_output_options_tune_combo_box.currentText() == 'none': - self.config['ffmpeg']['assemble_video']['output_options']['-tune'] = None + self.config["ffmpeg"]["assemble_video"]["input_options"][ + "-f" + ] = self.ffmpeg_assemble_video_input_options_force_format_line_edit.text() + self.config["ffmpeg"]["assemble_video"]["output_options"][ + "-vcodec" + ] = self.ffmpeg_assemble_video_output_options_video_codec_line_edit.text() + self.config["ffmpeg"]["assemble_video"]["output_options"][ + "-pix_fmt" + ] = self.ffmpeg_assemble_video_output_options_pixel_format_line_edit.text() + self.config["ffmpeg"]["assemble_video"]["output_options"][ + "-crf" + ] = self.ffmpeg_assemble_video_output_options_crf_spin_box.value() + if ( + 
self.ffmpeg_assemble_video_output_options_tune_combo_box.currentText() + == "none" + ): + self.config["ffmpeg"]["assemble_video"]["output_options"]["-tune"] = None else: - self.config['ffmpeg']['assemble_video']['output_options']['-tune'] = self.ffmpeg_assemble_video_output_options_tune_combo_box.currentText() - if self.ffmpeg_assemble_video_output_options_bitrate_line_edit.text() != '': - self.config['ffmpeg']['assemble_video']['output_options']['-b:v'] = self.ffmpeg_assemble_video_output_options_bitrate_line_edit.text() + self.config["ffmpeg"]["assemble_video"]["output_options"][ + "-tune" + ] = self.ffmpeg_assemble_video_output_options_tune_combo_box.currentText() + if self.ffmpeg_assemble_video_output_options_bitrate_line_edit.text() != "": + self.config["ffmpeg"]["assemble_video"]["output_options"][ + "-b:v" + ] = self.ffmpeg_assemble_video_output_options_bitrate_line_edit.text() else: - self.config['ffmpeg']['assemble_video']['output_options']['-b:v'] = None + self.config["ffmpeg"]["assemble_video"]["output_options"]["-b:v"] = None - if self.ffmpeg_assemble_video_output_options_ensure_divisible_check_box.isChecked(): + if ( + self.ffmpeg_assemble_video_output_options_ensure_divisible_check_box.isChecked() + ): # if video filter is enabled and is not empty and is not equal to divisible by two filter # append divisible by two filter to the end of existing filter - if ('-vf' in self.config['ffmpeg']['assemble_video']['output_options'] and - len(self.config['ffmpeg']['assemble_video']['output_options']['-vf']) > 0 and - self.config['ffmpeg']['assemble_video']['output_options']['-vf'] != 'pad=ceil(iw/2)*2:ceil(ih/2)*2'): - self.config['ffmpeg']['assemble_video']['output_options']['-vf'] += ',pad=ceil(iw/2)*2:ceil(ih/2)*2' + if ( + "-vf" in self.config["ffmpeg"]["assemble_video"]["output_options"] + and len( + self.config["ffmpeg"]["assemble_video"]["output_options"]["-vf"] + ) + > 0 + and self.config["ffmpeg"]["assemble_video"]["output_options"]["-vf"] + != "pad=ceil(iw/2)*2:ceil(ih/2)*2" + ): + self.config["ffmpeg"]["assemble_video"]["output_options"][ + "-vf" + ] += ",pad=ceil(iw/2)*2:ceil(ih/2)*2" else: - self.config['ffmpeg']['assemble_video']['output_options']['-vf'] = 'pad=ceil(iw/2)*2:ceil(ih/2)*2' + self.config["ffmpeg"]["assemble_video"]["output_options"][ + "-vf" + ] = "pad=ceil(iw/2)*2:ceil(ih/2)*2" else: - self.config['ffmpeg']['assemble_video']['output_options'].pop('-vf', None) + self.config["ffmpeg"]["assemble_video"]["output_options"].pop("-vf", None) if not self.ffmpeg_assemble_video_hardware_acceleration_check_box.isChecked(): - self.config['ffmpeg']['assemble_video'].pop('-hwaccel', None) + self.config["ffmpeg"]["assemble_video"].pop("-hwaccel", None) # migrate streams - self.config['ffmpeg']['migrate_streams']['output_options']['-map'] = [] - if self.ffmpeg_migrate_streams_output_options_mapping_video_check_box_check_box.isChecked(): - self.config['ffmpeg']['migrate_streams']['output_options']['-map'].append('0:v?') - if self.ffmpeg_migrate_streams_output_options_mapping_audio_check_box_check_box.isChecked(): - self.config['ffmpeg']['migrate_streams']['output_options']['-map'].append('1:a?') - if self.ffmpeg_migrate_streams_output_options_mapping_subtitle_check_box_check_box.isChecked(): - self.config['ffmpeg']['migrate_streams']['output_options']['-map'].append('1:s?') - if self.ffmpeg_migrate_streams_output_options_mapping_data_check_box_check_box.isChecked(): - self.config['ffmpeg']['migrate_streams']['output_options']['-map'].append('1:d?') - if 
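# Notes on the assemble_video output options handled above: "-crf" is the x264/x265
# constant rate factor (lower values mean higher quality and larger files), and
# selecting "none" for "-tune" stores None so the option serializer later drops the
# flag entirely, matching the "None means leave this option out" convention used by
# the FFmpeg wrapper. The "-vf pad=ceil(iw/2)*2:ceil(ih/2)*2" value rounds the output
# width and height up to the nearest even number, since encoders such as libx264 with
# yuv420p reject odd frame dimensions. A minimal sketch of the filter-merging rule
# applied above (hypothetical options dict):
def append_divisible_by_two_filter(output_options: dict) -> dict:
    """Append the even-dimension pad filter, preserving any existing -vf chain."""
    pad_filter = "pad=ceil(iw/2)*2:ceil(ih/2)*2"
    existing = output_options.get("-vf", "")
    if existing and existing != pad_filter:
        output_options["-vf"] = f"{existing},{pad_filter}"
    else:
        output_options["-vf"] = pad_filter
    return output_options

# append_divisible_by_two_filter({"-vf": "scale=iw*2:ih*2"})
# -> {"-vf": "scale=iw*2:ih*2,pad=ceil(iw/2)*2:ceil(ih/2)*2"}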
self.ffmpeg_migrate_streams_output_options_mapping_font_check_box_check_box.isChecked(): - self.config['ffmpeg']['migrate_streams']['output_options']['-map'].append('1:t?') + self.config["ffmpeg"]["migrate_streams"]["output_options"]["-map"] = [] + if ( + self.ffmpeg_migrate_streams_output_options_mapping_video_check_box_check_box.isChecked() + ): + self.config["ffmpeg"]["migrate_streams"]["output_options"]["-map"].append( + "0:v?" + ) + if ( + self.ffmpeg_migrate_streams_output_options_mapping_audio_check_box_check_box.isChecked() + ): + self.config["ffmpeg"]["migrate_streams"]["output_options"]["-map"].append( + "1:a?" + ) + if ( + self.ffmpeg_migrate_streams_output_options_mapping_subtitle_check_box_check_box.isChecked() + ): + self.config["ffmpeg"]["migrate_streams"]["output_options"]["-map"].append( + "1:s?" + ) + if ( + self.ffmpeg_migrate_streams_output_options_mapping_data_check_box_check_box.isChecked() + ): + self.config["ffmpeg"]["migrate_streams"]["output_options"]["-map"].append( + "1:d?" + ) + if ( + self.ffmpeg_migrate_streams_output_options_mapping_font_check_box_check_box.isChecked() + ): + self.config["ffmpeg"]["migrate_streams"]["output_options"]["-map"].append( + "1:t?" + ) # if the list is empty, delete the key # otherwise parser will run into an error (key with no value) - if len(self.config['ffmpeg']['migrate_streams']['output_options']['-map']) == 0: - self.config['ffmpeg']['migrate_streams']['output_options'].pop('-map', None) + if len(self.config["ffmpeg"]["migrate_streams"]["output_options"]["-map"]) == 0: + self.config["ffmpeg"]["migrate_streams"]["output_options"].pop("-map", None) - self.config['ffmpeg']['migrate_streams']['output_options']['-pix_fmt'] = self.ffmpeg_migrate_streams_output_options_pixel_format_line_edit.text() + self.config["ffmpeg"]["migrate_streams"]["output_options"][ + "-pix_fmt" + ] = self.ffmpeg_migrate_streams_output_options_pixel_format_line_edit.text() - fps = self.ffmpeg_migrate_streams_output_options_frame_interpolation_spin_box.value() + fps = ( + self.ffmpeg_migrate_streams_output_options_frame_interpolation_spin_box.value() + ) if fps > 0: - if ('-vf' in self.config['ffmpeg']['migrate_streams']['output_options'] and - len(self.config['ffmpeg']['migrate_streams']['output_options']['-vf']) > 0 and - 'minterpolate=' not in self.config['ffmpeg']['migrate_streams']['output_options']['-vf']): - self.config['ffmpeg']['migrate_streams']['output_options']['-vf'] += f',minterpolate=\'fps={fps}\'' + if ( + "-vf" in self.config["ffmpeg"]["migrate_streams"]["output_options"] + and len( + self.config["ffmpeg"]["migrate_streams"]["output_options"]["-vf"] + ) + > 0 + and "minterpolate=" + not in self.config["ffmpeg"]["migrate_streams"]["output_options"]["-vf"] + ): + self.config["ffmpeg"]["migrate_streams"]["output_options"][ + "-vf" + ] += f",minterpolate='fps={fps}'" else: - self.config['ffmpeg']['migrate_streams']['output_options']['-vf'] = f'minterpolate=\'fps={fps}\'' + self.config["ffmpeg"]["migrate_streams"]["output_options"][ + "-vf" + ] = f"minterpolate='fps={fps}'" else: - self.config['ffmpeg']['migrate_streams']['output_options'].pop('-vf', None) + self.config["ffmpeg"]["migrate_streams"]["output_options"].pop("-vf", None) # copy source codec - if self.ffmpeg_migrate_streams_output_options_copy_streams_check_box.isChecked(): - self.config['ffmpeg']['migrate_streams']['output_options']['-c'] = 'copy' + if ( + self.ffmpeg_migrate_streams_output_options_copy_streams_check_box.isChecked() + ): + 
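# The "-map" entries assembled above all end with "?", which makes each mapping
# optional: FFmpeg keeps going instead of failing when the original file has no
# audio, subtitle, data, or font stream to copy. "0:v?" selects the video of
# input 0 (the upscaled intermediate file) while the "1:*?" entries pull streams
# from input 1 (the original video). A positive frame-interpolation value adds
# FFmpeg's minterpolate motion-interpolation filter. A rough sketch of the
# resulting flags, with hypothetical values:
example_migrate_output_options = {
    "-map": ["0:v?", "1:a?", "1:s?"],   # upscaled video, plus audio/subtitles if present
    "-vf": "minterpolate='fps=60'",     # interpolate frames up to 60 fps
    "-pix_fmt": "yuv420p",
}
# roughly equivalent command line:
#   ffmpeg -i intermediate.mp4 -i original.mp4 \
#       -map 0:v? -map 1:a? -map 1:s? -vf "minterpolate='fps=60'" -pix_fmt yuv420p output.mp4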
self.config["ffmpeg"]["migrate_streams"]["output_options"]["-c"] = "copy" else: - self.config['ffmpeg']['migrate_streams']['output_options'].pop('-c', None) + self.config["ffmpeg"]["migrate_streams"]["output_options"].pop("-c", None) # copy known metadata - if self.ffmpeg_migrate_streams_output_options_copy_known_metadata_tags_check_box.isChecked(): - self.config['ffmpeg']['migrate_streams']['output_options']['-map_metadata'] = 0 + if ( + self.ffmpeg_migrate_streams_output_options_copy_known_metadata_tags_check_box.isChecked() + ): + self.config["ffmpeg"]["migrate_streams"]["output_options"][ + "-map_metadata" + ] = 0 else: - self.config['ffmpeg']['migrate_streams']['output_options'].pop('-map_metadata', None) + self.config["ffmpeg"]["migrate_streams"]["output_options"].pop( + "-map_metadata", None + ) # copy arbitrary metadata - if self.ffmpeg_migrate_streams_output_options_copy_arbitrary_metadata_tags_check_box.isChecked(): - self.config['ffmpeg']['migrate_streams']['output_options']['-movflags'] = 'use_metadata_tags' + if ( + self.ffmpeg_migrate_streams_output_options_copy_arbitrary_metadata_tags_check_box.isChecked() + ): + self.config["ffmpeg"]["migrate_streams"]["output_options"][ + "-movflags" + ] = "use_metadata_tags" else: - self.config['ffmpeg']['migrate_streams']['output_options'].pop('-movflags', None) + self.config["ffmpeg"]["migrate_streams"]["output_options"].pop( + "-movflags", None + ) # hardware acceleration if not self.ffmpeg_migrate_streams_hardware_acceleration_check_box.isChecked(): - self.config['ffmpeg']['migrate_streams'].pop('-hwaccel', None) + self.config["ffmpeg"]["migrate_streams"].pop("-hwaccel", None) # Gifski - self.config['gifski']['gifski_path'] = os.path.expandvars(self.gifski_path_line_edit.text()) - self.config['gifski']['quality'] = self.gifski_quality_spin_box.value() - self.config['gifski']['fast'] = self.gifski_fast_check_box.isChecked() - self.config['gifski']['once'] = self.gifski_once_check_box.isChecked() - self.config['gifski']['quiet'] = self.gifski_quiet_check_box.isChecked() + self.config["gifski"]["gifski_path"] = os.path.expandvars( + self.gifski_path_line_edit.text() + ) + self.config["gifski"]["quality"] = self.gifski_quality_spin_box.value() + self.config["gifski"]["fast"] = self.gifski_fast_check_box.isChecked() + self.config["gifski"]["once"] = self.gifski_once_check_box.isChecked() + self.config["gifski"]["quiet"] = self.gifski_quiet_check_box.isChecked() def dragEnterEvent(self, event): if event.mimeData().hasUrls(): @@ -786,7 +1362,9 @@ class Video2XMainWindow(QMainWindow): def dropEvent(self, event): input_paths = [pathlib.Path(u.toLocalFile()) for u in event.mimeData().urls()] for path in input_paths: - if (path.is_file() or path.is_dir()) and not self.input_table_path_exists(path): + if (path.is_file() or path.is_dir()) and not self.input_table_path_exists( + path + ): self.input_table_data.append(path) self.update_output_path() @@ -794,7 +1372,9 @@ class Video2XMainWindow(QMainWindow): def enable_line_edit_file_drop(self, line_edit: QLineEdit): line_edit.dragEnterEvent = self.dragEnterEvent - line_edit.dropEvent = lambda event: line_edit.setText(str(pathlib.Path(event.mimeData().urls()[0].toLocalFile()).absolute())) + line_edit.dropEvent = lambda event: line_edit.setText( + str(pathlib.Path(event.mimeData().urls()[0].toLocalFile()).absolute()) + ) def show_ffprobe_output(self, event): input_paths = [pathlib.Path(u.toLocalFile()) for u in event.mimeData().urls()] @@ -807,7 +1387,7 @@ class Video2XMainWindow(QMainWindow): 
@staticmethod def read_config(config_file: pathlib.Path) -> dict: - """ read video2x configurations from config file + """read video2x configurations from config file Arguments: config_file {pathlib.Path} -- video2x configuration file pathlib.Path @@ -816,28 +1396,45 @@ class Video2XMainWindow(QMainWindow): dict -- dictionary of video2x configuration """ - with open(config_file, 'r') as config: + with open(config_file, "r") as config: return yaml.load(config, Loader=yaml.FullLoader) def mutually_exclude_scale_ratio_resolution(self): - if self.output_width_spin_box.value() != 0 or self.output_height_spin_box.value() != 0: + if ( + self.output_width_spin_box.value() != 0 + or self.output_height_spin_box.value() != 0 + ): self.scale_ratio_double_spin_box.setDisabled(True) - elif self.output_width_spin_box.value() == 0 and self.output_height_spin_box.value() == 0: + elif ( + self.output_width_spin_box.value() == 0 + and self.output_height_spin_box.value() == 0 + ): self.scale_ratio_double_spin_box.setDisabled(False) def mutually_exclude_frame_interpolation_stream_copy(self): - if self.ffmpeg_migrate_streams_output_options_frame_interpolation_spin_box.value() > 0: - self.ffmpeg_migrate_streams_output_options_copy_streams_check_box.setChecked(False) - self.ffmpeg_migrate_streams_output_options_copy_streams_check_box.setDisabled(True) + if ( + self.ffmpeg_migrate_streams_output_options_frame_interpolation_spin_box.value() + > 0 + ): + self.ffmpeg_migrate_streams_output_options_copy_streams_check_box.setChecked( + False + ) + self.ffmpeg_migrate_streams_output_options_copy_streams_check_box.setDisabled( + True + ) else: - self.ffmpeg_migrate_streams_output_options_copy_streams_check_box.setChecked(True) - self.ffmpeg_migrate_streams_output_options_copy_streams_check_box.setDisabled(False) + self.ffmpeg_migrate_streams_output_options_copy_streams_check_box.setChecked( + True + ) + self.ffmpeg_migrate_streams_output_options_copy_streams_check_box.setDisabled( + False + ) def update_gui_for_driver(self): current_driver = AVAILABLE_DRIVERS[self.driver_combo_box.currentText()] # update preferred processes/threads count - if current_driver == 'anime4kcpp': + if current_driver == "anime4kcpp": self.processes_spin_box.setValue(16) else: self.processes_spin_box.setValue(1) @@ -870,19 +1467,19 @@ class Video2XMainWindow(QMainWindow): def select_file(self, *args, **kwargs) -> pathlib.Path: file_selected = QFileDialog.getOpenFileName(self, *args, **kwargs) - if not isinstance(file_selected, tuple) or file_selected[0] == '': + if not isinstance(file_selected, tuple) or file_selected[0] == "": return None return pathlib.Path(file_selected[0]) def select_folder(self, *args, **kwargs) -> pathlib.Path: folder_selected = QFileDialog.getExistingDirectory(self, *args, **kwargs) - if folder_selected == '': + if folder_selected == "": return None return pathlib.Path(folder_selected) def select_save_file(self, *args, **kwargs) -> pathlib.Path: save_file_selected = QFileDialog.getSaveFileName(self, *args, **kwargs) - if not isinstance(save_file_selected, tuple) or save_file_selected[0] == '': + if not isinstance(save_file_selected, tuple) or save_file_selected[0] == "": return None return pathlib.Path(save_file_selected[0]) @@ -890,51 +1487,55 @@ class Video2XMainWindow(QMainWindow): # if input list is empty # clear output path if len(self.input_table_data) == 0: - self.output_line_edit.setText('') + self.output_line_edit.setText("") # if there are multiple output files # use cwd/output directory for output elif 
len(self.input_table_data) > 1: - self.output_line_edit.setText(str((CWD / 'output').absolute())) + self.output_line_edit.setText(str((CWD / "output").absolute())) # if there's only one input file # generate output file/directory name automatically elif len(self.input_table_data) == 1: input_path = self.input_table_data[0] # give up if input path doesn't exist or isn't a file or a directory - if not input_path.exists() or not (input_path.is_file() or input_path.is_dir()): + if not input_path.exists() or not ( + input_path.is_file() or input_path.is_dir() + ): return if input_path.is_file(): # generate suffix automatically try: - input_file_mime_type = magic.from_file(str(input_path.absolute()), mime=True) - input_file_type = input_file_mime_type.split('/')[0] - input_file_subtype = input_file_mime_type.split('/')[1] + input_file_mime_type = magic.from_file( + str(input_path.absolute()), mime=True + ) + input_file_type = input_file_mime_type.split("/")[0] + input_file_subtype = input_file_mime_type.split("/")[1] except Exception: input_file_type = input_file_subtype = None # in case python-magic fails to detect file type # try guessing file mime type with mimetypes - if input_file_type not in ['image', 'video']: + if input_file_type not in ["image", "video"]: input_file_mime_type = mimetypes.guess_type(input_path.name)[0] - input_file_type = input_file_mime_type.split('/')[0] - input_file_subtype = input_file_mime_type.split('/')[1] + input_file_type = input_file_mime_type.split("/")[0] + input_file_subtype = input_file_mime_type.split("/")[1] # if input file is an image - if input_file_type == 'image': + if input_file_type == "image": # if file is a gif, use .gif - if input_file_subtype == 'gif': - suffix = '.gif' + if input_file_subtype == "gif": + suffix = ".gif" # otherwise, use .png by default for all images else: suffix = self.image_output_extension_line_edit.text() # if input is video, use .mp4 as output by default - elif input_file_type == 'video': + elif input_file_type == "video": suffix = self.video_output_extension_line_edit.text() # if failed to detect file type @@ -942,86 +1543,102 @@ class Video2XMainWindow(QMainWindow): else: suffix = input_path.suffix - output_path = input_path.parent / self.output_file_name_format_string_line_edit.text().format(original_file_name=input_path.stem, extension=suffix) + output_path = ( + input_path.parent + / self.output_file_name_format_string_line_edit.text().format( + original_file_name=input_path.stem, extension=suffix + ) + ) elif input_path.is_dir(): - output_path = input_path.parent / self.output_file_name_format_string_line_edit.text().format(original_file_name=input_path.stem, extension='') + output_path = ( + input_path.parent + / self.output_file_name_format_string_line_edit.text().format( + original_file_name=input_path.stem, extension="" + ) + ) # try a new name with a different file ID output_path_id = 0 while output_path.exists(): if input_path.is_file(): - output_path = input_path.parent / pathlib.Path(f'{input_path.stem}_output_{output_path_id}{suffix}') + output_path = input_path.parent / pathlib.Path( + f"{input_path.stem}_output_{output_path_id}{suffix}" + ) elif input_path.is_dir(): - output_path = input_path.parent / pathlib.Path(f'{input_path.stem}_output_{output_path_id}') + output_path = input_path.parent / pathlib.Path( + f"{input_path.stem}_output_{output_path_id}" + ) output_path_id += 1 if not output_path.exists(): self.output_line_edit.setText(str(output_path.absolute())) def select_input_file(self): - input_file 
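# The automatic output-name logic above decides whether the input is an image or a
# video by sniffing its content with python-magic and, when that is inconclusive,
# falling back to the extension-based mimetypes module. A condensed sketch of that
# two-step guess (assumes an existing local file for illustration):
import mimetypes
import pathlib

import magic

def guess_media_type(path: pathlib.Path) -> str:
    try:
        mime = magic.from_file(str(path.absolute()), mime=True)  # e.g. "video/x-matroska"
    except Exception:
        mime = None
    if mime is None or mime.split("/")[0] not in ("image", "video"):
        # content sniffing failed, guess from the file name extension instead
        mime = mimetypes.guess_type(path.name)[0] or "application/octet-stream"
    return mime.split("/")[0]

# guess_media_type(pathlib.Path("clip.mkv"))  -> "video"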
= self.select_file('Select Input File') - if (input_file is None or self.input_table_path_exists(input_file)): + input_file = self.select_file("Select Input File") + if input_file is None or self.input_table_path_exists(input_file): return self.input_table_data.append(input_file) self.update_output_path() self.update_input_table() def select_input_folder(self): - input_folder = self.select_folder('Select Input Folder') - if (input_folder is None or self.input_table_path_exists(input_folder)): + input_folder = self.select_folder("Select Input Folder") + if input_folder is None or self.input_table_path_exists(input_folder): return self.input_table_data.append(input_folder) self.update_output_path() self.update_input_table() def select_output_file(self): - output_file = self.select_file('Select Output File') + output_file = self.select_file("Select Output File") if output_file is None: return self.output_line_edit.setText(str(output_file.absolute())) def select_output_folder(self): - output_folder = self.select_folder('Select Output Folder') + output_folder = self.select_folder("Select Output Folder") if output_folder is None: return self.output_line_edit.setText(str(output_folder.absolute())) def select_cache_folder(self): - cache_folder = self.select_folder('Select Cache Folder') + cache_folder = self.select_folder("Select Cache Folder") if cache_folder is None: return self.cache_line_edit.setText(str(cache_folder.absolute())) def select_config_file(self): - config_file = self.select_file('Select Config File', filter='(YAML files (*.yaml))') + config_file = self.select_file( + "Select Config File", filter="(YAML files (*.yaml))" + ) if config_file is None: return self.config_line_edit.setText(str(config_file.absolute())) self.load_configurations() def select_driver_binary_path(self, driver_line_edit: QLineEdit): - driver_binary_path = self.select_file('Select Driver Binary File') + driver_binary_path = self.select_file("Select Driver Binary File") if driver_binary_path is None: return driver_line_edit.setText(str(driver_binary_path.absolute())) def show_shortcuts(self): message_box = QMessageBox(self) - message_box.setWindowTitle('Video2X Shortcuts') + message_box.setWindowTitle("Video2X Shortcuts") message_box.setTextFormat(Qt.MarkdownText) - shortcut_information = '''**Ctrl+W**:\tExit application\\ + shortcut_information = """**Ctrl+W**:\tExit application\\ **Ctrl+Q**:\tExit application\\ **Ctrl+I**:\tOpen select input file dialog\\ **Ctrl+O**:\tOpen select output file dialog\\ **Ctrl+Shift+I**:\tOpen select input folder dialog\\ -**Ctrl+Shift+O**:\tOpen select output folder dialog''' +**Ctrl+Shift+O**:\tOpen select output folder dialog""" message_box.setText(shortcut_information) message_box.exec_() def show_about(self): message_box = QMessageBox(self) - message_box.setWindowTitle('About Video2X') + message_box.setWindowTitle("About Video2X") message_box.setIconPixmap(QPixmap(self.video2x_icon_path).scaled(64, 64)) message_box.setTextFormat(Qt.MarkdownText) message_box.setText(LEGAL_INFO) @@ -1029,42 +1646,45 @@ class Video2XMainWindow(QMainWindow): def show_information(self, message: str): message_box = QMessageBox(self) - message_box.setWindowTitle('Information') + message_box.setWindowTitle("Information") message_box.setIcon(QMessageBox.Information) message_box.setText(message) message_box.exec_() def show_warning(self, message: str): message_box = QMessageBox(self) - message_box.setWindowTitle('Warning') + message_box.setWindowTitle("Warning") message_box.setIcon(QMessageBox.Warning) 
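# The select_file()/select_folder()/select_save_file() helpers above normalize
# PyQt5's static QFileDialog getters: getOpenFileName/getSaveFileName return a
# (path, selected_filter) tuple with an empty path string when the dialog is
# cancelled, so the helpers translate that into either a pathlib.Path or None.
# Hypothetical call-site sketch:
#
#   input_file = self.select_file("Select Input File", filter="Videos (*.mp4 *.mkv)")
#   if input_file is None:        # dialog was cancelled
#       return
#   self.input_table_data.append(input_file)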
message_box.setText(message) message_box.exec_() def show_error(self, exception: Exception): - def _process_button_press(button_pressed): # if the user pressed the save button, save log file to destination - if button_pressed.text() == 'Save': - log_file_saving_path = self.select_save_file('Select Log File Saving Destination', 'video2x_error.log') + if button_pressed.text() == "Save": + log_file_saving_path = self.select_save_file( + "Select Log File Saving Destination", "video2x_error.log" + ) if log_file_saving_path is not None: - with open(log_file_saving_path, 'w', encoding='utf-8') as log_file: + with open(log_file_saving_path, "w", encoding="utf-8") as log_file: self.log_file.seek(0) log_file.write(self.log_file.read()) # QErrorMessage(self).showMessage(message.replace('\n', '
')) message_box = QMessageBox(self) - message_box.setWindowTitle('Error') + message_box.setWindowTitle("Error") message_box.setIcon(QMessageBox.Critical) message_box.setTextFormat(Qt.MarkdownText) - error_message = '''Upscaler ran into an error:\\ + error_message = """Upscaler ran into an error:\\ {}\\ Check the console output or the log file for details.\\ You can [submit an issue on GitHub](https://github.com/k4yt3x/video2x/issues/new?assignees=K4YT3X&labels=bug&template=bug-report.md&title={}) to report this error.\\ It\'s highly recommended to attach the log file.\\ -You can click \"Save\" to save the log file.''' - message_box.setText(error_message.format(exception, urllib.parse.quote(str(exception)))) +You can click \"Save\" to save the log file.""" + message_box.setText( + error_message.format(exception, urllib.parse.quote(str(exception))) + ) message_box.setStandardButtons(QMessageBox.Save | QMessageBox.Close) message_box.setDefaultButton(QMessageBox.Save) @@ -1074,33 +1694,43 @@ You can click \"Save\" to save the log file.''' def progress_monitor(self, progress_callback: pyqtSignal): # initialize progress bar values - progress_callback.emit((time.time(), 0, 0, 0, 0, 0, [], pathlib.Path(), pathlib.Path())) + progress_callback.emit( + (time.time(), 0, 0, 0, 0, 0, [], pathlib.Path(), pathlib.Path()) + ) # keep querying upscaling process and feed information to callback signal while self.upscaler.running: - progress_callback.emit((self.upscaler.current_processing_starting_time, - self.upscaler.total_frames_upscaled, - self.upscaler.total_frames, - self.upscaler.total_processed, - self.upscaler.total_files, - self.upscaler.current_pass, - self.upscaler.scaling_jobs, - self.upscaler.current_input_file, - self.upscaler.last_frame_upscaled)) + progress_callback.emit( + ( + self.upscaler.current_processing_starting_time, + self.upscaler.total_frames_upscaled, + self.upscaler.total_frames, + self.upscaler.total_processed, + self.upscaler.total_files, + self.upscaler.current_pass, + self.upscaler.scaling_jobs, + self.upscaler.current_input_file, + self.upscaler.last_frame_upscaled, + ) + ) time.sleep(1) # upscale process will stop at 99% # so it's set to 100 manually when all is done - progress_callback.emit((time.time(), - self.upscaler.total_frames, - self.upscaler.total_frames, - self.upscaler.total_files, - self.upscaler.total_files, - len(self.upscaler.scaling_jobs), - self.upscaler.scaling_jobs, - pathlib.Path(), - pathlib.Path())) + progress_callback.emit( + ( + time.time(), + self.upscaler.total_frames, + self.upscaler.total_frames, + self.upscaler.total_files, + self.upscaler.total_files, + len(self.upscaler.scaling_jobs), + self.upscaler.scaling_jobs, + pathlib.Path(), + pathlib.Path(), + ) + ) def set_progress(self, progress_information: tuple): current_processing_starting_time = progress_information[0] @@ -1125,22 +1755,45 @@ You can click \"Save\" to save the log file.''' # set calculated values in GUI self.current_progress_bar.setMaximum(total_frames) self.current_progress_bar.setValue(total_frames_upscaled) - self.frames_label.setText('Frames: {}/{}'.format(total_frames_upscaled, total_frames)) - self.time_elapsed_label.setText('Time Elapsed: {}'.format(time.strftime("%H:%M:%S", time.gmtime(time_elapsed)))) - self.time_remaining_label.setText('Time Remaining: {}'.format(time.strftime("%H:%M:%S", time.gmtime(time_remaining)))) - self.rate_label.setText('Rate (FPS): {}'.format(round(rate, 2))) - self.overall_progress_label.setText('Overall Progress: 
{}/{}'.format(total_processed, total_files)) + self.frames_label.setText( + "Frames: {}/{}".format(total_frames_upscaled, total_frames) + ) + self.time_elapsed_label.setText( + "Time Elapsed: {}".format( + time.strftime("%H:%M:%S", time.gmtime(time_elapsed)) + ) + ) + self.time_remaining_label.setText( + "Time Remaining: {}".format( + time.strftime("%H:%M:%S", time.gmtime(time_remaining)) + ) + ) + self.rate_label.setText("Rate (FPS): {}".format(round(rate, 2))) + self.overall_progress_label.setText( + "Overall Progress: {}/{}".format(total_processed, total_files) + ) self.overall_progress_bar.setMaximum(total_files) self.overall_progress_bar.setValue(total_processed) - self.currently_processing_label.setText('Currently Processing: {} (pass {}/{})'.format(str(current_input_file.name), current_pass, len(scaling_jobs))) + self.currently_processing_label.setText( + "Currently Processing: {} (pass {}/{})".format( + str(current_input_file.name), current_pass, len(scaling_jobs) + ) + ) # if show frame is checked, show preview image - if self.frame_preview_show_preview_check_box.isChecked() and last_frame_upscaled.is_file(): + if ( + self.frame_preview_show_preview_check_box.isChecked() + and last_frame_upscaled.is_file() + ): last_frame_pixmap = QPixmap(str(last_frame_upscaled.absolute())) # the -2 here behind geometry subtracts frame size from width and height - self.frame_preview_label.setPixmap(last_frame_pixmap.scaled(self.frame_preview_label.width() - 2, - self.frame_preview_label.height() - 2, - Qt.KeepAspectRatio)) + self.frame_preview_label.setPixmap( + last_frame_pixmap.scaled( + self.frame_preview_label.width() - 2, + self.frame_preview_label.height() - 2, + Qt.KeepAspectRatio, + ) + ) # if keep aspect ratio is checked, don't stretch image if self.frame_preview_keep_aspect_ratio_check_box.isChecked(): @@ -1159,14 +1812,18 @@ You can click \"Save\" to save the log file.''' # reset progress display UI elements self.current_progress_bar.setMaximum(100) self.current_progress_bar.setValue(0) - self.frames_label.setText('Frames: {}/{}'.format(0, 0)) - self.time_elapsed_label.setText('Time Elapsed: {}'.format(time.strftime("%H:%M:%S", time.gmtime(0)))) - self.time_remaining_label.setText('Time Remaining: {}'.format(time.strftime("%H:%M:%S", time.gmtime(0)))) - self.rate_label.setText('Rate (FPS): {}'.format(0.0)) - self.overall_progress_label.setText('Overall Progress: {}/{}'.format(0, 0)) + self.frames_label.setText("Frames: {}/{}".format(0, 0)) + self.time_elapsed_label.setText( + "Time Elapsed: {}".format(time.strftime("%H:%M:%S", time.gmtime(0))) + ) + self.time_remaining_label.setText( + "Time Remaining: {}".format(time.strftime("%H:%M:%S", time.gmtime(0))) + ) + self.rate_label.setText("Rate (FPS): {}".format(0.0)) + self.overall_progress_label.setText("Overall Progress: {}/{}".format(0, 0)) self.overall_progress_bar.setMaximum(100) self.overall_progress_bar.setValue(0) - self.currently_processing_label.setText('Currently Processing:') + self.currently_processing_label.setText("Currently Processing:") def start(self): @@ -1177,10 +1834,10 @@ You can click \"Save\" to save the log file.''' # resolve input and output directories from GUI if len(self.input_table_data) == 0: - self.show_warning('Input path unspecified') + self.show_warning("Input path unspecified") return - if self.output_line_edit.text().strip() == '': - self.show_warning('Output path unspecified') + if self.output_line_edit.text().strip() == "": + self.show_warning("Output path unspecified") return if 
len(self.input_table_data) == 1: @@ -1189,13 +1846,17 @@ You can click \"Save\" to save the log file.''' input_directory = self.input_table_data # resolve output directory - output_directory = pathlib.Path(os.path.expandvars(self.output_line_edit.text())) + output_directory = pathlib.Path( + os.path.expandvars(self.output_line_edit.text()) + ) # load driver settings from GUI self.resolve_driver_settings() # load driver settings for the current driver - self.driver_settings = self.config[AVAILABLE_DRIVERS[self.driver_combo_box.currentText()]] + self.driver_settings = self.config[ + AVAILABLE_DRIVERS[self.driver_combo_box.currentText()] + ] # get scale ratio or resolution if self.scale_ratio_double_spin_box.isEnabled(): @@ -1214,18 +1875,21 @@ You can click \"Save\" to save the log file.''' driver_settings=self.driver_settings, ffmpeg_settings=self.ffmpeg_settings, gifski_settings=self.gifski_settings, - # optional parameters driver=AVAILABLE_DRIVERS[self.driver_combo_box.currentText()], scale_ratio=scale_ratio, scale_width=scale_width, scale_height=scale_height, processes=self.processes_spin_box.value(), - video2x_cache_directory=pathlib.Path(os.path.expandvars(self.cache_line_edit.text())), - extracted_frame_format=self.config['video2x']['extracted_frame_format'].lower(), + video2x_cache_directory=pathlib.Path( + os.path.expandvars(self.cache_line_edit.text()) + ), + extracted_frame_format=self.config["video2x"][ + "extracted_frame_format" + ].lower(), image_output_extension=self.image_output_extension_line_edit.text(), video_output_extension=self.video_output_extension_line_edit.text(), - preserve_frames=bool(self.preserve_frames_check_box.isChecked()) + preserve_frames=bool(self.preserve_frames_check_box.isChecked()), ) # run upscaler @@ -1259,7 +1923,7 @@ You can click \"Save\" to save the log file.''' self.reset_progress_display() def upscale_interrupted(self): - self.show_information('Upscale has been interrupted') + self.show_information("Upscale has been interrupted") self.threadpool.waitForDone(5) self.start_button.setEnabled(True) self.stop_button.setEnabled(False) @@ -1268,7 +1932,11 @@ You can click \"Save\" to save the log file.''' def upscale_successful(self): # if all threads have finished self.threadpool.waitForDone(5) - self.show_information('Upscale finished successfully, taking {} seconds'.format(round((time.time() - self.begin_time), 5))) + self.show_information( + "Upscale finished successfully, taking {} seconds".format( + round((time.time() - self.begin_time), 5) + ) + ) self.start_button.setEnabled(True) self.stop_button.setEnabled(False) self.reset_progress_display() @@ -1278,11 +1946,13 @@ You can click \"Save\" to save the log file.''' try: # if upscaler is running, ask the user for confirmation if self.upscaler.running is True: - confirmation = QMessageBox.question(self, - 'Stopping Confirmation', - 'Are you sure you want to want to stop the upscaling process?', - QMessageBox.Yes, - QMessageBox.No) + confirmation = QMessageBox.question( + self, + "Stopping Confirmation", + "Are you sure you want to want to stop the upscaling process?", + QMessageBox.Yes, + QMessageBox.No, + ) # if the user indeed wants to stop processing if confirmation == QMessageBox.Yes: with contextlib.suppress(AttributeError): @@ -1310,7 +1980,7 @@ You can click \"Save\" to save the log file.''' # this file shouldn't be imported -if __name__ == '__main__': +if __name__ == "__main__": try: app = QApplication(sys.argv) window = Video2XMainWindow() @@ -1321,4 +1991,4 @@ if __name__ == 
'__main__': # and hold window open using input() except Exception: traceback.print_exc() - input('Press enter to close') + input("Press enter to close") diff --git a/src/wrappers/anime4kcpp.py b/src/wrappers/anime4kcpp.py index 52efd43..1e5d9e4 100755 --- a/src/wrappers/anime4kcpp.py +++ b/src/wrappers/anime4kcpp.py @@ -27,8 +27,7 @@ from avalon_framework import Avalon class WrapperMain: - """ Anime4K CPP wrapper - """ + """Anime4K CPP wrapper""" def __init__(self, driver_settings): self.driver_settings = driver_settings @@ -38,53 +37,55 @@ class WrapperMain: def zero_to_one_float(value): value = float(value) if value < 0.0 or value > 1.0: - raise argparse.ArgumentTypeError(f'{value} is not between 0.0 and 1.0') + raise argparse.ArgumentTypeError(f"{value} is not between 0.0 and 1.0") return value @staticmethod def parse_arguments(arguments): + # fmt: off parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False) parser.error = lambda message: (_ for _ in ()).throw(AttributeError(message)) - parser.add_argument('--help', action='help', help='show this help message and exit') - parser.add_argument('-i', '--input', type=str, help=argparse.SUPPRESS) # help='File for loading') - parser.add_argument('-o', '--output', type=str, help=argparse.SUPPRESS) # help='File for outputting') - parser.add_argument('-p', '--passes', type=int, help='Passes for processing') - parser.add_argument('-n', '--pushColorCount', type=int, help='Limit the number of color pushes') - parser.add_argument('-c', '--strengthColor', type=WrapperMain.zero_to_one_float, help='Strength for pushing color,range 0 to 1,higher for thinner') - parser.add_argument('-g', '--strengthGradient', type=WrapperMain.zero_to_one_float, help='Strength for pushing gradient,range 0 to 1,higher for sharper') - parser.add_argument('-z', '--zoomFactor', type=float, help='zoom factor for resizing') - parser.add_argument('-t', '--threads', type=int, help='Threads count for video processing') - parser.add_argument('-f', '--fastMode', action='store_true', help='Faster but maybe low quality') - parser.add_argument('-v', '--videoMode', action='store_true', help='Video process') - parser.add_argument('-s', '--preview', action='store_true', help='Preview image') - parser.add_argument('-b', '--preprocessing', action='store_true', help='Enable pre processing') - parser.add_argument('-a', '--postprocessing', action='store_true', help='Enable post processing') - parser.add_argument('-r', '--preFilters', type=int, help='Enhancement filter, only working when preProcessing is true,there are 5 options by binary:Median blur=0000001, Mean blur=0000010, CAS Sharpening=0000100, Gaussian blur weak=0001000, Gaussian blur=0010000, Bilateral filter=0100000, Bilateral filter faster=1000000, you can freely combine them, eg: Gaussian blur weak + Bilateral filter = 0001000 | 0100000 = 0101000 = 40(D)') - parser.add_argument('-e', '--postFilters', type=int, help='Enhancement filter, only working when postProcessing is true,there are 5 options by binary:Median blur=0000001, Mean blur=0000010, CAS Sharpening=0000100, Gaussian blur weak=0001000, Gaussian blur=0010000, Bilateral filter=0100000, Bilateral filter faster=1000000, you can freely combine them, eg: Gaussian blur weak + Bilateral filter = 0001000 | 0100000 = 0101000 = 40(D), so you can put 40 to enable Gaussian blur weak and Bilateral filter, which also is what I recommend for image that < 1080P, 48 for image that >= 1080P, and for performance I recommend to use 72 for video 
that < 1080P, 80 for video that >=1080P') - parser.add_argument('-q', '--GPUMode', action='store_true', help='Enable GPU acceleration') - parser.add_argument('-w', '--CNNMode', action='store_true', help='Enable ACNet') - parser.add_argument('-H', '--HDN', action='store_true', help='Enable HDN mode for ACNet') - parser.add_argument('-L', '--HDNLevel', type=int, help='Set HDN level') - parser.add_argument('-l', '--listGPUs', action='store_true', help='list GPUs') - parser.add_argument('-h', '--platformID', type=int, help='Specify the platform ID') - parser.add_argument('-d', '--deviceID', type=int, help='Specify the device ID') - parser.add_argument('-C', '--codec', type=str, help='Specify the codec for encoding from mp4v(recommended in Windows), dxva(for Windows), avc1(H264, recommended in Linux), vp09(very slow), hevc(not support in Windowds), av01(not support in Windowds) (string [=mp4v])') - parser.add_argument('-F', '--forceFps', type=float, help='Set output video fps to the specifying number, 0 to disable') - parser.add_argument('-D', '--disableProgress', action='store_true', help='disable progress display') - parser.add_argument('-W', '--webVideo', type=str, help='process the video from URL') - parser.add_argument('-A', '--alpha', action='store_true', help='preserve the Alpha channel for transparent image') + parser.add_argument("--help", action="help", help="show this help message and exit") + parser.add_argument("-i", "--input", type=str, help=argparse.SUPPRESS) # help="File for loading") + parser.add_argument("-o", "--output", type=str, help=argparse.SUPPRESS) # help="File for outputting") + parser.add_argument("-p", "--passes", type=int, help="Passes for processing") + parser.add_argument("-n", "--pushColorCount", type=int, help="Limit the number of color pushes") + parser.add_argument("-c", "--strengthColor", type=WrapperMain.zero_to_one_float, help="Strength for pushing color,range 0 to 1,higher for thinner") + parser.add_argument("-g", "--strengthGradient", type=WrapperMain.zero_to_one_float, help="Strength for pushing gradient,range 0 to 1,higher for sharper") + parser.add_argument("-z", "--zoomFactor", type=float, help="zoom factor for resizing") + parser.add_argument("-t", "--threads", type=int, help="Threads count for video processing") + parser.add_argument("-f", "--fastMode", action="store_true", help="Faster but maybe low quality") + parser.add_argument("-v", "--videoMode", action="store_true", help="Video process") + parser.add_argument("-s", "--preview", action="store_true", help="Preview image") + parser.add_argument("-b", "--preprocessing", action="store_true", help="Enable pre processing") + parser.add_argument("-a", "--postprocessing", action="store_true", help="Enable post processing") + parser.add_argument("-r", "--preFilters", type=int, help="Enhancement filter, only working when preProcessing is true,there are 5 options by binary:Median blur=0000001, Mean blur=0000010, CAS Sharpening=0000100, Gaussian blur weak=0001000, Gaussian blur=0010000, Bilateral filter=0100000, Bilateral filter faster=1000000, you can freely combine them, eg: Gaussian blur weak + Bilateral filter = 0001000 | 0100000 = 0101000 = 40(D)") + parser.add_argument("-e", "--postFilters", type=int, help="Enhancement filter, only working when postProcessing is true,there are 5 options by binary:Median blur=0000001, Mean blur=0000010, CAS Sharpening=0000100, Gaussian blur weak=0001000, Gaussian blur=0010000, Bilateral filter=0100000, Bilateral filter faster=1000000, you can freely combine them, eg: 
Gaussian blur weak + Bilateral filter = 0001000 | 0100000 = 0101000 = 40(D), so you can put 40 to enable Gaussian blur weak and Bilateral filter, which also is what I recommend for image that < 1080P, 48 for image that >= 1080P, and for performance I recommend to use 72 for video that < 1080P, 80 for video that >=1080P") + parser.add_argument("-q", "--GPUMode", action="store_true", help="Enable GPU acceleration") + parser.add_argument("-w", "--CNNMode", action="store_true", help="Enable ACNet") + parser.add_argument("-H", "--HDN", action="store_true", help="Enable HDN mode for ACNet") + parser.add_argument("-L", "--HDNLevel", type=int, help="Set HDN level") + parser.add_argument("-l", "--listGPUs", action="store_true", help="list GPUs") + parser.add_argument("-h", "--platformID", type=int, help="Specify the platform ID") + parser.add_argument("-d", "--deviceID", type=int, help="Specify the device ID") + parser.add_argument("-C", "--codec", type=str, help="Specify the codec for encoding from mp4v(recommended in Windows), dxva(for Windows), avc1(H264, recommended in Linux), vp09(very slow), hevc(not support in Windowds), av01(not support in Windowds) (string [=mp4v])") + parser.add_argument("-F", "--forceFps", type=float, help="Set output video fps to the specifying number, 0 to disable") + parser.add_argument("-D", "--disableProgress", action="store_true", help="disable progress display") + parser.add_argument("-W", "--webVideo", type=str, help="process the video from URL") + parser.add_argument("-A", "--alpha", action="store_true", help="preserve the Alpha channel for transparent image") return parser.parse_args(arguments) + # fmt: on def load_configurations(self, upscaler): # self.driver_settings['zoomFactor'] = upscaler.scale_ratio - self.driver_settings['threads'] = upscaler.processes + self.driver_settings["threads"] = upscaler.processes # append FFmpeg path to the end of PATH # Anime4KCPP will then use FFmpeg to migrate audio tracks - os.environ['PATH'] += f';{upscaler.ffmpeg_settings["ffmpeg_path"]}' + os.environ["PATH"] += f';{upscaler.ffmpeg_settings["ffmpeg_path"]}' def set_scale_ratio(self, scale_ratio: float): - self.driver_settings['zoomFactor'] = scale_ratio + self.driver_settings["zoomFactor"] = scale_ratio def upscale(self, input_file, output_file): """This is the core function for WAIFU2X class @@ -98,33 +99,33 @@ class WrapperMain: # change the working directory to the binary's parent directory # so the binary can find shared object files and other files - os.chdir(pathlib.Path(self.driver_settings['path']).parent) + os.chdir(pathlib.Path(self.driver_settings["path"]).parent) # overwrite config file settings - self.driver_settings['input'] = input_file - self.driver_settings['output'] = output_file + self.driver_settings["input"] = input_file + self.driver_settings["output"] = output_file # Anime4KCPP will look for Anime4KCPPKernel.cl under the current working directory # change the CWD to its containing directory so it will find it - if platform.system() == 'Windows': - os.chdir(pathlib.Path(self.driver_settings['path']).parent) + if platform.system() == "Windows": + os.chdir(pathlib.Path(self.driver_settings["path"]).parent) # list to be executed # initialize the list with waifu2x binary path as the first element - execute = [self.driver_settings['path']] + execute = [self.driver_settings["path"]] for key in self.driver_settings.keys(): value = self.driver_settings[key] # null or None means that leave this option out (keep default) - if key == 'path' or value is None or 
value is False: + if key == "path" or value is None or value is False: continue else: if len(key) == 1: - execute.append(f'-{key}') + execute.append(f"-{key}") else: - execute.append(f'--{key}') + execute.append(f"--{key}") # true means key is an option if value is not True: @@ -132,6 +133,8 @@ class WrapperMain: # return the Popen object of the new process created self.print_lock.acquire() - Avalon.debug_info(f'[upscaler] Subprocess {os.getpid()} executing: {" ".join(execute)}') + Avalon.debug_info( + f'[upscaler] Subprocess {os.getpid()} executing: {" ".join(execute)}' + ) self.print_lock.release() return subprocess.Popen(execute, stdout=sys.stdout, stderr=sys.stderr) diff --git a/src/wrappers/ffmpeg.py b/src/wrappers/ffmpeg.py index e3cefea..58b04fc 100755 --- a/src/wrappers/ffmpeg.py +++ b/src/wrappers/ffmpeg.py @@ -27,20 +27,24 @@ class Ffmpeg: and inserting audio tracks to videos. """ - def __init__(self, ffmpeg_settings, extracted_frame_format='png'): + def __init__(self, ffmpeg_settings, extracted_frame_format="png"): self.ffmpeg_settings = ffmpeg_settings - self.ffmpeg_path = pathlib.Path(self.ffmpeg_settings['ffmpeg_path']) - self.ffmpeg_binary = self.ffmpeg_path / 'ffmpeg' - self.ffmpeg_probe_binary = self.ffmpeg_path / 'ffprobe' + self.ffmpeg_path = pathlib.Path(self.ffmpeg_settings["ffmpeg_path"]) + self.ffmpeg_binary = self.ffmpeg_path / "ffmpeg" + self.ffmpeg_probe_binary = self.ffmpeg_path / "ffprobe" # video metadata self.extracted_frame_format = extracted_frame_format - self.intermediate_file_name = pathlib.Path(self.ffmpeg_settings['intermediate_file_name']) - self.pixel_format = self.ffmpeg_settings['extract_frames']['output_options']['-pix_fmt'] + self.intermediate_file_name = pathlib.Path( + self.ffmpeg_settings["intermediate_file_name"] + ) + self.pixel_format = self.ffmpeg_settings["extract_frames"]["output_options"][ + "-pix_fmt" + ] def get_pixel_formats(self): - """ Get a dictionary of supported pixel formats + """Get a dictionary of supported pixel formats List all supported pixel formats and their corresponding bit depth. 
@@ -48,12 +52,7 @@ class Ffmpeg: Returns: dictionary -- JSON dict of all pixel formats to bit depth """ - execute = [ - self.ffmpeg_probe_binary, - '-v', - 'quiet', - '-pix_fmts' - ] + execute = [self.ffmpeg_probe_binary, "-v", "quiet", "-pix_fmts"] # turn elements into str execute = [str(e) for e in execute] @@ -64,9 +63,15 @@ class Ffmpeg: pixel_formats = {} # record all pixel formats into dictionary - for line in subprocess.run(execute, check=True, stdout=subprocess.PIPE).stdout.decode().split('\n'): + for line in ( + subprocess.run(execute, check=True, stdout=subprocess.PIPE) + .stdout.decode() + .split("\n") + ): try: - pixel_formats[" ".join(line.split()).split()[1]] = int(" ".join(line.split()).split()[3]) + pixel_formats[" ".join(line.split()).split()[1]] = int( + " ".join(line.split()).split()[3] + ) except (IndexError, ValueError): pass @@ -76,7 +81,7 @@ class Ffmpeg: return pixel_formats def get_number_of_frames(self, input_file: str, video_stream_index: int) -> int: - """ Count the number of frames in a video + """Count the number of frames in a video Args: input_file (str): input file path @@ -88,26 +93,30 @@ class Ffmpeg: execute = [ self.ffmpeg_probe_binary, - '-v', - 'quiet', - '-count_frames', - '-select_streams', - f'v:{video_stream_index}', - '-show_entries', - 'stream=nb_read_frames', - '-of', - 'default=nokey=1:noprint_wrappers=1', - input_file + "-v", + "quiet", + "-count_frames", + "-select_streams", + f"v:{video_stream_index}", + "-show_entries", + "stream=nb_read_frames", + "-of", + "default=nokey=1:noprint_wrappers=1", + input_file, ] # turn elements into str execute = [str(e) for e in execute] Avalon.debug_info(f'Executing: {" ".join(execute)}') - return int(subprocess.run(execute, check=True, stdout=subprocess.PIPE).stdout.decode().strip()) + return int( + subprocess.run(execute, check=True, stdout=subprocess.PIPE) + .stdout.decode() + .strip() + ) def probe_file_info(self, input_video): - """ Gets input video information + """Gets input video information This method reads input video information using ffprobe in dictionary @@ -123,14 +132,14 @@ class Ffmpeg: # since video2x only strictly recignizes this one format execute = [ self.ffmpeg_probe_binary, - '-v', - 'quiet', - '-print_format', - 'json', - '-show_format', - '-show_streams', - '-i', - input_video + "-v", + "quiet", + "-print_format", + "json", + "-show_format", + "-show_streams", + "-i", + input_video, ] # turn elements into str @@ -138,37 +147,38 @@ class Ffmpeg: Avalon.debug_info(f'Executing: {" ".join(execute)}') json_str = subprocess.run(execute, check=True, stdout=subprocess.PIPE).stdout - return json.loads(json_str.decode('utf-8')) + return json.loads(json_str.decode("utf-8")) def extract_frames(self, input_file, extracted_frames): - """ extract frames from video or GIF file - """ - execute = [ - self.ffmpeg_binary - ] + """extract frames from video or GIF file""" + execute = [self.ffmpeg_binary] # load general options - execute.extend(self._read_configuration(phase='extract_frames')) + execute.extend(self._read_configuration(phase="extract_frames")) # load input_options - execute.extend(self._read_configuration(phase='extract_frames', section='input_options')) + execute.extend( + self._read_configuration(phase="extract_frames", section="input_options") + ) # specify input file - execute.extend([ - '-i', - input_file - ]) + execute.extend(["-i", input_file]) # load output options - execute.extend(self._read_configuration(phase='extract_frames', section='output_options')) + execute.extend( + 
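# The get_number_of_frames() command assembled above relies on ffprobe's
# "-count_frames": the selected video stream is fully decoded and only
# stream=nb_read_frames is printed, so the count is exact but can take a while
# on long inputs. Shell equivalent (hypothetical input path), handy for
# verifying the number by hand:
#
#   ffprobe -v quiet -count_frames -select_streams v:0 \
#       -show_entries stream=nb_read_frames \
#       -of default=nokey=1:noprint_wrappers=1 input.mp4
#
# which prints a single integer, e.g. "14315". Similarly, get_pixel_formats()
# above parses "ffprobe -pix_fmts" output, taking the format-name column and the
# bits-per-pixel column to build its name-to-bit-depth dictionary.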
self._read_configuration(phase="extract_frames", section="output_options") + ) # specify output file - execute.extend([ - extracted_frames / f'extracted_%0d.{self.extracted_frame_format}' - # extracted_frames / f'frame_%06d.{self.extracted_frame_format}' - ]) + execute.extend( + [ + extracted_frames + / f"extracted_%0d.{self.extracted_frame_format}" + # extracted_frames / f'frame_%06d.{self.extracted_frame_format}' + ] + ) - return(self._execute(execute)) + return self._execute(execute) def assemble_video(self, framerate, upscaled_frames): """Converts images into videos @@ -182,86 +192,93 @@ class Ffmpeg: """ execute = [ self.ffmpeg_binary, - '-r', + "-r", str(framerate) # '-s', # resolution ] # read other options - execute.extend(self._read_configuration(phase='assemble_video')) + execute.extend(self._read_configuration(phase="assemble_video")) # read input options - execute.extend(self._read_configuration(phase='assemble_video', section='input_options')) + execute.extend( + self._read_configuration(phase="assemble_video", section="input_options") + ) # WORKAROUND FOR WAIFU2X-NCNN-VULKAN # Dev: SAT3LL # rename all .png.png suffixes to .png import re - regex = re.compile(r'\.png\.png$', re.IGNORECASE) + + regex = re.compile(r"\.png\.png$", re.IGNORECASE) for frame_name in upscaled_frames.iterdir(): - (upscaled_frames / frame_name).rename(upscaled_frames / regex.sub('.png', str(frame_name))) + (upscaled_frames / frame_name).rename( + upscaled_frames / regex.sub(".png", str(frame_name)) + ) # END WORKAROUND # append input frames path into command - execute.extend([ - '-i', - upscaled_frames / f'extracted_%d.{self.extracted_frame_format}' - # upscaled_frames / f'%06d.{self.extracted_frame_format}' - ]) + execute.extend( + [ + "-i", + upscaled_frames / f"extracted_%d.{self.extracted_frame_format}" + # upscaled_frames / f'%06d.{self.extracted_frame_format}' + ] + ) # read FFmpeg output options - execute.extend(self._read_configuration(phase='assemble_video', section='output_options')) + execute.extend( + self._read_configuration(phase="assemble_video", section="output_options") + ) # specify output file location - execute.extend([ - upscaled_frames / self.intermediate_file_name - ]) + execute.extend([upscaled_frames / self.intermediate_file_name]) - return(self._execute(execute)) + return self._execute(execute) def migrate_streams(self, input_video, output_video, upscaled_frames): - """ Migrates audio tracks and subtitles from input video to output video + """Migrates audio tracks and subtitles from input video to output video Arguments: input_video {string} -- input video file path output_video {string} -- output video file path upscaled_frames {string} -- directory containing upscaled frames """ - execute = [ - self.ffmpeg_binary - ] + execute = [self.ffmpeg_binary] # load general options - execute.extend(self._read_configuration(phase='migrate_streams')) + execute.extend(self._read_configuration(phase="migrate_streams")) # load input options - execute.extend(self._read_configuration(phase='migrate_streams', section='input_options')) + execute.extend( + self._read_configuration(phase="migrate_streams", section="input_options") + ) # load input file names - execute.extend([ - - # input 1: upscaled intermediate file without sound - '-i', - upscaled_frames / self.intermediate_file_name, - - # input 2: original video with streams to copy over - '-i', - input_video - ]) + execute.extend( + [ + # input 1: upscaled intermediate file without sound + "-i", + upscaled_frames / 
self.intermediate_file_name, + # input 2: original video with streams to copy over + "-i", + input_video, + ] + ) # load output options - execute.extend(self._read_configuration(phase='migrate_streams', section='output_options')) + execute.extend( + self._read_configuration(phase="migrate_streams", section="output_options") + ) # load output video path - execute.extend([ - output_video - ]) + execute.extend([output_video]) - return(self._execute(execute)) + return self._execute(execute) def _read_configuration(self, phase, section=None): - """ read configuration from JSON + """read configuration from JSON Read the configurations (arguments) from the JSON configuration file and append them to the end of the @@ -290,7 +307,12 @@ class Ffmpeg: value = self.ffmpeg_settings[phase][key] # null or None means that leave this option out (keep default) - if value is None or value is False or isinstance(value, dict) or value == '': + if ( + value is None + or value is False + or isinstance(value, dict) + or value == "" + ): continue # if the value is a list, append the same argument and all values diff --git a/src/wrappers/gifski.py b/src/wrappers/gifski.py index e1c9d1f..4bc2621 100755 --- a/src/wrappers/gifski.py +++ b/src/wrappers/gifski.py @@ -19,30 +19,37 @@ from avalon_framework import Avalon class Gifski: - def __init__(self, gifski_settings): self.gifski_settings = gifski_settings - def make_gif(self, upscaled_frames: pathlib.Path, output_path: pathlib.Path, framerate: float, extracted_frame_format: str, output_width: int, output_height: int) -> subprocess.Popen: + def make_gif( + self, + upscaled_frames: pathlib.Path, + output_path: pathlib.Path, + framerate: float, + extracted_frame_format: str, + output_width: int, + output_height: int, + ) -> subprocess.Popen: execute = [ - self.gifski_settings['gifski_path'], - '-o', + self.gifski_settings["gifski_path"], + "-o", output_path, - '--fps', + "--fps", int(round(framerate, 0)), - '--width', + "--width", output_width, - '--height', - output_height + "--height", + output_height, ] # load configurations from config file execute.extend(self._load_configuration()) # append frames location - execute.extend([upscaled_frames / f'extracted_*.{extracted_frame_format}']) + execute.extend([upscaled_frames / f"extracted_*.{extracted_frame_format}"]) - return(self._execute(execute)) + return self._execute(execute) def _load_configuration(self): @@ -53,13 +60,13 @@ class Gifski: value = self.gifski_settings[key] # null or None means that leave this option out (keep default) - if key == 'gifski_path' or value is None or value is False: + if key == "gifski_path" or value is None or value is False: continue else: if len(key) == 1: - configuration.append(f'-{key}') + configuration.append(f"-{key}") else: - configuration.append(f'--{key}') + configuration.append(f"--{key}") # true means key is an option if value is not True: @@ -70,6 +77,6 @@ class Gifski: # turn all list elements into string to avoid errors execute = [str(e) for e in execute] - Avalon.debug_info(f'Executing: {execute}') + Avalon.debug_info(f"Executing: {execute}") return subprocess.Popen(execute, stdout=sys.stdout, stderr=sys.stderr) diff --git a/src/wrappers/realsr_ncnn_vulkan.py b/src/wrappers/realsr_ncnn_vulkan.py index 43992da..6d0e174 100755 --- a/src/wrappers/realsr_ncnn_vulkan.py +++ b/src/wrappers/realsr_ncnn_vulkan.py @@ -38,28 +38,32 @@ class WrapperMain: @staticmethod def parse_arguments(arguments): + # fmt: off parser = 
argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False) parser.error = lambda message: (_ for _ in ()).throw(AttributeError(message)) - parser.add_argument('--help', action='help', help='show this help message and exit') - parser.add_argument('-v', action='store_true', help='verbose output') - parser.add_argument('-i', type=str, help=argparse.SUPPRESS) # help='input image path (jpg/png) or directory') - parser.add_argument('-o', type=str, help=argparse.SUPPRESS) # help='output image path (png) or directory') - parser.add_argument('-s', type=int, help='upscale ratio') - parser.add_argument('-t', type=int, help='tile size (>=32/0=auto)') - parser.add_argument('-m', type=str, help='realsr model path') - parser.add_argument('-g', type=int, help='gpu device to use') - parser.add_argument('-j', type=str, help='thread count for load/proc/save') - parser.add_argument('-x', action='store_true', help='enable tta mode') - parser.add_argument('-f', type=str, help=argparse.SUPPRESS) # help='output image format (jpg/png/webp, default=ext/png)') + parser.add_argument("--help", action="help", help="show this help message and exit") + parser.add_argument("-v", action="store_true", help="verbose output") + parser.add_argument("-i", type=str, help=argparse.SUPPRESS) # help="input image path (jpg/png) or directory") + parser.add_argument("-o", type=str, help=argparse.SUPPRESS) # help="output image path (png) or directory") + parser.add_argument("-s", type=int, help="upscale ratio") + parser.add_argument("-t", type=int, help="tile size (>=32/0=auto)") + parser.add_argument("-m", type=str, help="realsr model path") + parser.add_argument("-g", type=int, help="gpu device to use") + parser.add_argument("-j", type=str, help="thread count for load/proc/save") + parser.add_argument("-x", action="store_true", help="enable tta mode") + parser.add_argument("-f", type=str, help=argparse.SUPPRESS) # help="output image format (jpg/png/webp, default=ext/png)") return parser.parse_args(arguments) + # fmt: on def load_configurations(self, upscaler): # self.driver_settings['s'] = int(upscaler.scale_ratio) - self.driver_settings['j'] = '{}:{}:{}'.format(upscaler.processes, upscaler.processes, upscaler.processes) - self.driver_settings['f'] = upscaler.extracted_frame_format.lower() + self.driver_settings["j"] = "{}:{}:{}".format( + upscaler.processes, upscaler.processes, upscaler.processes + ) + self.driver_settings["f"] = upscaler.extracted_frame_format.lower() def set_scale_ratio(self, scale_ratio: int): - self.driver_settings['s'] = int(scale_ratio) + self.driver_settings["s"] = int(scale_ratio) def upscale(self, input_directory, output_directory): """This is the core function for RealSR NCNN Vulkan class @@ -72,33 +76,33 @@ class WrapperMain: # change the working directory to the binary's parent directory # so the binary can find shared object files and other files - os.chdir(pathlib.Path(self.driver_settings['path']).parent) + os.chdir(pathlib.Path(self.driver_settings["path"]).parent) # overwrite config file settings - self.driver_settings['i'] = input_directory - self.driver_settings['o'] = output_directory + self.driver_settings["i"] = input_directory + self.driver_settings["o"] = output_directory # by default, realsr-ncnn-vulkan will look for the models under the current working directory # change the working directory to its containing folder if model directory not specified - if self.driver_settings['m'] is None and platform.system() == 'Windows': - 
os.chdir(pathlib.Path(self.driver_settings['path']).parent) + if self.driver_settings["m"] is None and platform.system() == "Windows": + os.chdir(pathlib.Path(self.driver_settings["path"]).parent) # list to be executed # initialize the list with the binary path as the first element - execute = [self.driver_settings['path']] + execute = [self.driver_settings["path"]] for key in self.driver_settings.keys(): value = self.driver_settings[key] # null or None means that leave this option out (keep default) - if key == 'path' or value is None or value is False: + if key == "path" or value is None or value is False: continue else: if len(key) == 1: - execute.append(f'-{key}') + execute.append(f"-{key}") else: - execute.append(f'--{key}') + execute.append(f"--{key}") # true means key is an option if value is not True: @@ -106,6 +110,8 @@ class WrapperMain: # return the Popen object of the new process created self.print_lock.acquire() - Avalon.debug_info(f'[upscaler] Subprocess {os.getpid()} executing: {" ".join(execute)}') + Avalon.debug_info( + f'[upscaler] Subprocess {os.getpid()} executing: {" ".join(execute)}' + ) self.print_lock.release() return subprocess.Popen(execute, stdout=sys.stdout, stderr=sys.stderr) diff --git a/src/wrappers/srmd_ncnn_vulkan.py b/src/wrappers/srmd_ncnn_vulkan.py index ed1531b..5d4c152 100755 --- a/src/wrappers/srmd_ncnn_vulkan.py +++ b/src/wrappers/srmd_ncnn_vulkan.py @@ -38,29 +38,33 @@ class WrapperMain: @staticmethod def parse_arguments(arguments): + # fmt: off parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False) parser.error = lambda message: (_ for _ in ()).throw(AttributeError(message)) - parser.add_argument('--help', action='help', help='show this help message and exit') - parser.add_argument('-v', action='store_true', help='verbose output') - parser.add_argument('-i', type=str, help=argparse.SUPPRESS) # help='input image path (jpg/png) or directory') - parser.add_argument('-o', type=str, help=argparse.SUPPRESS) # help='output image path (png) or directory') - parser.add_argument('-n', type=int, choices=range(-1, 11), help='denoise level') - parser.add_argument('-s', type=int, help='upscale ratio') - parser.add_argument('-t', type=int, help='tile size (>=32)') - parser.add_argument('-m', type=str, help='srmd model path') - parser.add_argument('-g', type=int, help='gpu device to use') - parser.add_argument('-j', type=str, help='thread count for load/proc/save') - parser.add_argument('-x', action='store_true', help='enable tta mode') - parser.add_argument('-f', type=str, help=argparse.SUPPRESS) # help='output image format (jpg/png/webp, default=ext/png)') + parser.add_argument("--help", action="help", help="show this help message and exit") + parser.add_argument("-v", action="store_true", help="verbose output") + parser.add_argument("-i", type=str, help=argparse.SUPPRESS) # help="input image path (jpg/png) or directory") + parser.add_argument("-o", type=str, help=argparse.SUPPRESS) # help="output image path (png) or directory") + parser.add_argument("-n", type=int, choices=range(-1, 11), help="denoise level") + parser.add_argument("-s", type=int, help="upscale ratio") + parser.add_argument("-t", type=int, help="tile size (>=32)") + parser.add_argument("-m", type=str, help="srmd model path") + parser.add_argument("-g", type=int, help="gpu device to use") + parser.add_argument("-j", type=str, help="thread count for load/proc/save") + parser.add_argument("-x", action="store_true", help="enable tta mode") + 
parser.add_argument("-f", type=str, help=argparse.SUPPRESS) # help="output image format (jpg/png/webp, default=ext/png)") return parser.parse_args(arguments) + # fmt: on def load_configurations(self, upscaler): # self.driver_settings['s'] = int(upscaler.scale_ratio) - self.driver_settings['j'] = '{}:{}:{}'.format(upscaler.processes, upscaler.processes, upscaler.processes) - self.driver_settings['f'] = upscaler.extracted_frame_format.lower() + self.driver_settings["j"] = "{}:{}:{}".format( + upscaler.processes, upscaler.processes, upscaler.processes + ) + self.driver_settings["f"] = upscaler.extracted_frame_format.lower() def set_scale_ratio(self, scale_ratio: int): - self.driver_settings['s'] = int(scale_ratio) + self.driver_settings["s"] = int(scale_ratio) def upscale(self, input_directory, output_directory): """This is the core function for SRMD ncnn Vulkan class @@ -73,33 +77,33 @@ class WrapperMain: # change the working directory to the binary's parent directory # so the binary can find shared object files and other files - os.chdir(pathlib.Path(self.driver_settings['path']).parent) + os.chdir(pathlib.Path(self.driver_settings["path"]).parent) # overwrite config file settings - self.driver_settings['i'] = input_directory - self.driver_settings['o'] = output_directory + self.driver_settings["i"] = input_directory + self.driver_settings["o"] = output_directory # by default, srmd-ncnn-vulkan will look for the models under the current working directory # change the working directory to its containing folder if model directory not specified - if self.driver_settings['m'] is None and platform.system() == 'Windows': - os.chdir(pathlib.Path(self.driver_settings['path']).parent) + if self.driver_settings["m"] is None and platform.system() == "Windows": + os.chdir(pathlib.Path(self.driver_settings["path"]).parent) # list to be executed # initialize the list with the binary path as the first element - execute = [self.driver_settings['path']] + execute = [self.driver_settings["path"]] for key in self.driver_settings.keys(): value = self.driver_settings[key] # null or None means that leave this option out (keep default) - if key == 'path' or value is None or value is False: + if key == "path" or value is None or value is False: continue else: if len(key) == 1: - execute.append(f'-{key}') + execute.append(f"-{key}") else: - execute.append(f'--{key}') + execute.append(f"--{key}") # true means key is an option if value is not True: @@ -107,6 +111,8 @@ class WrapperMain: # return the Popen object of the new process created self.print_lock.acquire() - Avalon.debug_info(f'[upscaler] Subprocess {os.getpid()} executing: {" ".join(execute)}') + Avalon.debug_info( + f'[upscaler] Subprocess {os.getpid()} executing: {" ".join(execute)}' + ) self.print_lock.release() return subprocess.Popen(execute, stdout=sys.stdout, stderr=sys.stderr) diff --git a/src/wrappers/waifu2x_caffe.py b/src/wrappers/waifu2x_caffe.py index 7df3724..66e1277 100755 --- a/src/wrappers/waifu2x_caffe.py +++ b/src/wrappers/waifu2x_caffe.py @@ -37,77 +37,78 @@ class WrapperMain: @staticmethod def parse_arguments(arguments): + # fmt: off parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False) parser.error = lambda message: (_ for _ in ()).throw(AttributeError(message)) - parser.add_argument('--help', action='help', help='show this help message and exit') - parser.add_argument('-t', '--tta', type=int, choices=range(2), help='8x slower and slightly high quality') - parser.add_argument('--gpu', 
type=int, help='gpu device no') - parser.add_argument('-b', '--batch_size', type=int, help='input batch size') - parser.add_argument('--crop_h', type=int, help='input image split size(height)') - parser.add_argument('--crop_w', type=int, help='input image split size(width)') - parser.add_argument('-c', '--crop_size', type=int, help='input image split size') - parser.add_argument('-d', '--output_depth', type=int, help='output image chaneel depth bit') - parser.add_argument('-q', '--output_quality', type=int, help='output image quality') - parser.add_argument('-p', '--process', choices=['cpu', 'gpu', 'cudnn'], help='process mode') - parser.add_argument('--model_dir', type=str, help='path to custom model directory (don\'t append last / )') - parser.add_argument('-h', '--scale_height', type=int, help='custom scale height') - parser.add_argument('-w', '--scale_width', type=int, help='custom scale width') - parser.add_argument('-s', '--scale_ratio', type=float, help='custom scale ratio') - parser.add_argument('-n', '--noise_level', type=int, choices=range(4), help='noise reduction level') - parser.add_argument('-m', '--mode', choices=['noise', 'scale', 'noise_scale', 'auto_scale'], help='image processing mode') - parser.add_argument('-e', '--output_extention', type=str, help='extention to output image file when output_path is (auto) or input_path is folder') - parser.add_argument('-l', '--input_extention_list', type=str, help='extention to input image file when input_path is folder') - parser.add_argument('-o', '--output_path', type=str, help=argparse.SUPPRESS) # help='path to output image file (when input_path is folder, output_path must be folder)') - parser.add_argument('-i', '--input_path', type=str, help=argparse.SUPPRESS) # help='(required) path to input image file') + parser.add_argument("--help", action="help", help="show this help message and exit") + parser.add_argument("-t", "--tta", type=int, choices=range(2), help="8x slower and slightly high quality") + parser.add_argument("--gpu", type=int, help="gpu device no") + parser.add_argument("-b", "--batch_size", type=int, help="input batch size") + parser.add_argument("--crop_h", type=int, help="input image split size(height)") + parser.add_argument("--crop_w", type=int, help="input image split size(width)") + parser.add_argument("-c", "--crop_size", type=int, help="input image split size") + parser.add_argument("-d", "--output_depth", type=int, help="output image chaneel depth bit") + parser.add_argument("-q", "--output_quality", type=int, help="output image quality") + parser.add_argument("-p", "--process", choices=["cpu", "gpu", "cudnn"], help="process mode") + parser.add_argument("--model_dir", type=str, help="path to custom model directory (don't append last / )") + parser.add_argument("-h", "--scale_height", type=int, help="custom scale height") + parser.add_argument("-w", "--scale_width", type=int, help="custom scale width") + parser.add_argument("-s", "--scale_ratio", type=float, help="custom scale ratio") + parser.add_argument("-n", "--noise_level", type=int, choices=range(4), help="noise reduction level") + parser.add_argument("-m", "--mode", choices=["noise", "scale", "noise_scale", "auto_scale"], help="image processing mode") + parser.add_argument("-e", "--output_extention", type=str, help="extention to output image file when output_path is (auto) or input_path is folder") + parser.add_argument("-l", "--input_extention_list", type=str, help="extention to input image file when input_path is folder") + 
parser.add_argument("-o", "--output_path", type=str, help=argparse.SUPPRESS) # help="path to output image file (when input_path is folder, output_path must be folder)") + parser.add_argument("-i", "--input_path", type=str, help=argparse.SUPPRESS) # help="(required) path to input image file") return parser.parse_args(arguments) + # fmt: on def load_configurations(self, upscaler): # use scale width and scale height if specified # self.driver_settings['scale_ratio'] = upscaler.scale_ratio - self.driver_settings['output_extention'] = upscaler.extracted_frame_format + self.driver_settings["output_extention"] = upscaler.extracted_frame_format # bit_depth will be 12 at this point # it will up updated later - self.driver_settings['output_depth'] = 12 + self.driver_settings["output_depth"] = 12 def set_scale_resolution(self, width: int, height: int): - self.driver_settings['scale_width'] = width - self.driver_settings['scale_height'] = height - self.driver_settings['scale_ratio'] = None + self.driver_settings["scale_width"] = width + self.driver_settings["scale_height"] = height + self.driver_settings["scale_ratio"] = None def set_scale_ratio(self, scale_ratio: float): - self.driver_settings['scale_width'] = None - self.driver_settings['scale_height'] = None - self.driver_settings['scale_ratio'] = scale_ratio + self.driver_settings["scale_width"] = None + self.driver_settings["scale_height"] = None + self.driver_settings["scale_ratio"] = scale_ratio def upscale(self, input_directory, output_directory): - """ start upscaling process - """ + """start upscaling process""" # change the working directory to the binary's parent directory # so the binary can find shared object files and other files - os.chdir(pathlib.Path(self.driver_settings['path']).parent) + os.chdir(pathlib.Path(self.driver_settings["path"]).parent) # overwrite config file settings - self.driver_settings['input_path'] = input_directory - self.driver_settings['output_path'] = output_directory + self.driver_settings["input_path"] = input_directory + self.driver_settings["output_path"] = output_directory # list to be executed # initialize the list with waifu2x binary path as the first element - execute = [self.driver_settings['path']] + execute = [self.driver_settings["path"]] for key in self.driver_settings.keys(): value = self.driver_settings[key] # null or None means that leave this option out (keep default) - if key == 'path' or value is None or value is False: + if key == "path" or value is None or value is False: continue else: if len(key) == 1: - execute.append(f'-{key}') + execute.append(f"-{key}") else: - execute.append(f'--{key}') + execute.append(f"--{key}") # true means key is an option if value is not True: @@ -115,6 +116,8 @@ class WrapperMain: # return the Popen object of the new process created self.print_lock.acquire() - Avalon.debug_info(f'[upscaler] Subprocess {os.getpid()} executing: {" ".join(execute)}') + Avalon.debug_info( + f'[upscaler] Subprocess {os.getpid()} executing: {" ".join(execute)}' + ) self.print_lock.release() return subprocess.Popen(execute, stdout=sys.stdout, stderr=sys.stderr) diff --git a/src/wrappers/waifu2x_converter_cpp.py b/src/wrappers/waifu2x_converter_cpp.py index 361edf1..6149d49 100755 --- a/src/wrappers/waifu2x_converter_cpp.py +++ b/src/wrappers/waifu2x_converter_cpp.py @@ -37,45 +37,47 @@ class WrapperMain: @staticmethod def parse_arguments(arguments): + # fmt: off parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False) parser.error = 
lambda message: (_ for _ in ()).throw(AttributeError(message)) - parser.add_argument('--help', action='help', help='show this help message and exit') - parser.add_argument('--list-supported-formats', action='store_true', help='dump currently supported format list') - parser.add_argument('--list-opencv-formats', action='store_true', help='(deprecated. Use --list-supported-formats) dump opencv supported format list') - parser.add_argument('-l', '--list-processor', action='store_true', help='dump processor list') - parser.add_argument('-f', '--output-format', choices=['png', 'jpg'], help='The format used when running in recursive/folder mode\nSee --list-supported-formats for a list of supported formats/extensions.') - parser.add_argument('-c', '--png-compression', type=int, choices=range(10), help='Set PNG compression level (0-9), 9 = Max compression (slowest & smallest)') - parser.add_argument('-q', '--image-quality', type=int, choices=range(-1, 102), help='JPEG & WebP Compression quality (0-101, 0 being smallest size and lowest quality), use 101 for lossless WebP') - parser.add_argument('--block-size', type=int, help='block size') - parser.add_argument('--disable-gpu', action='store_true', help='disable GPU') - parser.add_argument('--force-OpenCL', action='store_true', help='force to use OpenCL on Intel Platform') - parser.add_argument('-p', '--processor', type=int, help='set target processor') - parser.add_argument('-j', '--jobs', type=int, help='number of threads launching at the same time') - parser.add_argument('--model-dir', type=str, help='path to custom model directory (don\'t append last / )') - parser.add_argument('--scale-ratio', type=float, help='custom scale ratio') - parser.add_argument('--noise-level', type=int, choices=range(4), help='noise reduction level') - parser.add_argument('-m', '--mode', choices=['noise', 'scale', 'noise-scale'], help='image processing mode') - parser.add_argument('-v', '--log-level', type=int, choices=range(5), help='Set log level') - parser.add_argument('-s', '--silent', action='store_true', help='Enable silent mode. (same as --log-level 1)') - parser.add_argument('-t', '--tta', type=int, choices=range(2), help='Enable Test-Time Augmentation mode.') - parser.add_argument('-g', '--generate-subdir', type=int, choices=range(2), help='Generate sub folder when recursive directory is enabled.') - parser.add_argument('-a', '--auto-naming', type=int, choices=range(2), help='Add postfix to output name when output path is not specified.\nSet 0 to disable this.') - parser.add_argument('-r', '--recursive-directory', type=int, choices=range(2), help='Search recursively through directories to find more images to process.') - parser.add_argument('-o', '--output', type=str, help=argparse.SUPPRESS) # help='path to output image file or directory (you should use the full path)') - parser.add_argument('-i', '--input', type=str, help=argparse.SUPPRESS) # help='(required) path to input image file or directory (you should use the full path)') - parser.add_argument('--version', action='store_true', help='Displays version information and exits.') + parser.add_argument("--help", action="help", help="show this help message and exit") + parser.add_argument("--list-supported-formats", action="store_true", help="dump currently supported format list") + parser.add_argument("--list-opencv-formats", action="store_true", help="(deprecated. 
Use --list-supported-formats) dump opencv supported format list") + parser.add_argument("-l", "--list-processor", action="store_true", help="dump processor list") + parser.add_argument("-f", "--output-format", choices=["png", "jpg"], help="The format used when running in recursive/folder mode\nSee --list-supported-formats for a list of supported formats/extensions.") + parser.add_argument("-c", "--png-compression", type=int, choices=range(10), help="Set PNG compression level (0-9), 9 = Max compression (slowest & smallest)") + parser.add_argument("-q", "--image-quality", type=int, choices=range(-1, 102), help="JPEG & WebP Compression quality (0-101, 0 being smallest size and lowest quality), use 101 for lossless WebP") + parser.add_argument("--block-size", type=int, help="block size") + parser.add_argument("--disable-gpu", action="store_true", help="disable GPU") + parser.add_argument("--force-OpenCL", action="store_true", help="force to use OpenCL on Intel Platform") + parser.add_argument("-p", "--processor", type=int, help="set target processor") + parser.add_argument("-j", "--jobs", type=int, help="number of threads launching at the same time") + parser.add_argument("--model-dir", type=str, help="path to custom model directory (don't append last / )") + parser.add_argument("--scale-ratio", type=float, help="custom scale ratio") + parser.add_argument("--noise-level", type=int, choices=range(4), help="noise reduction level") + parser.add_argument("-m", "--mode", choices=["noise", "scale", "noise-scale"], help="image processing mode") + parser.add_argument("-v", "--log-level", type=int, choices=range(5), help="Set log level") + parser.add_argument("-s", "--silent", action="store_true", help="Enable silent mode. (same as --log-level 1)") + parser.add_argument("-t", "--tta", type=int, choices=range(2), help="Enable Test-Time Augmentation mode.") + parser.add_argument("-g", "--generate-subdir", type=int, choices=range(2), help="Generate sub folder when recursive directory is enabled.") + parser.add_argument("-a", "--auto-naming", type=int, choices=range(2), help="Add postfix to output name when output path is not specified.\nSet 0 to disable this.") + parser.add_argument("-r", "--recursive-directory", type=int, choices=range(2), help="Search recursively through directories to find more images to process.") + parser.add_argument("-o", "--output", type=str, help=argparse.SUPPRESS) # help="path to output image file or directory (you should use the full path)") + parser.add_argument("-i", "--input", type=str, help=argparse.SUPPRESS) # help="(required) path to input image file or directory (you should use the full path)") + parser.add_argument("--version", action="store_true", help="Displays version information and exits.") return parser.parse_args(arguments) + # fmt: on def load_configurations(self, upscaler): # self.driver_settings['scale-ratio'] = upscaler.scale_ratio - self.driver_settings['jobs'] = upscaler.processes - self.driver_settings['output-format'] = upscaler.extracted_frame_format.lower() + self.driver_settings["jobs"] = upscaler.processes + self.driver_settings["output-format"] = upscaler.extracted_frame_format.lower() def set_scale_ratio(self, scale_ratio: float): - self.driver_settings['scale-ratio'] = scale_ratio + self.driver_settings["scale-ratio"] = scale_ratio def upscale(self, input_directory, output_directory): - """ Waifu2x Converter Driver Upscaler + """Waifu2x Converter Driver Upscaler This method executes the upscaling of extracted frames. 
Arguments: @@ -87,33 +89,35 @@ class WrapperMain: # change the working directory to the binary's parent directory # so the binary can find shared object files and other files - os.chdir(pathlib.Path(self.driver_settings['path']).parent) + os.chdir(pathlib.Path(self.driver_settings["path"]).parent) # overwrite config file settings - self.driver_settings['input'] = input_directory - self.driver_settings['output'] = output_directory + self.driver_settings["input"] = input_directory + self.driver_settings["output"] = output_directory # models_rgb must be specified manually for waifu2x-converter-cpp # if it's not specified in the arguments, create automatically - if self.driver_settings['model-dir'] is None: - self.driver_settings['model-dir'] = pathlib.Path(self.driver_settings['path']).parent / 'models_rgb' + if self.driver_settings["model-dir"] is None: + self.driver_settings["model-dir"] = ( + pathlib.Path(self.driver_settings["path"]).parent / "models_rgb" + ) # list to be executed # initialize the list with waifu2x binary path as the first element - execute = [self.driver_settings['path']] + execute = [self.driver_settings["path"]] for key in self.driver_settings.keys(): value = self.driver_settings[key] # null or None means that leave this option out (keep default) - if key == 'path' or value is None or value is False: + if key == "path" or value is None or value is False: continue else: if len(key) == 1: - execute.append(f'-{key}') + execute.append(f"-{key}") else: - execute.append(f'--{key}') + execute.append(f"--{key}") # true means key is an option if value is not True: @@ -121,6 +125,8 @@ class WrapperMain: # return the Popen object of the new process created self.print_lock.acquire() - Avalon.debug_info(f'[upscaler] Subprocess {os.getpid()} executing: {" ".join(execute)}') + Avalon.debug_info( + f'[upscaler] Subprocess {os.getpid()} executing: {" ".join(execute)}' + ) self.print_lock.release() return subprocess.Popen(execute, stdout=sys.stdout, stderr=sys.stderr) diff --git a/src/wrappers/waifu2x_ncnn_vulkan.py b/src/wrappers/waifu2x_ncnn_vulkan.py index e9b87fa..9ad9834 100755 --- a/src/wrappers/waifu2x_ncnn_vulkan.py +++ b/src/wrappers/waifu2x_ncnn_vulkan.py @@ -41,32 +41,36 @@ class WrapperMain: @staticmethod def parse_arguments(arguments): + # fmt: off parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False) parser.error = lambda message: (_ for _ in ()).throw(AttributeError(message)) - parser.add_argument('--help', action='help', help='show this help message and exit') - parser.add_argument('-v', action='store_true', help='verbose output') - parser.add_argument('-i', type=str, help=argparse.SUPPRESS) # help='input image path (jpg/png/webp) or directory') - parser.add_argument('-o', type=str, help=argparse.SUPPRESS) # help='output image path (jpg/png/webp) or directory') - parser.add_argument('-n', type=int, choices=range(-1, 4), help='denoise level') - parser.add_argument('-s', type=int, help='upscale ratio') - parser.add_argument('-t', type=int, help='tile size (>=32)') - parser.add_argument('-m', type=str, help='waifu2x model path') - parser.add_argument('-g', type=int, help='gpu device to use') - parser.add_argument('-j', type=str, help='thread count for load/proc/save') - parser.add_argument('-x', action='store_true', help='enable tta mode') - parser.add_argument('-f', type=str, help=argparse.SUPPRESS) # help='output image format (jpg/png/webp, default=ext/png)') + parser.add_argument("--help", action="help", help="show this 
help message and exit") + parser.add_argument("-v", action="store_true", help="verbose output") + parser.add_argument("-i", type=str, help=argparse.SUPPRESS) # help="input image path (jpg/png/webp) or directory") + parser.add_argument("-o", type=str, help=argparse.SUPPRESS) # help="output image path (jpg/png/webp) or directory") + parser.add_argument("-n", type=int, choices=range(-1, 4), help="denoise level") + parser.add_argument("-s", type=int, help="upscale ratio") + parser.add_argument("-t", type=int, help="tile size (>=32)") + parser.add_argument("-m", type=str, help="waifu2x model path") + parser.add_argument("-g", type=int, help="gpu device to use") + parser.add_argument("-j", type=str, help="thread count for load/proc/save") + parser.add_argument("-x", action="store_true", help="enable tta mode") + parser.add_argument("-f", type=str, help=argparse.SUPPRESS) # help="output image format (jpg/png/webp, default=ext/png)") return parser.parse_args(arguments) + # fmt: on def load_configurations(self, upscaler): # self.driver_settings['s'] = int(upscaler.scale_ratio) - self.driver_settings['j'] = '{}:{}:{}'.format(upscaler.processes, upscaler.processes, upscaler.processes) - self.driver_settings['f'] = upscaler.extracted_frame_format.lower() + self.driver_settings["j"] = "{}:{}:{}".format( + upscaler.processes, upscaler.processes, upscaler.processes + ) + self.driver_settings["f"] = upscaler.extracted_frame_format.lower() def set_scale_ratio(self, scale_ratio: int): - self.driver_settings['s'] = int(scale_ratio) + self.driver_settings["s"] = int(scale_ratio) def upscale(self, input_directory, output_directory): - """ This is the core function for waifu2x class + """This is the core function for waifu2x class Arguments: input_directory {string} -- source directory path @@ -76,33 +80,33 @@ class WrapperMain: # change the working directory to the binary's parent directory # so the binary can find shared object files and other files - os.chdir(pathlib.Path(self.driver_settings['path']).parent) + os.chdir(pathlib.Path(self.driver_settings["path"]).parent) # overwrite config file settings - self.driver_settings['i'] = input_directory - self.driver_settings['o'] = output_directory + self.driver_settings["i"] = input_directory + self.driver_settings["o"] = output_directory # by default, waifu2x-ncnn-vulkan will look for the models under the current working directory # change the working directory to its containing folder if model directory not specified - if self.driver_settings['m'] is None and platform.system() == 'Windows': - os.chdir(pathlib.Path(self.driver_settings['path']).parent) + if self.driver_settings["m"] is None and platform.system() == "Windows": + os.chdir(pathlib.Path(self.driver_settings["path"]).parent) # list to be executed # initialize the list with waifu2x binary path as the first element - execute = [self.driver_settings['path']] + execute = [self.driver_settings["path"]] for key in self.driver_settings.keys(): value = self.driver_settings[key] # null or None means that leave this option out (keep default) - if key == 'path' or value is None or value is False: + if key == "path" or value is None or value is False: continue else: if len(key) == 1: - execute.append(f'-{key}') + execute.append(f"-{key}") else: - execute.append(f'--{key}') + execute.append(f"--{key}") # true means key is an option if value is not True: @@ -110,6 +114,8 @@ class WrapperMain: # return the Popen object of the new process created self.print_lock.acquire() - Avalon.debug_info(f'[upscaler] Subprocess 
{os.getpid()} executing: {" ".join(execute)}') + Avalon.debug_info( + f'[upscaler] Subprocess {os.getpid()} executing: {" ".join(execute)}' + ) self.print_lock.release() return subprocess.Popen(execute, stdout=sys.stdout, stderr=sys.stderr)
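
Note on the upscale methods reformatted above: every wrapper builds its command line from driver_settings with the same convention. The "path" entry becomes the executable, None/False entries are skipped so the driver keeps its own defaults, single-character keys become short options, longer keys become long options, and a value of True marks a bare switch with no argument. A minimal, self-contained sketch of that convention follows; the dictionary in the usage example is hypothetical and not taken from any shipped configuration file.

def settings_to_arguments(driver_settings: dict) -> list:
    # the binary path is always the first element of the command
    execute = [str(driver_settings["path"])]
    for key, value in driver_settings.items():
        # "path" is the binary itself; None/False means keep the driver default
        if key == "path" or value is None or value is False:
            continue
        # single-character keys map to short options, the rest to long options
        execute.append(f"-{key}" if len(key) == 1 else f"--{key}")
        # True marks a bare flag; any other value is appended after its option
        if value is not True:
            execute.append(str(value))
    return execute

# hypothetical example:
# settings_to_arguments({"path": "waifu2x-ncnn-vulkan", "s": 2, "x": True, "m": None})
# -> ["waifu2x-ncnn-vulkan", "-s", "2", "-x"]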
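
A related note on the parse_arguments methods wrapped in "# fmt: off" above: each one rebinds parser.error so that an invalid driver argument raises an exception instead of printing usage and exiting the whole process. The sketch below illustrates that pattern in isolation; the "-s" option and the sample arguments are illustrative only.

import argparse

# argparse's default error handler prints usage and calls sys.exit(2);
# rebinding it on the instance makes bad arguments raise instead, so the
# caller can catch and report them without terminating the program.
parser = argparse.ArgumentParser(add_help=False)
# a lambda cannot contain a raise statement, so the exception is thrown
# through a throwaway generator expression
parser.error = lambda message: (_ for _ in ()).throw(AttributeError(message))
parser.add_argument("-s", type=int, help="upscale ratio")  # sample option only

try:
    parser.parse_args(["-s", "not-a-number"])
except AttributeError as error:
    print(f"invalid driver argument: {error}")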