Mirror of https://github.com/k4yt3x/video2x.git, synced 2024-12-29 16:09:10 +00:00
Update waifu2x_ncnn_vulkan arguments to the latest version!
parent c2260ca709
commit 685648957f
@@ -55,15 +55,16 @@ waifu2x_converter_cpp:
 waifu2x_ncnn_vulkan:
   path: '%LOCALAPPDATA%\video2x\waifu2x-ncnn-vulkan\waifu2x-ncnn-vulkan'
   v: null # verbose output
-  #i: null # input-path: input image path (jpg/png) or directory
-  #o: null # output-path: output image path (png) or directory
+  #i: null # input-path: input image path (jpg/png/webp) or directory
+  #o: null # output-path: output image path (jpg/png/webp) or directory
   'n': 2 # noise-level: denoise level (-1/0/1/2/3, default=0)
   s: 2 # scale: upscale ratio (1/2, default=2)
   t: 400 # tile-size: tile size (>=32, default=400)
   m: null # model-path: waifu2x model path (default=models-cunet)
   g: 0 # gpu-id: gpu device to use (default=0)
-  j: '1:2:2' # thread count for load/proc/save (default=1:2:2)
+  j: '1:2:2' # thread count for load/proc/save (default=1:2:2) can be 1:2,2,2:2 for multi-gpu
   x: false # enable tta mode
+  f: png # output image format (jpg/png/webp, default=ext/png)
 srmd_ncnn_vulkan:
   path: '%LOCALAPPDATA%\video2x\srmd-ncnn-vulkan\srmd-ncnn-vulkan'
   v: null # verbose output
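For context, the keys in this section mirror waifu2x-ncnn-vulkan's own command-line flags (-n, -s, -t, -m, -g, -j, -x, and now -f). Below is a minimal sketch of how such a config section could be serialized into an argument list; it is not video2x's actual driver code, and the build_command helper name is hypothetical, used only for illustration. It assumes null values mean "leave the flag at its default" and booleans are bare switches.

    def build_command(executable, options):
        """Sketch: turn a config section like the one above into an argument list."""
        command = [executable]
        for key, value in options.items():
            if key == 'path' or value is None:
                continue  # 'path' is the binary itself; null keys fall back to the tool's defaults
            if value is True:
                command.append(f'-{key}')  # e.g. x: true becomes a bare '-x'
            elif value is False:
                continue  # disabled switches are simply not passed
            else:
                command.extend([f'-{key}', str(value)])
        return command

    # Example using the new 'f' key from this commit:
    # build_command('waifu2x-ncnn-vulkan',
    #               {'n': 2, 's': 2, 't': 400, 'g': 0, 'j': '1:2:2', 'x': False, 'f': 'png'})
    # -> ['waifu2x-ncnn-vulkan', '-n', '2', '-s', '2', '-t', '400', '-g', '0', '-j', '1:2:2', '-f', 'png']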
@@ -44,8 +44,8 @@ class WrapperMain:
         parser.error = lambda message: (_ for _ in ()).throw(AttributeError(message))
         parser.add_argument('--help', action='help', help='show this help message and exit')
         parser.add_argument('-v', action='store_true', help='verbose output')
-        parser.add_argument('-i', type=str, help=argparse.SUPPRESS) # help='input image path (jpg/png) or directory')
-        parser.add_argument('-o', type=str, help=argparse.SUPPRESS) # help='output image path (png) or directory')
+        parser.add_argument('-i', type=str, help=argparse.SUPPRESS) # help='input image path (jpg/png/webp) or directory')
+        parser.add_argument('-o', type=str, help=argparse.SUPPRESS) # help='output image path (jpg/png/webp) or directory')
         parser.add_argument('-n', type=int, choices=range(-1, 4), help='denoise level')
         parser.add_argument('-s', type=int, help='upscale ratio')
         parser.add_argument('-t', type=int, help='tile size (>=32)')
@@ -53,6 +53,7 @@ class WrapperMain:
         parser.add_argument('-g', type=int, help='gpu device to use')
         parser.add_argument('-j', type=str, help='thread count for load/proc/save')
         parser.add_argument('-x', action='store_true', help='enable tta mode')
+        parser.add_argument('-f', type=str, help='output image format (jpg/png/webp, default=ext/png)')
         return parser.parse_args(arguments)

     def load_configurations(self, upscaler):
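The argparse hunk above only adds one option, so a quick way to see its effect is to rebuild a tiny standalone parser with the same add_argument calls and feed it a tokenized argument list. This is a sketch for illustration, not the wrapper's full parser; only the add_argument signatures are taken from the diff.

    import argparse

    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('-n', type=int, choices=range(-1, 4), help='denoise level')
    parser.add_argument('-s', type=int, help='upscale ratio')
    parser.add_argument('-j', type=str, help='thread count for load/proc/save')
    parser.add_argument('-f', type=str, help='output image format (jpg/png/webp, default=ext/png)')

    # '1:2,2,2:2' is the multi-GPU form mentioned in the updated config comment.
    args = parser.parse_args(['-n', '2', '-s', '2', '-j', '1:2,2,2:2', '-f', 'png'])
    print(args.n, args.s, args.j, args.f)  # 2 2 1:2,2,2:2 png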