diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
index 5bdea70..5f33add 100644
--- a/.github/FUNDING.yml
+++ b/.github/FUNDING.yml
@@ -1,12 +1,2 @@
-# These are supported funding model platforms
-
 github: k4yt3x
 patreon: k4yt3x
-open_collective: # Replace with a single Open Collective username
-ko_fi: # Replace with a single Ko-fi username
-tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
-community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
-liberapay: # Replace with a single Liberapay username
-issuehunt: # Replace with a single IssueHunt username
-otechie: # Replace with a single Otechie username
-custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 55771a9..86af44d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -28,6 +28,13 @@ if(CMAKE_BUILD_TYPE STREQUAL "Release")
     endif()
 endif()
 
+# Set global compile options for all targets
+if (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+    add_compile_options(/W4 /permissive-)
+elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+    add_compile_options(-Wall -Wextra -Wpedantic -Wconversion -Wshadow)
+endif()
+
 # Build options
 option(BUILD_SHARED_LIBS "Build libvideo2x as a shared library" ON)
 option(BUILD_VIDEO2X_CLI "Build the video2x executable" ON)
@@ -307,7 +314,6 @@ target_include_directories(libvideo2x PRIVATE
 
 # Compile options for the shared library
 target_compile_options(libvideo2x PRIVATE
-    -Wall
     -fPIC
     $<$<CONFIG:Release>:-Ofast>
     $<$<CONFIG:Debug>:-g -DDEBUG>
@@ -344,10 +350,7 @@ if (BUILD_VIDEO2X_CLI)
     )
 
     # Compile options for the executable
-    target_compile_options(video2x PRIVATE
-        -Wall
-        $<$<CONFIG:Debug>:-g -DDEBUG>
-    )
+    target_compile_options(video2x PRIVATE $<$<CONFIG:Debug>:-g -DDEBUG>)
 
     # Link the executable with the shared library
     target_link_libraries(video2x PRIVATE ${ALL_LIBRARIES} libvideo2x)
diff --git a/include/libvideo2x/libplacebo_filter.h b/include/libvideo2x/libplacebo_filter.h
index fd62932..f03a8ef 100644
--- a/include/libvideo2x/libplacebo_filter.h
+++ b/include/libvideo2x/libplacebo_filter.h
@@ -28,7 +28,7 @@ class LibplaceboFilter : public Filter {
     LibplaceboFilter(int width, int height, const std::filesystem::path &shader_path);
 
     // Destructor
-    virtual ~LibplaceboFilter();
+    virtual ~LibplaceboFilter() override;
 
     // Initializes the filter with decoder and encoder contexts
     int init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVBufferRef *hw_ctx) override;
diff --git a/include/libvideo2x/realesrgan_filter.h b/include/libvideo2x/realesrgan_filter.h
index 1415b94..e046c1f 100644
--- a/include/libvideo2x/realesrgan_filter.h
+++ b/include/libvideo2x/realesrgan_filter.h
@@ -36,7 +36,7 @@ class RealesrganFilter : public Filter {
     );
 
     // Destructor
-    virtual ~RealesrganFilter();
+    virtual ~RealesrganFilter() override;
 
     // Initializes the filter with decoder and encoder contexts
     int init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVBufferRef *hw_ctx) override;
diff --git a/src/conversions.cpp b/src/conversions.cpp
index 147e9dc..9d6ae3d 100644
--- a/src/conversions.cpp
+++ b/src/conversions.cpp
@@ -1,5 +1,6 @@
 #include "conversions.h"
 
+#include
 #include
 #include
 
@@ -79,14 +80,16 @@ ncnn::Mat avframe_to_ncnn_mat(AVFrame *frame) {
     // Allocate a new ncnn::Mat and copy the data
     int width = converted_frame->width;
     int height = converted_frame->height;
-    ncnn::Mat ncnn_image = ncnn::Mat(width, height, (size_t)3, 3); // BGR has 3 channels
+    ncnn::Mat ncnn_image = ncnn::Mat(width, height, 3, 3); // BGR has 3 channels
 
     // Manually copy the pixel data from AVFrame to the new ncnn::Mat
     const uint8_t *src_data = converted_frame->data[0];
     for (int y = 0; y < height; y++) {
         uint8_t *dst_row = ncnn_image.row<uint8_t>(y);
         const uint8_t *src_row = src_data + y * converted_frame->linesize[0];
-        memcpy(dst_row, src_row, width * 3); // Copy 3 channels (BGR) per pixel
+
+        // Copy 3 channels (BGR) per pixel
+        memcpy(dst_row, src_row, static_cast<size_t>(width) * 3);
     }
 
     // If we allocated a converted frame, free it
@@ -143,7 +146,9 @@ AVFrame *ncnn_mat_to_avframe(const ncnn::Mat &mat, AVPixelFormat pix_fmt) {
     for (int y = 0; y < mat.h; y++) {
         uint8_t *dst_row = bgr_frame->data[0] + y * bgr_frame->linesize[0];
         const uint8_t *src_row = mat.row<uint8_t>(y);
-        memcpy(dst_row, src_row, mat.w * 3); // Copy 3 channels (BGR) per pixel
+
+        // Copy 3 channels (BGR) per pixel
+        memcpy(dst_row, src_row, static_cast<size_t>(mat.w) * 3);
     }
 
     // Step 3: Convert the BGR frame to the desired pixel format
diff --git a/src/decoder.cpp b/src/decoder.cpp
index 27f20a3..48ae246 100644
--- a/src/decoder.cpp
+++ b/src/decoder.cpp
@@ -9,7 +9,7 @@
 static enum AVPixelFormat hw_pix_fmt = AV_PIX_FMT_NONE;
 
 // Callback function to choose the hardware-accelerated pixel format
-static enum AVPixelFormat get_hw_format(AVCodecContext *ctx, const enum AVPixelFormat *pix_fmts) {
+static enum AVPixelFormat get_hw_format(AVCodecContext *_, const enum AVPixelFormat *pix_fmts) {
     for (const enum AVPixelFormat *p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
         if (*p == hw_pix_fmt) {
             return *p;
@@ -55,7 +55,8 @@ int init_decoder(
     const AVCodec *decoder = avcodec_find_decoder(video_stream->codecpar->codec_id);
     if (!decoder) {
         spdlog::error(
-            "Failed to find decoder for codec ID {}", (int)video_stream->codecpar->codec_id
+            "Failed to find decoder for codec ID {}",
+            static_cast<int>(video_stream->codecpar->codec_id)
         );
         return AVERROR_DECODER_NOT_FOUND;
     }
diff --git a/src/encoder.cpp b/src/encoder.cpp
index 0486e75..29fdeb8 100644
--- a/src/encoder.cpp
+++ b/src/encoder.cpp
@@ -93,7 +93,7 @@ int init_encoder(
 
     // Set the CRF and preset for any codecs that support it
     char crf_str[16];
-    snprintf(crf_str, sizeof(crf_str), "%.f", encoder_config->crf);
+    snprintf(crf_str, sizeof(crf_str), "%.f", static_cast<double>(encoder_config->crf));
     av_opt_set(codec_ctx->priv_data, "crf", crf_str, 0);
     av_opt_set(codec_ctx->priv_data, "preset", encoder_config->preset, 0);
 
@@ -116,7 +116,8 @@ int init_encoder(
 
     if (encoder_config->copy_streams) {
         // Allocate the stream map
-        *stream_map = (int *)av_malloc_array(ifmt_ctx->nb_streams, sizeof(**stream_map));
+        *stream_map =
+            reinterpret_cast<int *>(av_malloc_array(ifmt_ctx->nb_streams, sizeof(**stream_map)));
         if (!*stream_map) {
             spdlog::error("Could not allocate stream mapping");
             return AVERROR(ENOMEM);
@@ -126,7 +127,7 @@ int init_encoder(
         (*stream_map)[vstream_idx] = stream_index++;
 
         // Loop through each stream in the input file
-        for (int i = 0; i < ifmt_ctx->nb_streams; i++) {
+        for (int i = 0; i < static_cast<int>(ifmt_ctx->nb_streams); i++) {
             AVStream *in_stream = ifmt_ctx->streams[i];
             AVCodecParameters *in_codecpar = in_stream->codecpar;
 
@@ -142,21 +143,21 @@ int init_encoder(
             }
 
             // Create corresponding output stream
-            AVStream *out_stream = avformat_new_stream(fmt_ctx, NULL);
-            if (!out_stream) {
+            AVStream *out_copied_stream = avformat_new_stream(fmt_ctx, NULL);
+            if (!out_copied_stream) {
                 spdlog::error("Failed allocating output stream");
                 return AVERROR_UNKNOWN;
             }
 
-            ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
+            ret = avcodec_parameters_copy(out_copied_stream->codecpar, in_codecpar);
             if (ret < 0) {
                 spdlog::error("Failed to copy codec parameters");
                 return ret;
             }
-            out_stream->codecpar->codec_tag = 0;
+            out_copied_stream->codecpar->codec_tag = 0;
 
             // Copy time base
-            out_stream->time_base = in_stream->time_base;
+            out_copied_stream->time_base = in_stream->time_base;
 
             (*stream_map)[i] = stream_index++;
         }
diff --git a/src/fsutils.cpp b/src/fsutils.cpp
index a0c7447..6eec2d4 100644
--- a/src/fsutils.cpp
+++ b/src/fsutils.cpp
@@ -11,7 +11,7 @@
 #include
 
 #if _WIN32
-std::filesystem::path get_executable_directory() {
+static std::filesystem::path get_executable_directory() {
     std::vector filepath(MAX_PATH);
 
     // Get the executable path, expanding the buffer if necessary
@@ -36,7 +36,7 @@
     return execpath.parent_path();
 }
 #else // _WIN32
-std::filesystem::path get_executable_directory() {
+static std::filesystem::path get_executable_directory() {
     std::error_code ec;
     std::filesystem::path filepath = std::filesystem::read_symlink("/proc/self/exe", ec);
 
diff --git a/src/libvideo2x.cpp b/src/libvideo2x.cpp
index 3034eda..7026e6f 100644
--- a/src/libvideo2x.cpp
+++ b/src/libvideo2x.cpp
@@ -3,7 +3,6 @@
 #include
 #include
 #include
-#include
 
 #include
 #include
@@ -30,7 +29,7 @@
  * @param[in] benchmark Flag to enable benchmarking mode
  * @return int 0 on success, negative value on error
  */
-int process_frames(
+static int process_frames(
     EncoderConfig *encoder_config,
     VideoProcessingContext *proc_ctx,
     AVFormatContext *ifmt_ctx,
@@ -54,7 +53,7 @@ int process_frames(
         spdlog::error("Failed to open video file with OpenCV");
         return -1;
     }
-    proc_ctx->total_frames = cap.get(cv::CAP_PROP_FRAME_COUNT);
+    proc_ctx->total_frames = static_cast<int64_t>(cap.get(cv::CAP_PROP_FRAME_COUNT));
     cap.release();
 
     // Check if the total number of frames is still 0
@@ -208,7 +207,7 @@ end:
 }
 
 // Cleanup resources after processing the video
-void cleanup(
+static void cleanup(
     AVFormatContext *ifmt_ctx,
     AVFormatContext *ofmt_ctx,
     AVCodecContext *dec_ctx,
@@ -343,6 +342,11 @@ extern "C" int process_video(
             // Calculate the output dimensions based on the scaling factor
            output_width = dec_ctx->width * filter_config->config.realesrgan.scaling_factor;
            output_height = dec_ctx->height * filter_config->config.realesrgan.scaling_factor;
+            break;
+        default:
+            spdlog::error("Unknown filter type");
+            cleanup(ifmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, hw_ctx, stream_map, filter);
+            return -1;
     }
 
     spdlog::info("Output video dimensions: {}x{}", output_width, output_height);
diff --git a/src/realesrgan_filter.cpp b/src/realesrgan_filter.cpp
index db83b15..fbe35ce 100644
--- a/src/realesrgan_filter.cpp
+++ b/src/realesrgan_filter.cpp
@@ -32,7 +32,7 @@ RealesrganFilter::~RealesrganFilter() {
     }
 }
 
-int RealesrganFilter::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVBufferRef *hw_ctx) {
+int RealesrganFilter::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVBufferRef *_) {
     // Construct the model paths using std::filesystem
     std::filesystem::path model_param_path;
     std::filesystem::path model_bin_path;
@@ -113,7 +113,7 @@ int RealesrganFilter::process_frame(AVFrame *in_frame, AVFrame **out_frame) {
     // Allocate space for ouptut ncnn::Mat
     int output_width = in_mat.w * realesrgan->scale;
     int output_height = in_mat.h * realesrgan->scale;
-    ncnn::Mat out_mat = ncnn::Mat(output_width, output_height, (size_t)3, 3);
+    ncnn::Mat out_mat = ncnn::Mat(output_width, output_height, 3, 3);
 
     ret = realesrgan->process(in_mat, out_mat);
     if (ret != 0) {
diff --git a/src/video2x.c b/src/video2x.c
index 184429f..d27771c 100644
--- a/src/video2x.c
+++ b/src/video2x.c
@@ -134,7 +134,9 @@ int is_valid_realesrgan_model(const char *model) {
     if (!model) {
         return 0;
     }
-    for (int i = 0; i < sizeof(valid_realesrgan_models) / sizeof(valid_realesrgan_models[0]); i++) {
+    for (unsigned long i = 0;
+         i < sizeof(valid_realesrgan_models) / sizeof(valid_realesrgan_models[0]);
+         i++) {
        if (strcmp(model, valid_realesrgan_models[i]) == 0) {
            return 1;
        }
@@ -142,7 +144,7 @@ int is_valid_realesrgan_model(const char *model) {
     return 0;
 }
 
-void print_help() {
+void print_help(void) {
     printf("Usage: video2x [OPTIONS]\n");
     printf("\nOptions:\n");
     printf(
@@ -245,7 +247,7 @@ void parse_arguments(int argc, char **argv, struct arguments *arguments) {
             }
             break;
         case 'q':
-            arguments->crf = atof(optarg);
+            arguments->crf = (float)atof(optarg);
             if (arguments->crf < 0.0 || arguments->crf > 51.0) {
                 fprintf(stderr, "Error: CRF must be between 0 and 51.\n");
                 exit(1);