style(*): add .clang-format and format all files
Some checks failed
Build / ubuntu (push) Has been cancelled
Build / windows (push) Has been cancelled
Build / container (push) Has been cancelled

Signed-off-by: k4yt3x <i@k4yt3x.com>
This commit is contained in:
k4yt3x 2024-12-31 00:00:00 +00:00
parent c95a6a46cd
commit a9b9a71e9b
No known key found for this signature in database
28 changed files with 264 additions and 256 deletions

8
.clang-format Normal file
View File

@ -0,0 +1,8 @@
BasedOnStyle: Chromium
AlignAfterOpenBracket: BlockIndent
AllowShortEnumsOnASingleLine: false
BinPackArguments: false
BreakStringLiterals: false
ColumnLimit: 100
IndentWidth: 4
InsertBraces: true

View File

@ -7,19 +7,19 @@ extern "C" {
namespace video2x {
namespace avutils {
AVRational get_video_frame_rate(AVFormatContext *ifmt_ctx, int in_vstream_idx);
AVRational get_video_frame_rate(AVFormatContext* ifmt_ctx, int in_vstream_idx);
int64_t get_video_frame_count(AVFormatContext *ifmt_ctx, int in_vstream_idx);
int64_t get_video_frame_count(AVFormatContext* ifmt_ctx, int in_vstream_idx);
AVPixelFormat get_encoder_default_pix_fmt(const AVCodec *encoder, AVPixelFormat target_pix_fmt);
AVPixelFormat get_encoder_default_pix_fmt(const AVCodec* encoder, AVPixelFormat target_pix_fmt);
float get_frame_diff(AVFrame *frame1, AVFrame *frame2);
float get_frame_diff(AVFrame* frame1, AVFrame* frame2);
void av_bufferref_deleter(AVBufferRef *bufferref);
void av_bufferref_deleter(AVBufferRef* bufferref);
void av_frame_deleter(AVFrame *frame);
void av_frame_deleter(AVFrame* frame);
void av_packet_deleter(AVPacket *packet);
void av_packet_deleter(AVPacket* packet);
} // namespace avutils
} // namespace video2x

View File

@ -11,13 +11,13 @@ namespace video2x {
namespace conversions {
// Convert AVFrame to another pixel format
AVFrame *convert_avframe_pix_fmt(AVFrame *src_frame, AVPixelFormat pix_fmt);
AVFrame* convert_avframe_pix_fmt(AVFrame* src_frame, AVPixelFormat pix_fmt);
// Convert AVFrame to ncnn::Mat
ncnn::Mat avframe_to_ncnn_mat(AVFrame *frame);
ncnn::Mat avframe_to_ncnn_mat(AVFrame* frame);
// Convert ncnn::Mat to AVFrame
AVFrame *ncnn_mat_to_avframe(const ncnn::Mat &mat, AVPixelFormat pix_fmt);
AVFrame* ncnn_mat_to_avframe(const ncnn::Mat& mat, AVPixelFormat pix_fmt);
} // namespace conversions
} // namespace video2x

View File

@ -15,18 +15,18 @@ class Decoder {
Decoder();
~Decoder();
int init(AVHWDeviceType hw_type, AVBufferRef *hw_ctx, const std::filesystem::path &in_fpath);
int init(AVHWDeviceType hw_type, AVBufferRef* hw_ctx, const std::filesystem::path& in_fpath);
AVFormatContext *get_format_context() const;
AVCodecContext *get_codec_context() const;
AVFormatContext* get_format_context() const;
AVCodecContext* get_codec_context() const;
int get_video_stream_index() const;
private:
static AVPixelFormat hw_pix_fmt_;
static AVPixelFormat get_hw_format(AVCodecContext *ctx, const AVPixelFormat *pix_fmts);
static AVPixelFormat get_hw_format(AVCodecContext* ctx, const AVPixelFormat* pix_fmts);
AVFormatContext *fmt_ctx_;
AVCodecContext *dec_ctx_;
AVFormatContext* fmt_ctx_;
AVCodecContext* dec_ctx_;
int in_vstream_idx_;
};

View File

@ -54,30 +54,30 @@ class Encoder {
~Encoder();
int init(
AVBufferRef *hw_ctx,
const std::filesystem::path &out_fpath,
AVFormatContext *ifmt_ctx,
AVCodecContext *dec_ctx,
EncoderConfig &enc_cfg,
AVBufferRef* hw_ctx,
const std::filesystem::path& out_fpath,
AVFormatContext* ifmt_ctx,
AVCodecContext* dec_ctx,
EncoderConfig& enc_cfg,
int width,
int height,
int frm_rate_mul,
int in_vstream_idx
);
int write_frame(AVFrame *frame, int64_t frame_idx);
int write_frame(AVFrame* frame, int64_t frame_idx);
int flush();
AVCodecContext *get_encoder_context() const;
AVFormatContext *get_format_context() const;
int *get_stream_map() const;
AVCodecContext* get_encoder_context() const;
AVFormatContext* get_format_context() const;
int* get_stream_map() const;
int get_output_video_stream_index() const;
private:
AVFormatContext *ofmt_ctx_;
AVCodecContext *enc_ctx_;
AVFormatContext* ofmt_ctx_;
AVCodecContext* enc_ctx_;
int out_vstream_idx_;
int *stream_map_;
int* stream_map_;
};
} // namespace encoder

View File

@ -19,7 +19,7 @@ class FilterLibplacebo : public Filter {
// Constructor
FilterLibplacebo(
uint32_t vk_device_index,
const std::filesystem::path &shader_path,
const std::filesystem::path& shader_path,
int width,
int height
);
@ -28,30 +28,30 @@ class FilterLibplacebo : public Filter {
virtual ~FilterLibplacebo() override;
// Initializes the filter with decoder and encoder contexts
int init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVBufferRef *hw_ctx) override;
int init(AVCodecContext* dec_ctx, AVCodecContext* enc_ctx, AVBufferRef* hw_ctx) override;
// Processes an input frame and returns the processed frame
int filter(AVFrame *in_frame, AVFrame **out_frame) override;
int filter(AVFrame* in_frame, AVFrame** out_frame) override;
// Flushes any remaining frames
int flush(std::vector<AVFrame *> &flushed_frames) override;
int flush(std::vector<AVFrame*>& flushed_frames) override;
// Returns the filter's type
ProcessorType get_processor_type() const override { return ProcessorType::Libplacebo; }
// Returns the filter's output dimensions
void get_output_dimensions(
const ProcessorConfig &proc_cfg,
const ProcessorConfig& proc_cfg,
int in_width,
int in_height,
int &out_width,
int &out_height
int& out_width,
int& out_height
) const override;
private:
AVFilterGraph *filter_graph_;
AVFilterContext *buffersrc_ctx_;
AVFilterContext *buffersink_ctx_;
AVFilterGraph* filter_graph_;
AVFilterContext* buffersrc_ctx_;
AVFilterContext* buffersink_ctx_;
uint32_t vk_device_index_;
const std::filesystem::path shader_path_;
int width_;

View File

@ -28,25 +28,25 @@ class FilterRealcugan : public Filter {
virtual ~FilterRealcugan() override;
// Initializes the filter with decoder and encoder contexts
int init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVBufferRef *hw_ctx) override;
int init(AVCodecContext* dec_ctx, AVCodecContext* enc_ctx, AVBufferRef* hw_ctx) override;
// Processes an input frame and returns the processed frame
int filter(AVFrame *in_frame, AVFrame **out_frame) override;
int filter(AVFrame* in_frame, AVFrame** out_frame) override;
// Returns the filter's type
ProcessorType get_processor_type() const override { return ProcessorType::RealCUGAN; }
// Returns the filter's output dimensions
void get_output_dimensions(
const ProcessorConfig &proc_cfg,
const ProcessorConfig& proc_cfg,
int in_width,
int in_height,
int &out_width,
int &out_height
int& out_width,
int& out_height
) const override;
private:
RealCUGAN *realcugan_;
RealCUGAN* realcugan_;
int gpuid_;
bool tta_mode_;
int scaling_factor_;

View File

@ -25,25 +25,25 @@ class FilterRealesrgan : public Filter {
virtual ~FilterRealesrgan() override;
// Initializes the filter with decoder and encoder contexts
int init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVBufferRef *hw_ctx) override;
int init(AVCodecContext* dec_ctx, AVCodecContext* enc_ctx, AVBufferRef* hw_ctx) override;
// Processes an input frame and returns the processed frame
int filter(AVFrame *in_frame, AVFrame **out_frame) override;
int filter(AVFrame* in_frame, AVFrame** out_frame) override;
// Returns the filter's type
ProcessorType get_processor_type() const override { return ProcessorType::RealESRGAN; }
// Returns the filter's output dimensions
void get_output_dimensions(
const ProcessorConfig &proc_cfg,
const ProcessorConfig& proc_cfg,
int in_width,
int in_height,
int &out_width,
int &out_height
int& out_width,
int& out_height
) const override;
private:
RealESRGAN *realesrgan_;
RealESRGAN* realesrgan_;
int gpuid_;
bool tta_mode_;
int scaling_factor_;

View File

@ -20,15 +20,15 @@ typedef std::wstring StringType;
typedef std::string StringType;
#endif
bool filepath_is_readable(const std::filesystem::path &path);
bool filepath_is_readable(const std::filesystem::path& path);
std::filesystem::path find_resource_file(const std::filesystem::path &path);
std::filesystem::path find_resource_file(const std::filesystem::path& path);
std::string path_to_u8string(const std::filesystem::path &path);
std::string path_to_u8string(const std::filesystem::path& path);
std::string wstring_to_u8string(const fsutils::StringType &wstr);
std::string wstring_to_u8string(const fsutils::StringType& wstr);
fsutils::StringType path_to_string_type(const std::filesystem::path &path);
fsutils::StringType path_to_string_type(const std::filesystem::path& path);
fsutils::StringType to_string_type(int value);

View File

@ -27,10 +27,10 @@ class InterpolatorRIFE : public Interpolator {
virtual ~InterpolatorRIFE() override;
// Initializes the interpolator with decoder and encoder contexts
int init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVBufferRef *hw_ctx) override;
int init(AVCodecContext* dec_ctx, AVCodecContext* enc_ctx, AVBufferRef* hw_ctx) override;
// Processes an input frame and returns the processed frame
int interpolate(AVFrame *prev_frame, AVFrame *in_frame, AVFrame **out_frame, float time_step)
int interpolate(AVFrame* prev_frame, AVFrame* in_frame, AVFrame** out_frame, float time_step)
override;
// Returns the interpolator's type
@ -38,15 +38,15 @@ class InterpolatorRIFE : public Interpolator {
// Returns the interpolator's output dimensions
void get_output_dimensions(
const ProcessorConfig &proc_cfg,
const ProcessorConfig& proc_cfg,
int in_width,
int in_height,
int &out_width,
int &out_height
int& out_width,
int& out_height
) const override;
private:
RIFE *rife_;
RIFE* rife_;
int gpuid_;
bool tta_mode_;
bool tta_temporal_mode_;

View File

@ -11,14 +11,14 @@ namespace video2x {
namespace processors {
int init_libplacebo(
AVFilterGraph **filter_graph,
AVFilterContext **buffersrc_ctx,
AVFilterContext **buffersink_ctx,
AVCodecContext *dec_ctx,
AVFilterGraph** filter_graph,
AVFilterContext** buffersrc_ctx,
AVFilterContext** buffersink_ctx,
AVCodecContext* dec_ctx,
int out_width,
int out_height,
uint32_t vk_device_index,
const std::filesystem::path &shader_path
const std::filesystem::path& shader_path
);
} // namespace processors

View File

@ -51,33 +51,33 @@ class LIBVIDEO2X_API VideoProcessor {
private:
[[nodiscard]] int process_frames(
decoder::Decoder &decoder,
encoder::Encoder &encoder,
std::unique_ptr<processors::Processor> &processor
decoder::Decoder& decoder,
encoder::Encoder& encoder,
std::unique_ptr<processors::Processor>& processor
);
[[nodiscard]] int write_frame(AVFrame *frame, encoder::Encoder &encoder);
[[nodiscard]] int write_frame(AVFrame* frame, encoder::Encoder& encoder);
[[nodiscard]] inline int write_raw_packet(
AVPacket *packet,
AVFormatContext *ifmt_ctx,
AVFormatContext *ofmt_ctx,
int *stream_map
AVPacket* packet,
AVFormatContext* ifmt_ctx,
AVFormatContext* ofmt_ctx,
int* stream_map
);
[[nodiscard]] inline int process_filtering(
std::unique_ptr<processors::Processor> &processor,
encoder::Encoder &encoder,
AVFrame *frame,
AVFrame *proc_frame
std::unique_ptr<processors::Processor>& processor,
encoder::Encoder& encoder,
AVFrame* frame,
AVFrame* proc_frame
);
[[nodiscard]] inline int process_interpolation(
std::unique_ptr<processors::Processor> &processor,
encoder::Encoder &encoder,
std::unique_ptr<AVFrame, decltype(&avutils::av_frame_deleter)> &prev_frame,
AVFrame *frame,
AVFrame *proc_frame
std::unique_ptr<processors::Processor>& processor,
encoder::Encoder& encoder,
std::unique_ptr<AVFrame, decltype(&avutils::av_frame_deleter)>& prev_frame,
AVFrame* frame,
AVFrame* proc_frame
);
processors::ProcessorConfig proc_cfg_;

View File

@ -14,20 +14,20 @@ namespace logger_manager {
class LIBVIDEO2X_API LoggerManager {
public:
LoggerManager(const LoggerManager &) = delete;
LoggerManager &operator=(const LoggerManager &) = delete;
LoggerManager(const LoggerManager&) = delete;
LoggerManager& operator=(const LoggerManager&) = delete;
static LoggerManager &instance();
static LoggerManager& instance();
std::shared_ptr<spdlog::logger> logger();
bool reconfigure_logger(
const std::string &logger_name,
const std::vector<spdlog::sink_ptr> &sinks,
const std::string &pattern = "%+"
const std::string& logger_name,
const std::vector<spdlog::sink_ptr>& sinks,
const std::string& pattern = "%+"
);
bool set_log_level(const std::string &level_str);
bool set_log_level(const std::string& level_str);
void hook_ffmpeg_logging();
void unhook_ffmpeg_logging();

View File

@ -66,16 +66,16 @@ struct ProcessorConfig {
class Processor {
public:
virtual ~Processor() = default;
virtual int init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVBufferRef *hw_ctx) = 0;
virtual int flush(std::vector<AVFrame *> &) { return 0; }
virtual int init(AVCodecContext* dec_ctx, AVCodecContext* enc_ctx, AVBufferRef* hw_ctx) = 0;
virtual int flush(std::vector<AVFrame*>&) { return 0; }
virtual ProcessingMode get_processing_mode() const = 0;
virtual ProcessorType get_processor_type() const = 0;
virtual void get_output_dimensions(
const ProcessorConfig &proc_cfg,
const ProcessorConfig& proc_cfg,
int in_width,
int in_height,
int &width,
int &height
int& width,
int& height
) const = 0;
};
@ -83,7 +83,7 @@ class Processor {
class Filter : public Processor {
public:
ProcessingMode get_processing_mode() const override { return ProcessingMode::Filter; }
virtual int filter(AVFrame *in_frame, AVFrame **out_frame) = 0;
virtual int filter(AVFrame* in_frame, AVFrame** out_frame) = 0;
};
// Abstract base class for interpolators
@ -91,7 +91,7 @@ class Interpolator : public Processor {
public:
ProcessingMode get_processing_mode() const override { return ProcessingMode::Interpolate; }
virtual int
interpolate(AVFrame *prev_frame, AVFrame *in_frame, AVFrame **out_frame, float time_step) = 0;
interpolate(AVFrame* prev_frame, AVFrame* in_frame, AVFrame** out_frame, float time_step) = 0;
};
} // namespace processors

View File

@ -12,17 +12,17 @@ namespace processors {
// Processor Factory Class
class ProcessorFactory {
public:
using Creator = std::function<std::unique_ptr<Processor>(const ProcessorConfig &, uint32_t)>;
using Creator = std::function<std::unique_ptr<Processor>(const ProcessorConfig&, uint32_t)>;
// Singleton instance accessor
static ProcessorFactory &instance();
static ProcessorFactory& instance();
// Register a processor type with its creation function
void register_processor(ProcessorType type, Creator creator);
// Create a processor instance based on configuration
std::unique_ptr<Processor>
create_processor(const ProcessorConfig &proc_cfg, uint32_t vk_device_index) const;
create_processor(const ProcessorConfig& proc_cfg, uint32_t vk_device_index) const;
private:
// Private constructor for Singleton
@ -32,7 +32,7 @@ class ProcessorFactory {
std::unordered_map<ProcessorType, Creator> creators;
// Static initializer for default processors
static void init_default_processors(ProcessorFactory &factory);
static void init_default_processors(ProcessorFactory& factory);
};
} // namespace processors

View File

@ -15,7 +15,7 @@ extern "C" {
namespace video2x {
namespace avutils {
AVRational get_video_frame_rate(AVFormatContext *ifmt_ctx, int in_vstream_idx) {
AVRational get_video_frame_rate(AVFormatContext* ifmt_ctx, int in_vstream_idx) {
AVRational frame_rate = ifmt_ctx->streams[in_vstream_idx]->avg_frame_rate;
if (frame_rate.num == 0 && frame_rate.den == 0) {
frame_rate = ifmt_ctx->streams[in_vstream_idx]->r_frame_rate;
@ -32,7 +32,7 @@ AVRational get_video_frame_rate(AVFormatContext *ifmt_ctx, int in_vstream_idx) {
return frame_rate;
}
int64_t get_video_frame_count(AVFormatContext *ifmt_ctx, int in_vstream_idx) {
int64_t get_video_frame_count(AVFormatContext* ifmt_ctx, int in_vstream_idx) {
// Use the 'nb_frames' field if it is available
int64_t nb_frames = ifmt_ctx->streams[in_vstream_idx]->nb_frames;
if (nb_frames != AV_NOPTS_VALUE && nb_frames > 0) {
@ -67,15 +67,15 @@ int64_t get_video_frame_count(AVFormatContext *ifmt_ctx, int in_vstream_idx) {
return static_cast<int64_t>(duration_secs * fps);
}
AVPixelFormat get_encoder_default_pix_fmt(const AVCodec *encoder, AVPixelFormat target_pix_fmt) {
AVPixelFormat get_encoder_default_pix_fmt(const AVCodec* encoder, AVPixelFormat target_pix_fmt) {
int ret;
char errbuf[AV_ERROR_MAX_STRING_SIZE];
// Retrieve the list of supported pixel formats
#if LIBAVCODEC_BUILD >= AV_VERSION_INT(61, 13, 100)
const AVPixelFormat *supported_pix_fmts = nullptr;
const AVPixelFormat* supported_pix_fmts = nullptr;
ret = avcodec_get_supported_config(
nullptr, encoder, AV_CODEC_CONFIG_PIX_FORMAT, 0, (const void **)&supported_pix_fmts, nullptr
nullptr, encoder, AV_CODEC_CONFIG_PIX_FORMAT, 0, (const void**)&supported_pix_fmts, nullptr
);
if (ret < 0) {
av_strerror(ret, errbuf, sizeof(errbuf));
@ -93,11 +93,11 @@ AVPixelFormat get_encoder_default_pix_fmt(const AVCodec *encoder, AVPixelFormat
}
}
#else
const AVPixelFormat *supported_pix_fmts = encoder->pix_fmts;
const AVPixelFormat* supported_pix_fmts = encoder->pix_fmts;
#endif
// Determine if the target pixel format has an alpha channel
const AVPixFmtDescriptor *desc = nullptr;
const AVPixFmtDescriptor* desc = nullptr;
int has_alpha = 0;
if (target_pix_fmt != AV_PIX_FMT_NONE) {
desc = av_pix_fmt_desc_get(target_pix_fmt);
@ -106,7 +106,7 @@ AVPixelFormat get_encoder_default_pix_fmt(const AVCodec *encoder, AVPixelFormat
// Iterate over supported pixel formats to find the best match
AVPixelFormat best_pix_fmt = AV_PIX_FMT_NONE;
for (const AVPixelFormat *p = supported_pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
for (const AVPixelFormat* p = supported_pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
if (target_pix_fmt != AV_PIX_FMT_NONE) {
best_pix_fmt =
av_find_best_pix_fmt_of_2(best_pix_fmt, *p, target_pix_fmt, has_alpha, nullptr);
@ -136,7 +136,7 @@ AVPixelFormat get_encoder_default_pix_fmt(const AVCodec *encoder, AVPixelFormat
}
[[gnu::target_clones("arch=x86-64-v4", "arch=x86-64-v3", "default")]]
float get_frame_diff(AVFrame *frame1, AVFrame *frame2) {
float get_frame_diff(AVFrame* frame1, AVFrame* frame2) {
if (!frame1 || !frame2) {
logger()->error("Invalid frame(s) provided for comparison");
return -1.0f;
@ -152,8 +152,8 @@ float get_frame_diff(AVFrame *frame1, AVFrame *frame2) {
// Convert both frames to the target pixel format using the provided function
AVPixelFormat target_pix_fmt = AV_PIX_FMT_RGB24;
AVFrame *rgb_frame1 = conversions::convert_avframe_pix_fmt(frame1, target_pix_fmt);
AVFrame *rgb_frame2 = conversions::convert_avframe_pix_fmt(frame2, target_pix_fmt);
AVFrame* rgb_frame1 = conversions::convert_avframe_pix_fmt(frame1, target_pix_fmt);
AVFrame* rgb_frame2 = conversions::convert_avframe_pix_fmt(frame2, target_pix_fmt);
if (!rgb_frame1 || !rgb_frame2) {
logger()->error("Failed to convert frames to target pixel format");
@ -171,8 +171,8 @@ float get_frame_diff(AVFrame *frame1, AVFrame *frame2) {
// Calculate difference pixel by pixel
for (int y = 0; y < height; y++) {
uint8_t *ptr1 = rgb_frame1->data[0] + y * rgb_frame1->linesize[0];
uint8_t *ptr2 = rgb_frame2->data[0] + y * rgb_frame2->linesize[0];
uint8_t* ptr1 = rgb_frame1->data[0] + y * rgb_frame1->linesize[0];
uint8_t* ptr2 = rgb_frame2->data[0] + y * rgb_frame2->linesize[0];
for (int x = 0; x < width * 3; x++) {
sum_diff +=
static_cast<uint64_t>(ptr1[x] > ptr2[x] ? ptr1[x] - ptr2[x] : ptr2[x] - ptr1[x]);
@ -191,14 +191,14 @@ float get_frame_diff(AVFrame *frame1, AVFrame *frame2) {
}
// Deleter for AVBufferRef unique_ptr
void av_bufferref_deleter(AVBufferRef *bufferref) {
void av_bufferref_deleter(AVBufferRef* bufferref) {
if (bufferref != nullptr) {
av_buffer_unref(&bufferref);
}
}
// Deleter for AVFrame unique_ptr
void av_frame_deleter(AVFrame *frame) {
void av_frame_deleter(AVFrame* frame) {
if (frame != nullptr) {
av_frame_free(&frame);
frame = nullptr;
@ -206,7 +206,7 @@ void av_frame_deleter(AVFrame *frame) {
}
// Deleter for AVPacket unique_ptr
void av_packet_deleter(AVPacket *packet) {
void av_packet_deleter(AVPacket* packet) {
if (packet != nullptr) {
av_packet_unref(packet);
av_packet_free(&packet);

View File

@ -12,8 +12,8 @@ namespace conversions {
// Convert AVFrame format
[[gnu::target_clones("arch=x86-64-v4", "arch=x86-64-v3", "default")]]
AVFrame *convert_avframe_pix_fmt(AVFrame *src_frame, AVPixelFormat pix_fmt) {
AVFrame *dst_frame = av_frame_alloc();
AVFrame* convert_avframe_pix_fmt(AVFrame* src_frame, AVPixelFormat pix_fmt) {
AVFrame* dst_frame = av_frame_alloc();
if (dst_frame == nullptr) {
logger()->error("Failed to allocate destination AVFrame.");
return nullptr;
@ -31,7 +31,7 @@ AVFrame *convert_avframe_pix_fmt(AVFrame *src_frame, AVPixelFormat pix_fmt) {
}
// Create a SwsContext for pixel format conversion
SwsContext *sws_ctx = sws_getContext(
SwsContext* sws_ctx = sws_getContext(
src_frame->width,
src_frame->height,
static_cast<AVPixelFormat>(src_frame->format),
@ -69,8 +69,8 @@ AVFrame *convert_avframe_pix_fmt(AVFrame *src_frame, AVPixelFormat pix_fmt) {
// Convert AVFrame to ncnn::Mat by copying the data
[[gnu::target_clones("arch=x86-64-v4", "arch=x86-64-v3", "default")]]
ncnn::Mat avframe_to_ncnn_mat(AVFrame *frame) {
AVFrame *converted_frame = nullptr;
ncnn::Mat avframe_to_ncnn_mat(AVFrame* frame) {
AVFrame* converted_frame = nullptr;
// Convert to BGR24 format if necessary
if (frame->format != AV_PIX_FMT_BGR24) {
@ -90,10 +90,10 @@ ncnn::Mat avframe_to_ncnn_mat(AVFrame *frame) {
ncnn::Mat ncnn_image = ncnn::Mat(width, height, static_cast<size_t>(3), 3);
// Manually copy the pixel data from AVFrame to the new ncnn::Mat
const uint8_t *src_data = converted_frame->data[0];
const uint8_t* src_data = converted_frame->data[0];
for (int y = 0; y < height; y++) {
uint8_t *dst_row = ncnn_image.row<uint8_t>(y);
const uint8_t *src_row = src_data + y * converted_frame->linesize[0];
uint8_t* dst_row = ncnn_image.row<uint8_t>(y);
const uint8_t* src_row = src_data + y * converted_frame->linesize[0];
// Copy 3 channels (BGR) per pixel
memcpy(dst_row, src_row, static_cast<size_t>(width) * 3);
@ -109,11 +109,11 @@ ncnn::Mat avframe_to_ncnn_mat(AVFrame *frame) {
// Convert ncnn::Mat to AVFrame with a specified pixel format (this part is unchanged)
[[gnu::target_clones("arch=x86-64-v4", "arch=x86-64-v3", "default")]]
AVFrame *ncnn_mat_to_avframe(const ncnn::Mat &mat, AVPixelFormat pix_fmt) {
AVFrame* ncnn_mat_to_avframe(const ncnn::Mat& mat, AVPixelFormat pix_fmt) {
int ret;
// Step 1: Allocate a destination AVFrame for the specified pixel format
AVFrame *dst_frame = av_frame_alloc();
AVFrame* dst_frame = av_frame_alloc();
if (!dst_frame) {
logger()->error("Failed to allocate destination AVFrame.");
return nullptr;
@ -131,7 +131,7 @@ AVFrame *ncnn_mat_to_avframe(const ncnn::Mat &mat, AVPixelFormat pix_fmt) {
}
// Step 2: Convert ncnn::Mat to BGR AVFrame
AVFrame *bgr_frame = av_frame_alloc();
AVFrame* bgr_frame = av_frame_alloc();
if (!bgr_frame) {
logger()->error("Failed to allocate intermediate BGR AVFrame.");
av_frame_free(&dst_frame);
@ -152,15 +152,15 @@ AVFrame *ncnn_mat_to_avframe(const ncnn::Mat &mat, AVPixelFormat pix_fmt) {
// Copy the pixel data from ncnn::Mat to the BGR AVFrame
for (int y = 0; y < mat.h; y++) {
uint8_t *dst_row = bgr_frame->data[0] + y * bgr_frame->linesize[0];
const uint8_t *src_row = mat.row<const uint8_t>(y);
uint8_t* dst_row = bgr_frame->data[0] + y * bgr_frame->linesize[0];
const uint8_t* src_row = mat.row<const uint8_t>(y);
// Copy 3 channels (BGR) per pixel
memcpy(dst_row, src_row, static_cast<size_t>(mat.w) * 3);
}
// Step 3: Convert the BGR frame to the desired pixel format
SwsContext *sws_ctx = sws_getContext(
SwsContext* sws_ctx = sws_getContext(
bgr_frame->width,
bgr_frame->height,
AV_PIX_FMT_BGR24,

View File

@ -22,8 +22,8 @@ Decoder::~Decoder() {
}
}
AVPixelFormat Decoder::get_hw_format(AVCodecContext *, const AVPixelFormat *pix_fmts) {
for (const AVPixelFormat *p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
AVPixelFormat Decoder::get_hw_format(AVCodecContext*, const AVPixelFormat* pix_fmts) {
for (const AVPixelFormat* p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
if (*p == hw_pix_fmt_) {
return *p;
}
@ -34,8 +34,8 @@ AVPixelFormat Decoder::get_hw_format(AVCodecContext *, const AVPixelFormat *pix_
int Decoder::init(
AVHWDeviceType hw_type,
AVBufferRef *hw_ctx,
const std::filesystem::path &in_fpath
AVBufferRef* hw_ctx,
const std::filesystem::path& in_fpath
) {
int ret;
@ -59,10 +59,10 @@ int Decoder::init(
}
int stream_index = ret;
AVStream *video_stream = fmt_ctx_->streams[stream_index];
AVStream* video_stream = fmt_ctx_->streams[stream_index];
// Find the decoder for the video stream
const AVCodec *decoder = avcodec_find_decoder(video_stream->codecpar->codec_id);
const AVCodec* decoder = avcodec_find_decoder(video_stream->codecpar->codec_id);
if (!decoder) {
logger()->error(
"Failed to find decoder for codec ID {}",
@ -96,7 +96,7 @@ int Decoder::init(
// Automatically determine the hardware pixel format
for (int i = 0;; i++) {
const AVCodecHWConfig *config = avcodec_get_hw_config(decoder, i);
const AVCodecHWConfig* config = avcodec_get_hw_config(decoder, i);
if (config == nullptr) {
logger()->error(
"Decoder {} does not support device type {}.",
@ -124,11 +124,11 @@ int Decoder::init(
return 0;
}
AVFormatContext *Decoder::get_format_context() const {
AVFormatContext* Decoder::get_format_context() const {
return fmt_ctx_;
}
AVCodecContext *Decoder::get_codec_context() const {
AVCodecContext* Decoder::get_codec_context() const {
return dec_ctx_;
}

View File

@ -33,11 +33,11 @@ Encoder::~Encoder() {
}
int Encoder::init(
AVBufferRef *hw_ctx,
const std::filesystem::path &out_fpath,
AVFormatContext *ifmt_ctx,
AVCodecContext *dec_ctx,
EncoderConfig &enc_cfg,
AVBufferRef* hw_ctx,
const std::filesystem::path& out_fpath,
AVFormatContext* ifmt_ctx,
AVCodecContext* dec_ctx,
EncoderConfig& enc_cfg,
int width,
int height,
int frm_rate_mul,
@ -53,7 +53,7 @@ int Encoder::init(
}
// Find the encoder
const AVCodec *encoder = avcodec_find_encoder(enc_cfg.codec);
const AVCodec* encoder = avcodec_find_encoder(enc_cfg.codec);
if (!encoder) {
logger()->error(
"Required video encoder not found for codec {}", avcodec_get_name(enc_cfg.codec)
@ -62,7 +62,7 @@ int Encoder::init(
}
// Create a new video stream in the output file
AVStream *out_vstream = avformat_new_stream(ofmt_ctx_, nullptr);
AVStream* out_vstream = avformat_new_stream(ofmt_ctx_, nullptr);
if (!out_vstream) {
logger()->error("Failed to allocate the output video stream");
return AVERROR_UNKNOWN;
@ -150,7 +150,7 @@ int Encoder::init(
}
// Set extra AVOptions
for (const auto &[opt_name, opt_value] : enc_cfg.extra_opts) {
for (const auto& [opt_name, opt_value] : enc_cfg.extra_opts) {
std::string opt_name_str = fsutils::wstring_to_u8string(opt_name);
std::string opt_value_str = fsutils::wstring_to_u8string(opt_value);
spdlog::debug("Setting encoder option '{}' to '{}'", opt_name_str, opt_value_str);
@ -193,7 +193,7 @@ int Encoder::init(
if (enc_cfg.copy_streams) {
// Allocate the stream map
stream_map_ =
reinterpret_cast<int *>(av_malloc_array(ifmt_ctx->nb_streams, sizeof(*stream_map_)));
reinterpret_cast<int*>(av_malloc_array(ifmt_ctx->nb_streams, sizeof(*stream_map_)));
if (!stream_map_) {
logger()->error("Could not allocate stream mapping");
return AVERROR(ENOMEM);
@ -201,8 +201,8 @@ int Encoder::init(
// Map each input stream to an output stream
for (int i = 0; i < static_cast<int>(ifmt_ctx->nb_streams); i++) {
AVStream *in_stream = ifmt_ctx->streams[i];
AVCodecParameters *in_codecpar = in_stream->codecpar;
AVStream* in_stream = ifmt_ctx->streams[i];
AVCodecParameters* in_codecpar = in_stream->codecpar;
// Skip the input video stream as it's already processed
if (i == in_vstream_idx) {
@ -219,7 +219,7 @@ int Encoder::init(
}
// Create corresponding output stream for audio and subtitle streams
AVStream *out_stream = avformat_new_stream(ofmt_ctx_, nullptr);
AVStream* out_stream = avformat_new_stream(ofmt_ctx_, nullptr);
if (!out_stream) {
logger()->error("Failed allocating output stream");
return AVERROR_UNKNOWN;
@ -262,8 +262,8 @@ int Encoder::init(
}
[[gnu::target_clones("arch=x86-64-v4", "arch=x86-64-v3", "default")]]
int Encoder::write_frame(AVFrame *frame, int64_t frame_idx) {
AVFrame *converted_frame = nullptr;
int Encoder::write_frame(AVFrame* frame, int64_t frame_idx) {
AVFrame* converted_frame = nullptr;
int ret;
// Let the encoder decide the frame type
@ -282,7 +282,7 @@ int Encoder::write_frame(AVFrame *frame, int64_t frame_idx) {
converted_frame->pts = frame->pts;
}
AVPacket *enc_pkt = av_packet_alloc();
AVPacket* enc_pkt = av_packet_alloc();
if (!enc_pkt) {
logger()->error("Could not allocate AVPacket");
return AVERROR(ENOMEM);
@ -336,7 +336,7 @@ int Encoder::write_frame(AVFrame *frame, int64_t frame_idx) {
[[gnu::target_clones("arch=x86-64-v4", "arch=x86-64-v3", "default")]]
int Encoder::flush() {
int ret;
AVPacket *enc_pkt = av_packet_alloc();
AVPacket* enc_pkt = av_packet_alloc();
if (!enc_pkt) {
logger()->error("Could not allocate AVPacket");
return AVERROR(ENOMEM);
@ -382,11 +382,11 @@ int Encoder::flush() {
return 0;
}
AVCodecContext *Encoder::get_encoder_context() const {
AVCodecContext* Encoder::get_encoder_context() const {
return enc_ctx_;
}
AVFormatContext *Encoder::get_format_context() const {
AVFormatContext* Encoder::get_format_context() const {
return ofmt_ctx_;
}
@ -394,7 +394,7 @@ int Encoder::get_output_video_stream_index() const {
return out_vstream_idx_;
}
int *Encoder::get_stream_map() const {
int* Encoder::get_stream_map() const {
return stream_map_;
}

View File

@ -13,7 +13,7 @@ namespace processors {
FilterLibplacebo::FilterLibplacebo(
uint32_t vk_device_index,
const std::filesystem::path &shader_path,
const std::filesystem::path& shader_path,
int width,
int height
)
@ -40,7 +40,7 @@ FilterLibplacebo::~FilterLibplacebo() {
}
}
int FilterLibplacebo::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVBufferRef *) {
int FilterLibplacebo::init(AVCodecContext* dec_ctx, AVCodecContext* enc_ctx, AVBufferRef*) {
// Construct the shader path
std::filesystem::path shader_full_path;
if (fsutils::filepath_is_readable(shader_path_)) {
@ -85,7 +85,7 @@ int FilterLibplacebo::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVB
return ret;
}
int FilterLibplacebo::filter(AVFrame *in_frame, AVFrame **out_frame) {
int FilterLibplacebo::filter(AVFrame* in_frame, AVFrame** out_frame) {
int ret;
// Get the filtered frame
@ -116,7 +116,7 @@ int FilterLibplacebo::filter(AVFrame *in_frame, AVFrame **out_frame) {
return 0;
}
int FilterLibplacebo::flush(std::vector<AVFrame *> &flushed_frames) {
int FilterLibplacebo::flush(std::vector<AVFrame*>& flushed_frames) {
int ret = av_buffersrc_add_frame(buffersrc_ctx_, nullptr);
if (ret < 0) {
logger()->error("Error while flushing filter graph");
@ -125,7 +125,7 @@ int FilterLibplacebo::flush(std::vector<AVFrame *> &flushed_frames) {
// Retrieve all remaining frames from the filter graph
while (1) {
AVFrame *filt_frame = av_frame_alloc();
AVFrame* filt_frame = av_frame_alloc();
if (filt_frame == nullptr) {
return AVERROR(ENOMEM);
}
@ -151,11 +151,11 @@ int FilterLibplacebo::flush(std::vector<AVFrame *> &flushed_frames) {
}
void FilterLibplacebo::get_output_dimensions(
const ProcessorConfig &proc_cfg,
const ProcessorConfig& proc_cfg,
int,
int,
int &out_width,
int &out_height
int& out_width,
int& out_height
) const {
out_width = proc_cfg.width;
out_height = proc_cfg.height;

View File

@ -38,7 +38,7 @@ FilterRealcugan::~FilterRealcugan() {
}
}
int FilterRealcugan::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVBufferRef *) {
int FilterRealcugan::init(AVCodecContext* dec_ctx, AVCodecContext* enc_ctx, AVBufferRef*) {
// Construct the model paths using std::filesystem
std::filesystem::path model_param_path;
std::filesystem::path model_bin_path;
@ -161,7 +161,7 @@ int FilterRealcugan::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVBu
return 0;
}
int FilterRealcugan::filter(AVFrame *in_frame, AVFrame **out_frame) {
int FilterRealcugan::filter(AVFrame* in_frame, AVFrame** out_frame) {
int ret;
// Convert the input frame to RGB24
@ -193,11 +193,11 @@ int FilterRealcugan::filter(AVFrame *in_frame, AVFrame **out_frame) {
}
void FilterRealcugan::get_output_dimensions(
const ProcessorConfig &,
const ProcessorConfig&,
int in_width,
int in_height,
int &out_width,
int &out_height
int& out_width,
int& out_height
) const {
out_width = in_width * scaling_factor_;
out_height = in_height * scaling_factor_;

View File

@ -32,7 +32,7 @@ FilterRealesrgan::~FilterRealesrgan() {
}
}
int FilterRealesrgan::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVBufferRef *) {
int FilterRealesrgan::init(AVCodecContext* dec_ctx, AVCodecContext* enc_ctx, AVBufferRef*) {
// Construct the model paths using std::filesystem
std::filesystem::path model_param_path;
std::filesystem::path model_bin_path;
@ -93,7 +93,7 @@ int FilterRealesrgan::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVB
return 0;
}
int FilterRealesrgan::filter(AVFrame *in_frame, AVFrame **out_frame) {
int FilterRealesrgan::filter(AVFrame* in_frame, AVFrame** out_frame) {
int ret;
// Convert the input frame to RGB24
@ -125,11 +125,11 @@ int FilterRealesrgan::filter(AVFrame *in_frame, AVFrame **out_frame) {
}
void FilterRealesrgan::get_output_dimensions(
const ProcessorConfig &,
const ProcessorConfig&,
int in_width,
int in_height,
int &out_width,
int &out_height
int& out_width,
int& out_height
) const {
out_width = in_width * scaling_factor_;
out_height = in_height * scaling_factor_;

View File

@ -54,11 +54,11 @@ static std::filesystem::path get_executable_directory() {
}
#endif // _WIN32
bool filepath_is_readable(const std::filesystem::path &path) {
bool filepath_is_readable(const std::filesystem::path& path) {
#if _WIN32
FILE *fp = _wfopen(path.c_str(), L"rb");
FILE* fp = _wfopen(path.c_str(), L"rb");
#else // _WIN32
FILE *fp = fopen(path.c_str(), "rb");
FILE* fp = fopen(path.c_str(), "rb");
#endif // _WIN32
if (!fp) {
return false;
@ -68,7 +68,7 @@ bool filepath_is_readable(const std::filesystem::path &path) {
return true;
}
std::filesystem::path find_resource_file(const std::filesystem::path &path) {
std::filesystem::path find_resource_file(const std::filesystem::path& path) {
if (filepath_is_readable(path)) {
return path;
}
@ -80,7 +80,7 @@ std::filesystem::path find_resource_file(const std::filesystem::path &path) {
return get_executable_directory() / path;
}
std::string path_to_u8string(const std::filesystem::path &path) {
std::string path_to_u8string(const std::filesystem::path& path) {
#if _WIN32
std::wstring wide_path = path.wstring();
int buffer_size =
@ -99,7 +99,7 @@ std::string path_to_u8string(const std::filesystem::path &path) {
}
#ifdef _WIN32
std::string wstring_to_u8string(const std::wstring &wstr) {
std::string wstring_to_u8string(const std::wstring& wstr) {
if (wstr.empty()) {
return std::string();
}
@ -120,12 +120,12 @@ std::string wstring_to_u8string(const std::wstring &wstr) {
return converted_str;
}
#else
std::string wstring_to_u8string(const std::string &str) {
std::string wstring_to_u8string(const std::string& str) {
return str;
}
#endif
fsutils::StringType path_to_string_type(const std::filesystem::path &path) {
fsutils::StringType path_to_string_type(const std::filesystem::path& path) {
#if _WIN32
return path.wstring();
#else

View File

@ -35,7 +35,7 @@ InterpolatorRIFE::~InterpolatorRIFE() {
}
}
int InterpolatorRIFE::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVBufferRef *) {
int InterpolatorRIFE::init(AVCodecContext* dec_ctx, AVCodecContext* enc_ctx, AVBufferRef*) {
// Construct the model directory path using std::filesystem
std::filesystem::path model_param_dir;
@ -84,9 +84,9 @@ int InterpolatorRIFE::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVB
}
int InterpolatorRIFE::interpolate(
AVFrame *prev_frame,
AVFrame *in_frame,
AVFrame **out_frame,
AVFrame* prev_frame,
AVFrame* in_frame,
AVFrame** out_frame,
float time_step
) {
int ret;
@ -123,11 +123,11 @@ int InterpolatorRIFE::interpolate(
}
void InterpolatorRIFE::get_output_dimensions(
const ProcessorConfig &,
const ProcessorConfig&,
int in_width,
int in_height,
int &out_width,
int &out_height
int& out_width,
int& out_height
) const {
out_width = in_width;
out_height = in_height;

View File

@ -17,19 +17,19 @@ namespace video2x {
namespace processors {
int init_libplacebo(
AVFilterGraph **filter_graph,
AVFilterContext **buffersrc_ctx,
AVFilterContext **buffersink_ctx,
AVCodecContext *dec_ctx,
AVFilterGraph** filter_graph,
AVFilterContext** buffersrc_ctx,
AVFilterContext** buffersink_ctx,
AVCodecContext* dec_ctx,
int out_width,
int out_height,
uint32_t vk_device_index,
const std::filesystem::path &shader_path
const std::filesystem::path& shader_path
) {
int ret;
// Create the Vulkan hardware device context
AVBufferRef *vk_hw_device_ctx = nullptr;
AVBufferRef* vk_hw_device_ctx = nullptr;
ret = av_hwdevice_ctx_create(
&vk_hw_device_ctx, AV_HWDEVICE_TYPE_VULKAN, std::to_string(vk_device_index).c_str(), NULL, 0
);
@ -38,14 +38,14 @@ int init_libplacebo(
vk_hw_device_ctx = nullptr;
}
AVFilterGraph *graph = avfilter_graph_alloc();
AVFilterGraph* graph = avfilter_graph_alloc();
if (!graph) {
logger()->error("Unable to create filter graph.");
return AVERROR(ENOMEM);
}
// Create buffer source
const AVFilter *buffersrc = avfilter_get_by_name("buffer");
const AVFilter* buffersrc = avfilter_get_by_name("buffer");
if (!buffersrc) {
logger()->error("Filter 'buffer' not found.");
avfilter_graph_free(&graph);
@ -65,7 +65,7 @@ int init_libplacebo(
// Make a copy of the AVClass on the stack
AVClass priv_class_copy = *buffersrc->priv_class;
AVClass *priv_class_copy_ptr = &priv_class_copy;
AVClass* priv_class_copy_ptr = &priv_class_copy;
// Check if the colorspace option is supported
if (av_opt_find(&priv_class_copy_ptr, "colorspace", NULL, 0, AV_OPT_SEARCH_FAKE_OBJ)) {
@ -89,10 +89,10 @@ int init_libplacebo(
return ret;
}
AVFilterContext *last_filter = *buffersrc_ctx;
AVFilterContext* last_filter = *buffersrc_ctx;
// Create the libplacebo filter
const AVFilter *libplacebo_filter = avfilter_get_by_name("libplacebo");
const AVFilter* libplacebo_filter = avfilter_get_by_name("libplacebo");
if (!libplacebo_filter) {
logger()->error("Filter 'libplacebo' not found.");
avfilter_graph_free(&graph);
@ -112,7 +112,7 @@ int init_libplacebo(
":h=" + std::to_string(out_height) + ":custom_shader_path='" +
shader_path_string + "'";
AVFilterContext *libplacebo_ctx;
AVFilterContext* libplacebo_ctx;
ret = avfilter_graph_create_filter(
&libplacebo_ctx, libplacebo_filter, "libplacebo", filter_args.c_str(), NULL, graph
);
@ -139,7 +139,7 @@ int init_libplacebo(
last_filter = libplacebo_ctx;
// Create buffer sink
const AVFilter *buffersink = avfilter_get_by_name("buffersink");
const AVFilter* buffersink = avfilter_get_by_name("buffersink");
ret = avfilter_graph_create_filter(buffersink_ctx, buffersink, "out", NULL, NULL, graph);
if (ret < 0) {
logger()->error("Cannot create buffer sink.");

View File

@ -36,7 +36,7 @@ int VideoProcessor::process(
int ret = 0;
// Helper lambda to handle errors:
auto handle_error = [&](int error_code, const std::string &msg) {
auto handle_error = [&](int error_code, const std::string& msg) {
// Format and log the error message
char errbuf[AV_ERROR_MAX_STRING_SIZE];
av_strerror(error_code, errbuf, sizeof(errbuf));
@ -57,7 +57,7 @@ int VideoProcessor::process(
// Initialize hardware device context
if (hw_device_type_ != AV_HWDEVICE_TYPE_NONE) {
AVBufferRef *tmp_hw_ctx = nullptr;
AVBufferRef* tmp_hw_ctx = nullptr;
ret = av_hwdevice_ctx_create(&tmp_hw_ctx, hw_device_type_, NULL, NULL, 0);
if (ret < 0) {
return handle_error(ret, "Error initializing hardware device context");
@ -72,8 +72,8 @@ int VideoProcessor::process(
return handle_error(ret, "Failed to initialize decoder");
}
AVFormatContext *ifmt_ctx = decoder.get_format_context();
AVCodecContext *dec_ctx = decoder.get_codec_context();
AVFormatContext* ifmt_ctx = decoder.get_format_context();
AVCodecContext* dec_ctx = decoder.get_codec_context();
int in_vstream_idx = decoder.get_video_stream_index();
// Create and initialize the appropriate filter
@ -140,19 +140,19 @@ int VideoProcessor::process(
// Process frames using the selected filter.
int VideoProcessor::process_frames(
decoder::Decoder &decoder,
encoder::Encoder &encoder,
std::unique_ptr<processors::Processor> &processor
decoder::Decoder& decoder,
encoder::Encoder& encoder,
std::unique_ptr<processors::Processor>& processor
) {
char errbuf[AV_ERROR_MAX_STRING_SIZE];
int ret = 0;
// Get required objects
AVFormatContext *ifmt_ctx = decoder.get_format_context();
AVCodecContext *dec_ctx = decoder.get_codec_context();
AVFormatContext* ifmt_ctx = decoder.get_format_context();
AVCodecContext* dec_ctx = decoder.get_codec_context();
int in_vstream_idx = decoder.get_video_stream_index();
AVFormatContext *ofmt_ctx = encoder.get_format_context();
int *stream_map = encoder.get_stream_map();
AVFormatContext* ofmt_ctx = encoder.get_format_context();
int* stream_map = encoder.get_stream_map();
// Reference to the previous frame does not require allocation
// It will be cloned from the current frame
@ -236,7 +236,7 @@ int VideoProcessor::process_frames(
}
// Process the frame based on the selected processing mode
AVFrame *proc_frame = nullptr;
AVFrame* proc_frame = nullptr;
switch (processor->get_processing_mode()) {
case processors::ProcessingMode::Filter: {
ret = process_filtering(processor, encoder, frame.get(), proc_frame);
@ -269,7 +269,7 @@ int VideoProcessor::process_frames(
}
// Flush the filter
std::vector<AVFrame *> raw_flushed_frames;
std::vector<AVFrame*> raw_flushed_frames;
ret = processor->flush(raw_flushed_frames);
if (ret < 0) {
av_strerror(ret, errbuf, sizeof(errbuf));
@ -279,12 +279,12 @@ int VideoProcessor::process_frames(
// Wrap flushed frames in unique_ptrs
std::vector<std::unique_ptr<AVFrame, decltype(&avutils::av_frame_deleter)>> flushed_frames;
for (AVFrame *raw_frame : raw_flushed_frames) {
for (AVFrame* raw_frame : raw_flushed_frames) {
flushed_frames.emplace_back(raw_frame, &avutils::av_frame_deleter);
}
// Encode and write all flushed frames
for (auto &flushed_frame : flushed_frames) {
for (auto& flushed_frame : flushed_frames) {
ret = write_frame(flushed_frame.get(), encoder);
if (ret < 0) {
return ret;
@ -303,7 +303,7 @@ int VideoProcessor::process_frames(
return ret;
}
int VideoProcessor::write_frame(AVFrame *frame, encoder::Encoder &encoder) {
int VideoProcessor::write_frame(AVFrame* frame, encoder::Encoder& encoder) {
char errbuf[AV_ERROR_MAX_STRING_SIZE];
int ret = 0;
@ -318,17 +318,17 @@ int VideoProcessor::write_frame(AVFrame *frame, encoder::Encoder &encoder) {
}
int VideoProcessor::write_raw_packet(
AVPacket *packet,
AVFormatContext *ifmt_ctx,
AVFormatContext *ofmt_ctx,
int *stream_map
AVPacket* packet,
AVFormatContext* ifmt_ctx,
AVFormatContext* ofmt_ctx,
int* stream_map
) {
char errbuf[AV_ERROR_MAX_STRING_SIZE];
int ret = 0;
AVStream *in_stream = ifmt_ctx->streams[packet->stream_index];
AVStream* in_stream = ifmt_ctx->streams[packet->stream_index];
int out_stream_idx = stream_map[packet->stream_index];
AVStream *out_stream = ofmt_ctx->streams[out_stream_idx];
AVStream* out_stream = ofmt_ctx->streams[out_stream_idx];
av_packet_rescale_ts(packet, in_stream->time_base, out_stream->time_base);
packet->stream_index = out_stream_idx;
@ -342,16 +342,16 @@ int VideoProcessor::write_raw_packet(
}
int VideoProcessor::process_filtering(
std::unique_ptr<processors::Processor> &processor,
encoder::Encoder &encoder,
AVFrame *frame,
AVFrame *proc_frame
std::unique_ptr<processors::Processor>& processor,
encoder::Encoder& encoder,
AVFrame* frame,
AVFrame* proc_frame
) {
char errbuf[AV_ERROR_MAX_STRING_SIZE];
int ret = 0;
// Cast the processor to a Filter
processors::Filter *filter = static_cast<processors::Filter *>(processor.get());
processors::Filter* filter = static_cast<processors::Filter*>(processor.get());
// Process the frame using the filter
ret = filter->filter(frame, &proc_frame);
@ -370,18 +370,18 @@ int VideoProcessor::process_filtering(
}
int VideoProcessor::process_interpolation(
std::unique_ptr<processors::Processor> &processor,
encoder::Encoder &encoder,
std::unique_ptr<AVFrame, decltype(&avutils::av_frame_deleter)> &prev_frame,
AVFrame *frame,
AVFrame *proc_frame
std::unique_ptr<processors::Processor>& processor,
encoder::Encoder& encoder,
std::unique_ptr<AVFrame, decltype(&avutils::av_frame_deleter)>& prev_frame,
AVFrame* frame,
AVFrame* proc_frame
) {
char errbuf[AV_ERROR_MAX_STRING_SIZE];
int ret = 0;
// Cast the processor to an Interpolator
processors::Interpolator *interpolator =
static_cast<processors::Interpolator *>(processor.get());
processors::Interpolator* interpolator =
static_cast<processors::Interpolator*>(processor.get());
// Calculate the time step for each frame
float time_step = 1.0f / static_cast<float>(proc_cfg_.frm_rate_mul);

View File

@ -26,7 +26,7 @@ static spdlog::level::level_enum ffmpeg_level_to_spdlog(int av_level) {
}
}
static void ffmpeg_log_callback(void *avcl, int level, const char *fmt, va_list vargs) {
static void ffmpeg_log_callback(void* avcl, int level, const char* fmt, va_list vargs) {
// Format the message the same way as the default callback
char line[1024];
int print_prefix = 1;
@ -53,7 +53,7 @@ LoggerManager::LoggerManager() {
spdlog::register_logger(logger_);
}
LoggerManager &LoggerManager::instance() {
LoggerManager& LoggerManager::instance() {
static LoggerManager instance;
return instance;
}
@ -63,9 +63,9 @@ std::shared_ptr<spdlog::logger> LoggerManager::logger() {
}
bool LoggerManager::reconfigure_logger(
const std::string &logger_name,
const std::vector<spdlog::sink_ptr> &sinks,
const std::string &pattern
const std::string& logger_name,
const std::vector<spdlog::sink_ptr>& sinks,
const std::string& pattern
) {
if (logger_name.empty() || sinks.empty()) {
return false;
@ -94,7 +94,7 @@ bool LoggerManager::reconfigure_logger(
return true;
}
bool LoggerManager::set_log_level(const std::string &level_str) {
bool LoggerManager::set_log_level(const std::string& level_str) {
spdlog::level::level_enum log_level = spdlog::level::from_str(level_str);
if (log_level == spdlog::level::off && level_str != "off") {
// Invalid level_str

View File

@ -13,7 +13,7 @@ namespace video2x {
namespace processors {
// Access the singleton instance
ProcessorFactory &ProcessorFactory::instance() {
ProcessorFactory& ProcessorFactory::instance() {
static ProcessorFactory factory;
// Ensure default processors are registered only once
@ -33,7 +33,7 @@ void ProcessorFactory::register_processor(ProcessorType type, Creator creator) {
// Create a processor instance
std::unique_ptr<Processor> ProcessorFactory::create_processor(
const ProcessorConfig &proc_cfg,
const ProcessorConfig& proc_cfg,
uint32_t vk_device_index
) const {
auto it = creators.find(proc_cfg.processor_type);
@ -49,12 +49,12 @@ std::unique_ptr<Processor> ProcessorFactory::create_processor(
}
// Initialize default processors
void ProcessorFactory::init_default_processors(ProcessorFactory &factory) {
void ProcessorFactory::init_default_processors(ProcessorFactory& factory) {
factory.register_processor(
ProcessorType::Libplacebo,
[](const ProcessorConfig &proc_cfg,
[](const ProcessorConfig& proc_cfg,
uint32_t vk_device_index) -> std::unique_ptr<Processor> {
const auto &config = std::get<LibplaceboConfig>(proc_cfg.config);
const auto& config = std::get<LibplaceboConfig>(proc_cfg.config);
if (config.shader_path.empty()) {
logger()->critical("Shader path must be provided for the libplacebo filter");
return nullptr;
@ -76,9 +76,9 @@ void ProcessorFactory::init_default_processors(ProcessorFactory &factory) {
factory.register_processor(
ProcessorType::RealESRGAN,
[](const ProcessorConfig &proc_cfg,
[](const ProcessorConfig& proc_cfg,
uint32_t vk_device_index) -> std::unique_ptr<Processor> {
const auto &config = std::get<RealESRGANConfig>(proc_cfg.config);
const auto& config = std::get<RealESRGANConfig>(proc_cfg.config);
if (proc_cfg.scaling_factor <= 0) {
logger()->critical("Scaling factor must be provided for the RealESRGAN filter");
return nullptr;
@ -98,9 +98,9 @@ void ProcessorFactory::init_default_processors(ProcessorFactory &factory) {
factory.register_processor(
ProcessorType::RealCUGAN,
[](const ProcessorConfig &proc_cfg,
[](const ProcessorConfig& proc_cfg,
uint32_t vk_device_index) -> std::unique_ptr<Processor> {
const auto &config = std::get<RealCUGANConfig>(proc_cfg.config);
const auto& config = std::get<RealCUGANConfig>(proc_cfg.config);
if (proc_cfg.scaling_factor <= 0) {
logger()->critical("Scaling factor must be provided for the RealCUGAN filter");
return nullptr;
@ -123,9 +123,9 @@ void ProcessorFactory::init_default_processors(ProcessorFactory &factory) {
factory.register_processor(
ProcessorType::RIFE,
[](const ProcessorConfig &proc_cfg,
[](const ProcessorConfig& proc_cfg,
uint32_t vk_device_index) -> std::unique_ptr<Processor> {
const auto &cfg = std::get<RIFEConfig>(proc_cfg.config);
const auto& cfg = std::get<RIFEConfig>(proc_cfg.config);
if (cfg.model_name.empty()) {
logger()->critical("Model name must be provided for the RIFE filter");
return nullptr;