diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..b3b8792
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,51 @@
+cmake_minimum_required(VERSION 3.18)
+set(PROJECT_NAME TestDecode)
+project(${PROJECT_NAME})
+message(STATUS "project name : ${PROJECT_NAME}")
+
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+#set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
+
+# Output directory for the demo executable
+set(EXECUTABLE_OUTPUT_PATH ${CMAKE_CURRENT_SOURCE_DIR}/app)
+
+set(OpenCV_DIR "/usr/local/opencv4.9")
+find_package(OpenCV REQUIRED PATHS ${OpenCV_DIR})
+message(STATUS ${OpenCV_VERSION})
+
+include_directories(${OpenCV_INCLUDE_DIRS})
+message(STATUS ${OpenCV_INCLUDE_DIRS})
+
+set(RTSP_DECODER_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/RTSPDecoder)
+
+# Add the decoder sub-project
+add_subdirectory(RTSPDecoder)
+
+# Library search path
+set(LIB_DIR "${CMAKE_CURRENT_SOURCE_DIR}/app/lib")
+#message(STATUS ${LIB_DIR})
+#find_library(RTSPDecoder_LIB RTSPDecoder
+#    PATHS ${LIB_DIR}
+#    NO_DEFAULT_PATH)
+
+add_executable(${PROJECT_NAME} main.cpp)
+
+
+
+#message(STATUS ${RTSPDecoder_LIB})
+
+# Link the shared library
+target_link_libraries(${PROJECT_NAME}
+    PRIVATE
+#    ${RTSPDecoder_LIB}
+    RTSPDecoder
+    ${OpenCV_LIBS}
+)
+
+# Headers of the sub-project
+target_include_directories(${PROJECT_NAME}
+    PRIVATE
+    ${RTSP_DECODER_ROOT}
+)
diff --git a/README.md b/README.md
index c331ec8..febb0d0 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,30 @@
 # RtspDecoderByFFmpeg
+A multi-stream RTSP pull-and-decode library built on FFmpeg
+
+## Decoder toolkit
+### Directory layout
+- app: build output (shared library and demo executable)
+  - lib: the compiled shared library
+- cmake: CMake find modules (FindFFmpeg.cmake)
+- RTSPDecoder: decoder library source code
+
+### Building
+#### Dependency versions
+- CMake 3.18+
+- CUDA 12.1
+- cuDNN
+- FFmpeg 4.4.5
+- OpenCV 4.9
+### Build steps
+```
+mkdir build && cd build
+
+cmake ..
+
+make -j10
+
+# optional
+make install
+```
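+
+### Usage sketch
+A minimal example of driving the library from C++ (the RTSP URL below is a placeholder; see `main.cpp` for the full multi-stream demo):
+
+```cpp
+#include "RTSPDecoder/RTSPDecoder.h"
+#include <iostream>
+
+int main() {
+    RTSPDecoder decoder;
+    RTSPDecoder::DecoderConfig config;
+    config.use_hw_accel = true;   // falls back to software decoding when no GPU is detected
+    config.buffer_size  = 5;      // frames buffered per stream (oldest dropped when full)
+    if (!decoder.init(config)) return 1;
+
+    int id = -1;
+    try {
+        id = decoder.addStream("rtsp://127.0.0.1:8554/stream");  // placeholder URL
+    } catch (const std::exception& e) {
+        std::cerr << "addStream failed: " << e.what() << std::endl;
+        return 1;
+    }
+
+    cv::Mat frame;
+    for (int i = 0; i < 100; ++i) {
+        if (decoder.getFrame(id, frame, 100)) {   // wait up to 100 ms for a frame
+            std::cout << "frame " << frame.cols << "x" << frame.rows << std::endl;
+        }
+    }
+    decoder.removeStream(id);
+    return 0;
+}
+```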
+
-# 基于FFmpeg制作的多路视频RTSP拉流解码库
\ No newline at end of file
diff --git a/RTSPDecoder/CMakeLists.txt b/RTSPDecoder/CMakeLists.txt
new file mode 100644
index 0000000..c7f6f5b
--- /dev/null
+++ b/RTSPDecoder/CMakeLists.txt
@@ -0,0 +1,60 @@
+cmake_minimum_required(VERSION 3.18)
+set(PROJECT_NAME RTSPDecoder)
+project(${PROJECT_NAME})
+
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+if(POLICY CMP0146)
+    cmake_policy(SET CMP0146 OLD)
+endif()
+
+# Output directory for the shared library (top-level app/lib)
+set(LIBRARY_OUTPUT_PATH ${CMAKE_SOURCE_DIR}/app/lib)
+
+set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
+
+# Optional CUDA support
+option(USE_CUDA "Enable CUDA support" ON)
+if(USE_CUDA)
+    find_package(CUDA REQUIRED)
+endif()
+
+# Find dependencies
+find_package(OpenCV REQUIRED)
+find_package(FFmpeg REQUIRED)
+
+
+# Build the shared library
+add_library(${PROJECT_NAME} SHARED
+    RTSPDecoder.cpp
+    RTSPDecoder.h
+)
+
+target_include_directories(${PROJECT_NAME} PRIVATE
+    ${OpenCV_INCLUDE_DIRS}
+    ${FFMPEG_INCLUDE_DIRS}
+)
+
+target_link_libraries(${PROJECT_NAME} PRIVATE
+    ${OpenCV_LIBS}
+#    ${FFMPEG_LIBRARIES}
+    avutil avcodec avformat avdevice avfilter swscale swresample
+)
+
+if(USE_CUDA AND CUDA_FOUND)
+    target_include_directories(${PROJECT_NAME} PRIVATE ${CUDA_INCLUDE_DIRS})
+    target_link_libraries(${PROJECT_NAME} PRIVATE ${CUDA_LIBRARIES})
+    target_compile_definitions(${PROJECT_NAME} PRIVATE USE_CUDA_ACCEL)
+endif()
+
+# Install rules
+install(TARGETS ${PROJECT_NAME}
+    LIBRARY DESTINATION lib
+    ARCHIVE DESTINATION lib
+)
+
+install(FILES RTSPDecoder.h
+    DESTINATION include
+)
+
diff --git a/RTSPDecoder/RTSPDecoder.cpp b/RTSPDecoder/RTSPDecoder.cpp
new file mode 100644
index 0000000..6bd2a32
--- /dev/null
+++ b/RTSPDecoder/RTSPDecoder.cpp
@@ -0,0 +1,538 @@
+#include "RTSPDecoder.h"
+#include <stdexcept>
+#include <chrono>
+#include <thread>
+#include <cstdio>
+#include <cuda_runtime.h>
+
+#define MAX_RETRY_COUNT 5
+#define RETRY_DELAY_MS 1000
+
+RTSPDecoder::RTSPDecoder() {
+    avformat_network_init();
+}
+
+RTSPDecoder::~RTSPDecoder() {
+    // Stop all decode threads
+    for (auto& stream : streams_) {
+        if (stream && stream->running) {
+            stream->running = false;
+            if (stream->decode_thread.joinable()) {
+                stream->decode_thread.join();
+            }
+        }
+        cleanupStream(stream.get());
+    }
+
+    avformat_network_deinit();
+}
+
+bool RTSPDecoder::init(const DecoderConfig& config) {
+    if (initialized_) return true;
+
+    config_ = config;
+
+    // Validate the configuration
+    if (config_.max_streams < 1 || config_.max_streams > 60) {
+        throw std::invalid_argument("Max streams must be between 1 and 60");
+    }
+
+    // Initialize GPU assignment
+    if (config_.use_hw_accel) {
+        int gpu_count = getGPUCount();
+        if (gpu_count == 0) {
+            config_.use_hw_accel = false;
+        } else if (config_.gpu_id >= 0 && config_.gpu_id < gpu_count) {
+            next_gpu_id_ = config_.gpu_id;
+        }
+    }
+
+    initialized_ = true;
+    return true;
+}
+
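+// addStream(): opens the URL over TCP with a 5 s socket timeout (retrying up to
+// MAX_RETRY_COUNT times), locates the first video stream, sets up the decoder
+// (optionally with a CUDA device picked by allocateGPU()) and spawns a per-stream
+// decode thread. Returns the stream id used by getFrame()/removeStream();
+// throws std::runtime_error on failure.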
+int RTSPDecoder::addStream(const std::string& rtsp_url) {
+    if (!initialized_) {
+        throw std::runtime_error("Decoder not initialized");
+    }
+
+    std::lock_guard<std::mutex> lock(streams_mutex_);
+
+    if (streams_.size() >= static_cast<size_t>(config_.max_streams)) {
+        throw std::runtime_error("Maximum number of streams reached");
+    }
+
+    auto ctx = std::make_unique<StreamContext>();
+    int stream_id = static_cast<int>(streams_.size());
+
+    // Pick a GPU for this stream
+    int gpu_id = config_.use_hw_accel ? allocateGPU() : -1;
+
+    // Allocate the format context
+    ctx->format_ctx = avformat_alloc_context();
+    if (!ctx->format_ctx) {
+        throw std::runtime_error("Failed to allocate format context");
+    }
+
+    // RTSP options
+    AVDictionary* options = nullptr;
+    av_dict_set(&options, "rtsp_transport", "tcp", 0);
+    av_dict_set(&options, "stimeout", "5000000", 0);  // 5 second socket timeout
+
+    // Open the input stream
+    int retry_count = 0;
+    while (retry_count < MAX_RETRY_COUNT) {
+        int ret = avformat_open_input(&ctx->format_ctx, rtsp_url.c_str(), nullptr, &options);
+        if (ret == 0) break;
+
+        retry_count++;
+        if (retry_count < MAX_RETRY_COUNT) {
+            std::this_thread::sleep_for(std::chrono::milliseconds(RETRY_DELAY_MS));
+        }
+    }
+
+    av_dict_free(&options);
+
+    if (retry_count == MAX_RETRY_COUNT) {
+        cleanupStream(ctx.get());
+        throw std::runtime_error("Failed to open RTSP stream after retries");
+    }
+
+    // Read stream information
+    if (avformat_find_stream_info(ctx->format_ctx, nullptr) < 0) {
+        cleanupStream(ctx.get());
+        throw std::runtime_error("Failed to find stream information");
+    }
+
+    // Locate the video stream
+    for (unsigned int i = 0; i < ctx->format_ctx->nb_streams; i++) {
+        if (ctx->format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
+            ctx->video_stream_idx = i;
+            break;
+        }
+    }
+
+    if (ctx->video_stream_idx == -1) {
+        cleanupStream(ctx.get());
+        throw std::runtime_error("No video stream found");
+    }
+
+    // Codec parameters of the video stream
+    AVCodecParameters* codecpar = ctx->format_ctx->streams[ctx->video_stream_idx]->codecpar;
+
+    // Find the decoder (the same decoder is used with and without hardware
+    // acceleration; CUDA is enabled through hw_device_ctx below)
+    const AVCodec* codec = avcodec_find_decoder(codecpar->codec_id);
+
+    if (!codec) {
+        cleanupStream(ctx.get());
+        throw std::runtime_error("Unsupported codec");
+    }
+
+    // Create the decoder context
+    ctx->codec_ctx = avcodec_alloc_context3(codec);
+    if (!ctx->codec_ctx) {
+        cleanupStream(ctx.get());
+        throw std::runtime_error("Failed to allocate codec context");
+    }
+
+    // Copy parameters into the decoder context
+    if (avcodec_parameters_to_context(ctx->codec_ctx, codecpar) < 0) {
+        cleanupStream(ctx.get());
+        throw std::runtime_error("Failed to copy codec parameters");
+    }
+
+    // Initialize hardware acceleration
+    if (config_.use_hw_accel && gpu_id >= 0) {
+        if (!initHWAccel(*ctx, gpu_id)) {
+            cleanupStream(ctx.get());
+            throw std::runtime_error("Failed to initialize hardware acceleration");
+        }
+    }
+
+    // Decoder thread count (scaled to the CPU core count); must be set before avcodec_open2()
+    ctx->codec_ctx->thread_count = std::thread::hardware_concurrency();
+    if (ctx->codec_ctx->thread_count == 0) {
+        ctx->codec_ctx->thread_count = 4;  // fallback default
+    }
+
+    // Open the decoder
+    if (avcodec_open2(ctx->codec_ctx, codec, nullptr) < 0) {
+        cleanupStream(ctx.get());
+        throw std::runtime_error("Failed to open codec");
+    }
+
+    // Start the decode thread
+    ctx->gpu_id = gpu_id;
+    ctx->running = true;
+    ctx->decode_thread = std::thread(&RTSPDecoder::decodeThreadFunc, this, stream_id, ctx.get());
+
+    streams_.push_back(std::move(ctx));
+    return stream_id;
+}
+
+bool RTSPDecoder::removeStream(int stream_id) {
+    std::lock_guard<std::mutex> lock(streams_mutex_);
+
+    if (stream_id < 0 || stream_id >= static_cast<int>(streams_.size()) || !streams_[stream_id]) {
+        return false;
+    }
+
+    auto& ctx = streams_[stream_id];
+
+    // Stop the decode thread
+    ctx->running = false;
+    if (ctx->decode_thread.joinable()) {
+        ctx->decode_thread.join();
+    }
+
+    // Release resources
+    cleanupStream(ctx.get());
+
+    // Remove from the list (the slot remains, so other stream ids stay valid)
+    streams_[stream_id].reset();
+
+    return true;
+}
+
+bool RTSPDecoder::getFrame(int stream_id, cv::Mat& frame, int timeout_ms) {
+    if (stream_id < 0 || stream_id >= static_cast<int>(streams_.size()) || !streams_[stream_id]) {
+        return false;
+    }
+
+    auto& ctx = streams_[stream_id];
+    std::unique_lock<std::mutex> lock(ctx->mutex);
+
+    if (ctx->frame_queue.empty()) {
+        if (timeout_ms <= 0) {
+            return false;
+        }
+
+        if (ctx->cond.wait_for(lock, std::chrono::milliseconds(timeout_ms)) == std::cv_status::timeout) {
+            return false;
+        }
+    }
+
+    if (ctx->frame_queue.empty()) {
+        return false;
+    }
+
+    frame = ctx->frame_queue.front();
+    ctx->frame_queue.pop();
+
+    return true;
+}
+
+int RTSPDecoder::getActiveStreamCount() const {
+    int count = 0;
+    for (const auto& stream : streams_) {
+        if (stream && stream->running) {
+            count++;
+        }
+    }
+    return count;
+}
+
+int RTSPDecoder::getGPUCount() {
+    int count = 0;
+    cudaError_t err = cudaGetDeviceCount(&count);
+    if (err != cudaSuccess) {
+        return 0;
+    }
+    return count;
+}
+
+bool RTSPDecoder::initHWAccel(StreamContext& ctx, int gpu_id) {
+    AVBufferRef* hw_device_ctx = nullptr;
+
+    // Create a CUDA device context for the selected GPU
+    char device_name[32];
+    snprintf(device_name, sizeof(device_name), "%d", gpu_id);
+
+    if (av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_CUDA, device_name, nullptr, 0) < 0) {
+        return false;
+    }
+
+    ctx.hw_device_ctx = av_buffer_ref(hw_device_ctx);
+    ctx.codec_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
+    av_buffer_unref(&hw_device_ctx);
+
+    return true;
+}
+
+// decodeThreadFunc(): per-stream loop. Reads packets, skips non-video packets, feeds the
+// decoder, copies hardware frames back to system memory when hw acceleration is enabled,
+// converts each frame to a BGR cv::Mat (via sws_scale when the configured output size
+// differs from the source) and pushes it into the bounded frame queue, dropping the oldest
+// frame when the queue is full. Read errors trigger a reconnect with the same retry policy
+// as addStream(); EOF seeks back to the start, which suits the file-based demo.
+void RTSPDecoder::decodeThreadFunc(int stream_id, StreamContext* ctx) {
+    AVFrame* frame = av_frame_alloc();
+    AVFrame* sw_frame = nullptr;
+    AVPacket* pkt = av_packet_alloc();
+
+    if (!frame || !pkt) {
+        ctx->running = false;
+        if (frame) av_frame_free(&frame);
+        if (pkt) av_packet_free(&pkt);
+        return;
+    }
+
+    // With hardware acceleration, prepare a frame to receive the copy in system memory
+    if (ctx->hw_device_ctx) {
+        sw_frame = av_frame_alloc();
+        if (!sw_frame) {
+            ctx->running = false;
+            av_frame_free(&frame);
+            av_packet_free(&pkt);
+            return;
+        }
+    }
+
+    // Decode loop
+    while (ctx->running) {
+        // Read a packet
+        int ret = av_read_frame(ctx->format_ctx, pkt);
+        if (ret < 0) {
+            if (ret == AVERROR(EAGAIN)) {
+                continue;
+            }
+
+            // Error or end of stream
+            if (ret != AVERROR_EOF) {
+                // Reconnect with the same retry policy as addStream(); keep a copy of the
+                // URL because avformat_close_input() frees the context that owns it
+                std::string url = (ctx->format_ctx && ctx->format_ctx->url) ? ctx->format_ctx->url : "";
+                int retry_count = 0;
+                while (retry_count < MAX_RETRY_COUNT && ctx->running) {
+                    if (ctx->format_ctx) {
+                        avformat_close_input(&ctx->format_ctx);
+                    }
+                    AVDictionary* options = nullptr;
+                    av_dict_set(&options, "rtsp_transport", "tcp", 0);
+                    av_dict_set(&options, "stimeout", "5000000", 0);
+
+                    ret = avformat_open_input(&ctx->format_ctx, url.c_str(), nullptr, &options);
+                    av_dict_free(&options);
+
+                    if (ret == 0) {
+                        avformat_find_stream_info(ctx->format_ctx, nullptr);
+                        break;
+                    }
+
+                    retry_count++;
+                    std::this_thread::sleep_for(std::chrono::milliseconds(RETRY_DELAY_MS));
+                }
+
+                if (retry_count == MAX_RETRY_COUNT) {
+                    ctx->running = false;
+                    break;
+                }
+            } else {
+                // EOF: seek back to the beginning (useful for the file-based demo)
+                av_seek_frame(ctx->format_ctx, ctx->video_stream_idx, 0, AVSEEK_FLAG_BACKWARD);
+            }
+
+            av_packet_unref(pkt);
+            continue;
+        }
+
+        // Skip packets that do not belong to the video stream
+        if (pkt->stream_index != ctx->video_stream_idx) {
+            av_packet_unref(pkt);
+            continue;
+        }
+
+        // Send the packet to the decoder
+        ret = avcodec_send_packet(ctx->codec_ctx, pkt);
+        av_packet_unref(pkt);
+
+        if (ret < 0 && ret != AVERROR(EAGAIN)) {
+            continue;
+        }
+
+        // Receive decoded frames
+        while (ctx->running) {
+            ret = avcodec_receive_frame(ctx->codec_ctx, frame);
+            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
+                break;
+            }
+
+            if (ret < 0) {
+                break;
+            }
+
+            AVFrame* output_frame = frame;
+
+            // With hardware acceleration the frame lives in GPU memory; copy it back
+            if (ctx->hw_device_ctx) {
+                if (av_hwframe_transfer_data(sw_frame, frame, 0) < 0) {
+                    av_frame_unref(frame);
+                    break;
+                }
+
+                output_frame = sw_frame;
+            }
+
+            // Convert to an OpenCV Mat
+            int width = config_.width > 0 ? config_.width : output_frame->width;
+            int height = config_.height > 0 ? config_.height : output_frame->height;
+
+            // Scaling required: convert to BGR24 at the target size via sws_scale
+            if (width != output_frame->width || height != output_frame->height) {
+                if (!ctx->sws_ctx) {
+                    ctx->sws_ctx = sws_getContext(
+                        output_frame->width, output_frame->height,
+                        static_cast<AVPixelFormat>(output_frame->format),
+                        width, height, AV_PIX_FMT_BGR24,
+                        SWS_BILINEAR, nullptr, nullptr, nullptr);
+
+                    if (!ctx->sws_ctx) {
+                        av_frame_unref(output_frame);
+                        break;
+                    }
+                }
+
+                // Destination frame
+                AVFrame* bgr_frame = av_frame_alloc();
+                if (!bgr_frame) {
+                    av_frame_unref(output_frame);
+                    break;
+                }
+
+                bgr_frame->format = AV_PIX_FMT_BGR24;
+                bgr_frame->width = width;
+                bgr_frame->height = height;
+
+                if (av_frame_get_buffer(bgr_frame, 0) < 0) {
+                    av_frame_free(&bgr_frame);
+                    av_frame_unref(output_frame);
+                    break;
+                }
+
+                // Convert
+                sws_scale(ctx->sws_ctx,
+                          output_frame->data, output_frame->linesize, 0, output_frame->height,
+                          bgr_frame->data, bgr_frame->linesize);
+
+                // Wrap the BGR buffer as an OpenCV Mat
+                cv::Mat mat(height, width, CV_8UC3, bgr_frame->data[0], bgr_frame->linesize[0]);
+
+                // Deep copy, because bgr_frame is released below
+                cv::Mat mat_copy = mat.clone();
+
+                av_frame_free(&bgr_frame);
+
+                // Push the frame into the queue
+                {
+                    std::lock_guard<std::mutex> lock(ctx->mutex);
+                    if (ctx->frame_queue.size() >= static_cast<size_t>(config_.buffer_size)) {
+                        ctx->frame_queue.pop();
+                    }
+                    ctx->frame_queue.push(mat_copy);
+                    ctx->cond.notify_one();
+                }
+            } else {
+                // No scaling needed: convert the pixel format directly
+                cv::Mat mat(output_frame->height, output_frame->width, CV_8UC3);
+
+                // Convert according to the source pixel format
+                // Frames copied back from a CUDA device are typically NV12, while software
+                // H.264/H.265 decoding usually yields YUV420P; anything else falls through
+                // to the generic sws_scale fallback below.
+                if (output_frame->format == AV_PIX_FMT_NV12) {
+                    // NV12 -> BGR
+                    cv::Mat y_plane(output_frame->height, output_frame->width, CV_8UC1,
+                                    output_frame->data[0], output_frame->linesize[0]);
+                    cv::Mat uv_plane(output_frame->height / 2, output_frame->width / 2, CV_8UC2,
+                                     output_frame->data[1], output_frame->linesize[1]);
+                    cv::cvtColorTwoPlane(y_plane, uv_plane, mat, cv::COLOR_YUV2BGR_NV12);
+                } else if (output_frame->format == AV_PIX_FMT_YUV420P) {
+                    // YUV420P -> BGR
+                    cv::Mat yuv_mat(output_frame->height * 3 / 2, output_frame->width, CV_8UC1, output_frame->data[0]);
+                    cv::cvtColor(yuv_mat, mat, cv::COLOR_YUV2BGR_I420);
+                } else if (output_frame->format == AV_PIX_FMT_BGR24) {
+                    // Already BGR: plain copy
+                    cv::Mat(output_frame->height, output_frame->width, CV_8UC3,
+                            output_frame->data[0], output_frame->linesize[0]).copyTo(mat);
+                } else {
+                    // Any other format: fall back to sws_scale
+                    if (!ctx->sws_ctx) {
+                        ctx->sws_ctx = sws_getContext(
+                            output_frame->width, output_frame->height,
+                            static_cast<AVPixelFormat>(output_frame->format),
+                            width, height, AV_PIX_FMT_BGR24,
+                            SWS_BILINEAR, nullptr, nullptr, nullptr);
+                    }
+
+                    if (ctx->sws_ctx) {
+                        AVFrame* bgr_frame = av_frame_alloc();
+                        bgr_frame->format = AV_PIX_FMT_BGR24;
+                        bgr_frame->width = width;
+                        bgr_frame->height = height;
+
+                        if (av_frame_get_buffer(bgr_frame, 0) == 0) {
+                            sws_scale(ctx->sws_ctx,
+                                      output_frame->data, output_frame->linesize, 0, output_frame->height,
+                                      bgr_frame->data, bgr_frame->linesize);
+
+                            // Deep copy before the temporary frame is freed
+                            mat = cv::Mat(height, width, CV_8UC3, bgr_frame->data[0], bgr_frame->linesize[0]).clone();
+                        }
+
+                        av_frame_free(&bgr_frame);
+                    }
+                }
+
+                // Push the frame into the queue
+                if (!mat.empty()) {
+                    std::lock_guard<std::mutex> lock(ctx->mutex);
+                    if (ctx->frame_queue.size() >= static_cast<size_t>(config_.buffer_size)) {
+                        ctx->frame_queue.pop();
+                    }
+                    ctx->frame_queue.push(mat);
+                    ctx->cond.notify_one();
+                }
+            }
+
+            av_frame_unref(output_frame);
+        }
+    }
+
+    // Release resources
+    av_frame_free(&frame);
+    av_frame_free(&sw_frame);
+    av_packet_free(&pkt);
+
+    if (ctx->sws_ctx) {
+        sws_freeContext(ctx->sws_ctx);
+        ctx->sws_ctx = nullptr;
+    }
+}
+
+void RTSPDecoder::cleanupStream(StreamContext* ctx) {
+    if (!ctx) return;
+
+    if (ctx->sws_ctx) {
+        sws_freeContext(ctx->sws_ctx);
+        ctx->sws_ctx = nullptr;
+    }
+
+    if (ctx->codec_ctx) {
+        avcodec_free_context(&ctx->codec_ctx);
+    }
+
+    if (ctx->format_ctx) {
+        avformat_close_input(&ctx->format_ctx);
+    }
+
+    if (ctx->hw_device_ctx) {
+        av_buffer_unref(&ctx->hw_device_ctx);
+    }
+
+    // Drain the frame queue
+    std::lock_guard<std::mutex> lock(ctx->mutex);
+    while (!ctx->frame_queue.empty()) {
+        ctx->frame_queue.pop();
+    }
+}
+
+int RTSPDecoder::allocateGPU() {
+    if (!config_.use_hw_accel) return -1;
+
+    int gpu_count = getGPUCount();
+    if (gpu_count == 0) return -1;
+
+    // Simple round-robin assignment across the available GPUs
+    int gpu_id = next_gpu_id_++ % gpu_count;
+    return gpu_id;
+}
+
diff --git a/RTSPDecoder/RTSPDecoder.h b/RTSPDecoder/RTSPDecoder.h
new file mode 100644
index 0000000..c480c12
--- /dev/null
+++ b/RTSPDecoder/RTSPDecoder.h
@@ -0,0 +1,91 @@
+#ifndef RTSP_DECODER_H
+#define RTSP_DECODER_H
+
+#include <opencv2/opencv.hpp>
+#include <string>
+#include <vector>
+#include <queue>
+#include <memory>
+#include <mutex>
+#include <condition_variable>
+#include <thread>
+#include <atomic>
+
+extern "C" {
+#include <libavformat/avformat.h>
+#include <libavcodec/avcodec.h>
+#include <libavutil/hwcontext.h>
+// #include
+#include <libswscale/swscale.h>
+}
+
+class RTSPDecoder {
+public:
+    struct DecoderConfig {
+        int max_streams = 60;       // maximum number of streams
+        int gpu_id = -1;            // GPU id to use (-1 = assign automatically)
+        int width = 0;              // output width (0 = keep source size)
+        int height = 0;             // output height (0 = keep source size)
+        int fps = 0;                // target frame rate (0 = keep source rate)
+        bool use_hw_accel = true;   // enable hardware acceleration
+        int buffer_size = 1024;     // frame queue size per stream
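+        // Note: when a stream's queue already holds buffer_size frames, the decode thread
+        // drops the oldest frame before pushing a new one, so per-stream memory stays bounded.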
+    };
+
+    // Constructor
+    RTSPDecoder();
+
+    // Destructor
+    ~RTSPDecoder();
+
+    // Initialize the decoder
+    bool init(const DecoderConfig& config);
+
+    // Add an RTSP stream
+    int addStream(const std::string& rtsp_url);
+
+    // Remove an RTSP stream
+    bool removeStream(int stream_id);
+
+    // Fetch a decoded frame
+    bool getFrame(int stream_id, cv::Mat& frame, int timeout_ms = 1000);
+
+    // Number of currently active streams
+    int getActiveStreamCount() const;
+
+    // Number of available GPUs
+    static int getGPUCount();
+
+private:
+    struct StreamContext {
+        AVFormatContext* format_ctx = nullptr;
+        AVCodecContext* codec_ctx = nullptr;
+        AVBufferRef* hw_device_ctx = nullptr;
+        SwsContext* sws_ctx = nullptr;
+        int video_stream_idx = -1;
+        int gpu_id = 0;
+        std::atomic<bool> running{false};
+        std::mutex mutex;
+        std::condition_variable cond;
+        std::queue<cv::Mat> frame_queue;
+        std::thread decode_thread;
+    };
+
+    DecoderConfig config_;
+    std::vector<std::unique_ptr<StreamContext>> streams_;
+    std::mutex streams_mutex_;
+    std::atomic<int> next_gpu_id_{0};
+    std::atomic<bool> initialized_{false};
+
+    // Initialize hardware acceleration
+    bool initHWAccel(StreamContext& ctx, int gpu_id);
+
+    // Decode thread function
+    void decodeThreadFunc(int stream_id, StreamContext* ctx);
+
+    // Clean up a stream context
+    void cleanupStream(StreamContext* ctx);
+
+    // Allocate a GPU
+    int allocateGPU();
+};
+
+#endif // RTSP_DECODER_H
diff --git a/app/TestDecode b/app/TestDecode
new file mode 100755
index 0000000..3981997
Binary files /dev/null and b/app/TestDecode differ
diff --git a/app/lib/libRTSPDecoder.so b/app/lib/libRTSPDecoder.so
new file mode 100755
index 0000000..ee998d4
Binary files /dev/null and b/app/lib/libRTSPDecoder.so differ
diff --git a/cmake/FindFFmpeg.cmake b/cmake/FindFFmpeg.cmake
new file mode 100644
index 0000000..1ed54d5
--- /dev/null
+++ b/cmake/FindFFmpeg.cmake
@@ -0,0 +1,146 @@
+# vim: ts=2 sw=2
+# - Try to find the required ffmpeg components (default: AVFORMAT, AVUTIL, AVCODEC)
+#
+# Once done this will define
+#  FFMPEG_FOUND        - System has all the required components.
+#  FFMPEG_INCLUDE_DIRS - Include directory necessary for using the required components headers.
+#  FFMPEG_LIBRARIES    - Link these to use the required ffmpeg components.
+#  FFMPEG_DEFINITIONS  - Compiler switches required for using the required ffmpeg components.
+#
+# For each of the components it will additionally set.
+#   - AVCODEC
+#   - AVDEVICE
+#   - AVFORMAT
+#   - AVUTIL
+#   - POSTPROCESS
+#   - SWSCALE
+# the following variables will be defined
+#  <component>_FOUND        - System has <component>
+#  <component>_INCLUDE_DIRS - Include directory necessary for using the <component> headers
+#  <component>_LIBRARIES    - Link these to use <component>
+#  <component>_DEFINITIONS  - Compiler switches required for using <component>
+#  <component>_VERSION      - The components version
+#
+# Copyright (c) 2006, Matthias Kretz, <kretz@kde.org>
+# Copyright (c) 2008, Alexander Neundorf, <neundorf@kde.org>
+# Copyright (c) 2011, Michael Jansen, <kde@michael-jansen.biz>
+#
+# Redistribution and use is allowed according to the terms of the BSD license.
+# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
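+#
+# Typical usage (a sketch; "mytarget" is a placeholder):
+#
+#   find_package(FFmpeg REQUIRED COMPONENTS AVCODEC AVFORMAT AVUTIL SWSCALE)
+#   target_include_directories(mytarget PRIVATE ${FFMPEG_INCLUDE_DIRS})
+#   target_link_libraries(mytarget PRIVATE ${FFMPEG_LIBRARIES})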
+
+include(FindPackageHandleStandardArgs)
+
+# The default components were taken from a survey over other FindFFMPEG.cmake files
+if (NOT FFmpeg_FIND_COMPONENTS)
+  set(FFmpeg_FIND_COMPONENTS AVCODEC AVFORMAT AVUTIL)
+endif ()
+
+#
+### Macro: set_component_found
+#
+# Marks the given component as found if both *_LIBRARIES AND *_INCLUDE_DIRS is present.
+#
+macro(set_component_found _component )
+  if (${_component}_LIBRARIES AND ${_component}_INCLUDE_DIRS)
+    # message(STATUS "  - ${_component} found.")
+    set(${_component}_FOUND TRUE)
+  else ()
+    # message(STATUS "  - ${_component} not found.")
+  endif ()
+endmacro()
+
+#
+### Macro: find_component
+#
+# Checks for the given component by invoking pkgconfig and then looking up the libraries and
+# include directories.
+#
+macro(find_component _component _pkgconfig _library _header)
+
+  if (NOT WIN32)
+    # use pkg-config to get the directories and then use these values
+    # in the FIND_PATH() and FIND_LIBRARY() calls
+    find_package(PkgConfig)
+    if (PKG_CONFIG_FOUND)
+      pkg_check_modules(PC_${_component} ${_pkgconfig})
+    endif ()
+  endif (NOT WIN32)
+
+  find_path(${_component}_INCLUDE_DIRS ${_header}
+    HINTS
+      ${PC_LIB${_component}_INCLUDEDIR}
+      ${PC_LIB${_component}_INCLUDE_DIRS}
+  )
+
+  find_library(${_component}_LIBRARIES NAMES ${_library}
+    HINTS
+      ${PC_LIB${_component}_LIBDIR}
+      ${PC_LIB${_component}_LIBRARY_DIRS}
+  )
+
+  set(${_component}_DEFINITIONS ${PC_${_component}_CFLAGS_OTHER} CACHE STRING "The ${_component} CFLAGS.")
+  set(${_component}_VERSION     ${PC_${_component}_VERSION}      CACHE STRING "The ${_component} version number.")
+
+  set_component_found(${_component})
+
+  mark_as_advanced(
+    ${_component}_INCLUDE_DIRS
+    ${_component}_LIBRARIES
+    ${_component}_DEFINITIONS
+    ${_component}_VERSION)
+
+endmacro()
+
+
+# Check for cached results. If there are skip the costly part.
+if (NOT FFMPEG_LIBRARIES)
+
+  # Check for all possible component.
+  find_component(AVCODEC  libavcodec  avcodec  libavcodec/avcodec.h)
+  find_component(AVFORMAT libavformat avformat libavformat/avformat.h)
+  find_component(AVDEVICE libavdevice avdevice libavdevice/avdevice.h)
+  find_component(AVUTIL   libavutil   avutil   libavutil/avutil.h)
+  find_component(SWSCALE  libswscale  swscale  libswscale/swscale.h)
+  find_component(POSTPROC libpostproc postproc libpostproc/postprocess.h)
+
+  # Check if the required components were found and add their stuff to the FFMPEG_* vars.
+  foreach (_component ${FFmpeg_FIND_COMPONENTS})
+    if (${_component}_FOUND)
+      # message(STATUS "Required component ${_component} present.")
+      set(FFMPEG_LIBRARIES   ${FFMPEG_LIBRARIES}   ${${_component}_LIBRARIES})
+      set(FFMPEG_DEFINITIONS ${FFMPEG_DEFINITIONS} ${${_component}_DEFINITIONS})
+      list(APPEND FFMPEG_INCLUDE_DIRS ${${_component}_INCLUDE_DIRS})
+    else ()
+      # message(STATUS "Required component ${_component} missing.")
+    endif ()
+  endforeach ()
+
+  # Build the include path with duplicates removed.
+  if (FFMPEG_INCLUDE_DIRS)
+    list(REMOVE_DUPLICATES FFMPEG_INCLUDE_DIRS)
+  endif ()
+
+  # cache the vars.
+  set(FFMPEG_INCLUDE_DIRS ${FFMPEG_INCLUDE_DIRS} CACHE STRING "The FFmpeg include directories." FORCE)
+  set(FFMPEG_LIBRARIES    ${FFMPEG_LIBRARIES}    CACHE STRING "The FFmpeg libraries." FORCE)
+  set(FFMPEG_DEFINITIONS  ${FFMPEG_DEFINITIONS}  CACHE STRING "The FFmpeg cflags." FORCE)
+
+  mark_as_advanced(FFMPEG_INCLUDE_DIRS
+                   FFMPEG_LIBRARIES
+                   FFMPEG_DEFINITIONS)
+
+endif ()
+
+# Now set the noncached _FOUND vars for the components.
+foreach (_component AVCODEC AVDEVICE AVFORMAT AVUTIL POSTPROCESS SWSCALE)
+  set_component_found(${_component})
+endforeach ()
+
+# Compile the list of required vars
+set(_FFmpeg_REQUIRED_VARS FFMPEG_LIBRARIES FFMPEG_INCLUDE_DIRS)
+foreach (_component ${FFmpeg_FIND_COMPONENTS})
+  list(APPEND _FFmpeg_REQUIRED_VARS ${_component}_LIBRARIES ${_component}_INCLUDE_DIRS)
+endforeach ()
+
+# Give a nice error message if some of the required vars are missing.
+find_package_handle_standard_args(FFmpeg DEFAULT_MSG ${_FFmpeg_REQUIRED_VARS})
\ No newline at end of file
diff --git a/main.cpp b/main.cpp
new file mode 100644
index 0000000..9dfe3a7
--- /dev/null
+++ b/main.cpp
@@ -0,0 +1,90 @@
+#include "RTSPDecoder/RTSPDecoder.h"
+#include <iostream>
+#include <cstdio>
+#include <ctime>
+#include <sys/time.h>
+
+std::string getDateTime_usec()
+{
+    time_t timep = time(NULL);
+    struct tm *p = localtime(&timep);
+
+    struct timeval tv;
+    gettimeofday(&tv, NULL);
+
+    int msec = tv.tv_usec / 1000;
+
+    char tmp[30] = {0};
+    sprintf(tmp, "%04d-%02d-%02d %02d:%02d:%02d.%03d", 1900 + p->tm_year, 1 + p->tm_mon, p->tm_mday, p->tm_hour, p->tm_min, p->tm_sec, msec);
+
+    return std::string(tmp);
+}
+
+// Demo: opens several copies of a local file as stand-ins for RTSP URLs, polls every stream
+// for decoded frames and writes them as JPEGs under ./jpg/ (create the directory beforehand,
+// otherwise cv::imwrite() fails).
+int main() {
+    RTSPDecoder decoder;
+    RTSPDecoder::DecoderConfig config;
+    config.max_streams = 10;      // up to 10 streams
+    config.use_hw_accel = true;   // use hardware acceleration
+    config.width = 1920;          // output width
+    config.height = 1080;         // output height
+    config.buffer_size = 5;       // buffer 5 frames per stream
+
+    if (!decoder.init(config)) {
+        std::cerr << "Failed to initialize decoder" << std::endl;
+        return 1;
+    }
+
+    // Add RTSP streams
+    std::vector<std::string> rtsp_urls = {
+        "./1.mp4",
+        "./1.mp4",
+        "./1.mp4",
+        "./1.mp4",
+        "./1.mp4",
+        "./1.mp4",
+        "./1.mp4",
+        "./1.mp4",
+        "./1.mp4",
+        "./1.mp4",
+        // add more streams...
+    };
+
+    std::vector<int> stream_ids;
+    for (const auto& url : rtsp_urls) {
+        try {
+            int stream_id = decoder.addStream(url);
+            stream_ids.push_back(stream_id);
+            std::cout << "Added stream " << stream_id << ": " << url << std::endl;
+        } catch (const std::exception& e) {
+            std::cerr << "Failed to add stream: " << e.what() << std::endl;
+        }
+    }
+
+    int iID = 0;
+    // Main loop
+    while (true) {
+        for (int stream_id : stream_ids) {
+            ++iID;
+            cv::Mat frame;
+            if (decoder.getFrame(stream_id, frame, 1)) {
+
+                // Process the frame...
+                if (!cv::imwrite("./jpg/Stream_" + std::to_string(stream_id) + "_" + std::to_string(iID) + ".jpg", frame))
+                {
+                    std::cerr << "Save Failed ./jpg/Stream_" + std::to_string(stream_id) + "_" + std::to_string(iID) + ".jpg" << " size:" << frame.size() << std::endl;
+                }
+                std::cout << getDateTime_usec() << " Stream " << stream_id << " -- " << std::to_string(iID) << std::endl;
+            }
+        }
+
+        if (cv::waitKey(1) == 27) {  // exit on ESC
+            break;
+        }
+    }
+
+    // Clean up
+    for (int stream_id : stream_ids) {
+        decoder.removeStream(stream_id);
+    }
+
+    return 0;
+}
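
A downstream project could consume the library installed by `make install` roughly as follows (a sketch; `consumer.cpp` and the target name are placeholders, and the default install prefix is assumed):

```cmake
cmake_minimum_required(VERSION 3.18)
project(UseRTSPDecoder)

set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

find_package(OpenCV REQUIRED)

# RTSPDecoder.h is installed to <prefix>/include and libRTSPDecoder.so to <prefix>/lib
find_path(RTSPDECODER_INCLUDE_DIR RTSPDecoder.h)
find_library(RTSPDECODER_LIBRARY RTSPDecoder)

add_executable(consumer consumer.cpp)
target_include_directories(consumer PRIVATE ${RTSPDECODER_INCLUDE_DIR} ${OpenCV_INCLUDE_DIRS})
target_link_libraries(consumer PRIVATE ${RTSPDECODER_LIBRARY} ${OpenCV_LIBS})
```

Because the installed header pulls in FFmpeg and OpenCV headers, the consuming project also needs those development headers on its include path.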