
FFmpegH264Decoder uses the FFmpeg library to decode H264 video data into YUV data. On C3V it uses the decoder named h264_v4l2m2m, which is a hardware decoder.

API Instructions

namespace com { namespace sunplus { namespace media {

using YUVDataCallback = std::function<void(AVFrame* yuvFrame, int index)>;

class FFmpegH264Decoder {
public:
    FFmpegH264Decoder();
    ~FFmpegH264Decoder();

public:
    int init(VideoStreamParam_t param, YUVDataCallback dataCallback);
    void uninit();

    AVCodecContext* getAVCodecContext();

    int decode(AVPacket* h264Packet);
    int flush();
};

}}}

Constructors

FFmpegH264Decoder();

init

Set the decode parameters, such as width, height and time base, and initialize the decoder.

/**
 * Set the decode parameters and initialize the decoder.
 *
 * @param param        parameters of the decoder (width, height, time base, etc.).
 * @param dataCallback callback invoked with each decoded YUV frame.
 */
int init(VideoStreamParam_t param, YUVDataCallback dataCallback);

Sample

VideoStreamParam_t param;
param.width = 1280;
param.height = 720;
param.time_base = AV_TIME_BASE_Q;

auto h264Decoder = make_shared<FFmpegH264Decoder>();

ret = h264Decoder->init(param, [](AVFrame* yuvFrame, int index) {
    printf("YUVDataCallback, yuv frame[%d] pts: %lld, width: %d, height: %d, pix_fmt: %s\n",
           index, (long long)yuvFrame->pts, yuvFrame->width, yuvFrame->height,
           av_get_pix_fmt_name((enum AVPixelFormat)yuvFrame->format));
});

uninit

Release all resources allocated by the init method and close the decoder.

/**
 * Release all resources allocated by the init method.
 */
void uninit();
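
A minimal ordering sketch, mirroring the Sample Code section below: stop sending packets, drain the decoder, then release it.

/* no more input packets */
h264Decoder->flush();    /* drain the frames still buffered in the decoder */
h264Decoder->uninit();   /* release everything allocated by init() */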

getAVCodecContext

Get the AVCodecContext to read information about the decoder, such as width, height, etc.

/**
 * get decoder context.
 */
AVCodecContext* getAVCodecContext();
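
For example, the returned context can be used to print the decoder's current settings. This is a small sketch; the fields shown are standard AVCodecContext members.

AVCodecContext* codecCtx = h264Decoder->getAVCodecContext();
if (codecCtx != nullptr) {
    /* width/height/pix_fmt reflect the stream being decoded */
    printf("decoder info, width: %d, height: %d, pix_fmt: %s\n",
           codecCtx->width, codecCtx->height,
           av_get_pix_fmt_name(codecCtx->pix_fmt));
}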

decode

Send the H264 packet to the decoder. The h264Packet must be freed with av_packet_unref() + av_packet_free() when it is no longer needed.

/**
 * send the packet to the decoder.
 * the AVPacket must be freed with av_packet_unref()+av_packet_free() when
 * it is no longer needed. 
 * @return 0 if OK, < 0 on error. 
 */
int decode(AVPacket* h264Packet);
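
A minimal call-site sketch that follows the ownership rule above; it assumes a packet source such as the FFmpegH264Provider used in the Sample Code section below.

AVPacket* packet = nullptr;
int ret = h264Provider->getFrame(packet);

if (ret >= 0 && packet != nullptr) {
    ret = h264Decoder->decode(packet);  /* 0 if OK, < 0 on error */

    /* the caller keeps ownership of the packet and must release it */
    av_packet_unref(packet);
    av_packet_free(&packet);
}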

flush

Send a NULL packet, in which case it is considered a flush packet, signaling the end of the stream.

If the decoder still has frames buffered, it will return them through the YUVDataCallback after this call.

/**
 * send a NULL packet, in which case it is considered a flush packet.
 * If the decoder still has frames buffered, it will return them after this call.
 * Once flushing mode has been entered, additional flush packets are ignored,
 * and sending further packets will return AVERROR_EOF.
 * @return 0 if OK, < 0 on error.
 */
int flush();
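
flush() is typically called once, after the last packet has been sent, so that any frames still buffered in the decoder are delivered through the YUVDataCallback. A sketch, where is_exit mirrors the flag used in the Sample Code below:

while (!is_exit) {
    /* ... get an H264 packet and pass it to decode() ... */
}

/* no more input: drain the remaining buffered frames */
h264Decoder->flush();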

Sample Code

This is a sample of H264 decoding using FFmpegH264Decoder. FFmpegH264Provider supplies the H264 source; the sample gets H264 packets from the provider and sends them to the decoder.

The flow of H264 decoding:

create h264 decoder --> init --> create h264 provider --> provider prepare --> create the thread that gets h264 packets --> send to decoder --> get YUV data via YUVDataCallback.

void FFmpegH264Decoder_Test() {
    /* init output format */
    auto videoSource = make_shared<FFmpegV4L2VideoSource>("/dev/video0");
    AVDictionary *options = nullptr;
    av_dict_set(&options, "video_size", "1280x720", 0);
    av_dict_set(&options, "framerate", "30", 0);
    av_dict_set(&options, "pixel_format", "uyvy422", 0);

    /* open the device */
    int ret = videoSource->open(options);

    /* create h264 provider */
    auto h264Provider = make_shared<FFmpegH264Provider>(videoSource);

    VideoStreamParam_t param;
    param.width = videoSource->getAVStream()->codecpar->width;
    param.height = videoSource->getAVStream()->codecpar->height;
    param.pix_fmt = (enum AVPixelFormat)videoSource->getAVStream()->codecpar->format;
    param.time_base = videoSource->getAVStream()->time_base;
    param.gop = 30;
    param.bitrate = 1000000;
    param.fps = 30;

    /* prepare h264 provider */
    ret = h264Provider->prepare(param);

    auto h264Decoder = make_shared<FFmpegH264Decoder>();

    /* init h264 decoder */ 
    ret = h264Decoder->init(param, [&](AVFrame* yuvFrame, int index) {
        printf("YUVDataCallback, yuv frame[%d] pts: %lld, width: %d, height: %d, pix_fmt: %s\n",
               index, (long long)yuvFrame->pts, yuvFrame->width, yuvFrame->height,
               av_get_pix_fmt_name((enum AVPixelFormat)yuvFrame->format));
    });

    /* create the decode thread to send h264 packet to the decoder */
    auto decodeThreadFunc = [&](){
        int index = 0;
        while(!is_exit) {
            AVPacket* packet = nullptr;
            auto ret = h264Provider->getFrame(packet);
            
            if (ret < 0 || packet == nullptr) {
                std::this_thread::sleep_for(std::chrono::milliseconds(10));
                continue;
            }

            printf("decodeThread, get h264 frame[%d] pts: %lld, size: %d, isKeyFrame: %d\n", index, packet->pts, packet->size, packet->flags & AV_PKT_FLAG_KEY);

            ret = h264Decoder->decode(packet);
    
            av_packet_unref(packet);
            av_packet_free(&packet);
            index++;
        }
        h264Decoder->flush();
    };

    auto thread = make_shared<std::thread>(decodeThreadFunc);

    _wait_exit("FFmpegH264Decoder_Test");

    /* wait for the decode thread to finish before releasing resources */
    thread->join();

    h264Decoder->uninit();
    h264Provider->destroy();
    videoSource->close();
}

Test Result

./ffmpeg_sample h264dec
[Screenshot: FFmpegH264DecoderTestResult.png]
