请教:使用ffmpeg将YUV编码为H264;以及将mov/mp4格式文件解码为YUV文件

[原]图像视频编码和FFmpeg(6)-----用FFmpeg编码和解码的例子 - 推酷
[原]图像视频编码和FFmpeg(6)-----用FFmpeg编码和解码的例子
前面大概地介绍了图像格式和不同格式之间的转换、h264的一些知识和FFmpeg编码和解码的基本流程。现在分别为编码和解码给出一个例子。例子是参考output_example.cpp以及前面介绍中提到的老外写的一个例子改写而来的。不说那么多了,上代码。
#ifdef __cplusplus
#define __STDC_CONSTANT_MACROS
#ifdef _STDINT_H
#undef _STDINT_H
# include &stdint.h&
extern &C&
#include&libavcodec/avcodec.h&
#include&libavformat/avformat.h&
#include&libswscale/swscale.h&
#include &iostream&
#include&stdio.h&
#include&string.h&
#include&stdlib.h&
int g_width = 352;
int g_height = 288;
int g_video_outbuff_
uint8_t* g_video_outbuff = NULL;
AVPixelFormat g_pix_fmt = AV_PIX_FMT_YUV420P;
//init Video Stream and return it
AVStream* getVideoStream(AVFormatContext* fmt_ctx)
AVStream* stream = NULL;
stream = avformat_new_stream(fmt_ctx, NULL);
if( stream == NULL)
fprintf(stderr, &new stream fail\n&);
AVCodecContext* codec_ctx = stream-&
codec_ctx-&codec_id = fmt_ctx-&oformat-&video_
codec_ctx-&codec_type = AVMEDIA_TYPE_VIDEO;
codec_ctx-&bit_rate = 400000;
codec_ctx-&gop_size = 3;
codec_ctx-&pix_fmt = g_pix_
codec_ctx-&width = g_
codec_ctx-&height = g_
codec_ctx-&time_base.num = 1;
codec_ctx-&time_base.den = 25;
codec_ctx-&me_range = 16;
codec_ctx-&max_qdiff = 4;
codec_ctx-&qmin = 10;
codec_ctx-&qmax = 51;
codec_ctx-&qcompress = 0.6;
if( codec_ctx-&codec_id == CODEC_ID_MPEG2VIDEO )
codec_ctx-&max_b_frames = 2;
if( codec_ctx-&codec_id == CODEC_ID_MPEG1VIDEO)
codec_ctx-&mb_decision = 2;
// some formats want stream headers to be separate
if(!strcmp(fmt_ctx-&oformat-&name, &mp4&)
|| !strcmp(fmt_ctx-&oformat-&name, &mov&)
|| !strcmp(fmt_ctx-&oformat-&name, &3gp&)
codec_ctx-&flags |= CODEC_FLAG_GLOBAL_HEADER;
void initEncoder(AVStream* stream)
AVCodecContext* codec_ctx = stream-&
AVCodec* encoder = avcodec_find_encoder(codec_ctx-&codec_id);
if( encoder == NULL )
fprintf(stderr, &cann't find the encoder\n&);
if( avcodec_open2(codec_ctx, encoder, NULL) & 0 )
fprintf(stderr, &could not open video codec\n&);
AVFrame* getAVFrame()
int size = avpicture_get_size(g_pix_fmt, g_width, g_height);
uint8_t* buff = (uint8_t*)av_malloc(size);
if( buff == NULL)
fprintf(stderr, &av malloc fail\n&);
AVFrame* frame = av_frame_alloc();
if( frame == NULL)
fprintf(stderr, &alloc frame fail\n&);
avpicture_fill((AVPicture*)frame, buff, g_pix_fmt,
g_width, g_height);
void writeFrame(AVFormatContext* fmt_ctx, AVStream* stream, AVFrame* frame)
int ret, out_
AVCodecContext* codec_ctx = stream-&
if (fmt_ctx-&oformat-&flags & AVFMT_RAWPICTURE) {
/* raw video case. The API will change slightly in the near
futur for that */
av_init_packet(&packet);
packet.flags |= AV_PKT_FLAG_KEY;
packet.stream_index= stream-&
packet.data= (uint8_t *)
packet.size= sizeof(AVPicture);
ret = av_write_frame(fmt_ctx, &packet);
/* encode the image */
out_size = avcodec_encode_video(codec_ctx, g_video_outbuff, g_video_outbuff_size, frame);
/* if zero size, it means the image was buffered */
if (out_size & 0) {
av_init_packet(&packet);
if(codec_ctx-&coded_frame-&key_frame)
packet.flags |= AV_PKT_FLAG_KEY;
packet.stream_index= stream-&
packet.data= g_video_
// not the video_outbuf_size, note!
packet.size= out_
/* write the compressed frame in the media file */
ret = av_write_frame(fmt_ctx, &packet);
if (ret != 0)
fprintf(stderr, &Error while writing video frame\n&);
int main(int argc, char** argv)
const char* input_file = argc & 2 ? &flower_cif.yuv& : argv[1];
FILE* fin = fopen(input_file, &rb&);
if( fin == NULL )
fprintf(stderr, &cann't open the yuv file&);
return -1;
const char* output_filename = argc & 3 ? &flower.mpeg& : argv[2];
g_width = 352;
g_height = 288;
av_register_all();
//正如其函数名,该函数就是在猜。根据文件名的后缀猜应该使用什么编码
//当然,也可能找不到对应后缀的编码方法。此时返回NULL。
AVOutputFormat* output_fmt = av_guess_format(NULL, output_filename, NULL);
if( output_fmt == NULL )
fprintf(stderr, &Couldn't deduce output format from file extension: using MPEG.\n&);
output_fmt = av_guess_format(&mpeg&, NULL, NULL);
if( output_fmt == NULL )
fprintf(stderr, &Could not find suitable output format&);
return -1;
//负责申请一个AVFormatContext结构的内存,并进行简单初始化
//需要使用avformat_free_context()来释放
//avformat_free_context()可以用来释放该结构里的所有东西以及该结构本身
AVFormatContext* fmt_ctx = avformat_alloc_context();
if( fmt_ctx == NULL )
fprintf(stderr, &Memory error&);
return -1;
//把要使用的编码器复制给format_ctx
fmt_ctx-&oformat = output_
strncpy(fmt_ctx-&filename, output_filename, sizeof(fmt_ctx-&filename));
//http://www.ffmpeg.org/doxygen/1.0/group__lavc__core.html
//can see #define
if( output_fmt-&video_codec == AV_CODEC_ID_NONE)
return -1;
AVStream* stream = getVideoStream(fmt_ctx);
av_dump_format(fmt_ctx, 0, output_filename, 1);
initEncoder(stream);
if( avio_open(&fmt_ctx-&pb, output_filename, AVIO_FLAG_WRITE) & 0)
fprintf(stderr, &cann't open the output file\n&);
return -1;
if (!(fmt_ctx-&oformat-&flags & AVFMT_RAWPICTURE)) {
/* allocate output buffer */
/* XXX: API change will be done */
/* buffers passed into lav* can be allocated any way you prefer,
as long as they're aligned enough for the architecture, and
they're freed appropriately (such as using av_free for buffers
allocated with av_malloc) */
g_video_outbuff_size = 200000;
g_video_outbuff = (uint8_t *)av_malloc(g_video_outbuff_size);
AVFrame* frame = getAVFrame();
if( avformat_write_header(fmt_ctx, NULL) & 0 )
fprintf(stderr, &cann't write the file head\n&);
return -1;
int yuv_frame_size = avpicture_get_size(g_pix_fmt, g_width, g_height);
int ret = 0;
while( 1 )
ret = fread(frame-&data[0], 1, yuv_frame_size, fin);
if( ret != yuv_frame_size )
fprintf(stderr, &%d don't read enough data\n&, ret);
writeFrame(fmt_ctx, stream, frame);
av_write_trailer(fmt_ctx);
avio_close(fmt_ctx-&pb);
av_free(fmt_ctx);
在getVideoStream函数中,可以设置视频的比特率。对其进行修改可以发现:
        mp4格式的文件比mpg有更大的压缩比。当比特率取400000(约400Kbps)时,mpg文件就已经比较清晰了,但对应的mp4文件却很模糊。同时可以看到两者的文件大小也差了10多倍。如果把比特率调高10倍左右,那么得到的mp4文件才比较清晰,当然此时的mp4文件已经增大了10多倍,比刚才400000比特率下的mpg文件还要大。(注:原文此处的具体数值在转载中丢失,以上数值系根据上下文推断,请以原文为准。)
这部分基本和老外写的例子是一样的,我加入了一些注释,并保存到bmp图片格式中。
#ifdef __cplusplus
#define __STDC_CONSTANT_MACROS
#ifdef _STDINT_H
#undef _STDINT_H
# include &stdint.h&
extern &C&
#include&libavcodec/avcodec.h&
#include&libavformat/avformat.h&
#include&libavutil/log.h&
#include&libswscale/swscale.h&
#include &iostream&
#include&stdio.h&
#include &windows.h&
bool saveAsBitmap(AVFrame *pFrameRGB, int width, int height, int iFrame)
FILE *pFile = NULL;
BITMAPFILEHEADER
BITMAPINFO
char fileName[32];
int bpp = 24;
// open file
sprintf(fileName, &frame%d.bmp&, iFrame);
pFile = fopen(fileName, &wb&);
if (!pFile)
bmpheader.bfType = ('M' &&8)|'B';
bmpheader.bfReserved1 = 0;
bmpheader.bfReserved2 = 0;
bmpheader.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
bmpheader.bfSize = bmpheader.bfOffBits + width*height*bpp/8;
bmpinfo.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
bmpinfo.bmiHeader.biWidth =
bmpinfo.bmiHeader.biHeight = - //reverse the image
bmpinfo.bmiHeader.biPlanes = 1;
bmpinfo.bmiHeader.biBitCount =
bmpinfo.bmiHeader.biCompression = BI_RGB;
bmpinfo.bmiHeader.biSizeImage = 0;
bmpinfo.bmiHeader.biXPelsPerMeter = 100;
bmpinfo.bmiHeader.biYPelsPerMeter = 100;
bmpinfo.bmiHeader.biClrUsed = 0;
bmpinfo.bmiHeader.biClrImportant = 0;
fwrite(&bmpheader, sizeof(BITMAPFILEHEADER), 1, pFile);
fwrite(&bmpinfo.bmiHeader, sizeof(BITMAPINFOHEADER), 1, pFile);
uint8_t *buffer = pFrameRGB-&data[0];
for (int h=0; h& h++)
for (int w=0; w& w++)
fwrite(buffer+2, 1, 1, pFile);
fwrite(buffer+1, 1, 1, pFile);
fwrite(buffer, 1, 1, pFile);
buffer += 3;
fclose(pFile);
int main(int argc, char** argv)
//This registers all available file formats and codecs with
//the library so they will be used automatically
//when a file with the corresponding format/codec is opened.
//Note that you only need to call av_register_all() once,
av_register_all();
char err_msg[50];
const char* filename = argc & 1 ? argv[1] : &19.mp4&;
//在使用前,一定要初始化,有网友就是因为没有初始化而被坑
//avformat_open_input其内部实现为:
//int avformat_open_input(AVFormatContext **ps, const char *filename,
// AVInputFormat *fmt, AVDictionary **options)
AVFormatContext *s = *
int ret = 0;
AVDictionary *tmp = NULL;
ID3v2ExtraMeta *id3v2_extra_meta = NULL;
if (!s && !(s = avformat_alloc_context()))
return AVERROR(ENOMEM);
AVFormatContext* format_ctx = NULL;
//我用的是2.2.1版本,没有av_open_input_file函数了,可以到
///builds/win32/dev/
//下载低版本的ffmpeg。
//第三个参数用来指定视频的格式,第四个参数是格式选项。设置为NULL即可。
//libavformat可以自动获取。
if( (ret = avformat_open_input(&format_ctx, filename, NULL, NULL)) & 0 )
//如果是gcc编译的话,用av_err2str是没有问题的,但用C++编译就有问题。
//查看av_err2str,它是一个宏。其实现用了一个临时数组(复合字面量)。
//fun( (char[34]){0} );像这种形式,c语言支持这种形式,
//但C++不支持。所以,不能用av_err2str。取而代之用av_strerror
av_strerror(ret, err_msg, sizeof(err_msg) );
fprintf(stderr, &Cann't open the file: %s&, err_msg);
return -1;
//通过读取媒体文件的中的包来获取媒体文件中的流信息
//也就是把媒体文件中的音视频流等信息读出来,保存在容器中,以便解码时使用
if( (ret = avformat_find_stream_info(format_ctx, NULL)) & 0 )
av_strerror(ret, err_msg, sizeof(err_msg) );
fprintf(stderr, &cann't find stream info: %s&, err_msg);
return -1;
//这个函数将填充format_ctx-&streams成员。
//注意,并不是用filename来填充.
//streams的声明是 AVStream * streams[MAX_STREAMS]
//最后一个参数输出是否为output
av_dump_format(format_ctx, 0, filename, 0);
//此时,streams成员数组指向了一系列的帧,帧数由nb_streams成员指明
int video_stream = -1;
for(i = 0; i & format_ctx-&nb_ ++i)
//查找第一个视频帧
if( format_ctx-&streams[i]-&codec-&codec_type == AVMEDIA_TYPE_VIDEO )
video_stream =
if( video_stream == -1 )
fputs(&cann't find the video frame&, stderr);
return -1;
//stream里面关于编解码器的信息 被称为 编解码器上下文。
//其包含了stream会使用的所有编解码器信息。现在用codec_ctx指向其
AVCodecContext* codec_ctx = format_ctx-&streams[video_stream]-&
//下面来获取编解码器,并打开之
//查找解码器之前,必须先调用av_register_all注册所有支持的解码器
//查找成功返回解码器指针,否则返回NULL
//音视频解码器保存在一个链表中,查找过程中,函数从头到尾遍历链表,通过比较解码器的ID来查找
AVCodec* decoder = avcodec_find_decoder(codec_ctx-&codec_id);
if( decoder == NULL )
fputs(&Unsupported codec!\n&, stderr);
return -1;
//AVDictionary* option_dict = NULL;
//使用给定的AVCodec初始化AVCodecContext
if( avcodec_open2(codec_ctx, decoder, NULL) & 0)
fputs(&Could not open codec\n&, stderr);
return -1;
//获取存放一个帧的空间
AVFrame* frame = av_frame_alloc();
//av_molloc只是malloc函数的简单包装,
//其保证分配的地址都是经过对齐的。当我们不再使用的时候,
//必须要用av_free来释放内存
AVFrame* frame_rgb = av_frame_alloc();
if( frame == NULL || frame_rgb == NULL)
fputs(&alloc frame place fail&, stderr);
return -1;
//虽然有了申请存放帧的空间,但还需要申请一个空间来存放帧对应的原始数据,
//因为我们转换的时候需要用到。
//get the size we need
int byte_num = avpicture_get_size(PIX_FMT_RGB24, codec_ctx-&width,
codec_ctx-&height);
uint8_t* buffer = (uint8_t*)av_malloc(byte_num * sizeof(uint8_t));
//用avpicture_fill函数把一个帧和一个buffer, 格式、大小关联起来
avpicture_fill((AVPicture*)frame_rgb, buffer, PIX_FMT_RGB24,
codec_ctx-&width, codec_ctx-&height);
SwsContext *sws_ctx = sws_getContext(codec_ctx-&width,
codec_ctx-&height,
codec_ctx-&pix_fmt,
codec_ctx-&width,
codec_ctx-&height,
PIX_FMT_RGB24,
SWS_BILINEAR,
int yuv420p_bytes = avpicture_get_size(AV_PIX_FMT_YUV420P,
codec_ctx-&width,
codec_ctx-&height);
uint8_t* yuv420p_buff = (uint8_t*)av_malloc(yuv420p_bytes);
AVFrame* yuv420p_frame = av_frame_alloc();
avpicture_fill((AVPicture*)yuv420p_frame, yuv420p_buff,
AV_PIX_FMT_YUV420P, codec_ctx-&width,
codec_ctx-&height);
SwsContext* yuv_sws_ctx = sws_getContext(codec_ctx-&width,
codec_ctx-&height,
codec_ctx-&pix_fmt,
codec_ctx-&width,
codec_ctx-&height,
AV_PIX_FMT_YUV420P,
SWS_BILINEAR,
NULL, NULL, NULL);
//之前的结构体都是声明为指针,内存由ffmpeg分配,而这个是变量。
//估计是因为这个包的空间肯定会重复使用,没必要要求ffmpeg库分配。
int frame_
//从视频中读取一个包,并保存到packet中
while( av_read_frame(format_ctx, &packet) &= 0 )
//前面我们已经获取了第一个视频帧的下标,video_stream保存了该下标
if( packet.stream_index == video_stream )
//对视频帧进行解码.
//如果有帧被解压,frame_finished将被赋予非0值,否则为0
//数据将从packet中解压到frame中。
//在前面的avcodec_open2函数中,codec_ctx获取了解码器。现在将用这个解码器来解码
avcodec_decode_video2(codec_ctx, frame, &frame_finished,
if( frame_finished )
//Convert the image from its native format to RGB
sws_scale(sws_ctx, frame-&data, frame-&linesize,
0, codec_ctx-&height,
frame_rgb-&data, frame_rgb-&linesize);
sws_scale(yuv_sws_ctx, frame-&data, frame-&linesize,
0, codec_ctx-&height,
yuv420p_frame-&data, yuv420p_frame-&linesize);
fwrite(yuv420p_frame-&data[0], 1, yuv420p_bytes, fout);
if( ++i &= 5 )
saveAsBitmap(frame_rgb, codec_ctx-&width,
codec_ctx-&height, i);
//需要注意的是:我们只是定义了一个packet 结构体。
//而结构体有一个data指针,我们定义这个变量时,
//并没有申请一个内存并让data指向之。这个工作有ffmpeg去完成。
//所以需要调用av_free_packet
av_free_packet(&packet);
av_free(frame);
av_free(frame_rgb);
av_free(buffer);
avcodec_close(codec_ctx);
avformat_close_input(&format_ctx);
sws_freeContext(sws_ctx);
已发表评论数()
请填写推刊名
描述不能大于100个字符!
权限设置: 公开
仅自己可见
正文不准确
标题不准确
排版有问题
主题不准确
没有分页内容
图片无法显示
视频无法显示
与原文不一致
使用FFmpeg实现的转码器
#include &stdafx.h&
extern &C& {
class rtsp2mp4
int inputFile(char *file);
int outputFile(char *file);
int transStart();
int decode();
int encode();
int transEnd();
int init();
int unit();
int prepare();
int rtsp2mp4 :: flush_encoder(AVFormatContext *fmt_ctx, unsigned int stream_index);
void ResolutionChange(AVCodecContext *pCodecCtx,AVFrame *pFrame,AVFrame *pNewFrame,int pNewWidth,int pNewHeight);
AVFormatContext *in_fmt_
AVStream *in_video_
AVFormatContext *out_fmt_
AVStream *out_video_
char *rtsp_
char *name_
int64_t pts,
int video_
int frame_
具体函数实现
#include &rtsp2mp4.h&
/*卸载函数:释放内存*/
int rtsp2mp4 :: unit()
if (out_fmt_ctx && !(out_fmt_ctx-&oformat-&flags & AVFMT_NOFILE)) {
avio_close(out_fmt_ctx-&pb);
avcodec_close(out_fmt_ctx-&streams[0]-&codec);
av_freep(&out_fmt_ctx-&streams[0]-&codec);
av_freep(&out_fmt_ctx-&streams[0]);
avformat_free_context(in_fmt_ctx);
avformat_free_context(out_fmt_ctx);
/*输入读取文件的地址,这是读取的是rtsp流地址,在之后的进行一点修改就可以转码普通格式*/
int rtsp2mp4 :: inputFile(char *file)
rtsp_url =
/*输出文件名*/
int rtsp2mp4 :: outputFile(char *file)
name_path =
/*FFmpeg组件的注册与一些初始化操作*/
int rtsp2mp4 :: init()
av_register_all();
avformat_network_init();
in_fmt_ctx = NULL;
out_fmt_ctx = NULL;
width = 800;
height = 600;
video_index = -1;
pts = -3600;
dts = -3600;
frame_index = 0;
/*准备和一些参数的设定*/
int rtsp2mp4 :: prepare()
int ret = 0;
/*下面四行为rtsp流通过TCP协议读取的操作*/
AVDictionary *avdic=NULL;
char option_key[]=&rtsp_transport&;
char option_value[]=&tcp&;
av_dict_set(&avdic,option_key,option_value,0);
/*如果输入的不是rtsp流地址,把avformat_open_input(&in_fmt_ctx,rtsp_url,NULL,&avdic)
avformat_open_input(&in_fmt_ctx,rtsp_url,NULL,NULL);*/
if(ret = avformat_open_input(&in_fmt_ctx,rtsp_url,NULL,&avdic) & 0) {
printf(&无法打开文件\n&);
if(ret = avformat_find_stream_info(in_fmt_ctx,0) & 0) {
printf(&无法找到流信息\n&);
av_dump_format(in_fmt_ctx,0,rtsp_url,0);
for(unsigned i =0;i & in_fmt_ctx-&nb_i ++) {
if(in_fmt_ctx-&streams[i]-&codec-&codec_type == AVMEDIA_TYPE_VIDEO) {
in_video_stream = in_fmt_ctx-&streams[i];
video_index =
/*寻找与打开解码器*/
if(0 & avcodec_open2(in_video_stream-&codec, avcodec_find_decoder(in_video_stream-&codec-&codec_id), NULL)) {
printf(&无法找到解码器\n&);
/*创建输出文件*/
avformat_alloc_output_context2(&out_fmt_ctx,NULL,NULL,name_path);
if(!out_fmt_ctx) {
printf(&不能创建输出文件\n&);
ret = AVERROR_UNKNOWN;
/*创建输出流*/
out_video_stream = avformat_new_stream(out_fmt_ctx, NULL);
if (!out_video_stream) {
printf(&无法创造新流\n&);
ret = AVERROR_UNKNOWN;
/*参数设置&开始*/
out_video_stream-&codec-&codec = avcodec_find_encoder(out_fmt_ctx-&oformat-&video_codec);
out_video_stream-&codec-&height =
out_video_stream-&codec-&width =
out_video_stream-&codec-&time_base.num = in_fmt_ctx-&streams[i]-&avg_frame_rate.
out_video_stream-&codec-&time_base.den = in_fmt_ctx-&streams[i]-&avg_frame_rate.
out_video_stream-&codec-&sample_aspect_ratio = in_video_stream-&codec-&sample_aspect_ratio;
out_video_stream-&codec-&pix_fmt = in_video_stream-&codec-&pix_
out_video_stream-&codec-&pix_fmt = out_video_stream-&codec-&codec-&pix_fmts[0];
out_video_stream-&codec-&bit_rate = 500000;
out_video_stream-&codec-&codec_type = AVMEDIA_TYPE_VIDEO;
/*按照命令输出的文件修改*/
out_video_stream-&codec-&thread_count = 6;
out_video_stream-&codec-&max_b_frames = 3;
out_video_stream-&codec-&b_frame_strategy = 1;
out_video_stream-&codec-&gop_size = 250;
out_video_stream-&codec-&keyint_min = 25;
out_video_stream-&codec-&trellis = 1;
out_video_stream-&codec-&me_subpel_quality = 7;
out_video_stream-&codec-&refs = 3;
out_video_stream-&codec-&me_method = ME_HEX;
out_video_stream-&codec-&coder_type = FF_CODER_TYPE_AC;
out_video_stream-&codec-&me_range = 16;
out_video_stream-&codec-&max_qdiff = 4;
out_video_stream-&codec-&qmin = 0;
out_video_stream-&codec-&qmax = 69;
out_video_stream-&codec-&qcompress = 0.6;
out_video_stream-&codec-&flags |= CODEC_FLAG_GLOBAL_HEADER;
/*参数设置&结束*/
/*寻找编码器*/
if (!out_video_stream-&codec-&codec) {
printf(&找不到编码器\n&);
ret = AVERROR_UNKNOWN;
AVDictionary *param = 0;
av_dict_set(&param, &profile&, &baseline&, 0);
av_opt_set(out_video_stream-&codec-&priv_data, &preset&, &fast&, 0);
av_opt_set(out_video_stream-&codec-&priv_data, &tune&, &zerolatency&, 0);
/*打开编码器*/
if ((avcodec_open2(out_video_stream-&codec, out_video_stream-&codec-&codec, NULL)) & 0) {
printf(&无法打开编码器\n&);
ret = AVERROR_UNKNOWN;
if(video_index == -1) {
printf(&没找到视频流\n&);
ret = AVERROR_UNKNOWN;
/*转码之前的准备,写头文件等*/
int rtsp2mp4 :: transStart()
prepare();
avio_open(&out_fmt_ctx-&pb,name_path,AVIO_FLAG_WRITE);
AVDictionary *opt = NULL;
av_dict_set_int(&opt,&video_track_timescale&,25,0);
avformat_write_header(out_fmt_ctx,&opt);
/*解码一帧*/
int rtsp2mp4 :: decode()
AVFrame *old_
old_frame = av_frame_alloc();
frame = av_frame_alloc();
int size = avpicture_get_size(out_video_stream-&codec-&pix_fmt, width,height);
uint8_t* frame_buf = (uint8_t *)av_malloc(size);
avpicture_fill((AVPicture *)frame, frame_buf, out_video_stream-&codec-&pix_fmt, width, height);
frame-&format = AV_PIX_FMT_YUV420P;
frame-&width =
frame-&height =
AVPacket *pkt_in = NULL;
int y_size = out_video_stream-&codec-&width * out_video_stream-&codec-&
pkt_in = (AVPacket *)av_malloc(sizeof(AVPacket));
av_new_packet(pkt_in, y_size);
int ret = 0;
got_frame = -1;
if (av_read_frame(in_fmt_ctx, pkt_in) & 0) {
av_frame_free(&frame);
av_frame_free(&old_frame);
ret = AVERROR_UNKNOWN;
av_free_packet(pkt_in);
if(pkt_in-&stream_index == video_index) {
if (avcodec_decode_video2(in_fmt_ctx-&streams[video_index]-&codec, old_frame, &got_frame, pkt_in) & 0) {
av_frame_free(&frame);
av_frame_free(&old_frame);
av_free_packet(pkt_in);
printf(&无法解码\n&);
ret = AVERROR_UNKNOWN;
printf(&got_frame:%d\n&,got_frame);
av_free_packet(pkt_in);
if(got_frame & 0) {
ResolutionChange(in_fmt_ctx-&streams[video_index]-&codec,old_frame,frame,width,height);
av_frame_free(&old_frame);
/*编码一帧*/
int rtsp2mp4 :: encode()
AVPacket pkt_
av_init_packet(&pkt_out);
int ret = 0;
got_picture = -1;
if (got_frame & 0) {
frame-&pts = i++;
pkt_out.data = NULL;
pkt_out.size = 0;
if (avcodec_encode_video2(out_video_stream-&codec, &pkt_out, frame, &got_picture) & 0) {
av_free_packet(&pkt_out);
av_frame_free(&frame);
printf(&无法编码\n&);
ret = AVERROR_UNKNOWN;
if (got_picture & 0) {
printf(&成功编码 %5d\t大小:%5d\n&,frame_index,pkt_out.size);
printf(&dts:%10d\tpts:%6d\n&,pkt_out.dts,pkt_out.pts);
frame_index++;
av_write_frame(out_fmt_ctx, &pkt_out);
av_free_packet(&pkt_out);
av_free_packet(&pkt_out);
av_frame_free(&frame);
/*释放内存内的帧并且写尾文件*/
int rtsp2mp4 :: transEnd()
int ret = flush_encoder(out_fmt_ctx, video_index);
if (ret & 0) {
printf(&释放失败\n&);
return -1;
avformat_close_input(&in_fmt_ctx);
av_write_trailer(out_fmt_ctx);
int rtsp2mp4 :: flush_encoder(AVFormatContext *fmt_ctx, unsigned int stream_index)
AVPacket enc_
if (!(fmt_ctx-&streams[stream_index]-&codec-&codec-&capabilities &CODEC_CAP_DELAY)) {
while(1) {
enc_pkt.data = NULL;
enc_pkt.size = 0;
av_init_packet(&enc_pkt);
ret = avcodec_encode_video2(fmt_ctx-&streams[stream_index]-&codec, &enc_pkt, NULL, &got_frame);
av_frame_free(NULL);
if (ret & 0) {
if (!got_frame) {
printf(&释放编码数据: 成功编码 1 帧!\tsize:%5d\n&,enc_pkt.size);
ret = av_write_frame(fmt_ctx, &enc_pkt);
if (ret & 0)
void rtsp2mp4 :: ResolutionChange(AVCodecContext *pCodecCtx,AVFrame *pFrame,AVFrame *pNewFrame,int pNewWidth,int pNewHeight)
pNewFrame-&linesize[0] = pNewW
pNewFrame-&linesize[1] = pNewWidth/2;
pNewFrame-&linesize[2] = pNewWidth/2;
//用 sws_getContext函数 得到 sws_scale函数 运行的上下文,之后用 sws_scale函数 将图形缩放
struct SwsContext *pSwsCtx = NULL;
pSwsCtx = sws_getContext(pCodecCtx-&width,pCodecCtx-&height,AV_PIX_FMT_YUV420P,pNewWidth,pNewHeight,AV_PIX_FMT_YUV420P,SWS_SINC,NULL,NULL,NULL);
if(pSwsCtx == NULL)
sws_scale(pSwsCtx,pFrame-&data,pFrame-&linesize,0,pCodecCtx-&height,pNewFrame-&data,pNewFrame-&linesize);
sws_freeContext(pSwsCtx);
(window.slotbydup=window.slotbydup || []).push({
id: '2467140',
container: s,
size: '1000,90',
display: 'inlay-fix'
(window.slotbydup=window.slotbydup || []).push({
id: '2467141',
container: s,
size: '1000,90',
display: 'inlay-fix'
(window.slotbydup=window.slotbydup || []).push({
id: '2467143',
container: s,
size: '1000,90',
display: 'inlay-fix'
(window.slotbydup=window.slotbydup || []).push({
id: '2467148',
container: s,
size: '1000,90',
display: 'inlay-fix'}

我要回帖

更多关于 ffmpeg h264解码 yuv 的文章

更多推荐

版权声明:文章内容来源于网络,版权归原作者所有,如有侵权请点击这里与我们联系,我们将及时删除。

点击添加站长微信