
ffmpeg threaded decode problem

Views: 10858   Posted: 2013-02-26
Has anyone run into this? I call avcodec_decode_video from a separate thread, and the first frame comes back flagged as a B frame when it should actually be an I frame, so every frame after it is corrupted.

If I decode directly after av_read_frame in the same thread, everything works; it only breaks once the decoding runs in its own thread. And only certain TS files behave this way.

Am I using the API wrongly, or is something else going on?
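(For reference, here is a minimal sketch of the kind of read-thread / decode-thread split described above, assuming a pthread-based packet queue. PacketQueue, PacketNode, packet_queue_put and packet_queue_get are illustrative names, not FFmpeg API. One detail that often matters when packets cross threads with this old API: av_read_frame can return packets whose data references demuxer-owned buffers, so av_dup_packet should be called before a packet is queued for another thread.)

#include <pthread.h>
#include <libavformat/avformat.h>

// Simple linked-list packet queue; the names are illustrative, not FFmpeg API.
typedef struct PacketNode {
    AVPacket pkt;
    struct PacketNode *next;
} PacketNode;

typedef struct PacketQueue {
    PacketNode *first, *last;
    pthread_mutex_t lock;
    pthread_cond_t cond;
} PacketQueue;

// Reader-thread side: duplicate the packet so its data survives the next
// av_read_frame, then append it to the queue.
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    if (av_dup_packet(pkt) < 0)
        return -1;

    PacketNode *node = av_malloc(sizeof(*node));
    if (!node)
        return -1;
    node->pkt = *pkt;
    node->next = NULL;

    pthread_mutex_lock(&q->lock);
    if (q->last)
        q->last->next = node;
    else
        q->first = node;
    q->last = node;
    pthread_cond_signal(&q->cond);
    pthread_mutex_unlock(&q->lock);
    return 0;
}

// Decoder-thread side: block until a packet is available and hand it out.
// The caller owns the packet and frees it with av_free_packet after decoding.
static int packet_queue_get(PacketQueue *q, AVPacket *pkt)
{
    pthread_mutex_lock(&q->lock);
    while (!q->first)
        pthread_cond_wait(&q->cond, &q->lock);

    PacketNode *node = q->first;
    q->first = node->next;
    if (!q->first)
        q->last = NULL;
    pthread_mutex_unlock(&q->lock);

    *pkt = node->pkt;
    av_free(node);
    return 0;
}

With such a queue, the reader thread calls packet_queue_put right after av_read_frame, and the decoder thread calls packet_queue_get, passes the packet to avcodec_decode_video2, and releases it with av_free_packet. The test code posted with the question follows.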



#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>

// Dump a decoded RGB24 frame to a binary PPM file.
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) {
    FILE *pFile;
    char szFilename[32];
    int y;

    // Open file
    sprintf(szFilename, "c:\\frame%d.ppm", iFrame);
    pFile = fopen(szFilename, "wb");
    if (pFile == NULL)
        return;

    // Write PPM header
    fprintf(pFile, "P6\n%d %d\n255\n", width, height);

    // Write pixel data row by row, honouring the frame's line stride
    for (y = 0; y < height; y++)
        fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1, width * 3, pFile);

    // Close file
    fclose(pFile);
}

int testcase(const char *file)
{
    int i;
    AVCodecContext *pCodecCtx;
    AVFrame *pFrame;
    AVCodec *pCodec;
    AVFormatContext *pFormatCtx;

    av_register_all();

    // Open video file
    if (av_open_input_file(&pFormatCtx, file, NULL, 0, NULL) != 0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if (av_find_stream_info(pFormatCtx) < 0)
        return -1; // Couldn't find stream information

    // Find the first video stream
    int videoStream = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    if (videoStream == -1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }

    // Open codec
    if (avcodec_open(pCodecCtx, pCodec) < 0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame = avcodec_alloc_frame();

    // Allocate an AVFrame structure for the RGB picture
    AVFrame *pFrameRGB = avcodec_alloc_frame();
    if (pFrameRGB == NULL)
        return -1;

    uint8_t *buffer;
    int numBytes;

    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
                                  pCodecCtx->height);
    buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));

    // Assign appropriate parts of buffer to image planes in pFrameRGB.
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture.
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
                   pCodecCtx->width, pCodecCtx->height);

    // Create the scaler once, before the read loop. (The original code
    // declared this as a function-scope "static" initialised from
    // non-constant values inside the decode loop; that only compiles as
    // C++, so the context is built once here instead.)
    struct SwsContext *m_convertCtx = sws_getContext(pCodecCtx->width,
                                                     pCodecCtx->height,
                                                     pCodecCtx->pix_fmt,
                                                     pCodecCtx->width,
                                                     pCodecCtx->height,
                                                     PIX_FMT_RGB24,
                                                     SWS_POINT,
                                                     NULL, NULL, NULL);

    int frameFinished;
    AVPacket packet;

    i = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

            // Did we get a complete video frame?
            if (frameFinished) {
                // Convert the image from its native format to RGB
                // (old img_convert call kept for reference):
                // img_convert((AVPicture *)pFrameRGB, PIX_FMT_RGB24,
                //             (AVPicture *)pFrame, pCodecCtx->pix_fmt,
                //             pCodecCtx->width, pCodecCtx->height);
                sws_scale(m_convertCtx,
                          pFrame->data, pFrame->linesize,
                          0, pCodecCtx->height,
                          pFrameRGB->data, pFrameRGB->linesize);

                // Save the first five frames to disk
                if (++i <= 5)
                    SaveFrame(pFrameRGB, pCodecCtx->width,
                              pCodecCtx->height, i);
            }
        }
        else