2016-03-09 194 views
1

我编译并测试了tutorialhere,它工作得很好。在我尝试编辑教程以读取/转换帧到grayscale后。我刚刚将pFrameRGB更改为pFrameGray,PIX_FMT_RGB24PIX_FMT_GRAY16,并只保存了第200帧。它编译并运行,但图像不显示预期。怎么了?如何使用ffmpeg-library从视频中提取灰度图像?

的图像: image

的编辑代码:

#include <libavcodec/avcodec.h> 
#include <libavformat/avformat.h> 
#include <libswscale/swscale.h> 

#include <stdio.h> 

// compatibility with newer API 
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1) 
#define av_frame_alloc avcodec_alloc_frame 
#define av_frame_free avcodec_free_frame 
#endif 

void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) { 
    FILE *pFile; 
    char szFilename[32]; 
    int y; 

    // Open file 
    sprintf(szFilename, "frame%d.ppm", iFrame); 
    pFile=fopen(szFilename, "wb"); 
    if(pFile==NULL) 
    return; 

    // Write header 
    fprintf(pFile, "P6\n%d %d\n255\n", width, height); 

    // Write pixel data 
    for(y=0; y<height; y++) 
    fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width*3, pFile); 

    // Close file 
    fclose(pFile); 
} 

int main(int argc, char *argv[]) { 
    // Initalizing these to NULL prevents segfaults! 
    AVFormatContext *pFormatCtx = NULL; 
    int    i, videoStream; 
    AVCodecContext *pCodecCtxOrig = NULL; 
    AVCodecContext *pCodecCtx = NULL; 
    AVCodec   *pCodec = NULL; 
    AVFrame   *pFrame = NULL; 
    AVFrame   *pFrameGRAY = NULL; 
    AVPacket   packet; 
    int    frameFinished; 
    int    numBytes; 
    uint8_t   *buffer = NULL; 
    struct SwsContext *sws_ctx = NULL; 

    if(argc < 2) { 
    printf("Please provide a movie file\n"); 
    return -1; 
    } 
    // Register all formats and codecs 
    av_register_all(); 

    // Open video file 
    if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0) 
    return -1; // Couldn't open file 

    // Retrieve stream information 
    if(avformat_find_stream_info(pFormatCtx, NULL)<0) 
    return -1; // Couldn't find stream information 

    // Dump information about file onto standard error 
    av_dump_format(pFormatCtx, 0, argv[1], 0); 

    // Find the first video stream 
    videoStream=-1; 
    for(i=0; i<pFormatCtx->nb_streams; i++) 
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) { 
     videoStream=i; 
     break; 
    } 
    if(videoStream==-1) 
    return -1; // Didn't find a video stream 

    // Get a pointer to the codec context for the video stream 
    pCodecCtxOrig=pFormatCtx->streams[videoStream]->codec; 
    // Find the decoder for the video stream 
    pCodec=avcodec_find_decoder(pCodecCtxOrig->codec_id); 
    if(pCodec==NULL) { 
    fprintf(stderr, "Unsupported codec!\n"); 
    return -1; // Codec not found 
    } 
    // Copy context 
    pCodecCtx = avcodec_alloc_context3(pCodec); 
    if(avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) { 
    fprintf(stderr, "Couldn't copy codec context"); 
    return -1; // Error copying codec context 
    } 

    // Open codec 
    if(avcodec_open2(pCodecCtx, pCodec, NULL)<0) 
    return -1; // Could not open codec 

    // Allocate video frame 
    pFrame=av_frame_alloc(); 

    // Allocate an AVFrame structure 
    pFrameGRAY=av_frame_alloc(); 
    if(pFrameGRAY==NULL) 
    return -1; 

    // Determine required buffer size and allocate buffer 
    numBytes=avpicture_get_size(PIX_FMT_GRAY16, pCodecCtx->width, 
        pCodecCtx->height); 
    buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t)); 

    // Assign appropriate parts of buffer to image planes in pFrameGRAY 
    // Note that pFrameGRAY is an AVFrame, but AVFrame is a superset 
    // of AVPicture 
    avpicture_fill((AVPicture *)pFrameGRAY, buffer, PIX_FMT_GRAY16, 
     pCodecCtx->width, pCodecCtx->height); 

    // initialize SWS context for software scaling 
    sws_ctx = sws_getContext(pCodecCtx->width, 
       pCodecCtx->height, 
       pCodecCtx->pix_fmt, 
       pCodecCtx->width, 
       pCodecCtx->height, 
       PIX_FMT_GRAY16, 
       SWS_BILINEAR, 
       NULL, 
       NULL, 
       NULL 
       ); 

    // Read frames and save first five frames to disk 
    i=0; 
    while(av_read_frame(pFormatCtx, &packet)>=0) { 
    // Is this a packet from the video stream? 
    if(packet.stream_index==videoStream) { 
     // Decode video frame 
     avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet); 

     // Did we get a video frame? 
     if(frameFinished) { 
    // Convert the image from its native format to GRAY 
    sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data, 
      pFrame->linesize, 0, pCodecCtx->height, 
      pFrameGRAY->data, pFrameGRAY->linesize); 

    // Save the frame to disk 
    if(++i==200) 
     SaveFrame(pFrameGRAY, pCodecCtx->width, pCodecCtx->height, 
      i); 
     } 
    } 

    // Free the packet that was allocated by av_read_frame 
    av_free_packet(&packet); 
    } 

    // Free the GRAY image 
    av_free(buffer); 
    av_frame_free(&pFrameGRAY); 

    // Free the YUV frame 
    av_frame_free(&pFrame); 

    // Close the codecs 
    avcodec_close(pCodecCtx); 
    avcodec_close(pCodecCtxOrig); 

    // Close the video file 
    avformat_close_input(&pFormatCtx); 

    return 0; 
} 
+0

它显示了什么? –

+0

更新了我的问题并添加了一张图片。图像扭曲。左侧部分添加到右侧,但尺寸仍然正确。既不是灰度。 – user1587451

回答

2

你的主要问题是在这里:

fprintf(pFile, "P6\n%d %d\n255\n", width, height); 

据对PPM一些信息,我发现:http://paulbourke.net/dataformats/ppm/你用标识符P6写入PPM标题,表示“rgb颜色”。如果您制作灰度图像,则需要使用P5

同样,您使用ffmpeg的像素格式PIX_FMT_GRAY16,但是您将它解释为字节。您可能打算使用PIX_FMT_GRAY8代替。如果您的输出中实际需要16位灰度,则应将PPM标头中的第三个数字从255更改为65535.

+0

谢谢,我已经像你写的那样编辑过,但现在图像的内部部分被放大到图片中。其他图像部分被裁剪。 – user1587451

+1

@ user1587451只需在'SaveFrame'函数的'fwrite'中将'3 * width'更改为'width'。这只是为了理解为什么它是必要的。 –

相关问题