2013-07-10 69 views
11

所以我一直在尝试学习下面这个 ffmpeg 教程：http://dranger.com/ffmpeg/tutorial02.html ，想找到 SDL 2.0 中可以替代 SDL_Overlay 的做法。

然而,当我尝试使用gcc来编译,我得到以下的输出:

root:/Users/mbrodeur/Downloads/HACKATHON CONTENT/Tutorials-> gcc -o tutorial02 tutorial02.c -lavutil -lavformat -lavcodec -lz -lavutil -lm -lswscale -D_THREAD_SAFE -lSDL2 
tutorial02.c: In function ‘main’: 
tutorial02.c:41: error: ‘SDL_Overlay’ undeclared (first use in this function) 
tutorial02.c:41: error: (Each undeclared identifier is reported only once 
tutorial02.c:41: error: for each function it appears in.) 
tutorial02.c:41: error: ‘bmp’ undeclared (first use in this function) 
tutorial02.c:98: warning: assignment makes pointer from integer without a cast 
tutorial02.c:110: error: ‘SDL_YV12_OVERLAY’ undeclared (first use in this function) 

现在,我读到SDL_Overlay已不再用于SDL2,因此存在这个问题。我一直在四处寻找,但似乎找不到任何有用的东西。是否有SDL_Overlay的替代品?有必要吗?

SDL_Overlay在下文上下文中使用:

SDL_Overlay  *bmp; 
bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, 
         SDL_YV12_OVERLAY, screen); 
+0

无关,但您可能想查看教程的[更新后的源代码](https://github.com/chelyaev/ffmpeg-tutorial)版本。 – LordNeckbeard

+0

谢谢,我不知道那个页面存在。不幸的是,更新后的源代码仍然无法解决我的问题。 – Josh

+1

你有没有把这个教程更新成可以在 SDL2 下编译运行的代码？如果有，可以分享一下吗？提前致谢。:D – jofra

回答

5

尝试使用 SDL_CreateTexture()，并选择一种在内存布局和平面结构上都与你的解码帧匹配的 YUV 像素格式。

或者使用 libswscale 将 ffmpeg 的 YUV 缓冲区转换为 RGB。

EDIT：SDL2 >= 2.0.1 提供了 SDL_UpdateYUVTexture() 用于更新平面 YUV 纹理，这样你就不必再手动拼合 libav 的缓冲区了。

+0

“这是一个相当慢的功能,旨在用于静态纹理。”似乎不太适合视频? –

+0

@MikeVersteeg:你在读什么? – genpfault

+0

在SDL_UpdateTexture(https://wiki.libsdl.org/SDL_UpdateTexture)中,链接到上面的链接(我假设它是从SDL_UpdateTexture派生的)。我是OpenGL的新手,但认为写纹理是最快的方法之一,所以我对你感到很惊讶。也许SDL的实现本身很慢?无论如何,这似乎并不像这个问题的正确答案,因为我认为OP想要显示视频。 –

1

我也遇到了这个问题。我使用了SDL 1.2,因为我不知道如何替换SDL2.0中的SDL_Overlay。如果你是在Mac 10.10,您可以在/src/video/x11/SDL_x11sym.h使用这个补丁 http://www.emaculation.com/doku.php/compiling_sheepshaver_basilisk#tidbits

运行 patch < no-CGDirectPaletteRef.patch ，然后将第 168 和 169 行替换为：

SDL_X11_SYM(int,_XData32,(Display *dpy,register _Xconst long *data,unsigned len),(dpy,data,len),return) SDL_X11_SYM(void,_XRead32,(Display *dpy,register long *data,long len),(dpy,data,len), return)

它和我一起工作。

14

我已经更新了教程以使用SDL 2.0.1。它用YV12格式的SDL_Texture替换SDL_Overlay。

/*
 * Minimal FFmpeg + SDL2 video player (dranger tutorial02 ported to SDL >= 2.0.1).
 *
 * Replaces the SDL 1.2 SDL_Overlay with an SDL_Texture in YV12 format:
 * each decoded frame is converted to planar YUV420P via libswscale and
 * pushed to the texture with SDL_UpdateYUVTexture().
 *
 * Usage: tutorial02 <video-file>
 * Returns 0 on success, -1 on FFmpeg-side failures; SDL/allocation errors
 * terminate via exit(1).
 *
 * NOTE(review): intentionally keeps APIs that are deprecated in newer FFmpeg
 * releases (av_register_all, avcodec_decode_video2, AVPicture,
 * PIX_FMT_YUV420P) to stay compatible with the FFmpeg version this code
 * was written against.
 */
int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx = NULL;
    int videoStream;
    unsigned i;
    AVCodecContext *pCodecCtxOrig = NULL;
    AVCodecContext *pCodecCtx = NULL;
    AVCodec *pCodec = NULL;
    AVFrame *pFrame = NULL;
    AVPacket packet;
    int frameFinished;
    struct SwsContext *sws_ctx = NULL;
    SDL_Event event;
    SDL_Window *screen;
    SDL_Renderer *renderer;
    SDL_Texture *texture;
    Uint8 *yPlane, *uPlane, *vPlane;
    size_t yPlaneSz, uvPlaneSz;
    int uvPitch;

    if (argc < 2) {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }

    // Register all formats and codecs (required before any demuxing on
    // FFmpeg versions that still ship av_register_all).
    av_register_all();

    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Open video file
    if (avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    }
    if (videoStream == -1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtxOrig = pFormatCtx->streams[videoStream]->codec;
    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtxOrig->codec_id);
    if (pCodec == NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }

    // Copy the context: the stream's own codec context must not be used
    // directly. (FIX: also check the allocation itself.)
    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (pCodecCtx == NULL ||
        avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) {
        fprintf(stderr, "Couldn't copy codec context");
        return -1; // Error copying codec context
    }

    // Open codec
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
        return -1; // Could not open codec

    // Allocate video frame (FIX: check for allocation failure)
    pFrame = av_frame_alloc();
    if (pFrame == NULL)
        return -1;

    // Make a screen to put our video
    screen = SDL_CreateWindow(
            "FFmpeg Tutorial",
            SDL_WINDOWPOS_UNDEFINED,
            SDL_WINDOWPOS_UNDEFINED,
            pCodecCtx->width,
            pCodecCtx->height,
            0
        );
    if (!screen) {
        fprintf(stderr, "SDL: could not create window - exiting\n");
        exit(1);
    }

    renderer = SDL_CreateRenderer(screen, -1, 0);
    if (!renderer) {
        fprintf(stderr, "SDL: could not create renderer - exiting\n");
        exit(1);
    }

    // Allocate a streaming YV12 texture to receive the decoded frames.
    texture = SDL_CreateTexture(
            renderer,
            SDL_PIXELFORMAT_YV12,
            SDL_TEXTUREACCESS_STREAMING,
            pCodecCtx->width,
            pCodecCtx->height
        );
    if (!texture) {
        fprintf(stderr, "SDL: could not create texture - exiting\n");
        exit(1);
    }

    // initialize SWS context for software scaling / pixel-format conversion
    // (FIX: check the result before use)
    sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
            pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
            PIX_FMT_YUV420P,
            SWS_BILINEAR,
            NULL,
            NULL,
            NULL);
    if (!sws_ctx) {
        fprintf(stderr, "Could not initialize sws context - exiting\n");
        exit(1);
    }

    // Set up the YV12 pixel planes (12 bits per pixel overall).
    // FIX: widen to size_t before multiplying so very large frames do not
    // overflow int arithmetic.
    // NOTE(review): the /4 assumes even width and height (standard for
    // YUV420) — confirm if odd-sized input is possible.
    yPlaneSz = (size_t)pCodecCtx->width * (size_t)pCodecCtx->height;
    uvPlaneSz = yPlaneSz / 4;
    yPlane = malloc(yPlaneSz);
    uPlane = malloc(uvPlaneSz);
    vPlane = malloc(uvPlaneSz);
    if (!yPlane || !uPlane || !vPlane) {
        fprintf(stderr, "Could not allocate pixel buffers - exiting\n");
        exit(1);
    }

    uvPitch = pCodecCtx->width / 2;
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream) {
            // Decode video frame.
            // FIX: a negative return means a decode error; treat it as
            // "no frame" instead of trusting frameFinished.
            if (avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
                                      &packet) < 0)
                frameFinished = 0;

            // Did we get a complete video frame?
            if (frameFinished) {
                // Describe our three malloc'd planes as an AVPicture so
                // sws_scale can write straight into them.
                AVPicture pict;
                pict.data[0] = yPlane;
                pict.data[1] = uPlane;
                pict.data[2] = vPlane;
                pict.linesize[0] = pCodecCtx->width;
                pict.linesize[1] = uvPitch;
                pict.linesize[2] = uvPitch;

                // Convert the image into the planar YUV format SDL expects
                sws_scale(sws_ctx, (uint8_t const * const *) pFrame->data,
                        pFrame->linesize, 0, pCodecCtx->height, pict.data,
                        pict.linesize);

                // SDL >= 2.0.1: upload the three planes in one call.
                SDL_UpdateYUVTexture(
                        texture,
                        NULL,
                        yPlane,
                        pCodecCtx->width,
                        uPlane,
                        uvPitch,
                        vPlane,
                        uvPitch
                    );

                SDL_RenderClear(renderer);
                SDL_RenderCopy(renderer, texture, NULL, NULL);
                SDL_RenderPresent(renderer);
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);

        // FIX: the original read event.type even when SDL_PollEvent()
        // returned 0, i.e. from a stale or uninitialized SDL_Event (UB on
        // the first iteration). Drain the queue and only inspect events
        // that were actually returned.
        while (SDL_PollEvent(&event)) {
            if (event.type == SDL_QUIT) {
                SDL_DestroyTexture(texture);
                SDL_DestroyRenderer(renderer);
                SDL_DestroyWindow(screen);
                SDL_Quit();
                exit(0);
            }
        }
    }

    // Free the YUV frame and the pixel planes
    av_frame_free(&pFrame);
    free(yPlane);
    free(uPlane);
    free(vPlane);

    // FIX: release the scaler context (was leaked on the normal exit path)
    sws_freeContext(sws_ctx);

    // Close the codec
    avcodec_close(pCodecCtx);
    avcodec_close(pCodecCtxOrig);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    // FIX: tear down SDL objects on the normal exit path too (previously
    // only done when a quit event arrived).
    SDL_DestroyTexture(texture);
    SDL_DestroyRenderer(renderer);
    SDL_DestroyWindow(screen);
    SDL_Quit();

    return 0;
}
+1

你是否更新过其他​​教程?分享心灵?我使用的是SDL 2.0,因为我无法在1.2下载文件中找到必要的内容。 +1的努力。 – 2015-10-15 01:08:06