This commit is to support even newer versions of ffmpeg.

rdb 2009-07-03 12:02:00 +00:00
parent 3c2c9340d6
commit 2d14a28244
7 changed files with 34 additions and 42 deletions

View File

@@ -728,7 +728,7 @@
 #defer HAVE_OPENCV $[libtest $[OPENCV_LPATH],$[OPENCV_LIBS]]
 // Is FFMPEG installed, and where?
-#define FFMPEG_IPATH /usr/include/ffmpeg /usr/include/libavcodec /usr/include/libavformat /usr/include/libavutil /usr/include/libswscale
+#define FFMPEG_IPATH /usr/include/ffmpeg
 #define FFMPEG_LPATH
 #define FFMPEG_LIBS $[if $[WINDOWS_PLATFORM],libavcodec.lib libavformat.lib libavutil.lib libgcc.lib libswscale.lib,avcodec avformat avutil swscale]
 #defer HAVE_FFMPEG $[libtest $[FFMPEG_LPATH],$[FFMPEG_LIBS]]
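
The per-library entries in FFMPEG_IPATH become unnecessary once the sources below switch to the libavcodec/-prefixed include form, since those headers then resolve relative to the parent directory (/usr/include by default, or /usr/include/ffmpeg on older layouts). A minimal sketch of the distinction, using a hypothetical probe file; the paths are illustrative only:

// probe.cxx (hypothetical).  Old flat-style include needed
// -I/usr/include/libavcodec etc. on the include path:
//   #include "avcodec.h"
// New prefixed include only needs the parent directory to be reachable:
extern "C" {
#include "libavcodec/avcodec.h"
}

int main() {
  // LIBAVCODEC_VERSION_MAJOR is defined by avcodec.h; returning it is a
  // cheap way to confirm which headers the include path actually found.
  return LIBAVCODEC_VERSION_MAJOR > 0 ? 0 : 1;
}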

View File

@@ -237,7 +237,7 @@ update_frame(int frame) {
 source -= source_row_width;
 }
 // Next best option, we're a 4 component alpha video on one stream
-} else if (page._color._codec_context->pix_fmt==PIX_FMT_RGBA32) {
+} else if (page._color._codec_context->pix_fmt==PIX_FMT_RGB32) {
 int source_row_width= page._color._codec_context->width * 4;
 unsigned char * source=(unsigned char *)page._color._frame_out->data[0]
 +source_row_width*(get_video_height()-1);
@@ -356,7 +356,7 @@ do_read_one(const Filename &fullpath, const Filename &alpha_fullpath,
 _fullpath = fullpath;
 _alpha_fullpath = alpha_fullpath;
 }
-if (page._color._codec_context->pix_fmt==PIX_FMT_RGBA32) {
+if (page._color._codec_context->pix_fmt==PIX_FMT_RGB32) {
 // There had better not be an alpha interleave here.
 nassertr(alpha_fullpath.empty(), false);
@@ -519,8 +519,7 @@ get_frame_data(int frame_number) {
 if (err < 0) {
 return false;
 }
-avcodec_decode_video(_codec_context, _frame, &got_frame, packet.data,
-packet.size);
+avcodec_decode_video2(_codec_context, _frame, &got_frame, &packet);
 av_free_packet(&packet);
 ++coming_from;
 }
@@ -559,8 +558,7 @@ get_frame_data(int frame_number) {
 break;
 }
-avcodec_decode_video(_codec_context, _frame, &got_frame, packet.data,
-packet.size);
+avcodec_decode_video2(_codec_context, _frame, &got_frame, &packet);
 av_free_packet(&packet);
 } while (true);
@@ -576,8 +574,7 @@ get_frame_data(int frame_number) {
 // Is this a packet from the video stream?
 if (packet.stream_index == _stream_number) {
 // Decode video frame
-avcodec_decode_video(_codec_context, _frame, &frame_finished,
-packet.data, packet.size);
+avcodec_decode_video2(_codec_context, _frame, &frame_finished, &packet);
 // Did we get a video frame?
 if (frame_finished) {
@@ -588,10 +585,10 @@ get_frame_data(int frame_number) {
 // this code. I have no idea if it works well or not, but
 // it seems to compile and run without crashing.
 PixelFormat dst_format;
-if (_codec_context->pix_fmt != PIX_FMT_RGBA32) {
+if (_codec_context->pix_fmt != PIX_FMT_RGB32) {
 dst_format = PIX_FMT_BGR24;
 } else {
-dst_format = PIX_FMT_RGBA32;
+dst_format = PIX_FMT_RGB32;
 }
 struct SwsContext *convert_ctx = sws_getContext(_codec_context->width, _codec_context->height,
 _codec_context->pix_fmt, _codec_context->width, _codec_context->height,
@@ -601,13 +598,13 @@ get_frame_data(int frame_number) {
 0, _codec_context->height, _frame_out->data, _frame_out->linesize);
 sws_freeContext(convert_ctx);
 #else
-if (_codec_context->pix_fmt != PIX_FMT_RGBA32) {
+if (_codec_context->pix_fmt != PIX_FMT_RGB32) {
 img_convert((AVPicture *)_frame_out, PIX_FMT_BGR24,
 (AVPicture *)_frame, _codec_context->pix_fmt,
 _codec_context->width, _codec_context->height);
-} else { // _codec_context->pix_fmt == PIX_FMT_RGBA32
-img_convert((AVPicture *)_frame_out, PIX_FMT_RGBA32,
+} else { // _codec_context->pix_fmt == PIX_FMT_RGB32
+img_convert((AVPicture *)_frame_out, PIX_FMT_RGB32,
 (AVPicture *)_frame, _codec_context->pix_fmt,
 _codec_context->width, _codec_context->height);
 }
@@ -698,7 +695,7 @@ read(const Filename &filename) {
 _frame = avcodec_alloc_frame();
-if (_codec_context->pix_fmt != PIX_FMT_RGBA32) {
+if (_codec_context->pix_fmt != PIX_FMT_RGB32) {
 _frame_out = avcodec_alloc_frame();
 if (_frame_out == NULL) {
 grutil_cat.error()
@@ -727,12 +724,12 @@ read(const Filename &filename) {
 }
 // Determine required buffer size and allocate buffer
-_image_size_bytes = avpicture_get_size(PIX_FMT_RGBA32, _codec_context->width,
+_image_size_bytes = avpicture_get_size(PIX_FMT_RGB32, _codec_context->width,
 _codec_context->height);
 _raw_data = new uint8_t[_image_size_bytes];
 // Assign appropriate parts of buffer to image planes in _frameRGB
-avpicture_fill((AVPicture *)_frame_out, _raw_data, PIX_FMT_RGBA32,
+avpicture_fill((AVPicture *)_frame_out, _raw_data, PIX_FMT_RGB32,
 _codec_context->width, _codec_context->height);
 }
 // We could put an option here for single channel frames.
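
Taken together, the changes in this file come down to one pattern: read an AVPacket, hand the whole packet to avcodec_decode_video2() instead of a raw data/size pair, and convert the result to PIX_FMT_RGB32 (the name newer ffmpeg uses for what was PIX_FMT_RGBA32). Below is a minimal, self-contained sketch of that pattern against the circa-2009 API; the function and variable names are illustrative, not the actual members of this class:

extern "C" {
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
}

// Decode the next video packet from 'fmt' on stream 'stream_index' and
// convert it to a packed RGB32 frame in 'frame_out'.  Returns true once a
// complete frame has been produced.
static bool decode_one_frame(AVFormatContext *fmt, AVCodecContext *ctx,
                             int stream_index, AVFrame *frame, AVFrame *frame_out) {
  AVPacket packet;
  while (av_read_frame(fmt, &packet) >= 0) {
    if (packet.stream_index == stream_index) {
      int finished = 0;
      // New packet-based entry point; replaces
      // avcodec_decode_video(ctx, frame, &finished, packet.data, packet.size).
      avcodec_decode_video2(ctx, frame, &finished, &packet);
      if (finished) {
        // Convert from the codec's native format to packed RGB32.
        struct SwsContext *convert_ctx =
          sws_getContext(ctx->width, ctx->height, ctx->pix_fmt,
                         ctx->width, ctx->height, PIX_FMT_RGB32,
                         SWS_BILINEAR, NULL, NULL, NULL);
        sws_scale(convert_ctx, frame->data, frame->linesize,
                  0, ctx->height, frame_out->data, frame_out->linesize);
        sws_freeContext(convert_ctx);
        av_free_packet(&packet);
        return true;
      }
    }
    av_free_packet(&packet);
  }
  return false;
}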

View File

@@ -21,10 +21,10 @@
 #include "videoTexture.h"
 extern "C" {
-#include "avcodec.h"
-#include "avformat.h"
+#include "libavcodec/avcodec.h"
+#include "libavformat/avformat.h"
 #ifdef HAVE_SWSCALE
-#include "swscale.h"
+#include "libswscale/swscale.h"
 #endif
 }

View File

@@ -18,7 +18,7 @@
 #ifdef HAVE_FFMPEG
 extern "C" {
-#include "avcodec.h"
+#include "libavcodec/avcodec.h"
 }
 #endif

View File

@@ -16,8 +16,8 @@
 #include "ffmpegAudioCursor.h"
 extern "C" {
-#include "avcodec.h"
-#include "avformat.h"
+#include "libavcodec/avcodec.h"
+#include "libavformat/avformat.h"
 }
 TypeHandle FfmpegAudioCursor::_type_handle;
@@ -190,9 +190,13 @@ reload_buffer() {
 return;
 } else if (_packet_size > 0) {
 int bufsize = _buffer_size * 2;
-int len = avcodec_decode_audio2(_audio_ctx, _buffer, &bufsize,
-_packet_data, _packet_size);
-movies_debug("avcodec_decode_audio2 returned " << len);
+AVPacket pkt;
+av_init_packet(&pkt);
+pkt.data = _packet_data;
+pkt.size = _packet_size;
+int len = avcodec_decode_audio3(_audio_ctx, _buffer, &bufsize, &pkt);
+movies_debug("avcodec_decode_audio3 returned " << len);
+av_free_packet(&pkt); // Not sure about this
 if (len <= 0) {
 break;
 }
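
The audio side is the same packet-based migration in isolation: avcodec_decode_audio2() took a raw data/size pair, while avcodec_decode_audio3() takes an AVPacket. A minimal hedged sketch of the new call against the libavcodec-52-era API; the buffer parameters stand in for the cursor's actual members:

#include <stdint.h>
extern "C" {
#include "libavcodec/avcodec.h"
}

// Decode one chunk of compressed audio into 'samples'.  On input, *bufsize
// is the size of 'samples' in bytes; on output it is the number of bytes
// written, just as with the old avcodec_decode_audio2().
static int decode_audio_chunk(AVCodecContext *audio_ctx, int16_t *samples,
                              int *bufsize, uint8_t *data, int size) {
  AVPacket pkt;
  av_init_packet(&pkt);  // sets defaults; data/size still have to be filled in
  pkt.data = data;
  pkt.size = size;
  int len = avcodec_decode_audio3(audio_ctx, samples, bufsize, &pkt);
  // The packet only wraps a caller-owned buffer here, so av_free_packet()
  // is not strictly required, but it should be harmless because
  // av_init_packet() does not mark the data as owned by the packet --
  // presumably the reason for the commit's own "Not sure about this".
  av_free_packet(&pkt);
  return len;  // bytes of the packet consumed, or < 0 on error
}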

View File

@@ -17,25 +17,15 @@
 #include "ffmpegVideoCursor.h"
 #include "config_movies.h"
 extern "C" {
-#include "avcodec.h"
-#include "avformat.h"
+#include "libavcodec/avcodec.h"
+#include "libavformat/avformat.h"
 #ifdef HAVE_SWSCALE
-#include "swscale.h"
+#include "libswscale/swscale.h"
 #endif
 }
 #include "pStatCollector.h"
 #include "pStatTimer.h"
-// Earlier versions of ffmpeg didn't define this symbol.
-#ifndef PIX_FMT_BGRA
-#ifdef WORDS_BIGENDIAN
-#define PIX_FMT_BGRA PIX_FMT_BGR32_1
-#else
-#define PIX_FMT_BGRA PIX_FMT_RGBA32
-#endif
-#endif // PIX_FMT_BGRA
 TypeHandle FfmpegVideoCursor::_type_handle;
 ////////////////////////////////////////////////////////////////////
@@ -240,8 +230,7 @@ fetch_frame() {
 int finished = 0;
 _last_start = _packet_time;
 while (!finished && _packet->data) {
-avcodec_decode_video(_video_ctx, _frame,
-&finished, _packet->data, _packet->size);
+avcodec_decode_video2(_video_ctx, _frame, &finished, _packet);
 fetch_packet(_last_start + 1.0);
 }
 _next_start = _packet_time;

View File

@@ -18,7 +18,9 @@
 #include "config_movies.h"
 #include "ffmpegVirtualFile.h"
 #include "virtualFileSystem.h"
-#include "avio.h"
+extern "C" {
+#include "libavformat/avio.h"
+}
 #ifndef AVSEEK_SIZE
 #define AVSEEK_SIZE 0x10000
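
The extern "C" wrapper matters because avio.h, like the other ffmpeg headers wrapped above, declares plain C functions and does not provide its own __cplusplus guards; without it a C++ translation unit would emit mangled references that never match the C symbols in libavformat. A minimal illustration of the pattern, assuming the new header layout:

// C headers without __cplusplus guards must be wrapped so their
// declarations get C linkage from a C++ translation unit.
extern "C" {
#include "libavformat/avio.h"
}

// Calls such as url_fopen()/url_fseek() now resolve against the unmangled
// C symbols exported by libavformat.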