This commit:

* Enables us to build against the latest version of FFMPEG.
* Updates the ffmpeg code so it no longer uses deprecated APIs.
* Adds support for libswscale. Undef HAVE_SWSCALE in Config.pp if you compiled ffmpeg without it.
* Uses sws_scale instead of the now-removed img_convert.
* Fixes a couple of deadlocks in FFMpegTexture.
This commit is contained in:
rdb 2009-07-03 08:15:54 +00:00
parent 006ec7931e
commit c9aee0fa5b
7 changed files with 71 additions and 14 deletions

View File

@@ -728,10 +728,12 @@
#defer HAVE_OPENCV $[libtest $[OPENCV_LPATH],$[OPENCV_LIBS]]
// Is FFMPEG installed, and where?
#define FFMPEG_IPATH /usr/include/ffmpeg
#define FFMPEG_IPATH /usr/include/ffmpeg /usr/include/libavcodec /usr/include/libavformat /usr/include/libavutil /usr/include/libswscale
#define FFMPEG_LPATH
#define FFMPEG_LIBS $[if $[WINDOWS_PLATFORM],libavcodec.lib libavformat.lib libavutil.lib libgcc.lib,avcodec avformat avutil]
#define FFMPEG_LIBS $[if $[WINDOWS_PLATFORM],libavcodec.lib libavformat.lib libavutil.lib libgcc.lib libswscale.lib,avcodec avformat avutil swscale]
#defer HAVE_FFMPEG $[libtest $[FFMPEG_LPATH],$[FFMPEG_LIBS]]
// Define this if you compiled ffmpeg with libswscale enabled.
#define HAVE_SWSCALE 1
// Is ODE installed, and where?
#define ODE_IPATH

View File

@@ -161,7 +161,11 @@
#print - Did not find OpenCV
#endif
#if $[HAVE_FFMPEG]
#if $[HAVE_SWSCALE]
#print + FFMPEG, with libswscale
#else
#print + FFMPEG
#endif
#else
#print - Did not find FFMPEG
#endif
@@ -341,6 +345,7 @@ $[cdefine HAVE_OPENCV]
/* Define if we have FFMPEG installed and want to build for FFMPEG. */
$[cdefine HAVE_FFMPEG]
$[cdefine HAVE_SWSCALE]
/* Define if we have ODE installed and want to build for ODE. */
$[cdefine HAVE_ODE]

View File

@@ -0,0 +1,3 @@
/* Empty include-guard stub for swscale.h.
 * NOTE(review): this file declares nothing — presumably a placeholder so
 * that `#include "swscale.h"` still compiles when the real libswscale
 * header is picked up elsewhere on the include path (or when libswscale
 * is absent); confirm against the build configuration. */
#ifndef SWSCALE_SWSCALE_H
#define SWSCALE_SWSCALE_H
#endif

View File

@@ -209,10 +209,10 @@ update_frame(int frame) {
for (int z = 0; z < max_z; ++z) {
VideoPage &page = _pages.at(z);
if (page._color.is_valid() || page._alpha.is_valid()) {
modify_ram_image();
do_modify_ram_image();
}
if (page._color.is_valid()) {
nassertv(get_num_components() >= 3 && get_component_width() == 1);
nassertv(_num_components >= 3 && _component_width == 1);
// A little different from the opencv implementation
// The frame is kept on the stream itself. This is partially
@@ -221,11 +221,11 @@ update_frame(int frame) {
// that I don't convert, even if the IO formats are the same!)
if (page._color.get_frame_data(frame)) {
nassertv(get_video_width() <= _x_size && get_video_height() <= _y_size);
unsigned char *dest = _ram_images[0]._image.p() + get_expected_ram_page_size() * z;
unsigned char *dest = _ram_images[0]._image.p() + do_get_expected_ram_page_size() * z;
int dest_row_width = (_x_size * _num_components * _component_width);
// Simplest case, where we deal with an rgb texture
if (get_num_components() == 3) {
if (_num_components == 3) {
int source_row_width=3*page._color._codec_context->width;
unsigned char * source=(unsigned char *)page._color._frame_out->data[0]
+source_row_width*(get_video_height()-1);
@@ -256,7 +256,7 @@ update_frame(int frame) {
// The harder case--interleave the color in with the alpha,
// pixel by pixel.
nassertv(get_num_components() == 4);
nassertv(_num_components == 4);
for (int y = 0; y < get_video_height(); ++y) {
int dx = 0;
int sx = 0;
@@ -277,14 +277,14 @@ update_frame(int frame) {
}
if (page._alpha.is_valid()) {
nassertv(get_num_components() == 4 && get_component_width() == 1);
nassertv(_num_components == 4 && _component_width == 1);
if (page._alpha.get_frame_data(frame)) {
nassertv(get_video_width() <= _x_size && get_video_height() <= _y_size);
// Currently, we assume the alpha has been converted to an rgb format
// There is no reason it can't be a 256 color grayscale though.
unsigned char *dest = _ram_images[0]._image.p() + get_expected_ram_page_size() * z;
unsigned char *dest = _ram_images[0]._image.p() + do_get_expected_ram_page_size() * z;
int dest_row_width = (_x_size * _num_components * _component_width);
int source_row_width= page._alpha._codec_context->width * 3;
@@ -348,7 +348,7 @@ do_read_one(const Filename &fullpath, const Filename &alpha_fullpath,
if (!has_name()) {
set_name(fullpath.get_basename_wo_extension());
}
if (!has_filename()) {
if (!_filename.empty()) {
_filename = fullpath;
_alpha_filename = alpha_fullpath;
}
@@ -582,6 +582,25 @@ get_frame_data(int frame_number) {
// Did we get a video frame?
if (frame_finished) {
// Convert the image from its native format to RGB
#ifdef HAVE_SWSCALE
// Note from pro-rsoft: ffmpeg removed img_convert and told
// everyone to use sws_scale instead - that's why I wrote
// this code. I have no idea if it works well or not, but
// it seems to compile and run without crashing.
PixelFormat dst_format;
if (_codec_context->pix_fmt != PIX_FMT_RGBA32) {
dst_format = PIX_FMT_BGR24;
} else {
dst_format = PIX_FMT_RGBA32;
}
struct SwsContext *convert_ctx = sws_getContext(_codec_context->width, _codec_context->height,
_codec_context->pix_fmt, _codec_context->width, _codec_context->height,
dst_format, 2, NULL, NULL, NULL);
nassertr(convert_ctx != NULL, false);
sws_scale(convert_ctx, _frame->data, _frame->linesize,
0, _codec_context->height, _frame_out->data, _frame_out->linesize);
sws_freeContext(convert_ctx);
#else
if (_codec_context->pix_fmt != PIX_FMT_RGBA32) {
img_convert((AVPicture *)_frame_out, PIX_FMT_BGR24,
(AVPicture *)_frame, _codec_context->pix_fmt,
@@ -592,6 +611,7 @@ get_frame_data(int frame_number) {
(AVPicture *)_frame, _codec_context->pix_fmt,
_codec_context->width, _codec_context->height);
}
#endif
}
}
@@ -749,7 +769,7 @@ clear() {
av_free(_frame_out);
_frame_out = NULL;
}
_next_frame_number = 0;
}

View File

@@ -23,6 +23,9 @@
extern "C" {
#include "avcodec.h"
#include "avformat.h"
#ifdef HAVE_SWSCALE
#include "swscale.h"
#endif
}
////////////////////////////////////////////////////////////////////

View File

@@ -19,6 +19,9 @@
extern "C" {
#include "avcodec.h"
#include "avformat.h"
#ifdef HAVE_SWSCALE
#include "swscale.h"
#endif
}
#include "pStatCollector.h"
#include "pStatTimer.h"
@@ -154,6 +157,7 @@ cleanup() {
}
_video_ctx = 0;
_video_index = -1;
}
////////////////////////////////////////////////////////////////////
@@ -169,13 +173,33 @@ export_frame(unsigned char *data, bool bgra, int bufx) {
if (bgra) {
_frame_out->data[0] = data + ((_size_y - 1) * bufx * 4);
_frame_out->linesize[0] = bufx * -4;
#ifdef HAVE_SWSCALE
struct SwsContext *convert_ctx = sws_getContext(_size_x, _size_y,
_video_ctx->pix_fmt, _size_x, _size_y,
PIX_FMT_BGRA, 2, NULL, NULL, NULL);
nassertv(convert_ctx != NULL);
sws_scale(convert_ctx, _frame->data, _frame->linesize,
0, _size_y, _frame_out->data, _frame_out->linesize);
sws_freeContext(convert_ctx);
#else
img_convert((AVPicture *)_frame_out, PIX_FMT_BGRA,
(AVPicture *)_frame, _video_ctx->pix_fmt, _size_x, _size_y);
#endif
} else {
_frame_out->data[0] = data + ((_size_y - 1) * bufx * 3);
_frame_out->linesize[0] = bufx * -3;
#ifdef HAVE_SWSCALE
struct SwsContext *convert_ctx = sws_getContext(_size_x, _size_y,
_video_ctx->pix_fmt, _size_x, _size_y,
PIX_FMT_BGR24, 2, NULL, NULL, NULL);
nassertv(convert_ctx != NULL);
sws_scale(convert_ctx, _frame->data, _frame->linesize,
0, _size_y, _frame_out->data, _frame_out->linesize);
sws_freeContext(convert_ctx);
#else
img_convert((AVPicture *)_frame_out, PIX_FMT_BGR24,
(AVPicture *)_frame, _video_ctx->pix_fmt, _size_x, _size_y);
#endif
}
}

View File

@@ -105,9 +105,9 @@ pandavfs_seek(URLContext *h, PN_int64 pos, int whence) {
case SEEK_END: s->seekg(pos, ios::end); break;
case AVSEEK_SIZE: {
s->seekg(0, ios::cur);
offset_t p = s->tellg();
int p = s->tellg();
s->seekg(-1, ios::end);
offset_t size = s->tellg();
int size = s->tellg();
if (size < 0) {
movies_cat.error() << "Failed to determine filesize in ffmpegVirtualFile\n";
s->clear();
@@ -164,7 +164,7 @@ register_protocol() {
protocol.url_write = pandavfs_write;
protocol.url_seek = pandavfs_seek;
protocol.url_close = pandavfs_close;
::register_protocol(&protocol);
av_register_protocol(&protocol);
}
#endif // HAVE_FFMPEG