diff --git a/panda/src/grutil/ffmpegTexture.cxx b/panda/src/grutil/ffmpegTexture.cxx index e35afc4aff..5a4df8d265 100644 --- a/panda/src/grutil/ffmpegTexture.cxx +++ b/panda/src/grutil/ffmpegTexture.cxx @@ -1,821 +1,821 @@ -// Filename: ffmpegTexture.cxx -// Created by: zacpavlov (05May06) -// -//////////////////////////////////////////////////////////////////// -// -// PANDA 3D SOFTWARE -// Copyright (c) Carnegie Mellon University. All rights reserved. -// -// All use of this software is subject to the terms of the revised BSD -// license. You should have received a copy of this license along -// with this source code in a file named "LICENSE." -// -//////////////////////////////////////////////////////////////////// - -#include "pandabase.h" - -#ifdef HAVE_FFMPEG -#include "ffmpegTexture.h" -#include "clockObject.h" -#include "config_gobj.h" -#include "config_grutil.h" -#include "bamCacheRecord.h" -#include "bamReader.h" - -TypeHandle FFMpegTexture::_type_handle; - -//////////////////////////////////////////////////////////////////// -// Function: FFMpegTexture::Constructor -// Access: Published -// Description: -//////////////////////////////////////////////////////////////////// -FFMpegTexture:: -FFMpegTexture(const string &name) : - VideoTexture(name) -{ -} - -//////////////////////////////////////////////////////////////////// -// Function: FFMpegTexture::Copy Constructor -// Access: Protected -// Description: Use FFmpegTexture::make_copy() to make a duplicate copy of -// an existing FFMpegTexture. -//////////////////////////////////////////////////////////////////// -FFMpegTexture:: -FFMpegTexture(const FFMpegTexture ©) : - VideoTexture(copy), - _pages(copy._pages) -{ - nassertv(false); -} - -//////////////////////////////////////////////////////////////////// -// Function: FFMpegTexture::Destructor -// Access: Published, Virtual -// Description: I'm betting that texture takes care of the, so we'll just do a clear. -//////////////////////////////////////////////////////////////////// -FFMpegTexture:: -~FFMpegTexture() { - clear(); -} - -//////////////////////////////////////////////////////////////////// -// Function: FFMpegTexture::do_make_copy -// Access: Protected, Virtual -// Description: Returns a new copy of the same Texture. This copy, -// if applied to geometry, will be copied into texture -// as a separate texture from the original, so it will -// be duplicated in texture memory (and may be -// independently modified if desired). -// -// If the Texture is an FFMpegTexture, the resulting -// duplicate may be animated independently of the -// original. -//////////////////////////////////////////////////////////////////// -PT(Texture) FFMpegTexture:: -do_make_copy() { - PT(FFMpegTexture) tex = new FFMpegTexture(get_name()); - tex->do_assign(*this); - - return tex.p(); -} - -//////////////////////////////////////////////////////////////////// -// Function: FFMpegTexture::do_assign -// Access: Protected -// Description: Implements make_copy(). -//////////////////////////////////////////////////////////////////// -void FFMpegTexture:: -do_assign(const FFMpegTexture ©) { - VideoTexture::do_assign(copy); - _pages = copy._pages; -} - -//////////////////////////////////////////////////////////////////// -// Function: FFMPegTexture::modify_page -// Access: Private -// Description: Returns a reference to the zth VideoPage (level) of -// the texture. In the case of a 2-d texture, there is -// only one page, level 0; but cube maps and 3-d -// textures have more. 
-//////////////////////////////////////////////////////////////////// -FFMpegTexture::VideoPage &FFMpegTexture:: -modify_page(int z) { - nassertr(z < _z_size, _pages[0]); - while (z >= (int)_pages.size()) { - _pages.push_back(VideoPage()); - } - return _pages[z]; -} - -//////////////////////////////////////////////////////////////////// -// Function: FFmpegTexture::do_reconsider_video_properties -// Access: Private -// Description: Resets the internal Texture properties when a new -// video file is loaded. Returns true if the new image -// is valid, false otherwise. -//////////////////////////////////////////////////////////////////// -bool FFMpegTexture:: -do_reconsider_video_properties(const FFMpegTexture::VideoStream &stream, - int num_components, int z, - const LoaderOptions &options) { - double frame_rate = 0.0f; - int num_frames = 0; - if (!stream._codec_context) { - // printf("not valid yet\n"); - return true; - } - - AVStream *vstream = stream._format_context->streams[stream._stream_number]; - - if (stream.is_from_file()) { - // frame rate comes from ffmpeg as an avRational. - frame_rate = vstream->r_frame_rate.num/(float)vstream->r_frame_rate.den; - - // Number of frames is a little questionable if we've got variable - // frame rate. Duration comes in as a generic timestamp, - // and is therefore multiplied by AV_TIME_BASE. - num_frames = (int)((stream._format_context->duration*frame_rate)/AV_TIME_BASE); - if (grutil_cat.is_debug()) { - grutil_cat.debug() - << "Loaded " << stream._filename << ", " << num_frames << " frames at " - << frame_rate << " fps\n"; - } - } - - int width = stream._codec_context->width; - int height = stream._codec_context->height; - - int x_size = width; - int y_size = height; - - if (Texture::get_textures_power_2() != ATS_none) { - x_size = up_to_power_2(width); - y_size = up_to_power_2(height); - } - - if (grutil_cat.is_debug()) { - grutil_cat.debug() - << "Video stream is " << width << " by " << height - << " pixels; fitting in texture " << x_size << " by " - << y_size << " texels.\n"; - } - - if (!do_reconsider_image_properties(x_size, y_size, num_components, - T_unsigned_byte, z, options)) { - return false; - } - - if (_loaded_from_image && - (get_video_width() != width || get_video_height() != height || - get_num_frames() != num_frames || get_frame_rate() != frame_rate)) { - grutil_cat.error() - << "Video properties have changed for texture " << get_name() - << " level " << z << ".\n"; - return false; - } - - set_frame_rate(frame_rate); - set_num_frames(num_frames); - set_video_size(width, height); - - // By default, the newly-loaded video stream will immediately start - // looping. - loop(true); - - return true; -} - -//////////////////////////////////////////////////////////////////// -// Function: FFMpegTexture::make_texture -// Access: Public, Static -// Description: A factory function to make a new FFMpegTexture, used -// to pass to the TexturePool. -//////////////////////////////////////////////////////////////////// -PT(Texture) FFMpegTexture:: -make_texture() { - return new FFMpegTexture; -} - -//////////////////////////////////////////////////////////////////// -// Function: FFMPegTexture::update_frame -// Access: Protected, Virtual -// Description: Called once per frame, as needed, to load the new -// image contents. 
-//////////////////////////////////////////////////////////////////// -void FFMpegTexture:: -update_frame(int frame) { - int max_z = min(_z_size, (int)_pages.size()); - for (int z = 0; z < max_z; ++z) { - VideoPage &page = _pages.at(z); - if (page._color.is_valid() || page._alpha.is_valid()) { - do_modify_ram_image(); - } - if (page._color.is_valid()) { - nassertv(_num_components >= 3 && _component_width == 1); - - // A little different from the opencv implementation - // The frame is kept on the stream itself. This is partially - // because there is a conversion step that must be done for - // every video (I've gotten very odd results with any video - // that I don't convert, even if the IO formats are the same!) - if (page._color.get_frame_data(frame)) { - nassertv(get_video_width() <= _x_size && get_video_height() <= _y_size); - unsigned char *dest = _ram_images[0]._image.p() + do_get_expected_ram_page_size() * z; - int dest_row_width = (_x_size * _num_components * _component_width); - - // Simplest case, where we deal with an rgb texture - if (_num_components == 3) { - int source_row_width=3*page._color._codec_context->width; - unsigned char * source=(unsigned char *)page._color._frame_out->data[0] - +source_row_width*(get_video_height()-1); - - // row by row copy. - for (int y = 0; y < get_video_height(); ++y) { - memcpy(dest, source, source_row_width); - dest += dest_row_width; - source -= source_row_width; - } - // Next best option, we're a 4 component alpha video on one stream - } else if (page._color._codec_context->pix_fmt==PIX_FMT_RGB32) { - int source_row_width= page._color._codec_context->width * 4; - unsigned char * source=(unsigned char *)page._color._frame_out->data[0] - +source_row_width*(get_video_height()-1); - - // row by row copy. - for (int y = 0; y < get_video_height(); ++y) { - memcpy(dest,source,source_row_width); - dest += dest_row_width; - source -= source_row_width; - } - // Otherwise, we've got to be tricky - } else { - int source_row_width= page._color._codec_context->width * 3; - unsigned char * source=(unsigned char *)page._color._frame_out->data[0] - +source_row_width*(get_video_height()-1); - - // The harder case--interleave the color in with the alpha, - // pixel by pixel. - nassertv(_num_components == 4); - for (int y = 0; y < get_video_height(); ++y) { - int dx = 0; - int sx = 0; - for (int x = 0; x < get_video_width(); ++x) { - dest[dx] = source[sx]; - dest[dx + 1] = source[sx + 1]; - dest[dx + 2] = source[sx + 2]; - dx += 4; - sx += 3; - } - dest += dest_row_width; - source -= source_row_width; - } - } - - - } - } - - if (page._alpha.is_valid()) { - nassertv(_num_components == 4 && _component_width == 1); - - if (page._alpha.get_frame_data(frame)) { - nassertv(get_video_width() <= _x_size && get_video_height() <= _y_size); - - // Currently, we assume the alpha has been converted to an rgb format - // There is no reason it can't be a 256 color grayscale though. 
- unsigned char *dest = _ram_images[0]._image.p() + do_get_expected_ram_page_size() * z; - int dest_row_width = (_x_size * _num_components * _component_width); - - int source_row_width= page._alpha._codec_context->width * 3; - unsigned char * source=(unsigned char *)page._alpha._frame_out->data[0] - +source_row_width*(get_video_height()-1); - for (int y = 0; y < get_video_height(); ++y) { - int dx = 3; - int sx = 0; - for (int x = 0; x < get_video_width(); ++x) { - dest[dx] = source[sx]; - dx += 4; - sx += 3; - } - dest += dest_row_width; - source -= source_row_width; - } - - } - } - } -} - - -//////////////////////////////////////////////////////////////////// -// Function: FFMpegTexture::do_read_one -// Access: Protected, Virtual -// Description: Combines a color and alpha video image from the two -// indicated filenames. Both must be the same kind of -// video with similar properties. -//////////////////////////////////////////////////////////////////// -bool FFMpegTexture:: -do_read_one(const Filename &fullpath, const Filename &alpha_fullpath, - int z, int n, int primary_file_num_channels, int alpha_file_channel, - const LoaderOptions &options, - bool header_only, BamCacheRecord *record) { - if (record != (BamCacheRecord *)NULL) { - record->add_dependent_file(fullpath); - } - - nassertr(n == 0, false); - nassertr(z >= 0 && z < get_z_size(), false); - - VideoPage &page = modify_page(z); - if (!page._color.read(fullpath)) { - grutil_cat.error() - << "FFMpeg couldn't read " << fullpath << " as video.\n"; - return false; - } - - if (!alpha_fullpath.empty()) { - if (!page._alpha.read(alpha_fullpath)) { - grutil_cat.error() - << "FFMPEG couldn't read " << alpha_fullpath << " as video.\n"; - page._color.clear(); - return false; - } - } - - - if (z == 0) { - if (!has_name()) { - set_name(fullpath.get_basename_wo_extension()); - } - if (!_filename.empty()) { - _filename = fullpath; - _alpha_filename = alpha_fullpath; - } - - _fullpath = fullpath; - _alpha_fullpath = alpha_fullpath; - } - if (page._color._codec_context->pix_fmt==PIX_FMT_RGB32) { - // There had better not be an alpha interleave here. - nassertr(alpha_fullpath.empty(), false); - - _primary_file_num_channels = 4; - _alpha_file_channel = 0; - if (!do_reconsider_video_properties(page._color, 4, z, options)) { - page._color.clear(); - return false; - } - - } else { - _primary_file_num_channels = 3; - _alpha_file_channel = alpha_file_channel; - - if (page._alpha.is_valid()) { - if (!do_reconsider_video_properties(page._color, 4, z, options)) { - page._color.clear(); - page._alpha.clear(); - return false; - } - if (!do_reconsider_video_properties(page._alpha, 4, z, options)) { - page._color.clear(); - page._alpha.clear(); - return false; - } - } else { - if (!do_reconsider_video_properties(page._color, 3, z, options)) { - page._color.clear(); - page._alpha.clear(); - return false; - } - - } - - } - set_loaded_from_image(); - clear_current_frame(); - update_frame(0); - return true; -} - - -//////////////////////////////////////////////////////////////////// -// Function: FFMpegTexture::do_load_one -// Access: Protected, Virtual -// Description: Resets the texture (or the particular level of the -// texture) to the indicated static image. 
-//////////////////////////////////////////////////////////////////// -bool FFMpegTexture:: -do_load_one(const PNMImage &pnmimage, const string &name, int z, int n, - const LoaderOptions &options) { - if (z <= (int)_pages.size()) { - VideoPage &page = modify_page(z); - page._color.clear(); - } - - return Texture::do_load_one(pnmimage, name, z, n, options); -} - - - - -//////////////////////////////////////////////////////////////////// -// Function: FFMpegTexture::register_with_read_factory -// Access: Public, Static -// Description: Factory method to generate a Texture object -//////////////////////////////////////////////////////////////////// -void FFMpegTexture:: -register_with_read_factory() { - // Since Texture is such a funny object that is reloaded from the - // TexturePool each time, instead of actually being read fully from - // the bam file, and since the VideoTexture and FFMpegTexture - // classes don't really add any useful data to the bam record, we - // don't need to define make_from_bam(), fillin(), or - // write_datagram() in this class--we just inherit the same - // functions from Texture. - - // We do, however, have to register this class with the BamReader, - // to avoid warnings about creating the wrong kind of object from - // the bam file. - BamReader::get_factory()->register_factory(get_class_type(), make_from_bam); -} - -//////////////////////////////////////////////////////////////////// -// Function: FFMpegTexture::VideoStream::Constructor -// Access: Public -// Description: -//////////////////////////////////////////////////////////////////// -FFMpegTexture::VideoStream:: -VideoStream() : - _codec_context(NULL), - _format_context(NULL), - _frame(NULL), - _frame_out(NULL), - _next_frame_number(0) -{ - // printf("creating video stream\n"); -} - -//////////////////////////////////////////////////////////////////// -// Function: FFMpegTexture::VideoStream::Copy Constructor -// Access: Public -// Description: -//////////////////////////////////////////////////////////////////// -FFMpegTexture::VideoStream:: -VideoStream(const FFMpegTexture::VideoStream ©) : - _codec_context(NULL), - _format_context(NULL), - _frame(NULL), - _frame_out(NULL), - _next_frame_number(0) -{ - // Rather than copying the _capture pointer, we must open a new - // stream that references the same file. - if (copy.is_valid()) { - if (copy.is_from_file()) { - read(copy._filename); - } - } -} - -//////////////////////////////////////////////////////////////////// -// Function: FFMpegTexture::VideoStream::Copy Constructor -// Access: Public -// Description: -//////////////////////////////////////////////////////////////////// -FFMpegTexture::VideoStream:: -~VideoStream() { - clear(); -} - -//////////////////////////////////////////////////////////////////// -// Function: FFMpegTexture::VideoStream::get_frame_data -// Access: Public -// Description: Returns the pointer to the beginning of the -// decompressed buffer for the indicated frame number. -// It is most efficient to call this in increasing order -// of frame number. -//////////////////////////////////////////////////////////////////// -bool FFMpegTexture::VideoStream:: -get_frame_data(int frame_number) { - nassertr(is_valid(), false); - int coming_from = _next_frame_number; - - _next_frame_number = frame_number + 1; - AVPacket packet; - AVStream *vstream = _format_context->streams[_stream_number]; - - int got_frame; - - // Can we get to our target frame just by skipping forward a few - // frames? 
We arbitrarily draw the line at 50 frames for now. - if (frame_number >= coming_from && frame_number - coming_from < 50) { - - if (frame_number > coming_from) { - // Ok, we do have to skip a few frames. - _codec_context->hurry_up = true; - while (frame_number > coming_from) { - int err = read_video_frame(&packet); - if (err < 0) { - return false; - - } -#if LIBAVCODEC_VERSION_INT < 3414272 - avcodec_decode_video(_codec_context, _frame, &got_frame, packet.data, packet.size); -#else - avcodec_decode_video2(_codec_context, _frame, &got_frame, &packet); -#endif - av_free_packet(&packet); - ++coming_from; - } - _codec_context->hurry_up = false; - } - - // Now we're ready to read a frame. - int err = read_video_frame(&packet); - if (err < 0) { - return false; - } - - } else { - // We have to skip backward, or maybe forward a whole bunch of - // frames. Better off seeking through the stream. - - double time_stamp = ((double)AV_TIME_BASE * frame_number * vstream->r_frame_rate.den) / vstream->r_frame_rate.num; - double curr_time_stamp; - - // find point in time - av_seek_frame(_format_context, -1, (long long)time_stamp, - AVSEEK_FLAG_BACKWARD); - - // Okay, now we're at the nearest keyframe behind our timestamp. - // Hurry up and move through frames until we find a frame just after it. - _codec_context->hurry_up = true; - do { - int err = read_video_frame(&packet); - if (err < 0) { - return false; - } - - curr_time_stamp = (((double)AV_TIME_BASE * packet.pts) / - ((double)packet.duration * av_q2d(vstream->r_frame_rate))); - if (curr_time_stamp > time_stamp) { - break; - } - -#if LIBAVCODEC_VERSION_INT < 3414272 - avcodec_decode_video(_codec_context, _frame, &got_frame, packet.data, packet.size); -#else - avcodec_decode_video2(_codec_context, _frame, &got_frame, &packet); -#endif - - av_free_packet(&packet); - } while (true); - - _codec_context->hurry_up = false; - // Now near frame with Packet ready for decode (and free) - } - - // Now we have a packet from someone. Lets get this in a frame - - int frame_finished; - - // Is this a packet from the video stream? - if (packet.stream_index == _stream_number) { - // Decode video frame -#if LIBAVCODEC_VERSION_INT < 3414272 - avcodec_decode_video(_codec_context, _frame, &frame_finished, packet.data, packet.size); -#else - avcodec_decode_video2(_codec_context, _frame, &frame_finished, &packet); -#endif - - // Did we get a video frame? - if (frame_finished) { - // Convert the image from its native format to RGB -#ifdef HAVE_SWSCALE - // Note from pro-rsoft: ffmpeg removed img_convert and told - // everyone to use sws_scale instead - that's why I wrote - // this code. I have no idea if it works well or not, but - // it seems to compile and run without crashing. 
- PixelFormat dst_format; - if (_codec_context->pix_fmt != PIX_FMT_RGB32) { - dst_format = PIX_FMT_BGR24; - } else { - dst_format = PIX_FMT_RGB32; - } - struct SwsContext *convert_ctx = sws_getContext(_codec_context->width, _codec_context->height, - _codec_context->pix_fmt, _codec_context->width, _codec_context->height, - dst_format, 2, NULL, NULL, NULL); - nassertr(convert_ctx != NULL, false); - sws_scale(convert_ctx, _frame->data, _frame->linesize, - 0, _codec_context->height, _frame_out->data, _frame_out->linesize); - sws_freeContext(convert_ctx); -#else - if (_codec_context->pix_fmt != PIX_FMT_RGB32) { - img_convert((AVPicture *)_frame_out, PIX_FMT_BGR24, - (AVPicture *)_frame, _codec_context->pix_fmt, - _codec_context->width, _codec_context->height); - - } else { // _codec_context->pix_fmt == PIX_FMT_RGB32 - img_convert((AVPicture *)_frame_out, PIX_FMT_RGB32, - (AVPicture *)_frame, _codec_context->pix_fmt, - _codec_context->width, _codec_context->height); - } -#endif - } - } - - // Free the packet that was allocated by av_read_frame - av_free_packet(&packet); - - return true; -} - -//////////////////////////////////////////////////////////////////// -// Function: FFMpegTexture::VideoStream::read -// Access: Public -// Description: Sets up the stream to read the indicated file. -// Returns true on success, false on failure. -//////////////////////////////////////////////////////////////////// -bool FFMpegTexture::VideoStream:: -read(const Filename &filename) { - // Clear out the last stream - clear(); - - string os_specific = filename.to_os_specific(); - // Open video file - int result = av_open_input_file(&_format_context, os_specific.c_str(), NULL, - 0, NULL); - if (result != 0) { - grutil_cat.error() << "ffmpeg AVERROR: " << result << endl; - // Don't call clear(), because nothing happened yet - return false; - } - - // Retrieve stream information - result = av_find_stream_info(_format_context); - if (result < 0) { - grutil_cat.error() << "ffmpeg AVERROR: " << result << endl; - clear(); - return false; - } - dump_format(_format_context, 0, os_specific.c_str(), false); - - _stream_number = -1; - for(int i = 0; i < _format_context->nb_streams; i++) { - if ((*_format_context->streams[i]->codec).codec_type == CODEC_TYPE_VIDEO) { - _stream_number = i; - break; - } - } - - if (_stream_number == -1) { - grutil_cat.error() - << "ffmpeg: no stream found with codec of type CODEC_TYPE_VIDEO" << endl; - clear(); - return false; - } - - // Get a pointer to the codec context for the video stream - AVCodecContext *codec_context = _format_context->streams[_stream_number]->codec; - - if (grutil_cat.is_debug()) { - grutil_cat.debug() - << "ffmpeg: codec id is " << codec_context->codec_id << endl; - } - - // Find the decoder for the video stream - _codec = avcodec_find_decoder(codec_context->codec_id); - if (_codec == NULL) { - grutil_cat.error() << "ffmpeg: no appropriate decoder found" << endl; - clear(); - return false; - } - - if (_codec->capabilities & CODEC_CAP_TRUNCATED) { - codec_context->flags |= CODEC_FLAG_TRUNCATED; - } - - // Open codec - _codec_context = codec_context; - result = avcodec_open(_codec_context, _codec); - if (result < 0) { - grutil_cat.error() << "ffmpeg AVERROR: " << result << endl; - _codec_context = NULL; - clear(); - return false; - } - - _frame = avcodec_alloc_frame(); - - if (_codec_context->pix_fmt != PIX_FMT_RGB32) { - _frame_out = avcodec_alloc_frame(); - if (_frame_out == NULL) { - grutil_cat.error() - << "ffmpeg: unable to allocate AVPFrame (BGR24)" << endl; - 
clear(); - return false; - } - - // Determine required buffer size and allocate buffer - _image_size_bytes = avpicture_get_size(PIX_FMT_BGR24, _codec_context->width, - _codec_context->height); - - _raw_data = new uint8_t[_image_size_bytes]; - - // Assign appropriate parts of buffer to image planes in _frameRGB - avpicture_fill((AVPicture *)_frame_out, _raw_data, PIX_FMT_BGR24, - _codec_context->width, _codec_context->height); - - } else { - _frame_out = avcodec_alloc_frame(); - if (_frame_out == NULL) { - grutil_cat.error() - << "ffmpeg: unable to allocate AVPFrame (RGBA32)" << endl; - clear(); - return false; - } - - // Determine required buffer size and allocate buffer - _image_size_bytes = avpicture_get_size(PIX_FMT_RGB32, _codec_context->width, - _codec_context->height); - - _raw_data = new uint8_t[_image_size_bytes]; - // Assign appropriate parts of buffer to image planes in _frameRGB - avpicture_fill((AVPicture *)_frame_out, _raw_data, PIX_FMT_RGB32, - _codec_context->width, _codec_context->height); - } - // We could put an option here for single channel frames. - - _next_frame_number = 0; - _filename = filename; - - return true; -} - - - -//////////////////////////////////////////////////////////////////// -// Function: FFMpegTexture::VideoStream::clear -// Access: Public -// Description: Stops the video playback and frees the associated -// resources. -//////////////////////////////////////////////////////////////////// -void FFMpegTexture::VideoStream:: -clear() { - if (_codec_context) { - avcodec_close(_codec_context); - _codec_context = NULL; - } - if (_format_context) { - av_close_input_file(_format_context); - _format_context = NULL; - } - if (_frame) { - av_free(_frame); - _frame = NULL; - } - if (_frame_out) { - av_free(_frame_out); - _frame_out = NULL; - } - - _next_frame_number = 0; -} - -//////////////////////////////////////////////////////////////////// -// Function: FFMpegTexture::VideoStream::read_video_frame -// Access: Private -// Description: Fills packet with the next sequential video frame in -// the stream, skipping over all non-video frames. -// packet must later be deallocated with -// av_free_packet(). -// -// Returns nonnegative on success, or negative on error. -//////////////////////////////////////////////////////////////////// -int FFMpegTexture::VideoStream:: -read_video_frame(AVPacket *packet) { - int err = av_read_frame(_format_context, packet); - if (err < 0) { - return err; - } - - while (packet->stream_index != _stream_number) { - // It's not a video packet; free it and get another. - av_free_packet(packet); - - err = av_read_frame(_format_context, packet); - if (err < 0) { - grutil_cat.debug() - << "Got error " << err << " reading frame.\n"; - return err; - } - } - - // This is a video packet, return it. - return err; -} - - -#endif // HAVE_FFMpeg - +// Filename: ffmpegTexture.cxx +// Created by: zacpavlov (05May06) +// +//////////////////////////////////////////////////////////////////// +// +// PANDA 3D SOFTWARE +// Copyright (c) Carnegie Mellon University. All rights reserved. +// +// All use of this software is subject to the terms of the revised BSD +// license. You should have received a copy of this license along +// with this source code in a file named "LICENSE." 
+// +//////////////////////////////////////////////////////////////////// + +#include "pandabase.h" + +#ifdef HAVE_FFMPEG +#include "ffmpegTexture.h" +#include "clockObject.h" +#include "config_gobj.h" +#include "config_grutil.h" +#include "bamCacheRecord.h" +#include "bamReader.h" + +TypeHandle FFMpegTexture::_type_handle; + +//////////////////////////////////////////////////////////////////// +// Function: FFMpegTexture::Constructor +// Access: Published +// Description: +//////////////////////////////////////////////////////////////////// +FFMpegTexture:: +FFMpegTexture(const string &name) : + VideoTexture(name) +{ +} + +//////////////////////////////////////////////////////////////////// +// Function: FFMpegTexture::Copy Constructor +// Access: Protected +// Description: Use FFmpegTexture::make_copy() to make a duplicate copy of +// an existing FFMpegTexture. +//////////////////////////////////////////////////////////////////// +FFMpegTexture:: +FFMpegTexture(const FFMpegTexture ©) : + VideoTexture(copy), + _pages(copy._pages) +{ + nassertv(false); +} + +//////////////////////////////////////////////////////////////////// +// Function: FFMpegTexture::Destructor +// Access: Published, Virtual +// Description: I'm betting that texture takes care of the, so we'll just do a clear. +//////////////////////////////////////////////////////////////////// +FFMpegTexture:: +~FFMpegTexture() { + clear(); +} + +//////////////////////////////////////////////////////////////////// +// Function: FFMpegTexture::do_make_copy +// Access: Protected, Virtual +// Description: Returns a new copy of the same Texture. This copy, +// if applied to geometry, will be copied into texture +// as a separate texture from the original, so it will +// be duplicated in texture memory (and may be +// independently modified if desired). +// +// If the Texture is an FFMpegTexture, the resulting +// duplicate may be animated independently of the +// original. +//////////////////////////////////////////////////////////////////// +PT(Texture) FFMpegTexture:: +do_make_copy() { + PT(FFMpegTexture) tex = new FFMpegTexture(get_name()); + tex->do_assign(*this); + + return tex.p(); +} + +//////////////////////////////////////////////////////////////////// +// Function: FFMpegTexture::do_assign +// Access: Protected +// Description: Implements make_copy(). +//////////////////////////////////////////////////////////////////// +void FFMpegTexture:: +do_assign(const FFMpegTexture ©) { + VideoTexture::do_assign(copy); + _pages = copy._pages; +} + +//////////////////////////////////////////////////////////////////// +// Function: FFMPegTexture::modify_page +// Access: Private +// Description: Returns a reference to the zth VideoPage (level) of +// the texture. In the case of a 2-d texture, there is +// only one page, level 0; but cube maps and 3-d +// textures have more. +//////////////////////////////////////////////////////////////////// +FFMpegTexture::VideoPage &FFMpegTexture:: +modify_page(int z) { + nassertr(z < _z_size, _pages[0]); + while (z >= (int)_pages.size()) { + _pages.push_back(VideoPage()); + } + return _pages[z]; +} + +//////////////////////////////////////////////////////////////////// +// Function: FFmpegTexture::do_reconsider_video_properties +// Access: Private +// Description: Resets the internal Texture properties when a new +// video file is loaded. Returns true if the new image +// is valid, false otherwise. 
+//////////////////////////////////////////////////////////////////// +bool FFMpegTexture:: +do_reconsider_video_properties(const FFMpegTexture::VideoStream &stream, + int num_components, int z, + const LoaderOptions &options) { + double frame_rate = 0.0f; + int num_frames = 0; + if (!stream._codec_context) { + // printf("not valid yet\n"); + return true; + } + + AVStream *vstream = stream._format_context->streams[stream._stream_number]; + + if (stream.is_from_file()) { + // frame rate comes from ffmpeg as an avRational. + frame_rate = vstream->r_frame_rate.num/(float)vstream->r_frame_rate.den; + + // Number of frames is a little questionable if we've got variable + // frame rate. Duration comes in as a generic timestamp, + // and is therefore multiplied by AV_TIME_BASE. + num_frames = (int)((stream._format_context->duration*frame_rate)/AV_TIME_BASE); + if (grutil_cat.is_debug()) { + grutil_cat.debug() + << "Loaded " << stream._filename << ", " << num_frames << " frames at " + << frame_rate << " fps\n"; + } + } + + int width = stream._codec_context->width; + int height = stream._codec_context->height; + + int x_size = width; + int y_size = height; + + if (Texture::get_textures_power_2() != ATS_none) { + x_size = up_to_power_2(width); + y_size = up_to_power_2(height); + } + + if (grutil_cat.is_debug()) { + grutil_cat.debug() + << "Video stream is " << width << " by " << height + << " pixels; fitting in texture " << x_size << " by " + << y_size << " texels.\n"; + } + + if (!do_reconsider_image_properties(x_size, y_size, num_components, + T_unsigned_byte, z, options)) { + return false; + } + + if (_loaded_from_image && + (get_video_width() != width || get_video_height() != height || + get_num_frames() != num_frames || get_frame_rate() != frame_rate)) { + grutil_cat.error() + << "Video properties have changed for texture " << get_name() + << " level " << z << ".\n"; + return false; + } + + set_frame_rate(frame_rate); + set_num_frames(num_frames); + set_video_size(width, height); + + // By default, the newly-loaded video stream will immediately start + // looping. + loop(true); + + return true; +} + +//////////////////////////////////////////////////////////////////// +// Function: FFMpegTexture::make_texture +// Access: Public, Static +// Description: A factory function to make a new FFMpegTexture, used +// to pass to the TexturePool. +//////////////////////////////////////////////////////////////////// +PT(Texture) FFMpegTexture:: +make_texture() { + return new FFMpegTexture; +} + +//////////////////////////////////////////////////////////////////// +// Function: FFMPegTexture::update_frame +// Access: Protected, Virtual +// Description: Called once per frame, as needed, to load the new +// image contents. +//////////////////////////////////////////////////////////////////// +void FFMpegTexture:: +update_frame(int frame) { + int max_z = min(_z_size, (int)_pages.size()); + for (int z = 0; z < max_z; ++z) { + VideoPage &page = _pages.at(z); + if (page._color.is_valid() || page._alpha.is_valid()) { + do_modify_ram_image(); + } + if (page._color.is_valid()) { + nassertv(_num_components >= 3 && _component_width == 1); + + // A little different from the opencv implementation + // The frame is kept on the stream itself. This is partially + // because there is a conversion step that must be done for + // every video (I've gotten very odd results with any video + // that I don't convert, even if the IO formats are the same!) 
+ if (page._color.get_frame_data(frame)) { + nassertv(get_video_width() <= _x_size && get_video_height() <= _y_size); + unsigned char *dest = _ram_images[0]._image.p() + do_get_expected_ram_page_size() * z; + int dest_row_width = (_x_size * _num_components * _component_width); + + // Simplest case, where we deal with an rgb texture + if (_num_components == 3) { + int source_row_width=3*page._color._codec_context->width; + unsigned char * source=(unsigned char *)page._color._frame_out->data[0] + +source_row_width*(get_video_height()-1); + + // row by row copy. + for (int y = 0; y < get_video_height(); ++y) { + memcpy(dest, source, source_row_width); + dest += dest_row_width; + source -= source_row_width; + } + // Next best option, we're a 4 component alpha video on one stream + } else if (page._color._codec_context->pix_fmt==PIX_FMT_RGB32) { + int source_row_width= page._color._codec_context->width * 4; + unsigned char * source=(unsigned char *)page._color._frame_out->data[0] + +source_row_width*(get_video_height()-1); + + // row by row copy. + for (int y = 0; y < get_video_height(); ++y) { + memcpy(dest,source,source_row_width); + dest += dest_row_width; + source -= source_row_width; + } + // Otherwise, we've got to be tricky + } else { + int source_row_width= page._color._codec_context->width * 3; + unsigned char * source=(unsigned char *)page._color._frame_out->data[0] + +source_row_width*(get_video_height()-1); + + // The harder case--interleave the color in with the alpha, + // pixel by pixel. + nassertv(_num_components == 4); + for (int y = 0; y < get_video_height(); ++y) { + int dx = 0; + int sx = 0; + for (int x = 0; x < get_video_width(); ++x) { + dest[dx] = source[sx]; + dest[dx + 1] = source[sx + 1]; + dest[dx + 2] = source[sx + 2]; + dx += 4; + sx += 3; + } + dest += dest_row_width; + source -= source_row_width; + } + } + + + } + } + + if (page._alpha.is_valid()) { + nassertv(_num_components == 4 && _component_width == 1); + + if (page._alpha.get_frame_data(frame)) { + nassertv(get_video_width() <= _x_size && get_video_height() <= _y_size); + + // Currently, we assume the alpha has been converted to an rgb format + // There is no reason it can't be a 256 color grayscale though. + unsigned char *dest = _ram_images[0]._image.p() + do_get_expected_ram_page_size() * z; + int dest_row_width = (_x_size * _num_components * _component_width); + + int source_row_width= page._alpha._codec_context->width * 3; + unsigned char * source=(unsigned char *)page._alpha._frame_out->data[0] + +source_row_width*(get_video_height()-1); + for (int y = 0; y < get_video_height(); ++y) { + int dx = 3; + int sx = 0; + for (int x = 0; x < get_video_width(); ++x) { + dest[dx] = source[sx]; + dx += 4; + sx += 3; + } + dest += dest_row_width; + source -= source_row_width; + } + + } + } + } +} + + +//////////////////////////////////////////////////////////////////// +// Function: FFMpegTexture::do_read_one +// Access: Protected, Virtual +// Description: Combines a color and alpha video image from the two +// indicated filenames. Both must be the same kind of +// video with similar properties. 
+//////////////////////////////////////////////////////////////////// +bool FFMpegTexture:: +do_read_one(const Filename &fullpath, const Filename &alpha_fullpath, + int z, int n, int primary_file_num_channels, int alpha_file_channel, + const LoaderOptions &options, + bool header_only, BamCacheRecord *record) { + if (record != (BamCacheRecord *)NULL) { + record->add_dependent_file(fullpath); + } + + nassertr(n == 0, false); + nassertr(z >= 0 && z < get_z_size(), false); + + VideoPage &page = modify_page(z); + if (!page._color.read(fullpath)) { + grutil_cat.error() + << "FFMpeg couldn't read " << fullpath << " as video.\n"; + return false; + } + + if (!alpha_fullpath.empty()) { + if (!page._alpha.read(alpha_fullpath)) { + grutil_cat.error() + << "FFMPEG couldn't read " << alpha_fullpath << " as video.\n"; + page._color.clear(); + return false; + } + } + + + if (z == 0) { + if (!has_name()) { + set_name(fullpath.get_basename_wo_extension()); + } + if (!_filename.empty()) { + _filename = fullpath; + _alpha_filename = alpha_fullpath; + } + + _fullpath = fullpath; + _alpha_fullpath = alpha_fullpath; + } + if (page._color._codec_context->pix_fmt==PIX_FMT_RGB32) { + // There had better not be an alpha interleave here. + nassertr(alpha_fullpath.empty(), false); + + _primary_file_num_channels = 4; + _alpha_file_channel = 0; + if (!do_reconsider_video_properties(page._color, 4, z, options)) { + page._color.clear(); + return false; + } + + } else { + _primary_file_num_channels = 3; + _alpha_file_channel = alpha_file_channel; + + if (page._alpha.is_valid()) { + if (!do_reconsider_video_properties(page._color, 4, z, options)) { + page._color.clear(); + page._alpha.clear(); + return false; + } + if (!do_reconsider_video_properties(page._alpha, 4, z, options)) { + page._color.clear(); + page._alpha.clear(); + return false; + } + } else { + if (!do_reconsider_video_properties(page._color, 3, z, options)) { + page._color.clear(); + page._alpha.clear(); + return false; + } + + } + + } + set_loaded_from_image(); + clear_current_frame(); + update_frame(0); + return true; +} + + +//////////////////////////////////////////////////////////////////// +// Function: FFMpegTexture::do_load_one +// Access: Protected, Virtual +// Description: Resets the texture (or the particular level of the +// texture) to the indicated static image. +//////////////////////////////////////////////////////////////////// +bool FFMpegTexture:: +do_load_one(const PNMImage &pnmimage, const string &name, int z, int n, + const LoaderOptions &options) { + if (z <= (int)_pages.size()) { + VideoPage &page = modify_page(z); + page._color.clear(); + } + + return Texture::do_load_one(pnmimage, name, z, n, options); +} + + + + +//////////////////////////////////////////////////////////////////// +// Function: FFMpegTexture::register_with_read_factory +// Access: Public, Static +// Description: Factory method to generate a Texture object +//////////////////////////////////////////////////////////////////// +void FFMpegTexture:: +register_with_read_factory() { + // Since Texture is such a funny object that is reloaded from the + // TexturePool each time, instead of actually being read fully from + // the bam file, and since the VideoTexture and FFMpegTexture + // classes don't really add any useful data to the bam record, we + // don't need to define make_from_bam(), fillin(), or + // write_datagram() in this class--we just inherit the same + // functions from Texture. 
+ + // We do, however, have to register this class with the BamReader, + // to avoid warnings about creating the wrong kind of object from + // the bam file. + BamReader::get_factory()->register_factory(get_class_type(), make_from_bam); +} + +//////////////////////////////////////////////////////////////////// +// Function: FFMpegTexture::VideoStream::Constructor +// Access: Public +// Description: +//////////////////////////////////////////////////////////////////// +FFMpegTexture::VideoStream:: +VideoStream() : + _codec_context(NULL), + _format_context(NULL), + _frame(NULL), + _frame_out(NULL), + _next_frame_number(0) +{ + // printf("creating video stream\n"); +} + +//////////////////////////////////////////////////////////////////// +// Function: FFMpegTexture::VideoStream::Copy Constructor +// Access: Public +// Description: +//////////////////////////////////////////////////////////////////// +FFMpegTexture::VideoStream:: +VideoStream(const FFMpegTexture::VideoStream ©) : + _codec_context(NULL), + _format_context(NULL), + _frame(NULL), + _frame_out(NULL), + _next_frame_number(0) +{ + // Rather than copying the _capture pointer, we must open a new + // stream that references the same file. + if (copy.is_valid()) { + if (copy.is_from_file()) { + read(copy._filename); + } + } +} + +//////////////////////////////////////////////////////////////////// +// Function: FFMpegTexture::VideoStream::Copy Constructor +// Access: Public +// Description: +//////////////////////////////////////////////////////////////////// +FFMpegTexture::VideoStream:: +~VideoStream() { + clear(); +} + +//////////////////////////////////////////////////////////////////// +// Function: FFMpegTexture::VideoStream::get_frame_data +// Access: Public +// Description: Returns the pointer to the beginning of the +// decompressed buffer for the indicated frame number. +// It is most efficient to call this in increasing order +// of frame number. +//////////////////////////////////////////////////////////////////// +bool FFMpegTexture::VideoStream:: +get_frame_data(int frame_number) { + nassertr(is_valid(), false); + int coming_from = _next_frame_number; + + _next_frame_number = frame_number + 1; + AVPacket packet; + AVStream *vstream = _format_context->streams[_stream_number]; + + int got_frame; + + // Can we get to our target frame just by skipping forward a few + // frames? We arbitrarily draw the line at 50 frames for now. + if (frame_number >= coming_from && frame_number - coming_from < 50) { + + if (frame_number > coming_from) { + // Ok, we do have to skip a few frames. + _codec_context->hurry_up = true; + while (frame_number > coming_from) { + int err = read_video_frame(&packet); + if (err < 0) { + return false; + + } +#if LIBAVCODEC_VERSION_INT < 3414272 + avcodec_decode_video(_codec_context, _frame, &got_frame, packet.data, packet.size); +#else + avcodec_decode_video2(_codec_context, _frame, &got_frame, &packet); +#endif + av_free_packet(&packet); + ++coming_from; + } + _codec_context->hurry_up = false; + } + + // Now we're ready to read a frame. + int err = read_video_frame(&packet); + if (err < 0) { + return false; + } + + } else { + // We have to skip backward, or maybe forward a whole bunch of + // frames. Better off seeking through the stream. 
+ + double time_stamp = ((double)AV_TIME_BASE * frame_number * vstream->r_frame_rate.den) / vstream->r_frame_rate.num; + double curr_time_stamp; + + // find point in time + av_seek_frame(_format_context, -1, (long long)time_stamp, + AVSEEK_FLAG_BACKWARD); + + // Okay, now we're at the nearest keyframe behind our timestamp. + // Hurry up and move through frames until we find a frame just after it. + _codec_context->hurry_up = true; + do { + int err = read_video_frame(&packet); + if (err < 0) { + return false; + } + + curr_time_stamp = (((double)AV_TIME_BASE * packet.pts) / + ((double)packet.duration * av_q2d(vstream->r_frame_rate))); + if (curr_time_stamp > time_stamp) { + break; + } + +#if LIBAVCODEC_VERSION_INT < 3414272 + avcodec_decode_video(_codec_context, _frame, &got_frame, packet.data, packet.size); +#else + avcodec_decode_video2(_codec_context, _frame, &got_frame, &packet); +#endif + + av_free_packet(&packet); + } while (true); + + _codec_context->hurry_up = false; + // Now near frame with Packet ready for decode (and free) + } + + // Now we have a packet from someone. Lets get this in a frame + + int frame_finished; + + // Is this a packet from the video stream? + if (packet.stream_index == _stream_number) { + // Decode video frame +#if LIBAVCODEC_VERSION_INT < 3414272 + avcodec_decode_video(_codec_context, _frame, &frame_finished, packet.data, packet.size); +#else + avcodec_decode_video2(_codec_context, _frame, &frame_finished, &packet); +#endif + + // Did we get a video frame? + if (frame_finished) { + // Convert the image from its native format to RGB +#ifdef HAVE_SWSCALE + // Note from pro-rsoft: ffmpeg removed img_convert and told + // everyone to use sws_scale instead - that's why I wrote + // this code. I have no idea if it works well or not, but + // it seems to compile and run without crashing. + PixelFormat dst_format; + if (_codec_context->pix_fmt != PIX_FMT_RGB32) { + dst_format = PIX_FMT_BGR24; + } else { + dst_format = PIX_FMT_RGB32; + } + struct SwsContext *convert_ctx = sws_getContext(_codec_context->width, _codec_context->height, + _codec_context->pix_fmt, _codec_context->width, _codec_context->height, + dst_format, 2, NULL, NULL, NULL); + nassertr(convert_ctx != NULL, false); + sws_scale(convert_ctx, _frame->data, _frame->linesize, + 0, _codec_context->height, _frame_out->data, _frame_out->linesize); + sws_freeContext(convert_ctx); +#else + if (_codec_context->pix_fmt != PIX_FMT_RGB32) { + img_convert((AVPicture *)_frame_out, PIX_FMT_BGR24, + (AVPicture *)_frame, _codec_context->pix_fmt, + _codec_context->width, _codec_context->height); + + } else { // _codec_context->pix_fmt == PIX_FMT_RGB32 + img_convert((AVPicture *)_frame_out, PIX_FMT_RGB32, + (AVPicture *)_frame, _codec_context->pix_fmt, + _codec_context->width, _codec_context->height); + } +#endif + } + } + + // Free the packet that was allocated by av_read_frame + av_free_packet(&packet); + + return true; +} + +//////////////////////////////////////////////////////////////////// +// Function: FFMpegTexture::VideoStream::read +// Access: Public +// Description: Sets up the stream to read the indicated file. +// Returns true on success, false on failure. 
+//////////////////////////////////////////////////////////////////// +bool FFMpegTexture::VideoStream:: +read(const Filename &filename) { + // Clear out the last stream + clear(); + + string os_specific = filename.to_os_specific(); + // Open video file + int result = av_open_input_file(&_format_context, os_specific.c_str(), NULL, + 0, NULL); + if (result != 0) { + grutil_cat.error() << "ffmpeg AVERROR: " << result << endl; + // Don't call clear(), because nothing happened yet + return false; + } + + // Retrieve stream information + result = av_find_stream_info(_format_context); + if (result < 0) { + grutil_cat.error() << "ffmpeg AVERROR: " << result << endl; + clear(); + return false; + } + dump_format(_format_context, 0, os_specific.c_str(), false); + + _stream_number = -1; + for(int i = 0; i < _format_context->nb_streams; i++) { + if ((*_format_context->streams[i]->codec).codec_type == CODEC_TYPE_VIDEO) { + _stream_number = i; + break; + } + } + + if (_stream_number == -1) { + grutil_cat.error() + << "ffmpeg: no stream found with codec of type CODEC_TYPE_VIDEO" << endl; + clear(); + return false; + } + + // Get a pointer to the codec context for the video stream + AVCodecContext *codec_context = _format_context->streams[_stream_number]->codec; + + if (grutil_cat.is_debug()) { + grutil_cat.debug() + << "ffmpeg: codec id is " << codec_context->codec_id << endl; + } + + // Find the decoder for the video stream + _codec = avcodec_find_decoder(codec_context->codec_id); + if (_codec == NULL) { + grutil_cat.error() << "ffmpeg: no appropriate decoder found" << endl; + clear(); + return false; + } + + if (_codec->capabilities & CODEC_CAP_TRUNCATED) { + codec_context->flags |= CODEC_FLAG_TRUNCATED; + } + + // Open codec + _codec_context = codec_context; + result = avcodec_open(_codec_context, _codec); + if (result < 0) { + grutil_cat.error() << "ffmpeg AVERROR: " << result << endl; + _codec_context = NULL; + clear(); + return false; + } + + _frame = avcodec_alloc_frame(); + + if (_codec_context->pix_fmt != PIX_FMT_RGB32) { + _frame_out = avcodec_alloc_frame(); + if (_frame_out == NULL) { + grutil_cat.error() + << "ffmpeg: unable to allocate AVPFrame (BGR24)" << endl; + clear(); + return false; + } + + // Determine required buffer size and allocate buffer + _image_size_bytes = avpicture_get_size(PIX_FMT_BGR24, _codec_context->width, + _codec_context->height); + + _raw_data = new uint8_t[_image_size_bytes]; + + // Assign appropriate parts of buffer to image planes in _frameRGB + avpicture_fill((AVPicture *)_frame_out, _raw_data, PIX_FMT_BGR24, + _codec_context->width, _codec_context->height); + + } else { + _frame_out = avcodec_alloc_frame(); + if (_frame_out == NULL) { + grutil_cat.error() + << "ffmpeg: unable to allocate AVPFrame (RGBA32)" << endl; + clear(); + return false; + } + + // Determine required buffer size and allocate buffer + _image_size_bytes = avpicture_get_size(PIX_FMT_RGB32, _codec_context->width, + _codec_context->height); + + _raw_data = new uint8_t[_image_size_bytes]; + // Assign appropriate parts of buffer to image planes in _frameRGB + avpicture_fill((AVPicture *)_frame_out, _raw_data, PIX_FMT_RGB32, + _codec_context->width, _codec_context->height); + } + // We could put an option here for single channel frames. 
+ + _next_frame_number = 0; + _filename = filename; + + return true; +} + + + +//////////////////////////////////////////////////////////////////// +// Function: FFMpegTexture::VideoStream::clear +// Access: Public +// Description: Stops the video playback and frees the associated +// resources. +//////////////////////////////////////////////////////////////////// +void FFMpegTexture::VideoStream:: +clear() { + if (_codec_context) { + avcodec_close(_codec_context); + _codec_context = NULL; + } + if (_format_context) { + av_close_input_file(_format_context); + _format_context = NULL; + } + if (_frame) { + av_free(_frame); + _frame = NULL; + } + if (_frame_out) { + av_free(_frame_out); + _frame_out = NULL; + } + + _next_frame_number = 0; +} + +//////////////////////////////////////////////////////////////////// +// Function: FFMpegTexture::VideoStream::read_video_frame +// Access: Private +// Description: Fills packet with the next sequential video frame in +// the stream, skipping over all non-video frames. +// packet must later be deallocated with +// av_free_packet(). +// +// Returns nonnegative on success, or negative on error. +//////////////////////////////////////////////////////////////////// +int FFMpegTexture::VideoStream:: +read_video_frame(AVPacket *packet) { + int err = av_read_frame(_format_context, packet); + if (err < 0) { + return err; + } + + while (packet->stream_index != _stream_number) { + // It's not a video packet; free it and get another. + av_free_packet(packet); + + err = av_read_frame(_format_context, packet); + if (err < 0) { + grutil_cat.debug() + << "Got error " << err << " reading frame.\n"; + return err; + } + } + + // This is a video packet, return it. + return err; +} + + +#endif // HAVE_FFMpeg + diff --git a/panda/src/movies/ffmpegAudioCursor.cxx b/panda/src/movies/ffmpegAudioCursor.cxx index 48c8881252..2ad08a1a99 100644 --- a/panda/src/movies/ffmpegAudioCursor.cxx +++ b/panda/src/movies/ffmpegAudioCursor.cxx @@ -1,311 +1,311 @@ -// Filename: ffmpegAudioCursor.cxx -// Created by: jyelon (01Aug2007) -// -//////////////////////////////////////////////////////////////////// -// -// PANDA 3D SOFTWARE -// Copyright (c) Carnegie Mellon University. All rights reserved. -// -// All use of this software is subject to the terms of the revised BSD -// license. You should have received a copy of this license along -// with this source code in a file named "LICENSE." 
-// -//////////////////////////////////////////////////////////////////// - -#ifdef HAVE_FFMPEG - -#include "ffmpegAudioCursor.h" -extern "C" { - #include "libavcodec/avcodec.h" - #include "libavformat/avformat.h" -} - -TypeHandle FfmpegAudioCursor::_type_handle; - -//////////////////////////////////////////////////////////////////// -// Function: FfmpegAudioCursor::Constructor -// Access: Protected -// Description: xxx -//////////////////////////////////////////////////////////////////// -FfmpegAudioCursor:: -FfmpegAudioCursor(FfmpegAudio *src) : - MovieAudioCursor(src), - _filename(src->_filename), - _packet(0), - _packet_data(0), - _format_ctx(0), - _audio_ctx(0), - _buffer(0), - _buffer_alloc(0) -{ - string url = "pandavfs:"; - url += _filename; - if (av_open_input_file(&_format_ctx, url.c_str(), NULL, 0, NULL)!=0) { - cleanup(); - return; - } - - if (av_find_stream_info(_format_ctx)<0) { - cleanup(); - return; - } - - // Find the audio stream - for(int i=0; i<_format_ctx->nb_streams; i++) { - if(_format_ctx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO) { - _audio_index = i; - _audio_ctx = _format_ctx->streams[i]->codec; - _audio_timebase = av_q2d(_format_ctx->streams[i]->time_base); - _audio_rate = _audio_ctx->sample_rate; - _audio_channels = _audio_ctx->channels; - } - } - - if (_audio_ctx == 0) { - cleanup(); - return; - } - - AVCodec *pAudioCodec=avcodec_find_decoder(_audio_ctx->codec_id); - if(pAudioCodec == 0) { - cleanup(); - return; - } - if(avcodec_open(_audio_ctx, pAudioCodec)<0) { - cleanup(); - return; - } - - _length = (_format_ctx->duration * 1.0) / AV_TIME_BASE; - _can_seek = true; - _can_seek_fast = true; - - _packet = new AVPacket; - _buffer_size = AVCODEC_MAX_AUDIO_FRAME_SIZE / 2; - _buffer_alloc = new PN_int16[_buffer_size + 128]; - if ((_packet == 0)||(_buffer_alloc == 0)) { - cleanup(); - return; - } - memset(_packet, 0, sizeof(AVPacket)); - - // Align the buffer to a 16-byte boundary - // The ffmpeg codec likes this, because it uses SSE/SSE2. - _buffer = _buffer_alloc; - while (((size_t)_buffer) & 15) { - _buffer += 1; - } - - fetch_packet(); - _initial_dts = _packet->dts; - _last_seek = 0; - _samples_read = 0; - _buffer_head = 0; - _buffer_tail = 0; -} - -//////////////////////////////////////////////////////////////////// -// Function: FfmpegAudioCursor::Destructor -// Access: Protected, Virtual -// Description: xxx -//////////////////////////////////////////////////////////////////// -FfmpegAudioCursor:: -~FfmpegAudioCursor() { - cleanup(); -} - -//////////////////////////////////////////////////////////////////// -// Function: FfmpegAudioCursor::cleanup -// Access: Public -// Description: Reset to a standard inactive state. -//////////////////////////////////////////////////////////////////// -void FfmpegAudioCursor:: -cleanup() { - if (_packet) { - if (_packet->data) { - av_free_packet(_packet); - } - delete _packet; - _packet = 0; - } - if (_buffer_alloc) { - delete[] _buffer_alloc; - _buffer_alloc = 0; - _buffer = 0; - } - if ((_audio_ctx)&&(_audio_ctx->codec)) { - avcodec_close(_audio_ctx); - } - _audio_ctx = 0; - if (_format_ctx) { - av_close_input_file(_format_ctx); - _format_ctx = 0; - } - _audio_ctx = 0; - _audio_index = -1; -} - -//////////////////////////////////////////////////////////////////// -// Function: FfmpegAudioCursor::fetch_packet -// Access: Protected -// Description: Fetches an audio packet and stores it in the -// packet buffer. Also sets packet_size and packet_data. 
-//////////////////////////////////////////////////////////////////// -void FfmpegAudioCursor:: -fetch_packet() { - if (_packet->data) { - av_free_packet(_packet); - } - while (av_read_frame(_format_ctx, _packet) >= 0) { - if (_packet->stream_index == _audio_index) { - _packet_size = _packet->size; - _packet_data = _packet->data; - return; - } - av_free_packet(_packet); - } - _packet->data = 0; - _packet_size = 0; - _packet_data = 0; -} - -//////////////////////////////////////////////////////////////////// -// Function: FfmpegAudioCursor::reload_buffer -// Access: Protected -// Description: Reloads the audio buffer by decoding audio packets -// until one of those audio packets finally yields -// some samples. If we encounter the end of the -// stream, we synthesize silence. -//////////////////////////////////////////////////////////////////// -void FfmpegAudioCursor:: -reload_buffer() { - - - while (_buffer_head == _buffer_tail) { - // If we're out of packets, generate silence. - if (_packet->data == 0) { - _buffer_head = 0; - _buffer_tail = _buffer_size; - memset(_buffer, 0, _buffer_size * 2); - return; - } else if (_packet_size > 0) { - int bufsize = _buffer_size * 2; -#if LIBAVCODEC_VERSION_INT < 3414272 -#if LIBAVCODEC_VERSION_INT < 3349504 - int len = avcodec_decode_audio(_audio_ctx, _buffer, &bufsize, - _packet_data, _packet_size); - movies_debug("avcodec_decode_audio returned " << len); -#else - int len = avcodec_decode_audio2(_audio_ctx, _buffer, &bufsize, - _packet_data, _packet_size); - movies_debug("avcodec_decode_audio2 returned " << len); -#endif -#else - AVPacket pkt; - av_init_packet(&pkt); - pkt.data = _packet_data; - pkt.size = _packet_size; - int len = avcodec_decode_audio3(_audio_ctx, _buffer, &bufsize, &pkt); - movies_debug("avcodec_decode_audio3 returned " << len); - av_free_packet(&pkt); // Not sure about this -#endif - if (len <= 0) { - break; - } - _packet_data += len; - _packet_size -= len; - if (bufsize > 0) { - _buffer_head = 0; - _buffer_tail = (bufsize/2); - return; - } - } else { - fetch_packet(); - } - } -} - -//////////////////////////////////////////////////////////////////// -// Function: FfmpegAudioCursor::seek -// Access: Protected -// Description: Seeks to a target location. Afterward, the -// packet_time is guaranteed to be less than or -// equal to the specified time. -//////////////////////////////////////////////////////////////////// -void FfmpegAudioCursor:: -seek(double t) { - PN_int64 target_ts = (PN_int64)(t / _audio_timebase); - if (target_ts < (PN_int64)(_initial_dts)) { - // Attempts to seek before the first packet will fail. - target_ts = _initial_dts; - } - if (av_seek_frame(_format_ctx, _audio_index, target_ts, AVSEEK_FLAG_BACKWARD) < 0) { - movies_cat.error() << "Seek failure. Shutting down movie.\n"; - cleanup(); - return; - } - avcodec_close(_audio_ctx); - AVCodec *pAudioCodec=avcodec_find_decoder(_audio_ctx->codec_id); - if(pAudioCodec == 0) { - cleanup(); - return; - } - if(avcodec_open(_audio_ctx, pAudioCodec)<0) { - cleanup(); - return; - } - _buffer_head = 0; - _buffer_tail = 0; - fetch_packet(); - double ts = _packet->dts * _audio_timebase; - if (t > ts) { - int skip = (int)((t-ts) * _audio_rate); - read_samples(skip, 0); - } - _last_seek = t; - _samples_read = 0; -} - -//////////////////////////////////////////////////////////////////// -// Function: FfmpegAudioCursor::read_samples -// Access: Public, Virtual -// Description: Read audio samples from the stream. N is the -// number of samples you wish to read. 
Your buffer -// must be equal in size to N * channels. -// Multiple-channel audio will be interleaved. -//////////////////////////////////////////////////////////////////// -void FfmpegAudioCursor:: -read_samples(int n, PN_int16 *data) { - - //movies_debug("here!!! FfmpegAudioCursor n="< 0) { - - if (_buffer_head == _buffer_tail) { - reload_buffer(); - give_up_after --; - movies_debug("reload_buffer will give up in "< available) ? available : desired; - if (ncopy) { - if (data != 0) { - memcpy(data, _buffer + _buffer_head, ncopy * 2); - data += ncopy; - } - desired -= ncopy; - _buffer_head += ncopy; - } - - } - _samples_read += n; -} - -//////////////////////////////////////////////////////////////////// - -#endif // HAVE_FFMPEG +// Filename: ffmpegAudioCursor.cxx +// Created by: jyelon (01Aug2007) +// +//////////////////////////////////////////////////////////////////// +// +// PANDA 3D SOFTWARE +// Copyright (c) Carnegie Mellon University. All rights reserved. +// +// All use of this software is subject to the terms of the revised BSD +// license. You should have received a copy of this license along +// with this source code in a file named "LICENSE." +// +//////////////////////////////////////////////////////////////////// + +#ifdef HAVE_FFMPEG + +#include "ffmpegAudioCursor.h" +extern "C" { + #include "libavcodec/avcodec.h" + #include "libavformat/avformat.h" +} + +TypeHandle FfmpegAudioCursor::_type_handle; + +//////////////////////////////////////////////////////////////////// +// Function: FfmpegAudioCursor::Constructor +// Access: Protected +// Description: xxx +//////////////////////////////////////////////////////////////////// +FfmpegAudioCursor:: +FfmpegAudioCursor(FfmpegAudio *src) : + MovieAudioCursor(src), + _filename(src->_filename), + _packet(0), + _packet_data(0), + _format_ctx(0), + _audio_ctx(0), + _buffer(0), + _buffer_alloc(0) +{ + string url = "pandavfs:"; + url += _filename; + if (av_open_input_file(&_format_ctx, url.c_str(), NULL, 0, NULL)!=0) { + cleanup(); + return; + } + + if (av_find_stream_info(_format_ctx)<0) { + cleanup(); + return; + } + + // Find the audio stream + for(int i=0; i<_format_ctx->nb_streams; i++) { + if(_format_ctx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO) { + _audio_index = i; + _audio_ctx = _format_ctx->streams[i]->codec; + _audio_timebase = av_q2d(_format_ctx->streams[i]->time_base); + _audio_rate = _audio_ctx->sample_rate; + _audio_channels = _audio_ctx->channels; + } + } + + if (_audio_ctx == 0) { + cleanup(); + return; + } + + AVCodec *pAudioCodec=avcodec_find_decoder(_audio_ctx->codec_id); + if(pAudioCodec == 0) { + cleanup(); + return; + } + if(avcodec_open(_audio_ctx, pAudioCodec)<0) { + cleanup(); + return; + } + + _length = (_format_ctx->duration * 1.0) / AV_TIME_BASE; + _can_seek = true; + _can_seek_fast = true; + + _packet = new AVPacket; + _buffer_size = AVCODEC_MAX_AUDIO_FRAME_SIZE / 2; + _buffer_alloc = new PN_int16[_buffer_size + 128]; + if ((_packet == 0)||(_buffer_alloc == 0)) { + cleanup(); + return; + } + memset(_packet, 0, sizeof(AVPacket)); + + // Align the buffer to a 16-byte boundary + // The ffmpeg codec likes this, because it uses SSE/SSE2. 
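The constructor comment just above explains why the decode buffer is bumped to a 16-byte boundary: libavcodec's SSE/SSE2 paths want aligned output. A minimal stand-alone sketch of the same trick, with illustrative names that are not part of this change:

#include <cstdint>

// Over-allocate a little, then bump the working pointer to the next 16-byte
// boundary; the original pointer is the one that eventually gets delete[]'d.
static int16_t *align16(int16_t *raw) {
  while (((uintptr_t)raw) & 15) {
    ++raw;                       // each step advances 2 bytes; at most 7 steps
  }
  return raw;
}

// usage (illustrative):
//   int16_t *alloc  = new int16_t[needed + 8];   // 8 extra samples = 16 bytes of slack
//   int16_t *buffer = align16(alloc);            // decode into 'buffer', free 'alloc'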
+ _buffer = _buffer_alloc; + while (((size_t)_buffer) & 15) { + _buffer += 1; + } + + fetch_packet(); + _initial_dts = _packet->dts; + _last_seek = 0; + _samples_read = 0; + _buffer_head = 0; + _buffer_tail = 0; +} + +//////////////////////////////////////////////////////////////////// +// Function: FfmpegAudioCursor::Destructor +// Access: Protected, Virtual +// Description: xxx +//////////////////////////////////////////////////////////////////// +FfmpegAudioCursor:: +~FfmpegAudioCursor() { + cleanup(); +} + +//////////////////////////////////////////////////////////////////// +// Function: FfmpegAudioCursor::cleanup +// Access: Public +// Description: Reset to a standard inactive state. +//////////////////////////////////////////////////////////////////// +void FfmpegAudioCursor:: +cleanup() { + if (_packet) { + if (_packet->data) { + av_free_packet(_packet); + } + delete _packet; + _packet = 0; + } + if (_buffer_alloc) { + delete[] _buffer_alloc; + _buffer_alloc = 0; + _buffer = 0; + } + if ((_audio_ctx)&&(_audio_ctx->codec)) { + avcodec_close(_audio_ctx); + } + _audio_ctx = 0; + if (_format_ctx) { + av_close_input_file(_format_ctx); + _format_ctx = 0; + } + _audio_ctx = 0; + _audio_index = -1; +} + +//////////////////////////////////////////////////////////////////// +// Function: FfmpegAudioCursor::fetch_packet +// Access: Protected +// Description: Fetches an audio packet and stores it in the +// packet buffer. Also sets packet_size and packet_data. +//////////////////////////////////////////////////////////////////// +void FfmpegAudioCursor:: +fetch_packet() { + if (_packet->data) { + av_free_packet(_packet); + } + while (av_read_frame(_format_ctx, _packet) >= 0) { + if (_packet->stream_index == _audio_index) { + _packet_size = _packet->size; + _packet_data = _packet->data; + return; + } + av_free_packet(_packet); + } + _packet->data = 0; + _packet_size = 0; + _packet_data = 0; +} + +//////////////////////////////////////////////////////////////////// +// Function: FfmpegAudioCursor::reload_buffer +// Access: Protected +// Description: Reloads the audio buffer by decoding audio packets +// until one of those audio packets finally yields +// some samples. If we encounter the end of the +// stream, we synthesize silence. +//////////////////////////////////////////////////////////////////// +void FfmpegAudioCursor:: +reload_buffer() { + + + while (_buffer_head == _buffer_tail) { + // If we're out of packets, generate silence. 
+ if (_packet->data == 0) { + _buffer_head = 0; + _buffer_tail = _buffer_size; + memset(_buffer, 0, _buffer_size * 2); + return; + } else if (_packet_size > 0) { + int bufsize = _buffer_size * 2; +#if LIBAVCODEC_VERSION_INT < 3414272 +#if LIBAVCODEC_VERSION_INT < 3349504 + int len = avcodec_decode_audio(_audio_ctx, _buffer, &bufsize, + _packet_data, _packet_size); + movies_debug("avcodec_decode_audio returned " << len); +#else + int len = avcodec_decode_audio2(_audio_ctx, _buffer, &bufsize, + _packet_data, _packet_size); + movies_debug("avcodec_decode_audio2 returned " << len); +#endif +#else + AVPacket pkt; + av_init_packet(&pkt); + pkt.data = _packet_data; + pkt.size = _packet_size; + int len = avcodec_decode_audio3(_audio_ctx, _buffer, &bufsize, &pkt); + movies_debug("avcodec_decode_audio3 returned " << len); + av_free_packet(&pkt); // Not sure about this +#endif + if (len <= 0) { + break; + } + _packet_data += len; + _packet_size -= len; + if (bufsize > 0) { + _buffer_head = 0; + _buffer_tail = (bufsize/2); + return; + } + } else { + fetch_packet(); + } + } +} + +//////////////////////////////////////////////////////////////////// +// Function: FfmpegAudioCursor::seek +// Access: Protected +// Description: Seeks to a target location. Afterward, the +// packet_time is guaranteed to be less than or +// equal to the specified time. +//////////////////////////////////////////////////////////////////// +void FfmpegAudioCursor:: +seek(double t) { + PN_int64 target_ts = (PN_int64)(t / _audio_timebase); + if (target_ts < (PN_int64)(_initial_dts)) { + // Attempts to seek before the first packet will fail. + target_ts = _initial_dts; + } + if (av_seek_frame(_format_ctx, _audio_index, target_ts, AVSEEK_FLAG_BACKWARD) < 0) { + movies_cat.error() << "Seek failure. Shutting down movie.\n"; + cleanup(); + return; + } + avcodec_close(_audio_ctx); + AVCodec *pAudioCodec=avcodec_find_decoder(_audio_ctx->codec_id); + if(pAudioCodec == 0) { + cleanup(); + return; + } + if(avcodec_open(_audio_ctx, pAudioCodec)<0) { + cleanup(); + return; + } + _buffer_head = 0; + _buffer_tail = 0; + fetch_packet(); + double ts = _packet->dts * _audio_timebase; + if (t > ts) { + int skip = (int)((t-ts) * _audio_rate); + read_samples(skip, 0); + } + _last_seek = t; + _samples_read = 0; +} + +//////////////////////////////////////////////////////////////////// +// Function: FfmpegAudioCursor::read_samples +// Access: Public, Virtual +// Description: Read audio samples from the stream. N is the +// number of samples you wish to read. Your buffer +// must be equal in size to N * channels. +// Multiple-channel audio will be interleaved. +//////////////////////////////////////////////////////////////////// +void FfmpegAudioCursor:: +read_samples(int n, PN_int16 *data) { + + //movies_debug("here!!! FfmpegAudioCursor n="< 0) { + + if (_buffer_head == _buffer_tail) { + reload_buffer(); + give_up_after --; + movies_debug("reload_buffer will give up in "< available) ? 
available : desired; + if (ncopy) { + if (data != 0) { + memcpy(data, _buffer + _buffer_head, ncopy * 2); + data += ncopy; + } + desired -= ncopy; + _buffer_head += ncopy; + } + + } + _samples_read += n; +} + +//////////////////////////////////////////////////////////////////// + +#endif // HAVE_FFMPEG diff --git a/panda/src/movies/ffmpegVideoCursor.cxx b/panda/src/movies/ffmpegVideoCursor.cxx index 6ace1e6802..ead9eab71d 100644 --- a/panda/src/movies/ffmpegVideoCursor.cxx +++ b/panda/src/movies/ffmpegVideoCursor.cxx @@ -1,384 +1,384 @@ -// Filename: ffmpegVideoCursor.cxx -// Created by: jyelon (01Aug2007) -// -//////////////////////////////////////////////////////////////////// -// -// PANDA 3D SOFTWARE -// Copyright (c) Carnegie Mellon University. All rights reserved. -// -// All use of this software is subject to the terms of the revised BSD -// license. You should have received a copy of this license along -// with this source code in a file named "LICENSE." -// -//////////////////////////////////////////////////////////////////// - -#ifdef HAVE_FFMPEG - -#include "ffmpegVideoCursor.h" -#include "config_movies.h" -extern "C" { - #include "libavcodec/avcodec.h" - #include "libavformat/avformat.h" -#ifdef HAVE_SWSCALE - #include "libswscale/swscale.h" -#endif -} -#include "pStatCollector.h" -#include "pStatTimer.h" - -TypeHandle FfmpegVideoCursor::_type_handle; - -//////////////////////////////////////////////////////////////////// -// Function: FfmpegVideoCursor::Constructor -// Access: Public -// Description: xxx -//////////////////////////////////////////////////////////////////// -FfmpegVideoCursor:: -FfmpegVideoCursor(FfmpegVideo *src) : - MovieVideoCursor(src), - _filename(src->_filename), - _format_ctx(0), - _video_index(-1), - _video_ctx(0), - _frame(0), - _frame_out(0), - _packet(0), - _min_fseek(3.0) -{ - string url = "pandavfs:"; - url += _filename; - if (av_open_input_file(&_format_ctx, url.c_str(), NULL, 0, NULL)!=0) { - cleanup(); - return; - } - - if (av_find_stream_info(_format_ctx)<0) { - cleanup(); - return; - } - - // Find the video stream - for(int i=0; i<_format_ctx->nb_streams; i++) { - if(_format_ctx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) { - _video_index = i; - _video_ctx = _format_ctx->streams[i]->codec; - _video_timebase = av_q2d(_format_ctx->streams[i]->time_base); - } - } - - if (_video_ctx == 0) { - cleanup(); - return; - } - - AVCodec *pVideoCodec=avcodec_find_decoder(_video_ctx->codec_id); - if(pVideoCodec == 0) { - cleanup(); - return; - } - if(avcodec_open(_video_ctx, pVideoCodec)<0) { - cleanup(); - return; - } - - _size_x = _video_ctx->width; - _size_y = _video_ctx->height; - _num_components = 3; // Don't know how to implement RGBA movies yet. 
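In both the removed and the re-added copy of ffmpegAudioCursor.cxx above, the body of read_samples() has been damaged in extraction: everything between a '<' and the following '>' (the stream-insertion operators of the movies_debug calls plus the loop headers) was dropped, which is why fragments such as "n="< 0) {" no longer parse. A plausible reconstruction of the copy loop is sketched below; the initial value of the give-up counter and the exact debug strings are assumptions, not recovered text.

// Hedged reconstruction of FfmpegAudioCursor::read_samples (see assumptions above).
void FfmpegAudioCursor::
read_samples(int n, PN_int16 *data) {
  //movies_debug("here!!! FfmpegAudioCursor n=" << n);
  int desired = n * _audio_channels;   // samples requested, all channels interleaved
  int give_up_after = 32;              // assumed bound on consecutive reload attempts
  while (desired > 0) {
    if (_buffer_head == _buffer_tail) {
      reload_buffer();
      give_up_after--;
      movies_debug("reload_buffer will give up in " << give_up_after);
      if (give_up_after <= 0) {
        break;
      }
    }
    int available = _buffer_tail - _buffer_head;
    int ncopy = (desired > available) ? available : desired;
    if (ncopy) {
      if (data != 0) {
        memcpy(data, _buffer + _buffer_head, ncopy * 2);   // 2 bytes per sample
        data += ncopy;
      }
      desired -= ncopy;
      _buffer_head += ncopy;
    }
  }
  _samples_read += n;
}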
- _length = (_format_ctx->duration * 1.0) / AV_TIME_BASE; - _can_seek = true; - _can_seek_fast = true; - - _packet = new AVPacket; - _frame = avcodec_alloc_frame(); - _frame_out = avcodec_alloc_frame(); - if ((_packet == 0)||(_frame == 0)||(_frame_out == 0)) { - cleanup(); - return; - } - memset(_packet, 0, sizeof(AVPacket)); - - fetch_packet(0.0); - _initial_dts = _packet->dts; - _packet_time = 0.0; - _last_start = -1.0; - _next_start = 0.0; -} - -//////////////////////////////////////////////////////////////////// -// Function: FfmpegVideoCursor::Destructor -// Access: Public -// Description: xxx -//////////////////////////////////////////////////////////////////// -FfmpegVideoCursor:: -~FfmpegVideoCursor() { - cleanup(); -} - -//////////////////////////////////////////////////////////////////// -// Function: FfmpegVideoCursor::cleanup -// Access: Public -// Description: Reset to a standard inactive state. -//////////////////////////////////////////////////////////////////// -void FfmpegVideoCursor:: -cleanup() { - if (_frame) { - av_free(_frame); - _frame = 0; - } - if (_frame_out) { - _frame_out->data[0] = 0; - av_free(_frame_out); - _frame_out = 0; - } - if (_packet) { - if (_packet->data) { - av_free_packet(_packet); - } - delete _packet; - _packet = 0; - } - if ((_video_ctx)&&(_video_ctx->codec)) { - avcodec_close(_video_ctx); - } - _video_ctx = 0; - if (_format_ctx) { - av_close_input_file(_format_ctx); - _format_ctx = 0; - } - _video_ctx = 0; - _video_index = -1; - -} - -//////////////////////////////////////////////////////////////////// -// Function: FfmpegVideoCursor::export_frame -// Access: Public, Virtual -// Description: Exports the contents of the frame buffer into the -// user's target buffer. -//////////////////////////////////////////////////////////////////// -static PStatCollector export_frame_collector("*:FFMPEG Convert Video to BGR"); -void FfmpegVideoCursor:: -export_frame(unsigned char *data, bool bgra, int bufx) { - PStatTimer timer(export_frame_collector); - if (bgra) { - _frame_out->data[0] = data + ((_size_y - 1) * bufx * 4); - _frame_out->linesize[0] = bufx * -4; -#ifdef HAVE_SWSCALE - struct SwsContext *convert_ctx = sws_getContext(_size_x, _size_y, - _video_ctx->pix_fmt, _size_x, _size_y, - PIX_FMT_BGRA, 2, NULL, NULL, NULL); - nassertv(convert_ctx != NULL); - sws_scale(convert_ctx, _frame->data, _frame->linesize, - 0, _size_y, _frame_out->data, _frame_out->linesize); - sws_freeContext(convert_ctx); -#else - img_convert((AVPicture *)_frame_out, PIX_FMT_BGRA, - (AVPicture *)_frame, _video_ctx->pix_fmt, _size_x, _size_y); -#endif - } else { - _frame_out->data[0] = data + ((_size_y - 1) * bufx * 3); - _frame_out->linesize[0] = bufx * -3; -#ifdef HAVE_SWSCALE - struct SwsContext *convert_ctx = sws_getContext(_size_x, _size_y, - _video_ctx->pix_fmt, _size_x, _size_y, - PIX_FMT_BGR24, 2, NULL, NULL, NULL); - nassertv(convert_ctx != NULL); - sws_scale(convert_ctx, _frame->data, _frame->linesize, - 0, _size_y, _frame_out->data, _frame_out->linesize); - sws_freeContext(convert_ctx); -#else - img_convert((AVPicture *)_frame_out, PIX_FMT_BGR24, - (AVPicture *)_frame, _video_ctx->pix_fmt, _size_x, _size_y); -#endif - } -} - -//////////////////////////////////////////////////////////////////// -// Function: FfmpegVideoCursor::fetch_packet -// Access: Protected -// Description: Fetches a video packet and stores it in the -// packet buffer. Sets packet_time to the packet's -// timestamp. 
If a packet could not be read, the -// packet is cleared and the packet_time is set to -// the specified default value. -//////////////////////////////////////////////////////////////////// -void FfmpegVideoCursor:: -fetch_packet(double default_time) { - if (_packet->data) { - av_free_packet(_packet); - } - while (av_read_frame(_format_ctx, _packet) >= 0) { - if (_packet->stream_index == _video_index) { - _packet_time = _packet->dts * _video_timebase; - return; - } - av_free_packet(_packet); - } - _packet->data = 0; - _packet_time = default_time; -} - -//////////////////////////////////////////////////////////////////// -// Function: FfmpegVideoCursor::fetch_frame -// Access: Protected -// Description: Fetches a frame from the stream and stores it in -// the frame buffer. Sets last_start and next_start -// to indicate the extents of the frame. -//////////////////////////////////////////////////////////////////// -void FfmpegVideoCursor:: -fetch_frame() { - int finished = 0; - _last_start = _packet_time; - while (!finished && _packet->data) { -#if LIBAVCODEC_VERSION_INT < 3414272 - avcodec_decode_video(_video_ctx, _frame, - &finished, _packet->data, _packet->size); -#else - avcodec_decode_video2(_video_ctx, _frame, &finished, _packet); -#endif - fetch_packet(_last_start + 1.0); - } - _next_start = _packet_time; -} - -//////////////////////////////////////////////////////////////////// -// Function: FfmpegVideoCursor::seek -// Access: Protected -// Description: Seeks to a target location. Afterward, the -// packet_time is guaranteed to be less than or -// equal to the specified time. -//////////////////////////////////////////////////////////////////// -void FfmpegVideoCursor:: -seek(double t) { - PN_int64 target_ts = (PN_int64)(t / _video_timebase); - if (target_ts < (PN_int64)(_initial_dts)) { - // Attempts to seek before the first packet will fail. - target_ts = _initial_dts; - } - if (av_seek_frame(_format_ctx, _video_index, target_ts, AVSEEK_FLAG_BACKWARD) < 0) { - if (t >= _packet_time) { - return; - } - movies_cat.error() << "Seek failure. Shutting down movie.\n"; - cleanup(); - _packet_time = t; - return; - } - avcodec_close(_video_ctx); - AVCodec *pVideoCodec=avcodec_find_decoder(_video_ctx->codec_id); - if(pVideoCodec == 0) { - cleanup(); - return; - } - if(avcodec_open(_video_ctx, pVideoCodec)<0) { - cleanup(); - return; - } - fetch_packet(t); - if (_packet_time > t) { - _packet_time = t; - } -} - -//////////////////////////////////////////////////////////////////// -// Function: FfmpegVideoCursor::fetch_time -// Access: Public, Virtual -// Description: Advance until the specified time is in the -// export buffer. -//////////////////////////////////////////////////////////////////// -void FfmpegVideoCursor:: -fetch_time(double time) { - if (time < _last_start) { - // Time is in the past. - seek(time); - while (_packet_time <= time) { - fetch_frame(); - } - } else if (time < _next_start) { - // Time is in the present: already have the frame. - } else if (time < _next_start + _min_fseek) { - // Time is in the near future. - while ((_packet_time <= time) && (_packet->data)) { - fetch_frame(); - } - } else { - // Time is in the far future. Seek forward, then read. - // There's a danger here: because keyframes are spaced - // unpredictably, trying to seek forward could actually - // move us backward in the stream! This must be avoided. - // So the rule is, try the seek. 
If it hurts us by moving - // us backward, we increase the minimum threshold distance - // for forward-seeking in the future. - - double base = _packet_time; - seek(time); - if (_packet_time < base) { - _min_fseek += (base - _packet_time); - } - while (_packet_time <= time) { - fetch_frame(); - } - } -} - -//////////////////////////////////////////////////////////////////// -// Function: FfmpegVideoCursor::fetch_into_texture -// Access: Public, Virtual -// Description: See MovieVideoCursor::fetch_into_texture. -//////////////////////////////////////////////////////////////////// -static PStatCollector fetch_into_texture_pcollector("*:FFMPEG Video Decoding"); -void FfmpegVideoCursor:: -fetch_into_texture(double time, Texture *t, int page) { - PStatTimer timer(fetch_into_texture_pcollector); - - nassertv(t->get_x_size() >= size_x()); - nassertv(t->get_y_size() >= size_y()); - nassertv((t->get_num_components() == 3) || (t->get_num_components() == 4)); - nassertv(t->get_component_width() == 1); - nassertv(page < t->get_z_size()); - - PTA_uchar img = t->modify_ram_image(); - - unsigned char *data = img.p() + page * t->get_expected_ram_page_size(); - - // If there was an error at any point, synthesize black. - if (_format_ctx==0) { - if (data) { - memset(data,0,t->get_x_size() * t->get_y_size() * t->get_num_components()); - } - _last_start = time; - _next_start = time + 1.0; - return; - } - - fetch_time(time); - export_frame(data, (t->get_num_components()==4), t->get_x_size()); -} - -//////////////////////////////////////////////////////////////////// -// Function: FfmpegVideoCursor::fetch_into_buffer -// Access: Public, Virtual -// Description: See MovieVideoCursor::fetch_into_buffer. -//////////////////////////////////////////////////////////////////// -static PStatCollector fetch_into_buffer_pcollector("*:FFMPEG Video Decoding"); -void FfmpegVideoCursor:: -fetch_into_buffer(double time, unsigned char *data, bool bgra) { - PStatTimer timer(fetch_into_buffer_pcollector); - - // If there was an error at any point, synthesize black. - if (_format_ctx==0) { - if (data) { - memset(data,0,size_x()*size_y()*(bgra?4:3)); - } - _last_start = time; - _next_start = time + 1.0; - return; - } - - fetch_time(time); - export_frame(data, bgra, _size_x); -} - -//////////////////////////////////////////////////////////////////// - -#endif // HAVE_FFMPEG +// Filename: ffmpegVideoCursor.cxx +// Created by: jyelon (01Aug2007) +// +//////////////////////////////////////////////////////////////////// +// +// PANDA 3D SOFTWARE +// Copyright (c) Carnegie Mellon University. All rights reserved. +// +// All use of this software is subject to the terms of the revised BSD +// license. You should have received a copy of this license along +// with this source code in a file named "LICENSE." 
+// +//////////////////////////////////////////////////////////////////// + +#ifdef HAVE_FFMPEG + +#include "ffmpegVideoCursor.h" +#include "config_movies.h" +extern "C" { + #include "libavcodec/avcodec.h" + #include "libavformat/avformat.h" +#ifdef HAVE_SWSCALE + #include "libswscale/swscale.h" +#endif +} +#include "pStatCollector.h" +#include "pStatTimer.h" + +TypeHandle FfmpegVideoCursor::_type_handle; + +//////////////////////////////////////////////////////////////////// +// Function: FfmpegVideoCursor::Constructor +// Access: Public +// Description: xxx +//////////////////////////////////////////////////////////////////// +FfmpegVideoCursor:: +FfmpegVideoCursor(FfmpegVideo *src) : + MovieVideoCursor(src), + _filename(src->_filename), + _format_ctx(0), + _video_index(-1), + _video_ctx(0), + _frame(0), + _frame_out(0), + _packet(0), + _min_fseek(3.0) +{ + string url = "pandavfs:"; + url += _filename; + if (av_open_input_file(&_format_ctx, url.c_str(), NULL, 0, NULL)!=0) { + cleanup(); + return; + } + + if (av_find_stream_info(_format_ctx)<0) { + cleanup(); + return; + } + + // Find the video stream + for(int i=0; i<_format_ctx->nb_streams; i++) { + if(_format_ctx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) { + _video_index = i; + _video_ctx = _format_ctx->streams[i]->codec; + _video_timebase = av_q2d(_format_ctx->streams[i]->time_base); + } + } + + if (_video_ctx == 0) { + cleanup(); + return; + } + + AVCodec *pVideoCodec=avcodec_find_decoder(_video_ctx->codec_id); + if(pVideoCodec == 0) { + cleanup(); + return; + } + if(avcodec_open(_video_ctx, pVideoCodec)<0) { + cleanup(); + return; + } + + _size_x = _video_ctx->width; + _size_y = _video_ctx->height; + _num_components = 3; // Don't know how to implement RGBA movies yet. + _length = (_format_ctx->duration * 1.0) / AV_TIME_BASE; + _can_seek = true; + _can_seek_fast = true; + + _packet = new AVPacket; + _frame = avcodec_alloc_frame(); + _frame_out = avcodec_alloc_frame(); + if ((_packet == 0)||(_frame == 0)||(_frame_out == 0)) { + cleanup(); + return; + } + memset(_packet, 0, sizeof(AVPacket)); + + fetch_packet(0.0); + _initial_dts = _packet->dts; + _packet_time = 0.0; + _last_start = -1.0; + _next_start = 0.0; +} + +//////////////////////////////////////////////////////////////////// +// Function: FfmpegVideoCursor::Destructor +// Access: Public +// Description: xxx +//////////////////////////////////////////////////////////////////// +FfmpegVideoCursor:: +~FfmpegVideoCursor() { + cleanup(); +} + +//////////////////////////////////////////////////////////////////// +// Function: FfmpegVideoCursor::cleanup +// Access: Public +// Description: Reset to a standard inactive state. +//////////////////////////////////////////////////////////////////// +void FfmpegVideoCursor:: +cleanup() { + if (_frame) { + av_free(_frame); + _frame = 0; + } + if (_frame_out) { + _frame_out->data[0] = 0; + av_free(_frame_out); + _frame_out = 0; + } + if (_packet) { + if (_packet->data) { + av_free_packet(_packet); + } + delete _packet; + _packet = 0; + } + if ((_video_ctx)&&(_video_ctx->codec)) { + avcodec_close(_video_ctx); + } + _video_ctx = 0; + if (_format_ctx) { + av_close_input_file(_format_ctx); + _format_ctx = 0; + } + _video_ctx = 0; + _video_index = -1; + +} + +//////////////////////////////////////////////////////////////////// +// Function: FfmpegVideoCursor::export_frame +// Access: Public, Virtual +// Description: Exports the contents of the frame buffer into the +// user's target buffer. 
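export_frame(), whose body follows, converts the decoded frame directly into the caller's buffer and flips it vertically on the way (the target expects rows bottom-to-top while ffmpeg decodes top-down), by aiming data[0] at the last row and giving it a negative stride. A minimal sketch of that trick with libswscale, using made-up names for the source frame and destination:

// Illustrative sketch only: 'ctx_w', 'ctx_h', 'src_pix_fmt', 'frame' and 'out'
// are assumptions, not names from this change.
uint8_t *dst_data[4]   = { out + (ctx_h - 1) * ctx_w * 3, 0, 0, 0 };
int      dst_stride[4] = { -ctx_w * 3, 0, 0, 0 };
struct SwsContext *scale =
  sws_getContext(ctx_w, ctx_h, src_pix_fmt,
                 ctx_w, ctx_h, PIX_FMT_BGR24,
                 SWS_BILINEAR, NULL, NULL, NULL);
sws_scale(scale, frame->data, frame->linesize, 0, ctx_h,
          dst_data, dst_stride);                 // rows land bottom-up in 'out'
sws_freeContext(scale);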
+//////////////////////////////////////////////////////////////////// +static PStatCollector export_frame_collector("*:FFMPEG Convert Video to BGR"); +void FfmpegVideoCursor:: +export_frame(unsigned char *data, bool bgra, int bufx) { + PStatTimer timer(export_frame_collector); + if (bgra) { + _frame_out->data[0] = data + ((_size_y - 1) * bufx * 4); + _frame_out->linesize[0] = bufx * -4; +#ifdef HAVE_SWSCALE + struct SwsContext *convert_ctx = sws_getContext(_size_x, _size_y, + _video_ctx->pix_fmt, _size_x, _size_y, + PIX_FMT_BGRA, 2, NULL, NULL, NULL); + nassertv(convert_ctx != NULL); + sws_scale(convert_ctx, _frame->data, _frame->linesize, + 0, _size_y, _frame_out->data, _frame_out->linesize); + sws_freeContext(convert_ctx); +#else + img_convert((AVPicture *)_frame_out, PIX_FMT_BGRA, + (AVPicture *)_frame, _video_ctx->pix_fmt, _size_x, _size_y); +#endif + } else { + _frame_out->data[0] = data + ((_size_y - 1) * bufx * 3); + _frame_out->linesize[0] = bufx * -3; +#ifdef HAVE_SWSCALE + struct SwsContext *convert_ctx = sws_getContext(_size_x, _size_y, + _video_ctx->pix_fmt, _size_x, _size_y, + PIX_FMT_BGR24, 2, NULL, NULL, NULL); + nassertv(convert_ctx != NULL); + sws_scale(convert_ctx, _frame->data, _frame->linesize, + 0, _size_y, _frame_out->data, _frame_out->linesize); + sws_freeContext(convert_ctx); +#else + img_convert((AVPicture *)_frame_out, PIX_FMT_BGR24, + (AVPicture *)_frame, _video_ctx->pix_fmt, _size_x, _size_y); +#endif + } +} + +//////////////////////////////////////////////////////////////////// +// Function: FfmpegVideoCursor::fetch_packet +// Access: Protected +// Description: Fetches a video packet and stores it in the +// packet buffer. Sets packet_time to the packet's +// timestamp. If a packet could not be read, the +// packet is cleared and the packet_time is set to +// the specified default value. +//////////////////////////////////////////////////////////////////// +void FfmpegVideoCursor:: +fetch_packet(double default_time) { + if (_packet->data) { + av_free_packet(_packet); + } + while (av_read_frame(_format_ctx, _packet) >= 0) { + if (_packet->stream_index == _video_index) { + _packet_time = _packet->dts * _video_timebase; + return; + } + av_free_packet(_packet); + } + _packet->data = 0; + _packet_time = default_time; +} + +//////////////////////////////////////////////////////////////////// +// Function: FfmpegVideoCursor::fetch_frame +// Access: Protected +// Description: Fetches a frame from the stream and stores it in +// the frame buffer. Sets last_start and next_start +// to indicate the extents of the frame. +//////////////////////////////////////////////////////////////////// +void FfmpegVideoCursor:: +fetch_frame() { + int finished = 0; + _last_start = _packet_time; + while (!finished && _packet->data) { +#if LIBAVCODEC_VERSION_INT < 3414272 + avcodec_decode_video(_video_ctx, _frame, + &finished, _packet->data, _packet->size); +#else + avcodec_decode_video2(_video_ctx, _frame, &finished, _packet); +#endif + fetch_packet(_last_start + 1.0); + } + _next_start = _packet_time; +} + +//////////////////////////////////////////////////////////////////// +// Function: FfmpegVideoCursor::seek +// Access: Protected +// Description: Seeks to a target location. Afterward, the +// packet_time is guaranteed to be less than or +// equal to the specified time. 
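The "less than or equal" guarantee below rests on two details: the target time is converted into stream units with the cached time base, and AVSEEK_FLAG_BACKWARD asks ffmpeg for the nearest seek point at or before that timestamp, never after it. The conversion itself is just a division, for example:

// With a 1/90000 time base, 2.5 seconds maps to stream timestamp 225000.
AVRational tb = {1, 90000};                         // illustrative value
PN_int64 target_ts = (PN_int64)(2.5 / av_q2d(tb));  // 2.5 / (1.0/90000) == 225000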
+//////////////////////////////////////////////////////////////////// +void FfmpegVideoCursor:: +seek(double t) { + PN_int64 target_ts = (PN_int64)(t / _video_timebase); + if (target_ts < (PN_int64)(_initial_dts)) { + // Attempts to seek before the first packet will fail. + target_ts = _initial_dts; + } + if (av_seek_frame(_format_ctx, _video_index, target_ts, AVSEEK_FLAG_BACKWARD) < 0) { + if (t >= _packet_time) { + return; + } + movies_cat.error() << "Seek failure. Shutting down movie.\n"; + cleanup(); + _packet_time = t; + return; + } + avcodec_close(_video_ctx); + AVCodec *pVideoCodec=avcodec_find_decoder(_video_ctx->codec_id); + if(pVideoCodec == 0) { + cleanup(); + return; + } + if(avcodec_open(_video_ctx, pVideoCodec)<0) { + cleanup(); + return; + } + fetch_packet(t); + if (_packet_time > t) { + _packet_time = t; + } +} + +//////////////////////////////////////////////////////////////////// +// Function: FfmpegVideoCursor::fetch_time +// Access: Public, Virtual +// Description: Advance until the specified time is in the +// export buffer. +//////////////////////////////////////////////////////////////////// +void FfmpegVideoCursor:: +fetch_time(double time) { + if (time < _last_start) { + // Time is in the past. + seek(time); + while (_packet_time <= time) { + fetch_frame(); + } + } else if (time < _next_start) { + // Time is in the present: already have the frame. + } else if (time < _next_start + _min_fseek) { + // Time is in the near future. + while ((_packet_time <= time) && (_packet->data)) { + fetch_frame(); + } + } else { + // Time is in the far future. Seek forward, then read. + // There's a danger here: because keyframes are spaced + // unpredictably, trying to seek forward could actually + // move us backward in the stream! This must be avoided. + // So the rule is, try the seek. If it hurts us by moving + // us backward, we increase the minimum threshold distance + // for forward-seeking in the future. + + double base = _packet_time; + seek(time); + if (_packet_time < base) { + _min_fseek += (base - _packet_time); + } + while (_packet_time <= time) { + fetch_frame(); + } + } +} + +//////////////////////////////////////////////////////////////////// +// Function: FfmpegVideoCursor::fetch_into_texture +// Access: Public, Virtual +// Description: See MovieVideoCursor::fetch_into_texture. +//////////////////////////////////////////////////////////////////// +static PStatCollector fetch_into_texture_pcollector("*:FFMPEG Video Decoding"); +void FfmpegVideoCursor:: +fetch_into_texture(double time, Texture *t, int page) { + PStatTimer timer(fetch_into_texture_pcollector); + + nassertv(t->get_x_size() >= size_x()); + nassertv(t->get_y_size() >= size_y()); + nassertv((t->get_num_components() == 3) || (t->get_num_components() == 4)); + nassertv(t->get_component_width() == 1); + nassertv(page < t->get_z_size()); + + PTA_uchar img = t->modify_ram_image(); + + unsigned char *data = img.p() + page * t->get_expected_ram_page_size(); + + // If there was an error at any point, synthesize black. + if (_format_ctx==0) { + if (data) { + memset(data,0,t->get_x_size() * t->get_y_size() * t->get_num_components()); + } + _last_start = time; + _next_start = time + 1.0; + return; + } + + fetch_time(time); + export_frame(data, (t->get_num_components()==4), t->get_x_size()); +} + +//////////////////////////////////////////////////////////////////// +// Function: FfmpegVideoCursor::fetch_into_buffer +// Access: Public, Virtual +// Description: See MovieVideoCursor::fetch_into_buffer. 
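fetch_into_buffer(), below, is the raw counterpart to fetch_into_texture(): it advances the stream to the requested time and converts the current frame straight into a caller-owned buffer. A hypothetical caller might look like this (the cursor and time variables are assumptions, not code from this change):

// Hypothetical caller; 'cursor' (a FfmpegVideoCursor *) and 't' are assumed.
int w = cursor->size_x();
int h = cursor->size_y();
unsigned char *bgr = new unsigned char[w * h * 3];
cursor->fetch_into_buffer(t, bgr, false);   // false = tightly packed BGR, no alpha
// ... consume the pixels ...
delete[] bgr;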
+//////////////////////////////////////////////////////////////////// +static PStatCollector fetch_into_buffer_pcollector("*:FFMPEG Video Decoding"); +void FfmpegVideoCursor:: +fetch_into_buffer(double time, unsigned char *data, bool bgra) { + PStatTimer timer(fetch_into_buffer_pcollector); + + // If there was an error at any point, synthesize black. + if (_format_ctx==0) { + if (data) { + memset(data,0,size_x()*size_y()*(bgra?4:3)); + } + _last_start = time; + _next_start = time + 1.0; + return; + } + + fetch_time(time); + export_frame(data, bgra, _size_x); +} + +//////////////////////////////////////////////////////////////////// + +#endif // HAVE_FFMPEG diff --git a/panda/src/movies/ffmpegVirtualFile.cxx b/panda/src/movies/ffmpegVirtualFile.cxx index c5a7498584..4eba926f22 100644 --- a/panda/src/movies/ffmpegVirtualFile.cxx +++ b/panda/src/movies/ffmpegVirtualFile.cxx @@ -1,176 +1,176 @@ -// Filename: ffmpegVirtualFile.cxx -// Created by: jyelon (02Jul07) -// -//////////////////////////////////////////////////////////////////// -// -// PANDA 3D SOFTWARE -// Copyright (c) Carnegie Mellon University. All rights reserved. -// -// All use of this software is subject to the terms of the revised BSD -// license. You should have received a copy of this license along -// with this source code in a file named "LICENSE." -// -//////////////////////////////////////////////////////////////////// - -#ifdef HAVE_FFMPEG - -#include "pandabase.h" -#include "config_movies.h" -#include "ffmpegVirtualFile.h" -#include "virtualFileSystem.h" -extern "C" { - #include "libavformat/avio.h" -} - -#ifndef AVSEEK_SIZE - #define AVSEEK_SIZE 0x10000 -#endif - -//////////////////////////////////////////////////////////////////// -// These functions need to use C calling conventions. -//////////////////////////////////////////////////////////////////// -extern "C" { - static int pandavfs_open(URLContext *h, const char *filename, int flags); - static int pandavfs_read(URLContext *h, unsigned char *buf, int size); - static int pandavfs_write(URLContext *h, unsigned char *buf, int size); - static PN_int64 pandavfs_seek(URLContext *h, PN_int64 pos, int whence); - static int pandavfs_close(URLContext *h); -} - -//////////////////////////////////////////////////////////////////// -// Function: pandavfs_open -// Access: Static Function -// Description: A hook to open a panda VFS file. -//////////////////////////////////////////////////////////////////// -static int -pandavfs_open(URLContext *h, const char *filename, int flags) { - if (flags != 0) { - movies_cat.error() << "ffmpeg is trying to write to the VFS.\n"; - return -1; - } - filename += 9; // Skip over "pandavfs:" - VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr(); - istream *s = vfs->open_read_file(filename, true); - if (s == 0) { - return -1; - } - // Test whether seek works. - s->seekg(1, ios::beg); - int tel1 = s->tellg(); - s->seekg(0, ios::beg); - int tel2 = s->tellg(); - if (s->fail() || (tel1!=1) || (tel2!=0)) { - movies_cat.error() << "cannot play movie (not seekable): " << h->filename << "\n"; - delete s; - return -1; - } - h->priv_data = s; - return 0; -} - -//////////////////////////////////////////////////////////////////// -// Function: pandavfs_read -// Access: Static Function -// Description: A hook to read a panda VFS file. 
-//////////////////////////////////////////////////////////////////// -static int -pandavfs_read(URLContext *h, unsigned char *buf, int size) { - istream *s = (istream*)(h->priv_data); - s->read((char*)buf, size); - int gc = s->gcount(); - s->clear(); - return gc; -} - -//////////////////////////////////////////////////////////////////// -// Function: pandavfs_write -// Access: Static Function -// Description: A hook to write a panda VFS file. -//////////////////////////////////////////////////////////////////// -static int -pandavfs_write(URLContext *h, unsigned char *buf, int size) { - movies_cat.error() << "ffmpeg is trying to write to the VFS.\n"; - return -1; -} - -//////////////////////////////////////////////////////////////////// -// Function: pandavfs_seek -// Access: Static Function -// Description: A hook to seek a panda VFS file. -//////////////////////////////////////////////////////////////////// -static PN_int64 -pandavfs_seek(URLContext *h, PN_int64 pos, int whence) { - istream *s = (istream*)(h->priv_data); - switch(whence) { - case SEEK_SET: s->seekg(pos, ios::beg); break; - case SEEK_CUR: s->seekg(pos, ios::cur); break; - case SEEK_END: s->seekg(pos, ios::end); break; - case AVSEEK_SIZE: { - s->seekg(0, ios::cur); - int p = s->tellg(); - s->seekg(-1, ios::end); - int size = s->tellg(); - if (size < 0) { - movies_cat.error() << "Failed to determine filesize in ffmpegVirtualFile\n"; - s->clear(); - return -1; - } - size++; - s->seekg(p, ios::beg); - s->clear(); - return size; } - default: - movies_cat.error() << "Illegal parameter to seek in ffmpegVirtualFile\n"; - s->clear(); - return -1; - } - s->clear(); - int tl = s->tellg(); - return tl; -} - -//////////////////////////////////////////////////////////////////// -// Function: pandavfs_close -// Access: Static Function -// Description: A hook to close a panda VFS file. -//////////////////////////////////////////////////////////////////// -static int -pandavfs_close(URLContext *h) { - istream *s = (istream*)(h->priv_data); - delete s; - h->priv_data = 0; - return 0; -} - -//////////////////////////////////////////////////////////////////// -// Function: FfmpegVirtualFile::register_protocol -// Access: Public, Static -// Description: Enables ffmpeg to access panda's VFS. -// -// After calling this method, ffmpeg will be -// able to open "URLs" that look like this: -// -// pandavfs:/c/mygame/foo.avi -// -//////////////////////////////////////////////////////////////////// -void FfmpegVirtualFile:: -register_protocol() { - static bool initialized = false; - if (initialized) { - return; - } - static URLProtocol protocol; - protocol.name = "pandavfs"; - protocol.url_open = pandavfs_open; - protocol.url_read = pandavfs_read; - protocol.url_write = pandavfs_write; - protocol.url_seek = pandavfs_seek; - protocol.url_close = pandavfs_close; -#if LIBAVFORMAT_VERSION_INT < 3415296 - ::register_protocol(&protocol); -#else - av_register_protocol(&protocol); -#endif -} - -#endif // HAVE_FFMPEG +// Filename: ffmpegVirtualFile.cxx +// Created by: jyelon (02Jul07) +// +//////////////////////////////////////////////////////////////////// +// +// PANDA 3D SOFTWARE +// Copyright (c) Carnegie Mellon University. All rights reserved. +// +// All use of this software is subject to the terms of the revised BSD +// license. You should have received a copy of this license along +// with this source code in a file named "LICENSE." 
+// +//////////////////////////////////////////////////////////////////// + +#ifdef HAVE_FFMPEG + +#include "pandabase.h" +#include "config_movies.h" +#include "ffmpegVirtualFile.h" +#include "virtualFileSystem.h" +extern "C" { + #include "libavformat/avio.h" +} + +#ifndef AVSEEK_SIZE + #define AVSEEK_SIZE 0x10000 +#endif + +//////////////////////////////////////////////////////////////////// +// These functions need to use C calling conventions. +//////////////////////////////////////////////////////////////////// +extern "C" { + static int pandavfs_open(URLContext *h, const char *filename, int flags); + static int pandavfs_read(URLContext *h, unsigned char *buf, int size); + static int pandavfs_write(URLContext *h, unsigned char *buf, int size); + static PN_int64 pandavfs_seek(URLContext *h, PN_int64 pos, int whence); + static int pandavfs_close(URLContext *h); +} + +//////////////////////////////////////////////////////////////////// +// Function: pandavfs_open +// Access: Static Function +// Description: A hook to open a panda VFS file. +//////////////////////////////////////////////////////////////////// +static int +pandavfs_open(URLContext *h, const char *filename, int flags) { + if (flags != 0) { + movies_cat.error() << "ffmpeg is trying to write to the VFS.\n"; + return -1; + } + filename += 9; // Skip over "pandavfs:" + VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr(); + istream *s = vfs->open_read_file(filename, true); + if (s == 0) { + return -1; + } + // Test whether seek works. + s->seekg(1, ios::beg); + int tel1 = s->tellg(); + s->seekg(0, ios::beg); + int tel2 = s->tellg(); + if (s->fail() || (tel1!=1) || (tel2!=0)) { + movies_cat.error() << "cannot play movie (not seekable): " << h->filename << "\n"; + delete s; + return -1; + } + h->priv_data = s; + return 0; +} + +//////////////////////////////////////////////////////////////////// +// Function: pandavfs_read +// Access: Static Function +// Description: A hook to read a panda VFS file. +//////////////////////////////////////////////////////////////////// +static int +pandavfs_read(URLContext *h, unsigned char *buf, int size) { + istream *s = (istream*)(h->priv_data); + s->read((char*)buf, size); + int gc = s->gcount(); + s->clear(); + return gc; +} + +//////////////////////////////////////////////////////////////////// +// Function: pandavfs_write +// Access: Static Function +// Description: A hook to write a panda VFS file. +//////////////////////////////////////////////////////////////////// +static int +pandavfs_write(URLContext *h, unsigned char *buf, int size) { + movies_cat.error() << "ffmpeg is trying to write to the VFS.\n"; + return -1; +} + +//////////////////////////////////////////////////////////////////// +// Function: pandavfs_seek +// Access: Static Function +// Description: A hook to seek a panda VFS file. 
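Besides the ordinary SEEK_SET/SEEK_CUR/SEEK_END values, ffmpeg also invokes this hook with whence == AVSEEK_SIZE, which is a size query rather than a real seek; the implementation below answers it by probing the end of the istream. A simplified sketch of that contract (not the code in this change):

// Simplified seek-hook contract against an istream (illustrative only).
static PN_int64 example_seek(istream *s, PN_int64 pos, int whence) {
  if (whence == AVSEEK_SIZE) {               // report total size, don't move
    streampos cur = s->tellg();
    s->seekg(0, ios::end);
    PN_int64 size = (PN_int64)(streamoff)s->tellg();
    s->seekg(cur);
    return size;
  }
  s->seekg(pos, (whence == SEEK_SET) ? ios::beg
              : (whence == SEEK_CUR) ? ios::cur
                                     : ios::end);
  return s->fail() ? -1 : (PN_int64)(streamoff)s->tellg();
}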
+//////////////////////////////////////////////////////////////////// +static PN_int64 +pandavfs_seek(URLContext *h, PN_int64 pos, int whence) { + istream *s = (istream*)(h->priv_data); + switch(whence) { + case SEEK_SET: s->seekg(pos, ios::beg); break; + case SEEK_CUR: s->seekg(pos, ios::cur); break; + case SEEK_END: s->seekg(pos, ios::end); break; + case AVSEEK_SIZE: { + s->seekg(0, ios::cur); + int p = s->tellg(); + s->seekg(-1, ios::end); + int size = s->tellg(); + if (size < 0) { + movies_cat.error() << "Failed to determine filesize in ffmpegVirtualFile\n"; + s->clear(); + return -1; + } + size++; + s->seekg(p, ios::beg); + s->clear(); + return size; } + default: + movies_cat.error() << "Illegal parameter to seek in ffmpegVirtualFile\n"; + s->clear(); + return -1; + } + s->clear(); + int tl = s->tellg(); + return tl; +} + +//////////////////////////////////////////////////////////////////// +// Function: pandavfs_close +// Access: Static Function +// Description: A hook to close a panda VFS file. +//////////////////////////////////////////////////////////////////// +static int +pandavfs_close(URLContext *h) { + istream *s = (istream*)(h->priv_data); + delete s; + h->priv_data = 0; + return 0; +} + +//////////////////////////////////////////////////////////////////// +// Function: FfmpegVirtualFile::register_protocol +// Access: Public, Static +// Description: Enables ffmpeg to access panda's VFS. +// +// After calling this method, ffmpeg will be +// able to open "URLs" that look like this: +// +// pandavfs:/c/mygame/foo.avi +// +//////////////////////////////////////////////////////////////////// +void FfmpegVirtualFile:: +register_protocol() { + static bool initialized = false; + if (initialized) { + return; + } + static URLProtocol protocol; + protocol.name = "pandavfs"; + protocol.url_open = pandavfs_open; + protocol.url_read = pandavfs_read; + protocol.url_write = pandavfs_write; + protocol.url_seek = pandavfs_seek; + protocol.url_close = pandavfs_close; +#if LIBAVFORMAT_VERSION_INT < 3415296 + ::register_protocol(&protocol); +#else + av_register_protocol(&protocol); +#endif +} + +#endif // HAVE_FFMPEG
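Taken together, the pieces in this patch work like this: register_protocol() installs the "pandavfs" URLProtocol once, after which the audio and video cursors can hand ffmpeg a "pandavfs:" URL and have all I/O routed through Panda's VirtualFileSystem. A hedged end-to-end sketch using the same pre-0.6-era ffmpeg calls as the diff (the movie path is made up):

// Illustrative only; error handling trimmed, the path is hypothetical.
FfmpegVirtualFile::register_protocol();          // install the "pandavfs" hooks once

AVFormatContext *format_ctx = NULL;
string url = "pandavfs:models/intro.avi";        // any VFS-visible path
if (av_open_input_file(&format_ctx, url.c_str(), NULL, 0, NULL) == 0 &&
    av_find_stream_info(format_ctx) >= 0) {
  // ... locate the audio/video streams and open their codecs, exactly as the
  // FfmpegAudioCursor / FfmpegVideoCursor constructors above do ...
  av_close_input_file(format_ctx);
}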