Support enumeration of different pixel formats in WebcamVideo; add support for RGB pixel formats in Video4Linux

rdb 2014-09-24 15:33:40 +00:00
parent 0a10aa6b26
commit de95ed855d
6 changed files with 273 additions and 129 deletions

webcamVideo.I

@@ -40,11 +40,22 @@ get_size_y() const {
 //               is a maximum theoretical: the actual performance
 //               will depend on the speed of the hardware.
 ////////////////////////////////////////////////////////////////////
-INLINE int WebcamVideo::
+INLINE double WebcamVideo::
 get_fps() const {
   return _fps;
 }
 
+////////////////////////////////////////////////////////////////////
+//     Function: WebcamVideo::get_pixel_format
+//       Access: Published
+//  Description: Returns the camera's pixel format, as a FourCC code,
+//               if known.
+////////////////////////////////////////////////////////////////////
+INLINE const string &WebcamVideo::
+get_pixel_format() const {
+  return _pixel_format;
+}
+
 ////////////////////////////////////////////////////////////////////
 //     Function: WebcamVideo::output
 //       Access: Public
@@ -53,7 +64,13 @@ get_fps() const {
 ////////////////////////////////////////////////////////////////////
 INLINE void WebcamVideo::
 output(ostream &out) const {
-  out << get_name() << ": " << get_size_x() << "x" << get_size_y() << " @ " << get_fps() << "Hz";
+  out << get_name() << ": " << get_size_x() << "x" << get_size_y();
+
+  if (!_pixel_format.empty()) {
+    out << " " << _pixel_format;
+  }
+
+  out << " @ " << get_fps() << "Hz";
 }
 
 INLINE ostream &operator << (ostream &out, const WebcamVideo &n) {
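For context, the extended output() above is what gets printed for each enumerated webcam option. A minimal usage sketch follows; list_webcam_options is an illustrative name of mine, while get_num_options()/get_option() are assumed to be WebcamVideo's existing enumeration entry points.

#include "webcamVideo.h"
#include <iostream>

// Print every capture mode the system reports.  With this commit each
// option also shows its FourCC pixel format when the backend knows it,
// e.g. "UVC Camera: 640x480 YUYV @ 30Hz".
void list_webcam_options() {
  int num_options = WebcamVideo::get_num_options();
  for (int i = 0; i < num_options; ++i) {
    PT(WebcamVideo) option = WebcamVideo::get_option(i);
    std::cout << *option << "\n";
  }
}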

webcamVideo.h

@@ -33,7 +33,8 @@ PUBLISHED:
   INLINE int get_size_x() const;
   INLINE int get_size_y() const;
-  INLINE int get_fps() const;
+  INLINE double get_fps() const;
+  INLINE const string &get_pixel_format() const;
 
   virtual PT(MovieVideoCursor) open() = 0;
 
@@ -45,7 +46,8 @@ public:
 protected:
   int _size_x;
   int _size_y;
-  int _fps;
+  double _fps;
+  string _pixel_format;
 
   static pvector<PT(WebcamVideo)> _all_webcams;
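The new _pixel_format member stores the format's FourCC as a four-character string (filled in by the V4L2 backend further down). A FourCC is simply the four bytes of the pixel-format constant read as ASCII; a small stand-alone sketch of the decoding (fourcc_to_string is an illustrative name, not part of this patch):

#include <string>
#include <stdint.h>

// Turn a FourCC pixel-format code into its 4-character name.
// V4L2 builds these codes with v4l2_fourcc(), placing the first
// character in the least significant byte, so e.g.
// V4L2_PIX_FMT_YUYV (0x56595559) decodes to "YUYV".
static std::string fourcc_to_string(uint32_t code) {
  char chars[4] = {
    char(code & 0xff),
    char((code >> 8) & 0xff),
    char((code >> 16) & 0xff),
    char((code >> 24) & 0xff),
  };
  return std::string(chars, 4);
}

The V4L2 backend below uses the shorter string((char*) &pixelformat, 4) trick, which reads the bytes straight out of memory and therefore additionally assumes a little-endian host.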

webcamVideoCursorV4L.cxx

@@ -19,7 +19,6 @@
 #include <fcntl.h>
 #include <sys/mman.h>
 #include <sys/ioctl.h>
-#include <linux/videodev2.h>
 
 #ifdef HAVE_JPEG
 extern "C" {
@@ -34,7 +33,7 @@ TypeHandle WebcamVideoCursorV4L::_type_handle;
 
 #define clamp(x) min(max(x, 0.0), 255.0)
 
-INLINE static void yuv_to_rgb(unsigned char *dest, const unsigned char *src) {
+INLINE static void yuv_to_bgr(unsigned char *dest, const unsigned char *src) {
   double y1 = (255 / 219.0) * (src[0] - 16);
   double pb = (255 / 224.0) * (src[1] - 128);
   double pr = (255 / 224.0) * (src[2] - 128);
@@ -43,20 +42,33 @@ INLINE static void yuv_to_rgb(unsigned char *dest, const unsigned char *src) {
   dest[0] = clamp(1.0 * y1 + 1.772 * pb + 0 * pr);
 }
 
-INLINE static void yuyv_to_rgbrgb(unsigned char *dest, const unsigned char *src) {
+INLINE static void yuyv_to_bgrbgr(unsigned char *dest, const unsigned char *src) {
   unsigned char yuv[] = {src[0], src[1], src[3]};
-  yuv_to_rgb(dest, yuv);
+  yuv_to_bgr(dest, yuv);
   yuv[0] = src[2];
-  yuv_to_rgb(dest + 3, yuv);
+  yuv_to_bgr(dest + 3, yuv);
 }
 
-INLINE static void yuyv_to_rgbargba(unsigned char *dest, const unsigned char *src) {
+INLINE static void yuyv_to_bgrabgra(unsigned char *dest, const unsigned char *src) {
   unsigned char yuv[] = {src[0], src[1], src[3]};
-  yuv_to_rgb(dest, yuv);
+  yuv_to_bgr(dest, yuv);
   yuv[0] = src[2];
-  yuv_to_rgb(dest + 4, yuv);
-  dest[3] = (unsigned char) -1;
-  dest[7] = (unsigned char) -1;
+  yuv_to_bgr(dest + 4, yuv);
+  dest[3] = 0xff;
+  dest[7] = 0xff;
+}
+
+INLINE static void rgb_to_bgr(unsigned char *dest, const unsigned char *src) {
+  dest[0] = src[2];
+  dest[1] = src[1];
+  dest[2] = src[0];
+}
+
+INLINE static void rgb_to_bgra(unsigned char *dest, const unsigned char *src) {
+  dest[0] = src[2];
+  dest[1] = src[1];
+  dest[2] = src[0];
+  dest[3] = 0xff;
 }
 
 #if defined(HAVE_JPEG) && !defined(CPPPARSER)
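The hunk above only shows the blue-channel line of the converter. For reference, the full limited-range BT.601 YCbCr-to-RGB transform that such a helper implements looks roughly like the sketch below; this is my own illustration with the standard coefficients, not the file's exact code, writing B, G, R in that order to match the *_to_bgr helpers (Panda's movie/texture buffers expect BGR(A) byte order, which is why the helpers were renamed from *_to_rgb).

#include <algorithm>

static inline unsigned char clamp_byte(double x) {
  return (unsigned char) std::min(std::max(x, 0.0), 255.0);
}

// Limited-range BT.601: luma spans 16..235 and chroma 16..240, hence
// the 255/219 and 255/224 scale factors seen above.
static void yuv_to_bgr_sketch(unsigned char *dest, const unsigned char *src) {
  double y1 = (255 / 219.0) * (src[0] - 16);   // Y'
  double pb = (255 / 224.0) * (src[1] - 128);  // Cb
  double pr = (255 / 224.0) * (src[2] - 128);  // Cr
  dest[2] = clamp_byte(y1 + 1.402 * pr);                    // R
  dest[1] = clamp_byte(y1 - 0.344136 * pb - 0.714136 * pr); // G
  dest[0] = clamp_byte(y1 + 1.772 * pb);                    // B
}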
@@ -196,11 +208,8 @@ WebcamVideoCursorV4L(WebcamVideoV4L *src) : MovieVideoCursor(src) {
   _aborted = false;
   _streaming = true;
   _ready = false;
-  _format = (struct v4l2_format *) malloc(sizeof(struct v4l2_format));
-  memset(_format, 0, sizeof(struct v4l2_format));
-#ifdef HAVE_JPEG
-  _cinfo = NULL;
-#endif
+  memset(&_format, 0, sizeof(struct v4l2_format));
+
   _buffers = NULL;
   _buflens = NULL;
   _fd = open(src->_device.c_str(), O_RDWR);
@@ -211,22 +220,38 @@ WebcamVideoCursorV4L(WebcamVideoV4L *src) : MovieVideoCursor(src) {
   // Find the best format in our _pformats vector.
   // MJPEG is preferred over YUYV, as it's much smaller.
-  _format->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-  pvector<uint32_t>::iterator it;
-  for (it = src->_pformats.begin(); it != src->_pformats.end(); ++it) {
+  _format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+  _format.fmt.pix.pixelformat = src->_pformat;
+
+  switch (_format.fmt.pix.pixelformat) {
 #ifdef HAVE_JPEG
-    if (*it == V4L2_PIX_FMT_MJPEG) {
-      _format->fmt.pix.pixelformat = *it;
-      break;
-    } else
+  case V4L2_PIX_FMT_MJPEG:
+    _num_components = 3;
+    break;
 #endif
-    if (*it == V4L2_PIX_FMT_YUYV) {
-      _format->fmt.pix.pixelformat = *it;
-      break;
-    }
-  }
-  if (it == src->_pformats.end()) {
-    vision_cat.error() << "Failed to find a suitable pixel format!\n";
+
+  case V4L2_PIX_FMT_YUYV:
+    _num_components = 3;
+    break;
+
+  case V4L2_PIX_FMT_BGR24:
+    _num_components = 3;
+    break;
+
+  case V4L2_PIX_FMT_BGR32:
+    _num_components = 4;
+    break;
+
+  case V4L2_PIX_FMT_RGB24:
+    _num_components = 3;
+    break;
+
+  case V4L2_PIX_FMT_RGB32:
+    _num_components = 4;
+    break;
+
+  default:
+    vision_cat.error() << "Unsupported pixel format " << src->get_pixel_format() << "!\n";
     _ready = false;
     close(_fd);
     _fd = -1;
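The constructor now requests exactly the FourCC that was enumerated for this option (src->_pformat) instead of scanning a _pformats list. Stripped of the class members, the VIDIOC_S_FMT handshake it relies on looks roughly like this sketch (negotiate_format is an illustrative name, not part of the patch):

#include <cstring>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

// Ask the driver for a specific size and pixel format.  V4L2 lets the
// driver adjust the request, which is why the code above reads the
// width and height back out of _format after the ioctl succeeds.
static bool negotiate_format(int fd, unsigned width, unsigned height,
                             uint32_t fourcc, v4l2_format &fmt) {
  memset(&fmt, 0, sizeof fmt);
  fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  fmt.fmt.pix.width = width;
  fmt.fmt.pix.height = height;
  fmt.fmt.pix.pixelformat = fourcc;      // e.g. V4L2_PIX_FMT_YUYV
  fmt.fmt.pix.field = V4L2_FIELD_NONE;   // no interlacing
  if (ioctl(fd, VIDIOC_S_FMT, &fmt) == -1) {
    return false;  // driver rejected the format outright
  }
  // On return, fmt describes what the driver actually picked.
  return fmt.fmt.pix.pixelformat == fourcc;
}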
@@ -234,12 +259,12 @@ WebcamVideoCursorV4L(WebcamVideoV4L *src) : MovieVideoCursor(src) {
   }
 
   // Request a format of this size, and no interlacing
-  _format->fmt.pix.width = _size_x;
-  _format->fmt.pix.height = _size_y;
-  _format->fmt.pix.field = V4L2_FIELD_NONE;
+  _format.fmt.pix.width = _size_x;
+  _format.fmt.pix.height = _size_y;
+  _format.fmt.pix.field = V4L2_FIELD_NONE;
 
   // Now politely ask the driver to switch to this format
-  if (-1 == ioctl(_fd, VIDIOC_S_FMT, _format)) {
+  if (-1 == ioctl(_fd, VIDIOC_S_FMT, &_format)) {
     vision_cat.error() << "Driver rejected format!\n";
     _ready = false;
     close(_fd);
@@ -247,8 +272,8 @@ WebcamVideoCursorV4L(WebcamVideoV4L *src) : MovieVideoCursor(src) {
     return;
   }
 
-  _size_x = _format->fmt.pix.width;
-  _size_y = _format->fmt.pix.height;
+  _size_x = _format.fmt.pix.width;
+  _size_y = _format.fmt.pix.height;
 
   struct v4l2_streamparm streamparm;
   memset(&streamparm, 0, sizeof streamparm);
@@ -274,7 +299,7 @@ WebcamVideoCursorV4L(WebcamVideoV4L *src) : MovieVideoCursor(src) {
   }
 
   _bufcount = req.count;
-  _buffers = (void* *) calloc (req.count, sizeof (void*));
+  _buffers = (void **) calloc (req.count, sizeof (void*));
   _buflens = (size_t*) calloc (req.count, sizeof (size_t));
 
   if (!_buffers || !_buflens) {
@@ -312,19 +337,18 @@ WebcamVideoCursorV4L(WebcamVideoV4L *src) : MovieVideoCursor(src) {
 #ifdef HAVE_JPEG
   // Initialize the JPEG library, if necessary
-  if (_format->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG) {
-    _cinfo = (struct jpeg_decompress_struct *) malloc(sizeof(struct jpeg_decompress_struct));
-    jpeg_create_decompress(_cinfo);
+  if (_format.fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG) {
+    jpeg_create_decompress(&_cinfo);
 
-    _cinfo->src = (struct jpeg_source_mgr *)
-      (*_cinfo->mem->alloc_small) ((j_common_ptr) _cinfo, JPOOL_PERMANENT,
+    _cinfo.src = (struct jpeg_source_mgr *)
+      (*_cinfo.mem->alloc_small) ((j_common_ptr) &_cinfo, JPOOL_PERMANENT,
                                   sizeof(struct jpeg_source_mgr));
 
     // Set up function pointers
-    _cinfo->src->init_source = my_init_source;
-    _cinfo->src->fill_input_buffer = my_fill_input_buffer;
-    _cinfo->src->skip_input_data = my_skip_input_data;
-    _cinfo->src->resync_to_restart = jpeg_resync_to_restart;
-    _cinfo->src->term_source = my_term_source;
+    _cinfo.src->init_source = my_init_source;
+    _cinfo.src->fill_input_buffer = my_fill_input_buffer;
+    _cinfo.src->skip_input_data = my_skip_input_data;
+    _cinfo.src->resync_to_restart = jpeg_resync_to_restart;
+    _cinfo.src->term_source = my_term_source;
   }
 #endif
 
   _ready = true;
@@ -338,14 +362,10 @@ WebcamVideoCursorV4L(WebcamVideoV4L *src) : MovieVideoCursor(src) {
 WebcamVideoCursorV4L::
 ~WebcamVideoCursorV4L() {
 #ifdef HAVE_JPEG
-  if (_cinfo != NULL) {
-    jpeg_destroy_decompress(_cinfo);
-    free(_cinfo);
+  if (_format.fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG) {
+    jpeg_destroy_decompress(&_cinfo);
   }
 #endif
-  if (_format != NULL) {
-    free(_format);
-  }
   if (-1 != _fd) {
     enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
     ioctl(_fd, VIDIOC_STREAMOFF, &type);
@@ -385,14 +405,15 @@ fetch_buffer() {
   }
   nassertr(vbuf.index < _bufcount, NULL);
 
   size_t bufsize = _buflens[vbuf.index];
-  size_t old_bpl = _format->fmt.pix.bytesperline;
-  size_t new_bpl = _size_x * 3;
+  size_t old_bpl = _format.fmt.pix.bytesperline;
+  size_t new_bpl = _size_x * _num_components;
 
   unsigned char *buf = (unsigned char *) _buffers[vbuf.index];
-  if (_format->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG) {
+  switch (_format.fmt.pix.pixelformat) {
+  case V4L2_PIX_FMT_MJPEG: {
 #ifdef HAVE_JPEG
     struct my_error_mgr jerr;
-    _cinfo->err = jpeg_std_error(&jerr.pub);
+    _cinfo.err = jpeg_std_error(&jerr.pub);
     jerr.pub.error_exit = my_error_exit;
     jerr.pub.output_message = my_output_message;
@@ -400,41 +421,41 @@ fetch_buffer() {
 
     // Establish the setjmp return context for my_error_exit to use
     if (setjmp(jerr.setjmp_buffer)) {
-      jpeg_abort_decompress(_cinfo);
+      jpeg_abort_decompress(&_cinfo);
     } else {
       // Set up data pointer
-      _cinfo->src->bytes_in_buffer = bufsize;
-      _cinfo->src->next_input_byte = buf;
+      _cinfo.src->bytes_in_buffer = bufsize;
+      _cinfo.src->next_input_byte = buf;
 
-      if (jpeg_read_header(_cinfo, TRUE) == JPEG_HEADER_OK) {
-        if (_cinfo->dc_huff_tbl_ptrs[0] == NULL) {
+      if (jpeg_read_header(&_cinfo, TRUE) == JPEG_HEADER_OK) {
+        if (_cinfo.dc_huff_tbl_ptrs[0] == NULL) {
           // Many MJPEG streams do not include huffman tables. Remedy this.
-          _cinfo->dc_huff_tbl_ptrs[0] = &dc_luminance_tbl;
-          _cinfo->dc_huff_tbl_ptrs[1] = &dc_chrominance_tbl;
-          _cinfo->ac_huff_tbl_ptrs[0] = &ac_luminance_tbl;
-          _cinfo->ac_huff_tbl_ptrs[1] = &ac_chrominance_tbl;
+          _cinfo.dc_huff_tbl_ptrs[0] = &dc_luminance_tbl;
+          _cinfo.dc_huff_tbl_ptrs[1] = &dc_chrominance_tbl;
+          _cinfo.ac_huff_tbl_ptrs[0] = &ac_luminance_tbl;
+          _cinfo.ac_huff_tbl_ptrs[1] = &ac_chrominance_tbl;
         }
 
-        _cinfo->scale_num = 1;
-        _cinfo->scale_denom = 1;
-        _cinfo->out_color_space = JCS_RGB;
-        _cinfo->dct_method = JDCT_IFAST;
+        _cinfo.scale_num = 1;
+        _cinfo.scale_denom = 1;
+        _cinfo.out_color_space = JCS_RGB;
+        _cinfo.dct_method = JDCT_IFAST;
 
-        if (jpeg_start_decompress(_cinfo) && _cinfo->output_components == 3
-            && _size_x == _cinfo->output_width && _size_y == _cinfo->output_height) {
+        if (jpeg_start_decompress(&_cinfo) && _cinfo.output_components == 3
+            && _size_x == _cinfo.output_width && _size_y == _cinfo.output_height) {
 
-          JSAMPLE *buffer_end = newbuf + new_bpl * _cinfo->output_height;
+          JSAMPLE *buffer_end = newbuf + new_bpl * _cinfo.output_height;
           JSAMPLE *rowptr = newbuf;
-          while (_cinfo->output_scanline < _cinfo->output_height) {
+          while (_cinfo.output_scanline < _cinfo.output_height) {
             nassertd(rowptr + new_bpl <= buffer_end) break;
-            jpeg_read_scanlines(_cinfo, &rowptr, _cinfo->output_height);
+            jpeg_read_scanlines(&_cinfo, &rowptr, _cinfo.output_height);
             rowptr += new_bpl;
           }
 
-          if (_cinfo->output_scanline < _cinfo->output_height) {
-            jpeg_abort_decompress(_cinfo);
+          if (_cinfo.output_scanline < _cinfo.output_height) {
+            jpeg_abort_decompress(&_cinfo);
           } else {
-            jpeg_finish_decompress(_cinfo);
+            jpeg_finish_decompress(&_cinfo);
           }
         }
       }
@@ -454,16 +475,51 @@ fetch_buffer() {
       block[i + 2] = ex;
     }
 #else
-    nassertr(false, NULL); // Not compiled with JPEG support
+    nassertr(false /* Not compiled with JPEG support*/, NULL);
 #endif
-  } else {
+    break;
+  }
+
+  case V4L2_PIX_FMT_YUYV:
     for (size_t row = 0; row < _size_y; ++row) {
       size_t c = 0;
       for (size_t i = 0; i < old_bpl; i += 4) {
-        yuyv_to_rgbrgb(block + (_size_y - row - 1) * new_bpl + c, buf + row * old_bpl + i);
+        yuyv_to_bgrbgr(block + (_size_y - row - 1) * new_bpl + c, buf + row * old_bpl + i);
         c += 6;
       }
     }
+    break;
+
+  case V4L2_PIX_FMT_BGR24:
+  case V4L2_PIX_FMT_BGR32:
+    // Simplest case: copying every row verbatim.
+    nassertr(old_bpl == new_bpl, NULL);
+    for (size_t row = 0; row < _size_y; ++row) {
+      memcpy(block + (_size_y - row - 1) * new_bpl, buf + row * old_bpl, new_bpl);
+    }
+    break;
+
+  case V4L2_PIX_FMT_RGB24:
+    // Swap components.
+    nassertr(old_bpl == new_bpl, NULL);
+    for (size_t row = 0; row < _size_y; ++row) {
+      for (size_t i = 0; i < old_bpl; i += 3) {
+        rgb_to_bgr(block + (_size_y - row - 1) * old_bpl + i, buf + row * old_bpl + i);
+      }
+    }
+    break;
+
+  case V4L2_PIX_FMT_RGB32:
+    // Swap components.
+    nassertr(old_bpl == new_bpl, NULL);
+    for (size_t row = 0; row < _size_y; ++row) {
+      for (size_t i = 0; i < old_bpl; i += 4) {
+        rgb_to_bgra(block + (_size_y - row - 1) * old_bpl + i, buf + row * old_bpl + i + 1);
+      }
+    }
+    break;
   }
 
   if (-1 == ioctl(_fd, VIDIOC_QBUF, &vbuf)) {
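fetch_buffer() dequeues one memory-mapped buffer, converts it with one of the branches above, and immediately hands the buffer back to the driver. Reduced to the V4L2 calls, the per-frame cycle is roughly the sketch below; grab_one_frame and convert are illustrative names standing in for the cursor's member state and the per-format branches, not part of the patch.

#include <cstring>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

// Skeleton of the dequeue/convert/requeue cycle performed on the
// memory-mapped capture buffers.  'buffers' would be the array of
// mmap'ed pointers set up in the constructor.
static bool grab_one_frame(int fd, void **buffers, size_t *buflens,
                           bool (*convert)(const unsigned char *src, size_t len)) {
  v4l2_buffer vbuf;
  memset(&vbuf, 0, sizeof vbuf);
  vbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  vbuf.memory = V4L2_MEMORY_MMAP;
  if (ioctl(fd, VIDIOC_DQBUF, &vbuf) == -1) {
    return false;                       // no frame ready / stream error
  }
  bool ok = convert((const unsigned char *) buffers[vbuf.index],
                    buflens[vbuf.index]);
  // Hand the buffer back to the driver so it can be filled again.
  ioctl(fd, VIDIOC_QBUF, &vbuf);
  return ok;
}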

webcamVideoCursorV4L.h

@@ -22,10 +22,12 @@
 #include "webcamVideo.h"
 #include "movieVideoCursor.h"
 
-struct v4l2_format;
+#include <linux/videodev2.h>
 
-#if defined(HAVE_JPEG)
-struct jpeg_decompress_struct;
+#ifdef HAVE_JPEG
+extern "C" {
+  #include <jpeglib.h>
+}
 #endif
 
 class WebcamVideoV4L;
@@ -45,9 +47,9 @@ private:
   void **_buffers;
   size_t *_buflens;
   size_t _bufcount;
-  struct v4l2_format *_format;
+  struct v4l2_format _format;
 #ifdef HAVE_JPEG
-  struct jpeg_decompress_struct *_cinfo;
+  struct jpeg_decompress_struct _cinfo;
 #endif
 
 public:

webcamVideoV4L.cxx

@@ -25,6 +25,56 @@
 TypeHandle WebcamVideoV4L::_type_handle;
 
+////////////////////////////////////////////////////////////////////
+//     Function: add_options_for_size
+//       Access: Private, Static
+//  Description:
+////////////////////////////////////////////////////////////////////
+void WebcamVideoV4L::
+add_options_for_size(int fd, const string &dev, const char *name, unsigned width, unsigned height, unsigned pixelformat) {
+  struct v4l2_frmivalenum frmivalenum;
+  for (int k = 0;; k++) {
+    memset(&frmivalenum, 0, sizeof frmivalenum);
+    frmivalenum.index = k;
+    frmivalenum.pixel_format = pixelformat;
+    frmivalenum.width = width;
+    frmivalenum.height = height;
+    if (ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &frmivalenum) == -1) {
+      break;
+    }
+
+    double fps = 0.0;
+    switch (frmivalenum.type) {
+    case V4L2_FRMIVAL_TYPE_DISCRETE:
+      fps = ((double) frmivalenum.discrete.denominator) / ((double) frmivalenum.discrete.numerator);
+      break;
+
+    case V4L2_FRMIVAL_TYPE_CONTINUOUS:
+    case V4L2_FRMIVAL_TYPE_STEPWISE:
+      {
+        // Select the maximum framerate.
+        double max_fps = ((double) frmivalenum.stepwise.max.denominator) / ((double) frmivalenum.stepwise.max.numerator);
+        fps = max_fps;
+      }
+      break;
+
+    default:
+      continue;
+    }
+
+    // Create a new webcam video object
+    PT(WebcamVideoV4L) wc = new WebcamVideoV4L;
+    wc->set_name(name);
+    wc->_device = dev;
+    wc->_size_x = width;
+    wc->_size_y = height;
+    wc->_fps = fps;
+    wc->_pformat = pixelformat;
+    wc->_pixel_format = string((char*) &pixelformat, 4);
+
+    WebcamVideoV4L::_all_webcams.push_back(DCAST(WebcamVideo, wc));
+  }
+}
+
 ////////////////////////////////////////////////////////////////////
 //     Function: find_all_webcams_v4l
 //       Access: Public, Static
@@ -52,6 +102,23 @@ void find_all_webcams_v4l() {
       if (ioctl(fd, VIDIOC_ENUM_FMT, &fmt) == -1) {
        break;
      }
+
+      // Only accept supported formats.
+      switch (fmt.pixelformat) {
+#ifdef HAVE_JPEG
+      case V4L2_PIX_FMT_MJPEG:
+#endif
+      case V4L2_PIX_FMT_YUYV:
+      case V4L2_PIX_FMT_BGR24:
+      case V4L2_PIX_FMT_BGR32:
+      case V4L2_PIX_FMT_RGB24:
+      case V4L2_PIX_FMT_RGB32:
+        break;
+
+      default:
+        continue;
+      }
+
       struct v4l2_frmsizeenum frmsizeenum;
       for (int j = 0;; j++) {
         memset(&frmsizeenum, 0, sizeof frmsizeenum);
@@ -60,51 +127,48 @@ void find_all_webcams_v4l() {
         if (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &frmsizeenum) == -1) {
           break;
         }
-        if (frmsizeenum.type != V4L2_FRMSIZE_TYPE_DISCRETE) {
-          continue;
-        }
-        struct v4l2_frmivalenum frmivalenum;
-        for (int k = 0;; k++) {
-          memset(&frmivalenum, 0, sizeof frmivalenum);
-          frmivalenum.index = k;
-          frmivalenum.pixel_format = fmt.pixelformat;
-          frmivalenum.width = frmsizeenum.discrete.width;
-          frmivalenum.height = frmsizeenum.discrete.height;
-          if (ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &frmivalenum) == -1) {
-            break;
-          }
-          if (frmivalenum.type != V4L2_FRMIVAL_TYPE_DISCRETE) {
-            continue;
-          }
-
-          // Create a new webcam video object
-          PT(WebcamVideoV4L) wc = new WebcamVideoV4L;
-          wc->set_name((const char*) cap2.card);
-          wc->_device = *it;
-          wc->_size_x = frmsizeenum.discrete.width;
-          wc->_size_y = frmsizeenum.discrete.height;
-          wc->_fps = ((double) frmivalenum.discrete.denominator) / ((double) frmivalenum.discrete.numerator);
-          wc->_pformats.push_back(fmt.pixelformat);
-
-          // Iterate through the webcams to make sure we don't put any duplicates in there
-          pvector<PT(WebcamVideo)>::iterator wvi;
-          for (wvi = WebcamVideoV4L::_all_webcams.begin(); wvi != WebcamVideoV4L::_all_webcams.end(); ++wvi) {
-            if ((*wvi)->is_of_type(WebcamVideoV4L::get_class_type())) {
-              PT(WebcamVideoV4L) wv_v4l = DCAST(WebcamVideoV4L, *wvi);
-              if (wv_v4l->_device == wc->_device &&
-                  wv_v4l->_size_x == wc->_size_x &&
-                  wv_v4l->_size_y == wc->_size_y &&
-                  wv_v4l->_fps == wc->_fps) {
-                wv_v4l->_pformats.push_back(fmt.pixelformat);
-                break;
-              }
-            }
-          }
-
-          // Did the loop finish, meaning that a webcam of these
-          // properties does not exist?  Add it.
-          if (wvi == WebcamVideoV4L::_all_webcams.end()) {
-            WebcamVideoV4L::_all_webcams.push_back(DCAST(WebcamVideo, wc));
-          }
-        }
+
+        switch (frmsizeenum.type) {
+        case V4L2_FRMSIZE_TYPE_DISCRETE:
+          // Easy, add the options with this discrete size.
+          WebcamVideoV4L::
+            add_options_for_size(fd, *it, (const char *)cap2.card,
+                                 frmsizeenum.discrete.width,
+                                 frmsizeenum.discrete.height,
+                                 fmt.pixelformat);
+          break;
+
+        case V4L2_FRMSIZE_TYPE_CONTINUOUS:
+          {
+            // Okay, er, we don't have a proper handling of this,
+            // so let's add all powers of two in this range.
+            __u32 width = Texture::up_to_power_2(frmsizeenum.stepwise.min_width);
+            for (; width <= frmsizeenum.stepwise.max_width; width *= 2) {
+              __u32 height = Texture::up_to_power_2(frmsizeenum.stepwise.min_height);
+              for (; height <= frmsizeenum.stepwise.max_height; height *= 2) {
+                WebcamVideoV4L::
+                  add_options_for_size(fd, *it, (const char *)cap2.card, width, height, fmt.pixelformat);
+              }
+            }
+          }
+          break;
+
+        case V4L2_FRMSIZE_TYPE_STEPWISE:
+          {
+            __u32 width = Texture::up_to_power_2(frmsizeenum.stepwise.min_width);
+            for (; width <= frmsizeenum.stepwise.max_width; width *= 2) {
+              __u32 height = Texture::up_to_power_2(frmsizeenum.stepwise.min_height);
+              for (; height <= frmsizeenum.stepwise.max_height; height *= 2) {
+                if ((width - frmsizeenum.stepwise.min_width) % frmsizeenum.stepwise.step_width == 0 &&
+                    (height - frmsizeenum.stepwise.min_height) % frmsizeenum.stepwise.step_height == 0) {
+                  WebcamVideoV4L::
+                    add_options_for_size(fd, *it, (const char *)cap2.card, width, height, fmt.pixelformat);
+                }
+              }
+            }
+          }
+          break;
+        }
       }
     }
   }
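V4L2 reports frame intervals (seconds per frame) rather than frame rates, which is why add_options_for_size computes fps as denominator/numerator; a discrete interval of {numerator = 1, denominator = 30} is 1/30 s per frame, i.e. 30 fps. A one-line helper making that relation explicit (interval_to_fps is an illustrative name, not part of the patch):

#include <linux/videodev2.h>

// A v4l2_fract frame interval is seconds-per-frame; the frame rate is
// its reciprocal, e.g. {1, 30} -> 30.0 frames per second.
static inline double interval_to_fps(const v4l2_fract &interval) {
  return (double) interval.denominator / (double) interval.numerator;
}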

webcamVideoV4L.h

@@ -33,9 +33,12 @@ private:
   friend class WebcamVideoCursorV4L;
   friend void find_all_webcams_v4l();
 
+  static void add_options_for_size(int fd, const string &dev, const char *name,
+                                   unsigned width, unsigned height,
+                                   unsigned pixelformat);
+
   string _device;
-  pvector<uint32_t> _pformats;
+  uint32_t _pformat;
 
 public:
   static TypeHandle get_class_type() {