More thoroughly fix issues with pickling Panda objects in Python 3

This also adds DatagramBuffer, a class for writing datagrams to an in-memory buffer and reading them back from it.
rdb 2017-11-08 00:02:35 +01:00
parent 08629ef1a0
commit 1b1d80cd27
22 changed files with 415 additions and 161 deletions
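The new class is exercised throughout the diff below. As a quick orientation, here is a minimal usage sketch (not part of the commit, based only on the DatagramBuffer interface added in this change):

// Minimal sketch, not from the commit: round-trip a Datagram through the new
// DatagramBuffer class, using only the interface shown in datagramBuffer.h below.
#include "datagramBuffer.h"
#include "datagram.h"

bool round_trip_example() {
  DatagramBuffer buffer;

  Datagram dg;
  dg.add_uint16(3);
  dg.add_string("abc");
  if (!buffer.put_datagram(dg)) {  // appends a length-prefixed copy to the buffer
    return false;
  }

  // DatagramBuffer is also a DatagramGenerator, so the same data can be read
  // back immediately, without any stream in between.
  Datagram copy;
  if (!buffer.get_datagram(copy)) {
    return false;
  }
  return copy.get_length() == dg.get_length();
}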


@@ -4808,18 +4808,9 @@ write_function_instance(ostream &out, FunctionRemap *remap,
if (args_type == AT_single_arg) {
out << "#if PY_MAJOR_VERSION >= 3\n";
// As a special hack to fix pickling in Python 3, if the method name
// starts with py_decode_, we take a bytes object instead of a str.
if (remap->_cppfunc->get_local_name().substr(0, 10) == "py_decode_") {
indent(out, indent_level) << "if (PyBytes_AsStringAndSize(arg, (char **)&"
<< param_name << "_str, &" << param_name << "_len) == -1) {\n";
indent(out, indent_level + 2) << param_name << "_str = NULL;\n";
indent(out, indent_level) << "}\n";
} else {
indent(out, indent_level)
<< param_name << "_str = PyUnicode_AsUTF8AndSize(arg, &"
<< param_name << "_len);\n";
}
indent(out, indent_level)
<< param_name << "_str = PyUnicode_AsUTF8AndSize(arg, &"
<< param_name << "_len);\n";
out << "#else\n"; // NB. PyString_AsStringAndSize also accepts a PyUnicode.
indent(out, indent_level) << "if (PyString_AsStringAndSize(arg, (char **)&"
<< param_name << "_str, &" << param_name << "_len) == -1) {\n";


@@ -222,7 +222,7 @@ ALWAYS_INLINE PyObject *Dtool_WrapValue(PyObject *value) {
return value;
}
ALWAYS_INLINE PyObject *Dtool_WrapValue(const std::vector<unsigned char> &value) {
ALWAYS_INLINE PyObject *Dtool_WrapValue(const vector_uchar &value) {
#if PY_MAJOR_VERSION >= 3
return PyBytes_FromStringAndSize((char *)value.data(), (Py_ssize_t)value.size());
#else


@@ -29,6 +29,7 @@
#endif
#include "pnotify.h"
#include "vector_uchar.h"
#if defined(HAVE_PYTHON) && !defined(CPPPARSER)
@@ -549,7 +550,7 @@ ALWAYS_INLINE PyObject *Dtool_WrapValue(char value);
ALWAYS_INLINE PyObject *Dtool_WrapValue(wchar_t value);
ALWAYS_INLINE PyObject *Dtool_WrapValue(nullptr_t);
ALWAYS_INLINE PyObject *Dtool_WrapValue(PyObject *value);
ALWAYS_INLINE PyObject *Dtool_WrapValue(const std::vector<unsigned char> &value);
ALWAYS_INLINE PyObject *Dtool_WrapValue(const vector_uchar &value);
#if PY_MAJOR_VERSION >= 0x02060000
ALWAYS_INLINE PyObject *Dtool_WrapValue(Py_buffer *value);


@@ -78,9 +78,7 @@ __reduce__(PyObject *self) const {
// Since a TextureCollection is itself an iterator, we can simply pass it as
// the fourth tuple component.
PyObject *result = Py_BuildValue("(O()OO)", this_class, Py_None, self);
Py_DECREF(this_class);
return result;
return Py_BuildValue("(N()OO)", this_class, Py_None, self);
}
#endif // HAVE_PYTHON
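A side note on the reference counting here: Py_BuildValue's "O" format code adds a new reference to the object it is given, so the caller must still drop its own, while "N" transfers ownership of the caller's reference into the tuple. That is why the explicit Py_DECREF(this_class) disappears. A minimal sketch of the two equivalent forms (this_class and self stand for the references held in the code above):

#include <Python.h>

// Hedged illustration, not from the commit: both branches build the same
// 4-element tuple; they differ only in who releases the this_class reference.
static PyObject *build_reduce_tuple(PyObject *this_class, PyObject *self, bool use_N) {
  if (use_N) {
    // "N": ownership of this_class passes into the new tuple.
    return Py_BuildValue("(N()OO)", this_class, Py_None, self);
  } else {
    // "O": the tuple takes its own reference, so we still drop ours afterwards.
    PyObject *result = Py_BuildValue("(O()OO)", this_class, Py_None, self);
    Py_DECREF(this_class);
    return result;
  }
}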


@@ -2103,11 +2103,11 @@ get_name() const {
* This method is used by __reduce__ to handle streaming of NodePaths to a
* pickle file.
*/
INLINE string NodePath::
INLINE vector_uchar NodePath::
encode_to_bam_stream() const {
string data;
vector_uchar data;
if (!encode_to_bam_stream(data)) {
return string();
data.clear();
}
return data;
}


@@ -70,6 +70,7 @@
#include "modelNode.h"
#include "bam.h"
#include "bamWriter.h"
#include "datagramBuffer.h"
// stack seems to overflow on Intel C++ at 7000. If we need more than 7000,
// need to increase stack size.
@@ -5576,28 +5577,24 @@ write_bam_stream(ostream &out) const {
* calls this function.
*/
bool NodePath::
encode_to_bam_stream(string &data, BamWriter *writer) const {
encode_to_bam_stream(vector_uchar &data, BamWriter *writer) const {
data.clear();
ostringstream stream;
DatagramOutputFile dout;
if (!dout.open(stream)) {
return false;
}
DatagramBuffer buffer;
BamWriter local_writer;
bool used_local_writer = false;
if (writer == NULL) {
// Create our own writer.
if (!dout.write_header(_bam_header)) {
if (!buffer.write_header(_bam_header)) {
return false;
}
writer = &local_writer;
used_local_writer = true;
}
writer->set_target(&dout);
writer->set_target(&buffer);
int num_nodes = get_num_nodes();
if (used_local_writer && num_nodes > 1) {
@@ -5615,7 +5612,7 @@ encode_to_bam_stream(string &data, BamWriter *writer) const {
dg.add_uint8(_error_type);
dg.add_int32(num_nodes);
if (!dout.put_datagram(dg)) {
if (!buffer.put_datagram(dg)) {
writer->set_target(NULL);
return false;
}
@@ -5631,7 +5628,7 @@ encode_to_bam_stream(string &data, BamWriter *writer) const {
}
writer->set_target(NULL);
data = stream.str();
buffer.swap_data(data);
return true;
}
@@ -5640,22 +5637,17 @@ encode_to_bam_stream(string &data, BamWriter *writer) const {
* extracts and returns the NodePath on that string. Returns NULL on error.
*/
NodePath NodePath::
decode_from_bam_stream(const string &data, BamReader *reader) {
decode_from_bam_stream(vector_uchar data, BamReader *reader) {
NodePath result;
istringstream stream(data);
DatagramInputFile din;
if (!din.open(stream)) {
return NodePath::fail();
}
DatagramBuffer buffer(move(data));
BamReader local_reader;
if (reader == NULL) {
// Create a local reader.
string head;
if (!din.read_header(head, _bam_header.size())) {
if (!buffer.read_header(head, _bam_header.size())) {
return NodePath::fail();
}
@@ -5666,11 +5658,11 @@ decode_from_bam_stream(const string &data, BamReader *reader) {
reader = &local_reader;
}
reader->set_source(&din);
reader->set_source(&buffer);
// One initial datagram to encode the error type, and the number of nodes.
Datagram dg;
if (!din.get_datagram(dg)) {
if (!buffer.get_datagram(dg)) {
return NodePath::fail();
}


@@ -940,9 +940,9 @@ PUBLISHED:
BLOCKING bool write_bam_file(const Filename &filename) const;
BLOCKING bool write_bam_stream(ostream &out) const;
INLINE string encode_to_bam_stream() const;
bool encode_to_bam_stream(string &data, BamWriter *writer = NULL) const;
static NodePath decode_from_bam_stream(const string &data, BamReader *reader = NULL);
INLINE vector_uchar encode_to_bam_stream() const;
bool encode_to_bam_stream(vector_uchar &data, BamWriter *writer = nullptr) const;
static NodePath decode_from_bam_stream(vector_uchar data, BamReader *reader = nullptr);
private:
static NodePathComponent *

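For reference, a minimal sketch (assuming only the signatures declared above; not part of the commit) of how the vector_uchar-based API is called from C++:

// Sketch: encode a NodePath to a byte vector and decode it again.
#include <utility>
#include "nodePath.h"
#include "vector_uchar.h"

NodePath bam_round_trip(const NodePath &np) {
  vector_uchar data;
  if (!np.encode_to_bam_stream(data)) {  // fills data with the bam-encoded path
    return NodePath::fail();
  }
  // decode_from_bam_stream now takes the buffer by value, so moving it in
  // avoids an extra copy.
  return NodePath::decode_from_bam_stream(std::move(data));
}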

@@ -120,7 +120,7 @@ __reduce_persist__(PyObject *self, PyObject *pickler) const {
// We have a non-empty NodePath.
string bam_stream;
vector_uchar bam_stream;
if (!_this->encode_to_bam_stream(bam_stream, writer)) {
ostringstream stream;
stream << "Could not bamify " << _this;
@@ -150,7 +150,6 @@ __reduce_persist__(PyObject *self, PyObject *pickler) const {
} else {
// The traditional pickle support: call the non-persistent version of this
// function.
func = Extension<TypedWritable>::find_global_decode(this_class, "py_decode_NodePath_from_bam_stream");
if (func == NULL) {
PyErr_SetString(PyExc_TypeError, "Couldn't find py_decode_NodePath_from_bam_stream()");
@@ -159,14 +158,15 @@ __reduce_persist__(PyObject *self, PyObject *pickler) const {
}
}
#if PY_MAJOR_VERSION >= 3
PyObject *result = Py_BuildValue("(O(y#))", func, bam_stream.data(), (Py_ssize_t) bam_stream.size());
#else
PyObject *result = Py_BuildValue("(O(s#))", func, bam_stream.data(), (Py_ssize_t) bam_stream.size());
#endif
Py_DECREF(func);
Py_DECREF(this_class);
return result;
// PyTuple_SET_ITEM conveniently borrows the reference it is passed.
PyObject *args = PyTuple_New(2);
PyTuple_SET_ITEM(args, 0, this_class);
PyTuple_SET_ITEM(args, 1, Dtool_WrapValue(bam_stream));
PyObject *tuple = PyTuple_New(2);
PyTuple_SET_ITEM(tuple, 0, func);
PyTuple_SET_ITEM(tuple, 1, args);
return tuple;
}
/**
@@ -190,15 +190,15 @@ find_net_python_tag(PyObject *key) const {
* This wrapper is defined as a global function to suit pickle's needs.
*/
NodePath
py_decode_NodePath_from_bam_stream(const string &data) {
return py_decode_NodePath_from_bam_stream_persist(NULL, data);
py_decode_NodePath_from_bam_stream(vector_uchar data) {
return py_decode_NodePath_from_bam_stream_persist(nullptr, move(data));
}
/**
* This wrapper is defined as a global function to suit pickle's needs.
*/
NodePath
py_decode_NodePath_from_bam_stream_persist(PyObject *unpickler, const string &data) {
py_decode_NodePath_from_bam_stream_persist(PyObject *unpickler, vector_uchar data) {
BamReader *reader = NULL;
if (unpickler != NULL) {
PyObject *py_reader = PyObject_GetAttrString(unpickler, "bamReader");
@@ -211,7 +211,7 @@ py_decode_NodePath_from_bam_stream_persist(PyObject *unpickler, const string &da
}
}
return NodePath::decode_from_bam_stream(data, reader);
return NodePath::decode_from_bam_stream(move(data), reader);
}
/**


@@ -56,8 +56,8 @@ public:
};
BEGIN_PUBLISH
NodePath py_decode_NodePath_from_bam_stream(const string &data);
NodePath py_decode_NodePath_from_bam_stream_persist(PyObject *unpickler, const string &data);
NodePath py_decode_NodePath_from_bam_stream(vector_uchar data);
NodePath py_decode_NodePath_from_bam_stream_persist(PyObject *unpickler, vector_uchar data);
END_PUBLISH
#include "nodePath_ext.I"


@@ -2124,8 +2124,8 @@ is_ambient_light() const {
}
/**
* Reads the string created by a previous call to encode_to_bam_stream(), and
* extracts and returns the single object on that string. Returns NULL on
* Reads the bytes created by a previous call to encode_to_bam_stream(), and
* extracts and returns the single object on those bytes. Returns NULL on
* error.
*
* This method is intended to replace decode_raw_from_bam_stream() when you
@@ -2134,15 +2134,15 @@ is_ambient_light() const {
* responsible for maintaining the reference count on the return value.
*/
PT(PandaNode) PandaNode::
decode_from_bam_stream(const string &data, BamReader *reader) {
decode_from_bam_stream(vector_uchar data, BamReader *reader) {
TypedWritable *object;
ReferenceCount *ref_ptr;
if (!TypedWritable::decode_raw_from_bam_stream(object, ref_ptr, data, reader)) {
return NULL;
if (TypedWritable::decode_raw_from_bam_stream(object, ref_ptr, move(data), reader)) {
return DCAST(PandaNode, object);
} else {
return nullptr;
}
return DCAST(PandaNode, object);
}
/**


@@ -321,7 +321,7 @@ PUBLISHED:
INLINE int get_fancy_bits(Thread *current_thread = Thread::get_current_thread()) const;
PUBLISHED:
static PT(PandaNode) decode_from_bam_stream(const string &data, BamReader *reader = NULL);
static PT(PandaNode) decode_from_bam_stream(vector_uchar data, BamReader *reader = nullptr);
protected:
class BoundsData;


@@ -0,0 +1,68 @@
/**
* PANDA 3D SOFTWARE
* Copyright (c) Carnegie Mellon University. All rights reserved.
*
* All use of this software is subject to the terms of the revised BSD
* license. You should have received a copy of this license along
* with this source code in a file named "LICENSE."
*
* @file datagramBuffer.I
* @author rdb
* @date 2017-11-07
*/
/**
* Initializes an empty datagram buffer.
*/
INLINE DatagramBuffer::
DatagramBuffer() :
_read_offset(0),
_wrote_first_datagram(false),
_read_first_datagram(false) {
}
/**
* Initializes the buffer with the given data.
*/
INLINE DatagramBuffer::
DatagramBuffer(vector_uchar data) :
_data(move(data)),
_read_offset(0),
_wrote_first_datagram(false),
_read_first_datagram(false) {
}
/**
* Clears the internal buffer.
*/
INLINE void DatagramBuffer::
clear() {
_data.clear();
_read_offset = 0;
_wrote_first_datagram = false;
_read_first_datagram = false;
}
/**
* Returns the internal buffer.
*/
INLINE const vector_uchar &DatagramBuffer::
get_data() const {
return _data;
}
/**
* Replaces the data in the internal buffer.
*/
INLINE void DatagramBuffer::
set_data(vector_uchar data) {
_data = move(data);
}
/**
* Swaps the data in the internal buffer with that of the other buffer.
*/
INLINE void DatagramBuffer::
swap_data(vector_uchar &other) {
_data.swap(other);
}


@@ -0,0 +1,152 @@
/**
* PANDA 3D SOFTWARE
* Copyright (c) Carnegie Mellon University. All rights reserved.
*
* All use of this software is subject to the terms of the revised BSD
* license. You should have received a copy of this license along
* with this source code in a file named "LICENSE."
*
* @file datagramBuffer.cxx
* @author rdb
* @date 2017-11-07
*/
#include "datagramBuffer.h"
/**
* Writes a sequence of bytes to the beginning of the datagram file. This may
* be called any number of times after the file has been opened and before the
* first datagram is written. It may not be called once the first datagram is
* written.
*/
bool DatagramBuffer::
write_header(const string &header) {
nassertr(!_wrote_first_datagram, false);
_data.insert(_data.end(), header.begin(), header.end());
return true;
}
/**
* Writes the given datagram to the file. Returns true on success, false if
* there is an error.
*/
bool DatagramBuffer::
put_datagram(const Datagram &data) {
_wrote_first_datagram = true;
// First, write the size of the upcoming datagram.
size_t num_bytes = data.get_length();
size_t offset = _data.size();
if (num_bytes == (uint32_t)-1 || num_bytes != (uint32_t)num_bytes) {
// Write a large value as a 64-bit size.
_data.resize(offset + num_bytes + 4 + sizeof(uint64_t));
_data[offset++] = 0xff;
_data[offset++] = 0xff;
_data[offset++] = 0xff;
_data[offset++] = 0xff;
LittleEndian s(&num_bytes, sizeof(uint64_t));
memcpy(&_data[offset], s.get_data(), sizeof(uint64_t));
offset += sizeof(uint64_t);
} else {
// Write a value that fits in 32 bits.
_data.resize(offset + num_bytes + sizeof(uint32_t));
LittleEndian s(&num_bytes, sizeof(uint32_t));
memcpy(&_data[offset], s.get_data(), sizeof(uint32_t));
offset += sizeof(uint32_t);
}
// Now, write the datagram itself.
memcpy(&_data[offset], data.get_data(), data.get_length());
return true;
}
/**
* This does absolutely nothing.
*/
void DatagramBuffer::
flush() {
}
/**
* Reads a sequence of bytes from the beginning of the datagram file. This
* may be called any number of times after the file has been opened and before
* the first datagram is read. It may not be called once the first datagram
* has been read.
*/
bool DatagramBuffer::
read_header(string &header, size_t num_bytes) {
nassertr(!_read_first_datagram, false);
if (_read_offset + num_bytes > _data.size()) {
return false;
}
header = string((char *)&_data[_read_offset], num_bytes);
_read_offset += num_bytes;
return true;
}
/**
* Reads the next datagram from the file. Returns true on success, false if
* there is an error or end of file.
*/
bool DatagramBuffer::
get_datagram(Datagram &data) {
_read_first_datagram = true;
if (_read_offset + sizeof(uint32_t) > _data.size()) {
// Reached the end of the buffer.
return false;
}
// First, get the size of the upcoming datagram.
uint32_t num_bytes_32;
LittleEndian s(&_data[_read_offset], 0, sizeof(uint32_t));
s.store_value(&num_bytes_32, sizeof(uint32_t));
_read_offset += 4;
if (num_bytes_32 == 0) {
// A special case for a zero-length datagram: no need to try to read any
// data.
data.clear();
return true;
}
size_t num_bytes = (size_t)num_bytes_32;
if (num_bytes_32 == (uint32_t)-1) {
// Another special case for a value larger than 32 bits.
uint64_t num_bytes_64;
LittleEndian s(&_data[_read_offset], 0, sizeof(uint64_t));
s.store_value(&num_bytes_64, sizeof(uint64_t));
_read_offset += 8;
num_bytes = (size_t)num_bytes_64;
nassertr((uint64_t)num_bytes == num_bytes_64, false);
}
// Make sure we have this much data to read.
nassertr_always(_read_offset + num_bytes <= _data.size(), false);
data = Datagram(&_data[_read_offset], num_bytes);
_read_offset += num_bytes;
return true;
}
/**
* Returns true if the buffer has reached the end-of-buffer. This test may
* only be made after a call to read_header() or get_datagram() has failed.
*/
bool DatagramBuffer::
is_eof() {
return (_read_offset + sizeof(uint32_t)) > _data.size();
}
/**
* Returns true if the buffer has reached an error condition.
*/
bool DatagramBuffer::
is_error() {
return false;
}
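For reference, the byte layout that put_datagram() writes and get_datagram() parses, as implied by the code above (sizes are little-endian; this summary is not part of the commit):

// small datagram:  [uint32 length] [length bytes of payload]
// large datagram:  [0xFF 0xFF 0xFF 0xFF] [uint64 length] [length bytes of payload]
//
// A stored length of 0 is valid and yields an empty Datagram on read; the
// escape value 0xFFFFFFFF signals that a 64-bit length follows.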


@@ -0,0 +1,63 @@
/**
* PANDA 3D SOFTWARE
* Copyright (c) Carnegie Mellon University. All rights reserved.
*
* All use of this software is subject to the terms of the revised BSD
* license. You should have received a copy of this license along
* with this source code in a file named "LICENSE."
*
* @file datagramBuffer.h
* @author rdb
* @date 2017-11-07
*/
#ifndef DATAGRAMBUFFER_H
#define DATAGRAMBUFFER_H
#include "pandabase.h"
#include "datagramSink.h"
#include "vector_uchar.h"
/**
* This class can be used to write a series of datagrams into a memory buffer.
* It acts as both a datagram sink and generator; you can fill it up with
* datagrams and then read as many datagrams from it.
*
* This uses the same format as DatagramInputFile and DatagramOutputFile,
* meaning that Datagram sizes are always stored little-endian.
*/
class EXPCL_PANDA_PUTIL DatagramBuffer : public DatagramSink, public DatagramGenerator {
PUBLISHED:
INLINE DatagramBuffer();
INLINE explicit DatagramBuffer(vector_uchar data);
INLINE void clear();
public:
bool write_header(const string &header);
virtual bool put_datagram(const Datagram &data) override;
virtual void flush() override;
bool read_header(string &header, size_t num_bytes);
virtual bool get_datagram(Datagram &data) override;
virtual bool is_eof() override;
virtual bool is_error() override;
INLINE const vector_uchar &get_data() const;
INLINE void set_data(vector_uchar data);
INLINE void swap_data(vector_uchar &other);
PUBLISHED:
MAKE_PROPERTY(data, get_data, set_data);
private:
vector_uchar _data;
size_t _read_offset;
bool _wrote_first_datagram;
bool _read_first_datagram;
};
#include "datagramBuffer.I"
#endif


@@ -22,6 +22,7 @@
#include "copyOnWriteObject.cxx"
#include "copyOnWritePointer.cxx"
#include "cPointerCallbackObject.cxx"
#include "datagramBuffer.cxx"
#include "datagramInputFile.cxx"
#include "datagramOutputFile.cxx"
#include "doubleBitMask.cxx"


@@ -53,22 +53,21 @@ get_bam_modified() const {
return _bam_modified;
}
/**
* Converts the TypedWritable object into a single stream of data using a
* BamWriter, and returns that data as a string string. Returns empty string
* on failure.
* BamWriter, and returns that data as a bytes object. Returns an empty bytes
* object on failure.
*
* This is a convenience method particularly useful for cases when you are
* only serializing a single object. If you have many objects to process, it
* is more efficient to use the same BamWriter to serialize all of them
* together.
*/
INLINE string TypedWritable::
INLINE vector_uchar TypedWritable::
encode_to_bam_stream() const {
string data;
vector_uchar data;
if (!encode_to_bam_stream(data)) {
return string();
data.clear();
}
return data;
}


@@ -14,8 +14,7 @@
#include "typedWritable.h"
#include "bamWriter.h"
#include "bamReader.h"
#include "datagramOutputFile.h"
#include "datagramInputFile.h"
#include "datagramBuffer.h"
#include "lightMutexHolder.h"
#include "bam.h"
@@ -134,52 +133,43 @@ as_reference_count() {
* together.
*/
bool TypedWritable::
encode_to_bam_stream(string &data, BamWriter *writer) const {
encode_to_bam_stream(vector_uchar &data, BamWriter *writer) const {
data.clear();
ostringstream stream;
// We use nested scoping to ensure the destructors get called in the right
// order.
{
DatagramOutputFile dout;
if (!dout.open(stream)) {
DatagramBuffer buffer;
if (writer == nullptr) {
// Create our own writer.
if (!buffer.write_header(_bam_header)) {
return false;
}
if (writer == NULL) {
// Create our own writer.
BamWriter writer(&buffer);
if (!writer.init()) {
return false;
}
if (!dout.write_header(_bam_header)) {
return false;
}
BamWriter writer(&dout);
if (!writer.init()) {
return false;
}
if (!writer.write_object(this)) {
return false;
}
} else {
// Use the existing writer.
writer->set_target(&dout);
bool result = writer->write_object(this);
writer->set_target(NULL);
if (!result) {
return false;
}
if (!writer.write_object(this)) {
return false;
}
} else {
// Use the existing writer.
writer->set_target(&buffer);
bool result = writer->write_object(this);
writer->set_target(nullptr);
if (!result) {
return false;
}
}
data = stream.str();
buffer.swap_data(data);
return true;
}
/**
* Reads the string created by a previous call to encode_to_bam_stream(), and
* extracts the single object on that string. Returns true on success, false
* on on error.
* Reads the bytes created by a previous call to encode_to_bam_stream(), and
* extracts the single object on those bytes. Returns true on success, false
* on error.
*
* This variant sets the TypedWritable and ReferenceCount pointers separately;
* both are pointers to the same object. The reference count is not
@@ -198,18 +188,14 @@ encode_to_bam_stream(string &data, BamWriter *writer) const {
*/
bool TypedWritable::
decode_raw_from_bam_stream(TypedWritable *&ptr, ReferenceCount *&ref_ptr,
const string &data, BamReader *reader) {
istringstream stream(data);
vector_uchar data, BamReader *reader) {
DatagramInputFile din;
if (!din.open(stream)) {
return false;
}
DatagramBuffer buffer(move(data));
if (reader == NULL) {
// Create a local reader.
string head;
if (!din.read_header(head, _bam_header.size())) {
if (!buffer.read_header(head, _bam_header.size())) {
return false;
}
@@ -217,7 +203,7 @@ decode_raw_from_bam_stream(TypedWritable *&ptr, ReferenceCount *&ref_ptr,
return false;
}
BamReader reader(&din);
BamReader reader(&buffer);
if (!reader.init()) {
return false;
}
@@ -241,7 +227,7 @@ decode_raw_from_bam_stream(TypedWritable *&ptr, ReferenceCount *&ref_ptr,
} else {
// Use the existing reader.
reader->set_source(&din);
reader->set_source(&buffer);
if (!reader->read_object(ptr, ref_ptr)) {
reader->set_source(NULL);
return false;


@@ -19,6 +19,7 @@
#include "pvector.h"
#include "lightMutex.h"
#include "updateSeq.h"
#include "vector_uchar.h"
class BamReader;
class BamWriter;
@@ -62,11 +63,11 @@ PUBLISHED:
EXTENSION(PyObject *__reduce__(PyObject *self) const);
EXTENSION(PyObject *__reduce_persist__(PyObject *self, PyObject *pickler) const);
INLINE string encode_to_bam_stream() const;
bool encode_to_bam_stream(string &data, BamWriter *writer = NULL) const;
INLINE vector_uchar encode_to_bam_stream() const;
bool encode_to_bam_stream(vector_uchar &data, BamWriter *writer = NULL) const;
static bool decode_raw_from_bam_stream(TypedWritable *&ptr,
ReferenceCount *&ref_ptr,
const string &data,
vector_uchar data,
BamReader *reader = NULL);
private:


@@ -26,8 +26,8 @@ as_reference_count() {
}
/**
* Reads the string created by a previous call to encode_to_bam_stream(), and
* extracts and returns the single object on that string. Returns NULL on
* Reads the bytes created by a previous call to encode_to_bam_stream(), and
* extracts and returns the single object on those bytes. Returns NULL on
* error.
*
* This method is intended to replace decode_raw_from_bam_stream() when you
@@ -37,13 +37,13 @@ as_reference_count() {
* reference count on the return value.
*/
PT(TypedWritableReferenceCount) TypedWritableReferenceCount::
decode_from_bam_stream(const string &data, BamReader *reader) {
decode_from_bam_stream(vector_uchar data, BamReader *reader) {
TypedWritable *object;
ReferenceCount *ref_ptr;
if (!TypedWritable::decode_raw_from_bam_stream(object, ref_ptr, data, reader)) {
return NULL;
if (TypedWritable::decode_raw_from_bam_stream(object, ref_ptr, move(data), reader)) {
return DCAST(TypedWritableReferenceCount, object);
} else {
return nullptr;
}
return DCAST(TypedWritableReferenceCount, object);
}


@@ -37,7 +37,7 @@ public:
virtual ReferenceCount *as_reference_count();
PUBLISHED:
static PT(TypedWritableReferenceCount) decode_from_bam_stream(const string &data, BamReader *reader = NULL);
static PT(TypedWritableReferenceCount) decode_from_bam_stream(vector_uchar data, BamReader *reader = nullptr);
public:
virtual TypeHandle get_type() const {


@@ -71,7 +71,7 @@ __reduce_persist__(PyObject *self, PyObject *pickler) const {
}
// First, streamify the object, if possible.
string bam_stream;
vector_uchar bam_stream;
if (!_this->encode_to_bam_stream(bam_stream, writer)) {
ostringstream stream;
stream << "Could not bamify object of type " << _this->get_type() << "\n";
@@ -101,7 +101,6 @@ __reduce_persist__(PyObject *self, PyObject *pickler) const {
} else {
// The traditional pickle support: call the non-persistent version of this
// function.
func = find_global_decode(this_class, "py_decode_TypedWritable_from_bam_stream");
if (func == NULL) {
PyErr_SetString(PyExc_TypeError, "Couldn't find py_decode_TypedWritable_from_bam_stream()");
@@ -110,14 +109,15 @@ __reduce_persist__(PyObject *self, PyObject *pickler) const {
}
}
#if PY_MAJOR_VERSION >= 3
PyObject *result = Py_BuildValue("(O(Oy#))", func, this_class, bam_stream.data(), (Py_ssize_t) bam_stream.size());
#else
PyObject *result = Py_BuildValue("(O(Os#))", func, this_class, bam_stream.data(), (Py_ssize_t) bam_stream.size());
#endif
Py_DECREF(func);
Py_DECREF(this_class);
return result;
// PyTuple_SET_ITEM conveniently borrows the reference it is passed.
PyObject *args = PyTuple_New(2);
PyTuple_SET_ITEM(args, 0, this_class);
PyTuple_SET_ITEM(args, 1, Dtool_WrapValue(bam_stream));
PyObject *tuple = PyTuple_New(2);
PyTuple_SET_ITEM(tuple, 0, func);
PyTuple_SET_ITEM(tuple, 1, args);
return tuple;
}
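To make the new calling convention concrete: the hand-built tuple above replaces the Py_BuildValue("(O(Oy#))", ...) / "(O(Os#))" pair, and the bam payload is now wrapped through Dtool_WrapValue, which yields a bytes object on Python 3. Roughly (illustrative only, not part of the commit):

// Shape of the value handed back to pickle by __reduce__/__reduce_persist__
// after this change:
//
//   (py_decode_TypedWritable_from_bam_stream,            // or the _persist variant
//    (this_class, <bytes produced by Dtool_WrapValue>))  // bytes, not str
//
// PyTuple_SET_ITEM takes ownership of the references it is given, so no extra
// Py_DECREF calls on func, this_class, or the wrapped payload are needed.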
/**
@@ -131,7 +131,8 @@ __reduce_persist__(PyObject *self, PyObject *pickler) const {
*/
PyObject *Extension<TypedWritable>::
find_global_decode(PyObject *this_class, const char *func_name) {
PyObject *module_name = PyObject_GetAttrString(this_class, "__module__");
// Get the module in which BamWriter is defined.
PyObject *module_name = PyObject_GetAttrString((PyObject *)&Dtool_BamWriter, "__module__");
if (module_name != NULL) {
// borrowed reference
PyObject *sys_modules = PyImport_GetModuleDict();
@@ -146,8 +147,8 @@ find_global_decode(PyObject *this_class, const char *func_name) {
}
}
}
Py_DECREF(module_name);
}
Py_DECREF(module_name);
PyObject *bases = PyObject_GetAttrString(this_class, "__bases__");
if (bases != NULL) {
@@ -178,8 +179,8 @@ find_global_decode(PyObject *this_class, const char *func_name) {
* properly handle self-referential BAM objects.
*/
PyObject *
py_decode_TypedWritable_from_bam_stream(PyObject *this_class, const string &data) {
return py_decode_TypedWritable_from_bam_stream_persist(NULL, this_class, data);
py_decode_TypedWritable_from_bam_stream(PyObject *this_class, const vector_uchar &data) {
return py_decode_TypedWritable_from_bam_stream_persist(nullptr, this_class, data);
}
/**
@@ -192,7 +193,7 @@ py_decode_TypedWritable_from_bam_stream(PyObject *this_class, const string &data
* direct/src/stdpy.
*/
PyObject *
py_decode_TypedWritable_from_bam_stream_persist(PyObject *pickler, PyObject *this_class, const string &data) {
py_decode_TypedWritable_from_bam_stream_persist(PyObject *pickler, PyObject *this_class, const vector_uchar &data) {
PyObject *py_reader = NULL;
if (pickler != NULL) {
@@ -210,28 +211,30 @@ py_decode_TypedWritable_from_bam_stream_persist(PyObject *pickler, PyObject *thi
// decode_from_bam_stream appropriate to this class.
PyObject *func = PyObject_GetAttrString(this_class, "decode_from_bam_stream");
if (func == NULL) {
return NULL;
if (func == nullptr) {
Py_XDECREF(py_reader);
return nullptr;
}
PyObject *bytes = Dtool_WrapValue(data);
if (bytes == nullptr) {
Py_DECREF(func);
Py_XDECREF(py_reader);
return nullptr;
}
PyObject *result;
if (py_reader != NULL){
#if PY_MAJOR_VERSION >= 3
result = PyObject_CallFunction(func, (char *)"(y#O)", data.data(), (Py_ssize_t) data.size(), py_reader);
#else
result = PyObject_CallFunction(func, (char *)"(s#O)", data.data(), (Py_ssize_t) data.size(), py_reader);
#endif
if (py_reader != nullptr) {
result = PyObject_CallFunctionObjArgs(func, bytes, py_reader, nullptr);
Py_DECREF(py_reader);
} else {
#if PY_MAJOR_VERSION >= 3
result = PyObject_CallFunction(func, (char *)"(y#)", data.data(), (Py_ssize_t) data.size());
#else
result = PyObject_CallFunction(func, (char *)"(s#)", data.data(), (Py_ssize_t) data.size());
#endif
result = PyObject_CallFunctionObjArgs(func, bytes, nullptr);
}
Py_DECREF(bytes);
Py_DECREF(func);
if (result == NULL) {
return NULL;
if (result == nullptr) {
return nullptr;
}
if (result == Py_None) {


@@ -33,12 +33,11 @@ public:
PyObject *__reduce_persist__(PyObject *self, PyObject *pickler) const;
static PyObject *find_global_decode(PyObject *this_class, const char *func_name);
};
BEGIN_PUBLISH
PyObject *py_decode_TypedWritable_from_bam_stream(PyObject *this_class, const string &data);
PyObject *py_decode_TypedWritable_from_bam_stream_persist(PyObject *unpickler, PyObject *this_class, const string &data);
PyObject *py_decode_TypedWritable_from_bam_stream(PyObject *this_class, const vector_uchar &data);
PyObject *py_decode_TypedWritable_from_bam_stream_persist(PyObject *unpickler, PyObject *this_class, const vector_uchar &data);
END_PUBLISH
#endif // HAVE_PYTHON