just base compressability test on analysis of frame coherence

David Rose 2003-11-12 23:33:53 +00:00
parent 8893d9fe4f
commit 972f56a621
2 changed files with 40 additions and 65 deletions


@@ -270,26 +270,16 @@ write_reals(Datagram &datagram, const float *array, int length) {
   }
 
   // Normal case: FFT the array, and write that out.
-  double *data = (double *)alloca(length * sizeof(double));
-  int i;
-  for (i = 0; i < length; i++) {
-    data[i] = array[i];
-  }
-
-  double *half_complex = (double *)alloca(length * sizeof(double));
-  rfftw_plan plan = get_real_compress_plan(length);
-  rfftw_one(plan, data, half_complex);
 
+  // First, check the compressability.
   bool reject_compression = false;
   if (_use_error_threshold) {
-    // As a sanity check, decode the numbers again and see how far off
-    // we will be from the original string.
-    double error = get_error(data, half_complex, length);
+    // Don't encode the data if it moves too erratically.
+    float error = get_compressability(array, length);
     if (error > fft_error_threshold) {
-      // No good: the compression is too damaging.  Just write out
-      // lossless data.
+      // No good: the data probably won't compress well.  Just write
+      // out lossless data.
       reject_compression = true;
     }
   }
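A note on the motivation visible in this hunk: the old pre-check was itself expensive. It required the forward FFT's half_complex output up front, and get_error then ran an inverse FFT to measure how far the round-tripped values drifted from the originals. The new get_compressability check reads only the raw float array in a single pass, before any FFTW plan is touched.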
@@ -306,6 +296,19 @@ write_reals(Datagram &datagram, const float *array, int length) {
     return;
   }
 
+  // Now generate the Fourier transform.
+  double *data = (double *)alloca(length * sizeof(double));
+  int i;
+  for (i = 0; i < length; i++) {
+    data[i] = array[i];
+  }
+
+  double *half_complex = (double *)alloca(length * sizeof(double));
+  rfftw_plan plan = get_real_compress_plan(length);
+  rfftw_one(plan, data, half_complex);
+
   // Now encode the numbers, run-length encoded by size, so we only
   // write out the number of bits we need for each number.
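This hunk is the other half of the move: the alloca/rfftw setup deleted from the top of write_reals in the previous hunk reappears here, after the rejection check, so data that falls back to lossless output never pays for the transform. rfftw_one is FFTW 2's one-shot real-to-halfcomplex transform; it is unnormalized, which is why the old get_error code removed below scales its inverse transform by 1.0 / length.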
@@ -970,66 +973,38 @@ interpolate(double t, double a, double b) {
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: FFTCompressor::get_error
+//     Function: FFTCompressor::get_compressability
 //       Access: Private
-//  Description: Measures the error that would be incurred from
-//               compressing the string of reals.
+//  Description: Returns a factor that indicates how erratically the
+//               values are changing.  The lower the result, the
+//               calmer the numbers, and the greater its likelihood of
+//               being successfully compressed.
 ////////////////////////////////////////////////////////////////////
-double FFTCompressor::
-get_error(const double *data, const double *half_complex, int length) const {
-  double *truncated_half_complex = (double *)alloca(length * sizeof(double));
-  int i;
-  for (i = 0; i < length; i++) {
-    double scale_factor = get_scale_factor(i, length);
-    double num = cfloor(half_complex[i] / scale_factor + 0.5);
-    truncated_half_complex[i] = num * scale_factor;
-  }
-
-  double *new_data = (double *)alloca(length * sizeof(double));
-  rfftw_plan plan = get_real_decompress_plan(length);
-  rfftw_one(plan, &truncated_half_complex[0], new_data);
-
-  double scale = 1.0 / (double)length;
-  for (i = 0; i < length; i++) {
-    new_data[i] *= scale;
-  }
-
-  double last_value = data[0];
-  double last_new_value = new_data[0];
-  for (i = 0; i < length; i++) {
-    // First, we get the delta from each frame to the next.
-    double next_value = data[i];
-    double data_delta = data[i] - last_value;
-    last_value = next_value;
-
-    double next_new_value = new_data[i];
-    double data_new_delta = new_data[i] - last_value;
-    last_new_value = next_new_value;
-
-    // And we store the relative change in delta between our original
-    // values and our compressed values.
-    new_data[i] = data_new_delta - data_delta;
-  }
-
-  // Our error measurement is nothing more than the standard deviation
-  // of the relative change in delta, from above.  If this is large,
-  // the compressed values are moving substantially more erratically
-  // than the original values.
-  double sum = 0.0;
-  double sum2 = 0.0;
-  for (i = 0; i < length; i++) {
-    sum += new_data[i];
-    sum2 += new_data[i] * new_data[i];
-  }
-
-  double variance = (sum2 - (sum * sum) / length) / (length - 1);
+float FFTCompressor::
+get_compressability(const float *data, int length) const {
+  // The result returned is actually the standard deviation of the
+  // table of deltas between consecutive frames.  This number is
+  // larger if the frames have wildly different values.
+
+  if (length <= 2) {
+    return 0.0;
+  }
+
+  float sum = 0.0;
+  float sum2 = 0.0;
+  for (int i = 1; i < length; i++) {
+    float delta = data[i] - data[i - 1];
+    sum += delta;
+    sum2 += delta * delta;
+  }
+
+  float variance = (sum2 - (sum * sum) / (length - 1)) / (length - 2);
   if (variance < 0.0) {
     // This can only happen due to tiny roundoff error.
     return 0.0;
   }
 
-  double std_deviation = sqrt(variance);
+  float std_deviation = csqrt(variance);
   return std_deviation;
 }
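For illustration, here is a standalone sketch of the statistic the new get_compressability computes (a hypothetical test harness, not part of the commit; it uses std::sqrt where the real code uses Panda3D's csqrt). With length samples there are length - 1 deltas d[i] = data[i] - data[i-1]; the function forms their unbiased sample variance, (sum of d[i]^2 - (sum of d[i])^2 / (length - 1)) / (length - 2), and returns its square root:

#include <cmath>
#include <cstdio>

// Standard deviation of the deltas between consecutive frames,
// mirroring FFTCompressor::get_compressability above.
static float compressability(const float *data, int length) {
  if (length <= 2) {
    return 0.0f;
  }
  float sum = 0.0f;
  float sum2 = 0.0f;
  for (int i = 1; i < length; i++) {
    float delta = data[i] - data[i - 1];
    sum += delta;
    sum2 += delta * delta;
  }
  // Unbiased sample variance of the (length - 1) deltas.
  float variance = (sum2 - (sum * sum) / (length - 1)) / (length - 2);
  if (variance < 0.0f) {
    return 0.0f;  // only possible through tiny roundoff error
  }
  return std::sqrt(variance);
}

int main() {
  // A calm channel: constant slope, so every delta is 0.1.
  float calm[] = {0.0f, 0.1f, 0.2f, 0.3f, 0.4f};
  // An erratic channel: deltas of +5, -8, +11, -14.
  float erratic[] = {0.0f, 5.0f, -3.0f, 8.0f, -6.0f};

  printf("calm:    %f\n", compressability(calm, 5));     // 0.000000
  printf("erratic: %f\n", compressability(erratic, 5));  // ~11.5
  return 0;
}

Note that a constant-velocity ramp scores zero even though its values change every frame: the statistic measures how coherent the motion is, not how large it is, which matches the commit's framing of "frame coherence".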


@@ -92,7 +92,7 @@ private:
   double get_scale_factor(int i, int length) const;
   static double interpolate(double t, double a, double b);
-  double get_error(const double *data, const double *half_complex, int length) const;
+  float get_compressability(const float *data, int length) const;
 
   int _bam_minor_version;
   int _quality;