diff --git a/src/dsp/lossless.h b/src/dsp/lossless.h
index e5ed59b3..9f0d7a25 100644
--- a/src/dsp/lossless.h
+++ b/src/dsp/lossless.h
@@ -158,7 +158,8 @@ void VP8LCollectColorBlueTransforms_C(const uint32_t* argb, int stride,
 
 void VP8LResidualImage(int width, int height, int bits, int low_effort,
                        uint32_t* const argb, uint32_t* const argb_scratch,
-                       uint32_t* const image, int exact);
+                       uint32_t* const image, int near_lossless, int exact,
+                       int used_subtract_green);
 
 void VP8LColorSpaceTransform(int width, int height, int bits, int quality,
                              uint32_t* const argb, uint32_t* image);
@@ -172,6 +173,17 @@ static WEBP_INLINE uint32_t VP8LSubSampleSize(uint32_t size,
   return (size + (1 << sampling_bits) - 1) >> sampling_bits;
 }
 
+// Converts near lossless quality into max number of bits shaved off.
+static WEBP_INLINE int VP8LNearLosslessBits(int near_lossless_quality) {
+  //    100 -> 0
+  // 80..99 -> 1
+  // 60..79 -> 2
+  // 40..59 -> 3
+  // 20..39 -> 4
+  //  0..19 -> 5
+  return 5 - near_lossless_quality / 20;
+}
+
 // -----------------------------------------------------------------------------
 // Faster logarithm for integers. Small values use a look-up table.
 
@@ -336,7 +348,7 @@ static WEBP_INLINE uint32_t VP8LAddPixels(uint32_t a, uint32_t b) {
   return (alpha_and_green & 0xff00ff00u) | (red_and_blue & 0x00ff00ffu);
 }
 
-// Difference of each component with mod 256.
+// Difference of each component, mod 256.
 static WEBP_INLINE uint32_t VP8LSubPixels(uint32_t a, uint32_t b) {
   const uint32_t alpha_and_green =
       0x00ff00ffu + (a & 0xff00ff00u) - (b & 0xff00ff00u);
diff --git a/src/dsp/lossless_enc.c b/src/dsp/lossless_enc.c
index 0da14733..0c7bd403 100644
--- a/src/dsp/lossless_enc.c
+++ b/src/dsp/lossless_enc.c
@@ -382,6 +382,7 @@ static float FastLog2Slow(uint32_t v) {
 
 // Mostly used to reduce code size + readability
 static WEBP_INLINE int GetMin(int a, int b) { return (a > b) ? b : a; }
+static WEBP_INLINE int GetMax(int a, int b) { return (a < b) ? b : a; }
 
 //------------------------------------------------------------------------------
 // Methods to calculate Entropy (Shannon).
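The new VP8LNearLosslessBits() mapping drives the rest of this patch: the encoder later derives max_quantization as 1 << VP8LNearLosslessBits(quality). A minimal standalone sketch (hypothetical demo code, not part of the patch) that prints the mapping:

#include <stdio.h>

/* Hypothetical demo: mirrors VP8LNearLosslessBits() and the derivation of
 * max_quantization used later in VP8LResidualImage(). Not part of the patch. */
static int NearLosslessBits(int quality) {
  return 5 - quality / 20;  /* 100 -> 0, 80..99 -> 1, ..., 0..19 -> 5 */
}

int main(void) {
  int q;
  for (q = 0; q <= 100; q += 20) {
    const int bits = NearLosslessBits(q);
    const int max_quantization = 1 << bits;  /* quantization step is 2^bits */
    printf("quality %3d -> %d bit(s) shaved, max_quantization %d\n",
           q, bits, max_quantization);
  }
  return 0;
}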
@@ -551,18 +552,205 @@ static WEBP_INLINE uint32_t Predict(VP8LPredictorFunc pred_func,
   }
 }
 
+static int MaxDiffBetweenPixels(uint32_t p1, uint32_t p2) {
+  const int diff_a = abs((int)(p1 >> 24) - (int)(p2 >> 24));
+  const int diff_r = abs((int)((p1 >> 16) & 0xff) - (int)((p2 >> 16) & 0xff));
+  const int diff_g = abs((int)((p1 >> 8) & 0xff) - (int)((p2 >> 8) & 0xff));
+  const int diff_b = abs((int)(p1 & 0xff) - (int)(p2 & 0xff));
+  return GetMax(GetMax(diff_a, diff_r), GetMax(diff_g, diff_b));
+}
+
+static int MaxDiffAroundPixel(uint32_t current, uint32_t up, uint32_t down,
+                              uint32_t left, uint32_t right) {
+  const int diff_up = MaxDiffBetweenPixels(current, up);
+  const int diff_down = MaxDiffBetweenPixels(current, down);
+  const int diff_left = MaxDiffBetweenPixels(current, left);
+  const int diff_right = MaxDiffBetweenPixels(current, right);
+  return GetMax(GetMax(diff_up, diff_down), GetMax(diff_left, diff_right));
+}
+
+static uint32_t AddGreenToBlueAndRed(uint32_t argb) {
+  const uint32_t green = (argb >> 8) & 0xff;
+  uint32_t red_blue = argb & 0x00ff00ffu;
+  red_blue += (green << 16) | green;
+  red_blue &= 0x00ff00ffu;
+  return (argb & 0xff00ff00u) | red_blue;
+}
+
+static void MaxDiffsForRow(int width, int stride, const uint32_t* const argb,
+                           uint8_t* const max_diffs, int used_subtract_green) {
+  uint32_t current, up, down, left, right;
+  int x;
+  if (width <= 2) return;
+  current = argb[0];
+  right = argb[1];
+  if (used_subtract_green) {
+    current = AddGreenToBlueAndRed(current);
+    right = AddGreenToBlueAndRed(right);
+  }
+  // max_diffs[0] and max_diffs[width - 1] are never used.
+  for (x = 1; x < width - 1; ++x) {
+    up = argb[-stride + x];
+    down = argb[stride + x];
+    left = current;
+    current = right;
+    right = argb[x + 1];
+    if (used_subtract_green) {
+      up = AddGreenToBlueAndRed(up);
+      down = AddGreenToBlueAndRed(down);
+      right = AddGreenToBlueAndRed(right);
+    }
+    max_diffs[x] = MaxDiffAroundPixel(current, up, down, left, right);
+  }
+}
+
+// Quantize the difference between the actual component value and its prediction
+// to a multiple of quantization, working modulo 256, taking care not to cross
+// a boundary (inclusive upper limit).
+static uint8_t NearLosslessComponent(uint8_t value, uint8_t predict,
+                                     uint8_t boundary, int quantization) {
+  const int residual = (value - predict) & 0xff;
+  const int boundary_residual = (boundary - predict) & 0xff;
+  const int lower = residual & ~(quantization - 1);
+  const int upper = lower + quantization;
+  // Resolve ties towards a value closer to the prediction (i.e. towards lower
+  // if value comes after prediction and towards upper otherwise).
+  const int bias = ((boundary - value) & 0xff) < boundary_residual;
+  if (residual - lower < upper - residual + bias) {
+    // lower is closer to residual than upper.
+    if (residual > boundary_residual && lower <= boundary_residual) {
+      // Halve quantization step to avoid crossing boundary. This midpoint is
+      // on the same side of boundary as residual because midpoint >= residual
+      // (since lower is closer than upper) and residual is above the boundary.
+      return lower + (quantization >> 1);
+    }
+    return lower;
+  } else {
+    // upper is closer to residual than lower.
+    if (residual <= boundary_residual && upper > boundary_residual) {
+      // Halve quantization step to avoid crossing boundary. This midpoint is
+      // on the same side of boundary as residual because midpoint <= residual
+      // (since upper is closer than lower) and residual is below the boundary.
+      return lower + (quantization >> 1);
+    }
+    return upper & 0xff;
+  }
+}
+
+// Quantize every component of the difference between the actual pixel value
+// and its prediction to a multiple of a quantization (a power of 2, not larger
+// than max_quantization which is a power of 2, smaller than max_diff). Take
+// care if value and predict have undergone subtract green, which means that
+// red and blue are represented as offsets from green.
+static uint32_t NearLossless(uint32_t value, uint32_t predict,
+                             int max_quantization, int max_diff,
+                             int used_subtract_green) {
+  int quantization;
+  uint8_t new_green = 0;
+  uint8_t green_diff = 0;
+  uint8_t a, r, g, b;
+  if (max_diff <= 2) {
+    return VP8LSubPixels(value, predict);
+  }
+  quantization = max_quantization;
+  while (quantization >= max_diff) {
+    quantization >>= 1;
+  }
+  if ((value >> 24) == 0 || (value >> 24) == 0xff) {
+    // Preserve transparency of fully transparent or fully opaque pixels.
+    a = ((value >> 24) - (predict >> 24)) & 0xff;
+  } else {
+    a = NearLosslessComponent(value >> 24, predict >> 24, 0xff, quantization);
+  }
+  g = NearLosslessComponent((value >> 8) & 0xff, (predict >> 8) & 0xff, 0xff,
+                            quantization);
+  if (used_subtract_green) {
+    // The green offset will be added to red and blue components during decoding
+    // to obtain the actual red and blue values.
+    new_green = ((predict >> 8) + g) & 0xff;
+    // The amount by which green has been adjusted during quantization. It is
+    // subtracted from red and blue for compensation, to avoid accumulating two
+    // quantization errors in them.
+    green_diff = (new_green - (value >> 8)) & 0xff;
+  }
+  r = NearLosslessComponent(((value >> 16) - green_diff) & 0xff,
+                            (predict >> 16) & 0xff, 0xff - new_green,
+                            quantization);
+  b = NearLosslessComponent((value - green_diff) & 0xff, predict & 0xff,
+                            0xff - new_green, quantization);
+  return ((uint32_t)a << 24) | ((uint32_t)r << 16) | ((uint32_t)g << 8) | b;
+}
+
+// Returns the difference between the pixel and its prediction. In case of a
+// lossy encoding, updates the source image to avoid propagating the deviation
+// further to pixels which depend on the current pixel for their predictions.
+static uint32_t GetResidual(int width, int height,
+                            uint32_t* const upper_row,
+                            uint32_t* const current_row,
+                            const uint8_t* const max_diffs,
+                            int mode, VP8LPredictorFunc pred_func,
+                            int x, int y,
+                            int max_quantization,
+                            int exact, int used_subtract_green) {
+  const uint32_t predict = Predict(pred_func, x, y, current_row, upper_row);
+  uint32_t residual;
+  if (max_quantization == 1 || mode == 0 || y == 0 || y == height - 1 ||
+      x == 0 || x == width - 1) {
+    residual = VP8LSubPixels(current_row[x], predict);
+  } else {
+    residual = NearLossless(current_row[x], predict, max_quantization,
+                            max_diffs[x], used_subtract_green);
+    // Update the source image.
+    current_row[x] = VP8LAddPixels(predict, residual);
+    // x is never 0 here so we do not need to update upper_row like below.
+  }
+  if (!exact && (current_row[x] & kMaskAlpha) == 0) {
+    // If alpha is 0, clean up RGB. We can choose the RGB values of the
+    // residual for best compression. The prediction of alpha itself can be
+    // non-zero and must be kept though. We choose RGB of the residual to be 0.
+    residual &= kMaskAlpha;
+    // Update the source image.
+    current_row[x] = predict & ~kMaskAlpha;
+    // The prediction for the rightmost pixel in a row uses the leftmost pixel
+    // in that row as its top-right context pixel. Hence if we change the
+    // leftmost pixel of current_row, the corresponding change must be applied
+    // to upper_row as well where top-right context is being read from.
+    if (x == 0 && y != 0) upper_row[width] = current_row[0];
+  }
+  return residual;
+}
+
 // Returns best predictor and updates the accumulated histogram.
+// If max_quantization > 1, assumes that near lossless processing will be
+// applied, quantizing residuals to multiples of quantization levels up to
+// max_quantization (the actual quantization level depends on smoothness near
+// the given pixel).
 static int GetBestPredictorForTile(int width, int height,
                                    int tile_x, int tile_y, int bits,
                                    int accumulated[4][256],
-                                   const uint32_t* const argb_scratch,
-                                   int exact) {
+                                   uint32_t* const argb_scratch,
+                                   const uint32_t* const argb,
+                                   int max_quantization,
+                                   int exact, int used_subtract_green) {
   const int kNumPredModes = 14;
-  const int col_start = tile_x << bits;
-  const int row_start = tile_y << bits;
+  const int start_x = tile_x << bits;
+  const int start_y = tile_y << bits;
   const int tile_size = 1 << bits;
-  const int max_y = GetMin(tile_size, height - row_start);
-  const int max_x = GetMin(tile_size, width - col_start);
+  const int max_y = GetMin(tile_size, height - start_y);
+  const int max_x = GetMin(tile_size, width - start_x);
+  // Whether there exist columns just outside the tile.
+  const int have_left = (start_x > 0);
+  const int have_right = (max_x < width - start_x);
+  // Position and size of the strip covering the tile and adjacent columns if
+  // they exist.
+  const int context_start_x = start_x - have_left;
+  const int context_width = max_x + have_left + have_right;
+  // The width of upper_row and current_row is one pixel larger than image width
+  // to allow the top right pixel to point to the leftmost pixel of the next row
+  // when at the right edge.
+  uint32_t* upper_row = argb_scratch;
+  uint32_t* current_row = upper_row + width + 1;
+  uint8_t* const max_diffs = (uint8_t*)(current_row + width + 1);
   float best_diff = MAX_DIFF_COST;
   int best_mode = 0;
   int mode;
@@ -571,28 +759,46 @@ static int GetBestPredictorForTile(int width, int height,
   // Need pointers to be able to swap arrays.
   int (*histo_argb)[256] = histo_stack_1;
   int (*best_histo)[256] = histo_stack_2;
-  int i, j;
+
   for (mode = 0; mode < kNumPredModes; ++mode) {
-    const uint32_t* current_row = argb_scratch;
     const VP8LPredictorFunc pred_func = VP8LPredictors[mode];
     float cur_diff;
-    int y;
+    int relative_y;
     memset(histo_argb, 0, sizeof(histo_stack_1));
-    for (y = 0; y < max_y; ++y) {
-      int x;
-      const int row = row_start + y;
-      const uint32_t* const upper_row = current_row;
-      current_row = upper_row + width;
-      for (x = 0; x < max_x; ++x) {
-        const int col = col_start + x;
-        const uint32_t predict =
-            Predict(pred_func, col, row, current_row, upper_row);
-        uint32_t residual = VP8LSubPixels(current_row[col], predict);
-        if (!exact && (current_row[col] & kMaskAlpha) == 0) {
-          residual &= kMaskAlpha;  // See CopyTileWithPrediction.
-        }
-        UpdateHisto(histo_argb, residual);
+    if (start_y > 0) {
+      // Read the row above the tile which will become the first upper_row.
+      // Include a pixel to the left if it exists; include a pixel to the right
+      // in all cases (wrapping to the leftmost pixel of the next row if it does
+      // not exist).
+      memcpy(current_row + context_start_x,
+             argb + (start_y - 1) * width + context_start_x,
+             sizeof(*argb) * (max_x + have_left + 1));
+    }
+    for (relative_y = 0; relative_y < max_y; ++relative_y) {
+      const int y = start_y + relative_y;
+      int relative_x;
+      uint32_t* tmp = upper_row;
+      upper_row = current_row;
+      current_row = tmp;
+      // Read current_row. Include a pixel to the left if it exists; include a
+      // pixel to the right in all cases except at the bottom right corner of
+      // the image (wrapping to the leftmost pixel of the next row if it does
+      // not exist in the current row).
+      memcpy(current_row + context_start_x,
+             argb + y * width + context_start_x,
+             sizeof(*argb) * (max_x + have_left + (y + 1 < height)));
+      if (max_quantization > 1 && y >= 1 && y + 1 < height) {
+        MaxDiffsForRow(context_width, width, argb + y * width + context_start_x,
+                       max_diffs + context_start_x, used_subtract_green);
+      }
+
+      for (relative_x = 0; relative_x < max_x; ++relative_x) {
+        const int x = start_x + relative_x;
+        UpdateHisto(histo_argb,
+                    GetResidual(width, height, upper_row, current_row,
+                                max_diffs, mode, pred_func, x, y,
+                                max_quantization, exact, used_subtract_green));
       }
     }
     cur_diff = PredictionCostSpatialHistogram(
@@ -615,71 +821,82 @@ static int GetBestPredictorForTile(int width, int height,
   return best_mode;
 }
 
+// Converts pixels of the image to residuals with respect to predictions.
+// If max_quantization > 1, applies near lossless processing, quantizing
+// residuals to multiples of quantization levels up to max_quantization
+// (the actual quantization level depends on smoothness near the given pixel).
 static void CopyImageWithPrediction(int width, int height, int bits,
                                     uint32_t* const modes,
                                     uint32_t* const argb_scratch,
                                     uint32_t* const argb,
-                                    int low_effort, int exact) {
+                                    int low_effort, int max_quantization,
+                                    int exact, int used_subtract_green) {
   const int tiles_per_row = VP8LSubSampleSize(width, bits);
   const int mask = (1 << bits) - 1;
-  // The row size is one pixel longer to allow the top right pixel to point to
-  // the leftmost pixel of the next row when at the right edge.
-  uint32_t* current_row = argb_scratch;
-  uint32_t* upper_row = argb_scratch + width + 1;
+  // The width of upper_row and current_row is one pixel larger than image width
+  // to allow the top right pixel to point to the leftmost pixel of the next row
+  // when at the right edge.
+  uint32_t* upper_row = argb_scratch;
+  uint32_t* current_row = upper_row + width + 1;
+  uint8_t* current_max_diffs = (uint8_t*)(current_row + width + 1);
+  uint8_t* lower_max_diffs = current_max_diffs + width;
   int y;
-  VP8LPredictorFunc pred_func =
-      low_effort ? VP8LPredictors[kPredLowEffort] : NULL;
+  int mode = 0;
+  VP8LPredictorFunc pred_func = NULL;
 
   for (y = 0; y < height; ++y) {
     int x;
-    uint32_t* tmp = upper_row;
+    uint32_t* const tmp32 = upper_row;
     upper_row = current_row;
-    current_row = tmp;
-    memcpy(current_row, argb + y * width, sizeof(*current_row) * width);
-    current_row[width] = (y + 1 < height) ? argb[(y + 1) * width] : ARGB_BLACK;
+    current_row = tmp32;
+    memcpy(current_row, argb + y * width,
+           sizeof(*argb) * (width + (y + 1 < height)));
     if (low_effort) {
       for (x = 0; x < width; ++x) {
-        const uint32_t predict =
-            Predict(pred_func, x, y, current_row, upper_row);
+        const uint32_t predict = Predict(VP8LPredictors[kPredLowEffort], x, y,
+                                         current_row, upper_row);
         argb[y * width + x] = VP8LSubPixels(current_row[x], predict);
       }
     } else {
+      if (max_quantization > 1) {
+        // Compute max_diffs for the lower row now, because that needs the
+        // contents of argb for the current row, which we will overwrite with
+        // residuals before proceeding with the next row.
+        uint8_t* const tmp8 = current_max_diffs;
+        current_max_diffs = lower_max_diffs;
+        lower_max_diffs = tmp8;
+        if (y + 2 < height) {
+          MaxDiffsForRow(width, width, argb + (y + 1) * width, lower_max_diffs,
+                         used_subtract_green);
+        }
+      }
      for (x = 0; x < width; ++x) {
-        uint32_t predict, residual;
         if ((x & mask) == 0) {
-          const int mode =
-              (modes[(y >> bits) * tiles_per_row + (x >> bits)] >> 8) & 0xff;
+          mode = (modes[(y >> bits) * tiles_per_row + (x >> bits)] >> 8) & 0xff;
          pred_func = VP8LPredictors[mode];
         }
-        predict = Predict(pred_func, x, y, current_row, upper_row);
-        residual = VP8LSubPixels(current_row[x], predict);
-        if (!exact && (current_row[x] & kMaskAlpha) == 0) {
-          // If alpha is 0, cleanup RGB. We can choose the RGB values of the
-          // residual for best compression. The prediction of alpha itself can
-          // be non-zero and must be kept though. We choose RGB of the residual
-          // to be 0.
-          residual &= kMaskAlpha;
-          // Update input image so that next predictions use correct RGB value.
-          current_row[x] = predict & ~kMaskAlpha;
-          if (x == 0 && y != 0) upper_row[width] = current_row[x];
-        }
-        argb[y * width + x] = residual;
+        argb[y * width + x] = GetResidual(
+            width, height, upper_row, current_row, current_max_diffs, mode,
+            pred_func, x, y, max_quantization, exact, used_subtract_green);
       }
     }
   }
 }
 
+// Finds the best predictor for each tile, and converts the image to residuals
+// with respect to predictions. If near_lossless_quality < 100, applies
+// near lossless processing, shaving off more bits of residuals for lower
+// qualities.
 void VP8LResidualImage(int width, int height, int bits, int low_effort,
                        uint32_t* const argb, uint32_t* const argb_scratch,
-                       uint32_t* const image, int exact) {
-  const int max_tile_size = 1 << bits;
+                       uint32_t* const image, int near_lossless_quality,
+                       int exact, int used_subtract_green) {
   const int tiles_per_row = VP8LSubSampleSize(width, bits);
   const int tiles_per_col = VP8LSubSampleSize(height, bits);
-  uint32_t* const upper_row = argb_scratch;
-  uint32_t* const current_tile_rows = argb_scratch + width;
   int tile_y;
   int histo[4][256];
+  const int max_quantization = 1 << VP8LNearLosslessBits(near_lossless_quality);
   if (low_effort) {
     int i;
     for (i = 0; i < tiles_per_row * tiles_per_col; ++i) {
@@ -688,26 +905,19 @@ void VP8LResidualImage(int width, int height, int bits, int low_effort,
   } else {
     memset(histo, 0, sizeof(histo));
     for (tile_y = 0; tile_y < tiles_per_col; ++tile_y) {
-      const int tile_y_offset = tile_y * max_tile_size;
-      const int this_tile_height =
-          (tile_y < tiles_per_col - 1) ? max_tile_size : height - tile_y_offset;
       int tile_x;
-      if (tile_y > 0) {
-        memcpy(upper_row, current_tile_rows + (max_tile_size - 1) * width,
-               width * sizeof(*upper_row));
-      }
-      memcpy(current_tile_rows, &argb[tile_y_offset * width],
-             this_tile_height * width * sizeof(*current_tile_rows));
       for (tile_x = 0; tile_x < tiles_per_row; ++tile_x) {
         const int pred = GetBestPredictorForTile(width, height, tile_x, tile_y,
-            bits, (int (*)[256])histo, argb_scratch, exact);
+            bits, histo, argb_scratch, argb, max_quantization, exact,
+            used_subtract_green);
         image[tile_y * tiles_per_row + tile_x] = ARGB_BLACK | (pred << 8);
       }
     }
   }
 
-  CopyImageWithPrediction(width, height, bits,
-                          image, argb_scratch, argb, low_effort, exact);
+  CopyImageWithPrediction(width, height, bits, image, argb_scratch, argb,
+                          low_effort, max_quantization, exact,
+                          used_subtract_green);
 }
 
 void VP8LSubtractGreenFromBlueAndRed_C(uint32_t* argb_data, int num_pixels) {
diff --git a/src/enc/near_lossless.c b/src/enc/near_lossless.c
index 0074d4a7..f4ab91f5 100644
--- a/src/enc/near_lossless.c
+++ b/src/enc/near_lossless.c
@@ -97,22 +97,11 @@ static void NearLossless(int xsize, int ysize, uint32_t* argb,
   }
 }
 
-static int QualityToLimitBits(int quality) {
-  // quality mapping:
-  //  0..19 -> 5
-  // 20..39 -> 4
-  // 40..59 -> 3
-  // 60..79 -> 2
-  // 80..99 -> 1
-  //    100 -> 0
-  return MAX_LIMIT_BITS - quality / 20;
-}
-
 int VP8ApplyNearLossless(int xsize, int ysize, uint32_t* argb, int quality) {
   int i;
   uint32_t* const copy_buffer =
       (uint32_t*)WebPSafeMalloc(xsize * 3, sizeof(*copy_buffer));
-  const int limit_bits = QualityToLimitBits(quality);
+  const int limit_bits = VP8LNearLosslessBits(quality);
   assert(argb != NULL);
   assert(limit_bits >= 0);
   assert(limit_bits <= MAX_LIMIT_BITS);
diff --git a/src/enc/vp8l.c b/src/enc/vp8l.c
index 2c308577..14f71d67 100644
--- a/src/enc/vp8l.c
+++ b/src/enc/vp8l.c
@@ -1008,6 +1008,7 @@ static void ApplySubtractGreen(VP8LEncoder* const enc, int width, int height,
 static WebPEncodingError ApplyPredictFilter(const VP8LEncoder* const enc,
                                             int width, int height,
                                             int quality, int low_effort,
+                                            int used_subtract_green,
                                             VP8LBitWriter* const bw) {
   const int pred_bits = enc->transform_bits_;
   const int transform_width = VP8LSubSampleSize(width, pred_bits);
@@ -1015,7 +1016,8 @@ static WebPEncodingError ApplyPredictFilter(const VP8LEncoder* const enc,
 
   VP8LResidualImage(width, height, pred_bits, low_effort, enc->argb_,
                     enc->argb_scratch_, enc->transform_data_,
-                    enc->config_->exact);
+                    enc->config_->near_lossless, enc->config_->exact,
+                    used_subtract_green);
   VP8LPutBits(bw, TRANSFORM_PRESENT, 1);
   VP8LPutBits(bw, PREDICTOR_TRANSFORM, 2);
   assert(pred_bits >= 2);
@@ -1129,21 +1131,26 @@ static void ClearTransformBuffer(VP8LEncoder* const enc) {
 
 static WebPEncodingError AllocateTransformBuffer(VP8LEncoder* const enc,
                                                  int width, int height) {
   WebPEncodingError err = VP8_ENC_OK;
-  const int tile_size = 1 << enc->transform_bits_;
   const uint64_t image_size = width * height;
-  // Ensure enough size for tiles, as well as for two scanlines and two
-  // extra pixels for CopyImageWithPrediction.
+  // VP8LResidualImage needs room for 2 scanlines of uint32 pixels with an extra
+  // pixel in each, plus 2 regular scanlines of bytes.
+  // TODO(skal): Clean up by using arithmetic in bytes instead of words.
   const uint64_t argb_scratch_size =
-      enc->use_predict_ ? tile_size * width + width + 2 : 0;
-  const int transform_data_size =
+      enc->use_predict_
+          ? (width + 1) * 2 +
(width + 1) * 2 + + (width * 2 + sizeof(uint32_t) - 1) / sizeof(uint32_t) + : 0; + const uint64_t transform_data_size = (enc->use_predict_ || enc->use_cross_color_) ? VP8LSubSampleSize(width, enc->transform_bits_) * - VP8LSubSampleSize(height, enc->transform_bits_) + VP8LSubSampleSize(height, enc->transform_bits_) : 0; + const uint64_t max_alignment_in_words = + (WEBP_ALIGN_CST + sizeof(uint32_t) - 1) / sizeof(uint32_t); const uint64_t mem_size = - image_size + WEBP_ALIGN_CST + - argb_scratch_size + WEBP_ALIGN_CST + - (uint64_t)transform_data_size; + image_size + max_alignment_in_words + + argb_scratch_size + max_alignment_in_words + + transform_data_size; uint32_t* mem = enc->transform_mem_; if (mem == NULL || mem_size > enc->transform_mem_size_) { ClearTransformBuffer(enc); @@ -1412,7 +1419,8 @@ WebPEncodingError VP8LEncodeStream(const WebPConfig* const config, } // Apply near-lossless preprocessing. - use_near_lossless = !enc->use_palette_ && (config->near_lossless < 100); + use_near_lossless = + (config->near_lossless < 100) && !enc->use_palette_ && !enc->use_predict_; if (use_near_lossless) { if (!VP8ApplyNearLossless(width, height, picture->argb, config->near_lossless)) { @@ -1464,7 +1472,7 @@ WebPEncodingError VP8LEncodeStream(const WebPConfig* const config, if (enc->use_predict_) { err = ApplyPredictFilter(enc, enc->current_width_, height, quality, - low_effort, bw); + low_effort, enc->use_subtract_green_, bw); if (err != VP8_ENC_OK) goto Error; }