Libav 0.7.1: libavcodec/vp3.c
00001 /* 00002 * Copyright (C) 2003-2004 the ffmpeg project 00003 * 00004 * This file is part of Libav. 00005 * 00006 * Libav is free software; you can redistribute it and/or 00007 * modify it under the terms of the GNU Lesser General Public 00008 * License as published by the Free Software Foundation; either 00009 * version 2.1 of the License, or (at your option) any later version. 00010 * 00011 * Libav is distributed in the hope that it will be useful, 00012 * but WITHOUT ANY WARRANTY; without even the implied warranty of 00013 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 00014 * Lesser General Public License for more details. 00015 * 00016 * You should have received a copy of the GNU Lesser General Public 00017 * License along with Libav; if not, write to the Free Software 00018 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 00019 */ 00020 00032 #include <stdio.h> 00033 #include <stdlib.h> 00034 #include <string.h> 00035 00036 #include "libavutil/imgutils.h" 00037 #include "avcodec.h" 00038 #include "dsputil.h" 00039 #include "get_bits.h" 00040 00041 #include "vp3data.h" 00042 #include "xiph.h" 00043 #include "thread.h" 00044 00045 #define FRAGMENT_PIXELS 8 00046 00047 static av_cold int vp3_decode_end(AVCodecContext *avctx); 00048 static void vp3_decode_flush(AVCodecContext *avctx); 00049 00050 //FIXME split things out into their own arrays 00051 typedef struct Vp3Fragment { 00052 int16_t dc; 00053 uint8_t coding_method; 00054 uint8_t qpi; 00055 } Vp3Fragment; 00056 00057 #define SB_NOT_CODED 0 00058 #define SB_PARTIALLY_CODED 1 00059 #define SB_FULLY_CODED 2 00060 00061 // This is the maximum length of a single long bit run that can be encoded 00062 // for superblock coding or block qps. Theora special-cases this to read a 00063 // bit instead of flipping the current bit to allow for runs longer than 4129. 
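// (A run-length VLC value of 34 is extended by an extra 12-bit field in
// unpack_superblocks() and unpack_block_qpis(), so the longest encodable
// run is 34 + 4095 = 4129.)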
00064 #define MAXIMUM_LONG_BIT_RUN 4129 00065 00066 #define MODE_INTER_NO_MV 0 00067 #define MODE_INTRA 1 00068 #define MODE_INTER_PLUS_MV 2 00069 #define MODE_INTER_LAST_MV 3 00070 #define MODE_INTER_PRIOR_LAST 4 00071 #define MODE_USING_GOLDEN 5 00072 #define MODE_GOLDEN_MV 6 00073 #define MODE_INTER_FOURMV 7 00074 #define CODING_MODE_COUNT 8 00075 00076 /* special internal mode */ 00077 #define MODE_COPY 8 00078 00079 /* There are 6 preset schemes, plus a free-form scheme */ 00080 static const int ModeAlphabet[6][CODING_MODE_COUNT] = 00081 { 00082 /* scheme 1: Last motion vector dominates */ 00083 { MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST, 00084 MODE_INTER_PLUS_MV, MODE_INTER_NO_MV, 00085 MODE_INTRA, MODE_USING_GOLDEN, 00086 MODE_GOLDEN_MV, MODE_INTER_FOURMV }, 00087 00088 /* scheme 2 */ 00089 { MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST, 00090 MODE_INTER_NO_MV, MODE_INTER_PLUS_MV, 00091 MODE_INTRA, MODE_USING_GOLDEN, 00092 MODE_GOLDEN_MV, MODE_INTER_FOURMV }, 00093 00094 /* scheme 3 */ 00095 { MODE_INTER_LAST_MV, MODE_INTER_PLUS_MV, 00096 MODE_INTER_PRIOR_LAST, MODE_INTER_NO_MV, 00097 MODE_INTRA, MODE_USING_GOLDEN, 00098 MODE_GOLDEN_MV, MODE_INTER_FOURMV }, 00099 00100 /* scheme 4 */ 00101 { MODE_INTER_LAST_MV, MODE_INTER_PLUS_MV, 00102 MODE_INTER_NO_MV, MODE_INTER_PRIOR_LAST, 00103 MODE_INTRA, MODE_USING_GOLDEN, 00104 MODE_GOLDEN_MV, MODE_INTER_FOURMV }, 00105 00106 /* scheme 5: No motion vector dominates */ 00107 { MODE_INTER_NO_MV, MODE_INTER_LAST_MV, 00108 MODE_INTER_PRIOR_LAST, MODE_INTER_PLUS_MV, 00109 MODE_INTRA, MODE_USING_GOLDEN, 00110 MODE_GOLDEN_MV, MODE_INTER_FOURMV }, 00111 00112 /* scheme 6 */ 00113 { MODE_INTER_NO_MV, MODE_USING_GOLDEN, 00114 MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST, 00115 MODE_INTER_PLUS_MV, MODE_INTRA, 00116 MODE_GOLDEN_MV, MODE_INTER_FOURMV }, 00117 00118 }; 00119 00120 static const uint8_t hilbert_offset[16][2] = { 00121 {0,0}, {1,0}, {1,1}, {0,1}, 00122 {0,2}, {0,3}, {1,3}, {1,2}, 00123 {2,2}, {2,3}, {3,3}, {3,2}, 00124 {3,1}, {2,1}, {2,0}, {3,0} 00125 }; 00126 00127 #define MIN_DEQUANT_VAL 2 00128 00129 typedef struct Vp3DecodeContext { 00130 AVCodecContext *avctx; 00131 int theora, theora_tables; 00132 int version; 00133 int width, height; 00134 int chroma_x_shift, chroma_y_shift; 00135 AVFrame golden_frame; 00136 AVFrame last_frame; 00137 AVFrame current_frame; 00138 int keyframe; 00139 DSPContext dsp; 00140 int flipped_image; 00141 int last_slice_end; 00142 int skip_loop_filter; 00143 00144 int qps[3]; 00145 int nqps; 00146 int last_qps[3]; 00147 00148 int superblock_count; 00149 int y_superblock_width; 00150 int y_superblock_height; 00151 int y_superblock_count; 00152 int c_superblock_width; 00153 int c_superblock_height; 00154 int c_superblock_count; 00155 int u_superblock_start; 00156 int v_superblock_start; 00157 unsigned char *superblock_coding; 00158 00159 int macroblock_count; 00160 int macroblock_width; 00161 int macroblock_height; 00162 00163 int fragment_count; 00164 int fragment_width[2]; 00165 int fragment_height[2]; 00166 00167 Vp3Fragment *all_fragments; 00168 int fragment_start[3]; 00169 int data_offset[3]; 00170 00171 int8_t (*motion_val[2])[2]; 00172 00173 ScanTable scantable; 00174 00175 /* tables */ 00176 uint16_t coded_dc_scale_factor[64]; 00177 uint32_t coded_ac_scale_factor[64]; 00178 uint8_t base_matrix[384][64]; 00179 uint8_t qr_count[2][3]; 00180 uint8_t qr_size [2][3][64]; 00181 uint16_t qr_base[2][3][64]; 00182 00200 int16_t *dct_tokens[3][64]; 00201 int16_t *dct_tokens_base; 00202 #define TOKEN_EOB(eob_run) 
((eob_run) << 2) 00203 #define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) << 9) + ((zero_run) << 2) + 1) 00204 #define TOKEN_COEFF(coeff) (((coeff) << 2) + 2) 00205 00209 int num_coded_frags[3][64]; 00210 int total_num_coded_frags; 00211 00212 /* this is a list of indexes into the all_fragments array indicating 00213 * which of the fragments are coded */ 00214 int *coded_fragment_list[3]; 00215 00216 VLC dc_vlc[16]; 00217 VLC ac_vlc_1[16]; 00218 VLC ac_vlc_2[16]; 00219 VLC ac_vlc_3[16]; 00220 VLC ac_vlc_4[16]; 00221 00222 VLC superblock_run_length_vlc; 00223 VLC fragment_run_length_vlc; 00224 VLC mode_code_vlc; 00225 VLC motion_vector_vlc; 00226 00227 /* these arrays need to be on 16-byte boundaries since SSE2 operations 00228 * index into them */ 00229 DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64]; //<qmat[qpi][is_inter][plane] 00230 00231 /* This table contains superblock_count * 16 entries. Each set of 16 00232 * numbers corresponds to the fragment indexes 0..15 of the superblock. 00233 * An entry will be -1 to indicate that no entry corresponds to that 00234 * index. */ 00235 int *superblock_fragments; 00236 00237 /* This is an array that indicates how a particular macroblock 00238 * is coded. */ 00239 unsigned char *macroblock_coding; 00240 00241 uint8_t *edge_emu_buffer; 00242 00243 /* Huffman decode */ 00244 int hti; 00245 unsigned int hbits; 00246 int entries; 00247 int huff_code_size; 00248 uint32_t huffman_table[80][32][2]; 00249 00250 uint8_t filter_limit_values[64]; 00251 DECLARE_ALIGNED(8, int, bounding_values_array)[256+2]; 00252 } Vp3DecodeContext; 00253 00254 /************************************************************************ 00255 * VP3 specific functions 00256 ************************************************************************/ 00257 00258 /* 00259 * This function sets up all of the various blocks mappings: 00260 * superblocks <-> fragments, macroblocks <-> fragments, 00261 * superblocks <-> macroblocks 00262 * 00263 * @return 0 is successful; returns 1 if *anything* went wrong. 00264 */ 00265 static int init_block_mapping(Vp3DecodeContext *s) 00266 { 00267 int sb_x, sb_y, plane; 00268 int x, y, i, j = 0; 00269 00270 for (plane = 0; plane < 3; plane++) { 00271 int sb_width = plane ? s->c_superblock_width : s->y_superblock_width; 00272 int sb_height = plane ? s->c_superblock_height : s->y_superblock_height; 00273 int frag_width = s->fragment_width[!!plane]; 00274 int frag_height = s->fragment_height[!!plane]; 00275 00276 for (sb_y = 0; sb_y < sb_height; sb_y++) 00277 for (sb_x = 0; sb_x < sb_width; sb_x++) 00278 for (i = 0; i < 16; i++) { 00279 x = 4*sb_x + hilbert_offset[i][0]; 00280 y = 4*sb_y + hilbert_offset[i][1]; 00281 00282 if (x < frag_width && y < frag_height) 00283 s->superblock_fragments[j++] = s->fragment_start[plane] + y*frag_width + x; 00284 else 00285 s->superblock_fragments[j++] = -1; 00286 } 00287 } 00288 00289 return 0; /* successful path out */ 00290 } 00291 00292 /* 00293 * This function sets up the dequantization tables used for a particular 00294 * frame. 
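 *
 * For each (inter, plane) pair the quant index s->qps[qpi] is located within
 * one of the qr_size[] ranges; every coefficient is then linearly
 * interpolated between the two base matrices bounding that range, scaled by
 * the DC or AC scale factor, and clipped to [qmin, 4096]. The DC entry is
 * finally copied from qmat[0] so that all qpi levels share one DC quantizer.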
00295 */ 00296 static void init_dequantizer(Vp3DecodeContext *s, int qpi) 00297 { 00298 int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]]; 00299 int dc_scale_factor = s->coded_dc_scale_factor[s->qps[qpi]]; 00300 int i, plane, inter, qri, bmi, bmj, qistart; 00301 00302 for(inter=0; inter<2; inter++){ 00303 for(plane=0; plane<3; plane++){ 00304 int sum=0; 00305 for(qri=0; qri<s->qr_count[inter][plane]; qri++){ 00306 sum+= s->qr_size[inter][plane][qri]; 00307 if(s->qps[qpi] <= sum) 00308 break; 00309 } 00310 qistart= sum - s->qr_size[inter][plane][qri]; 00311 bmi= s->qr_base[inter][plane][qri ]; 00312 bmj= s->qr_base[inter][plane][qri+1]; 00313 for(i=0; i<64; i++){ 00314 int coeff= ( 2*(sum -s->qps[qpi])*s->base_matrix[bmi][i] 00315 - 2*(qistart-s->qps[qpi])*s->base_matrix[bmj][i] 00316 + s->qr_size[inter][plane][qri]) 00317 / (2*s->qr_size[inter][plane][qri]); 00318 00319 int qmin= 8<<(inter + !i); 00320 int qscale= i ? ac_scale_factor : dc_scale_factor; 00321 00322 s->qmat[qpi][inter][plane][s->dsp.idct_permutation[i]]= av_clip((qscale * coeff)/100 * 4, qmin, 4096); 00323 } 00324 // all DC coefficients use the same quant so as not to interfere with DC prediction 00325 s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0]; 00326 } 00327 } 00328 } 00329 00330 /* 00331 * This function initializes the loop filter boundary limits if the frame's 00332 * quality index is different from the previous frame's. 00333 * 00334 * The filter_limit_values may not be larger than 127. 00335 */ 00336 static void init_loop_filter(Vp3DecodeContext *s) 00337 { 00338 int *bounding_values= s->bounding_values_array+127; 00339 int filter_limit; 00340 int x; 00341 int value; 00342 00343 filter_limit = s->filter_limit_values[s->qps[0]]; 00344 00345 /* set up the bounding values */ 00346 memset(s->bounding_values_array, 0, 256 * sizeof(int)); 00347 for (x = 0; x < filter_limit; x++) { 00348 bounding_values[-x] = -x; 00349 bounding_values[x] = x; 00350 } 00351 for (x = value = filter_limit; x < 128 && value; x++, value--) { 00352 bounding_values[ x] = value; 00353 bounding_values[-x] = -value; 00354 } 00355 if (value) 00356 bounding_values[128] = value; 00357 bounding_values[129] = bounding_values[130] = filter_limit * 0x02020202; 00358 } 00359 00360 /* 00361 * This function unpacks all of the superblock/macroblock/fragment coding 00362 * information from the bitstream. 
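 *
 * Keyframes simply mark every superblock as fully coded. For inter frames a
 * first run-length pass flags the partially coded superblocks, a second pass
 * flags the fully coded ones, and a final per-fragment bit-run pass decides
 * which fragments inside the partially coded superblocks are actually coded.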
00363 */ 00364 static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb) 00365 { 00366 int superblock_starts[3] = { 0, s->u_superblock_start, s->v_superblock_start }; 00367 int bit = 0; 00368 int current_superblock = 0; 00369 int current_run = 0; 00370 int num_partial_superblocks = 0; 00371 00372 int i, j; 00373 int current_fragment; 00374 int plane; 00375 00376 if (s->keyframe) { 00377 memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count); 00378 00379 } else { 00380 00381 /* unpack the list of partially-coded superblocks */ 00382 bit = get_bits1(gb) ^ 1; 00383 current_run = 0; 00384 00385 while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) { 00386 if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN) 00387 bit = get_bits1(gb); 00388 else 00389 bit ^= 1; 00390 00391 current_run = get_vlc2(gb, 00392 s->superblock_run_length_vlc.table, 6, 2) + 1; 00393 if (current_run == 34) 00394 current_run += get_bits(gb, 12); 00395 00396 if (current_superblock + current_run > s->superblock_count) { 00397 av_log(s->avctx, AV_LOG_ERROR, "Invalid partially coded superblock run length\n"); 00398 return -1; 00399 } 00400 00401 memset(s->superblock_coding + current_superblock, bit, current_run); 00402 00403 current_superblock += current_run; 00404 if (bit) 00405 num_partial_superblocks += current_run; 00406 } 00407 00408 /* unpack the list of fully coded superblocks if any of the blocks were 00409 * not marked as partially coded in the previous step */ 00410 if (num_partial_superblocks < s->superblock_count) { 00411 int superblocks_decoded = 0; 00412 00413 current_superblock = 0; 00414 bit = get_bits1(gb) ^ 1; 00415 current_run = 0; 00416 00417 while (superblocks_decoded < s->superblock_count - num_partial_superblocks 00418 && get_bits_left(gb) > 0) { 00419 00420 if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN) 00421 bit = get_bits1(gb); 00422 else 00423 bit ^= 1; 00424 00425 current_run = get_vlc2(gb, 00426 s->superblock_run_length_vlc.table, 6, 2) + 1; 00427 if (current_run == 34) 00428 current_run += get_bits(gb, 12); 00429 00430 for (j = 0; j < current_run; current_superblock++) { 00431 if (current_superblock >= s->superblock_count) { 00432 av_log(s->avctx, AV_LOG_ERROR, "Invalid fully coded superblock run length\n"); 00433 return -1; 00434 } 00435 00436 /* skip any superblocks already marked as partially coded */ 00437 if (s->superblock_coding[current_superblock] == SB_NOT_CODED) { 00438 s->superblock_coding[current_superblock] = 2*bit; 00439 j++; 00440 } 00441 } 00442 superblocks_decoded += current_run; 00443 } 00444 } 00445 00446 /* if there were partial blocks, initialize bitstream for 00447 * unpacking fragment codings */ 00448 if (num_partial_superblocks) { 00449 00450 current_run = 0; 00451 bit = get_bits1(gb); 00452 /* toggle the bit because as soon as the first run length is 00453 * fetched the bit will be toggled again */ 00454 bit ^= 1; 00455 } 00456 } 00457 00458 /* figure out which fragments are coded; iterate through each 00459 * superblock (all planes) */ 00460 s->total_num_coded_frags = 0; 00461 memset(s->macroblock_coding, MODE_COPY, s->macroblock_count); 00462 00463 for (plane = 0; plane < 3; plane++) { 00464 int sb_start = superblock_starts[plane]; 00465 int sb_end = sb_start + (plane ? 
s->c_superblock_count : s->y_superblock_count); 00466 int num_coded_frags = 0; 00467 00468 for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) { 00469 00470 /* iterate through all 16 fragments in a superblock */ 00471 for (j = 0; j < 16; j++) { 00472 00473 /* if the fragment is in bounds, check its coding status */ 00474 current_fragment = s->superblock_fragments[i * 16 + j]; 00475 if (current_fragment != -1) { 00476 int coded = s->superblock_coding[i]; 00477 00478 if (s->superblock_coding[i] == SB_PARTIALLY_CODED) { 00479 00480 /* fragment may or may not be coded; this is the case 00481 * that cares about the fragment coding runs */ 00482 if (current_run-- == 0) { 00483 bit ^= 1; 00484 current_run = get_vlc2(gb, 00485 s->fragment_run_length_vlc.table, 5, 2); 00486 } 00487 coded = bit; 00488 } 00489 00490 if (coded) { 00491 /* default mode; actual mode will be decoded in 00492 * the next phase */ 00493 s->all_fragments[current_fragment].coding_method = 00494 MODE_INTER_NO_MV; 00495 s->coded_fragment_list[plane][num_coded_frags++] = 00496 current_fragment; 00497 } else { 00498 /* not coded; copy this fragment from the prior frame */ 00499 s->all_fragments[current_fragment].coding_method = 00500 MODE_COPY; 00501 } 00502 } 00503 } 00504 } 00505 s->total_num_coded_frags += num_coded_frags; 00506 for (i = 0; i < 64; i++) 00507 s->num_coded_frags[plane][i] = num_coded_frags; 00508 if (plane < 2) 00509 s->coded_fragment_list[plane+1] = s->coded_fragment_list[plane] + num_coded_frags; 00510 } 00511 return 0; 00512 } 00513 00514 /* 00515 * This function unpacks all the coding mode data for individual macroblocks 00516 * from the bitstream. 00517 */ 00518 static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb) 00519 { 00520 int i, j, k, sb_x, sb_y; 00521 int scheme; 00522 int current_macroblock; 00523 int current_fragment; 00524 int coding_mode; 00525 int custom_mode_alphabet[CODING_MODE_COUNT]; 00526 const int *alphabet; 00527 Vp3Fragment *frag; 00528 00529 if (s->keyframe) { 00530 for (i = 0; i < s->fragment_count; i++) 00531 s->all_fragments[i].coding_method = MODE_INTRA; 00532 00533 } else { 00534 00535 /* fetch the mode coding scheme for this frame */ 00536 scheme = get_bits(gb, 3); 00537 00538 /* is it a custom coding scheme? 
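         * Scheme 0 reads a custom mode alphabet from the bitstream,
         * schemes 1-6 use the preset ModeAlphabet[] tables, and scheme 7
         * later codes each macroblock mode directly with 3 raw bits.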
*/ 00539 if (scheme == 0) { 00540 for (i = 0; i < 8; i++) 00541 custom_mode_alphabet[i] = MODE_INTER_NO_MV; 00542 for (i = 0; i < 8; i++) 00543 custom_mode_alphabet[get_bits(gb, 3)] = i; 00544 alphabet = custom_mode_alphabet; 00545 } else 00546 alphabet = ModeAlphabet[scheme-1]; 00547 00548 /* iterate through all of the macroblocks that contain 1 or more 00549 * coded fragments */ 00550 for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) { 00551 for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) { 00552 if (get_bits_left(gb) <= 0) 00553 return -1; 00554 00555 for (j = 0; j < 4; j++) { 00556 int mb_x = 2*sb_x + (j>>1); 00557 int mb_y = 2*sb_y + (((j>>1)+j)&1); 00558 current_macroblock = mb_y * s->macroblock_width + mb_x; 00559 00560 if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height) 00561 continue; 00562 00563 #define BLOCK_X (2*mb_x + (k&1)) 00564 #define BLOCK_Y (2*mb_y + (k>>1)) 00565 /* coding modes are only stored if the macroblock has at least one 00566 * luma block coded, otherwise it must be INTER_NO_MV */ 00567 for (k = 0; k < 4; k++) { 00568 current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X; 00569 if (s->all_fragments[current_fragment].coding_method != MODE_COPY) 00570 break; 00571 } 00572 if (k == 4) { 00573 s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV; 00574 continue; 00575 } 00576 00577 /* mode 7 means get 3 bits for each coding mode */ 00578 if (scheme == 7) 00579 coding_mode = get_bits(gb, 3); 00580 else 00581 coding_mode = alphabet 00582 [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)]; 00583 00584 s->macroblock_coding[current_macroblock] = coding_mode; 00585 for (k = 0; k < 4; k++) { 00586 frag = s->all_fragments + BLOCK_Y*s->fragment_width[0] + BLOCK_X; 00587 if (frag->coding_method != MODE_COPY) 00588 frag->coding_method = coding_mode; 00589 } 00590 00591 #define SET_CHROMA_MODES \ 00592 if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \ 00593 frag[s->fragment_start[1]].coding_method = coding_mode;\ 00594 if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \ 00595 frag[s->fragment_start[2]].coding_method = coding_mode; 00596 00597 if (s->chroma_y_shift) { 00598 frag = s->all_fragments + mb_y*s->fragment_width[1] + mb_x; 00599 SET_CHROMA_MODES 00600 } else if (s->chroma_x_shift) { 00601 frag = s->all_fragments + 2*mb_y*s->fragment_width[1] + mb_x; 00602 for (k = 0; k < 2; k++) { 00603 SET_CHROMA_MODES 00604 frag += s->fragment_width[1]; 00605 } 00606 } else { 00607 for (k = 0; k < 4; k++) { 00608 frag = s->all_fragments + BLOCK_Y*s->fragment_width[1] + BLOCK_X; 00609 SET_CHROMA_MODES 00610 } 00611 } 00612 } 00613 } 00614 } 00615 } 00616 00617 return 0; 00618 } 00619 00620 /* 00621 * This function unpacks all the motion vectors for the individual 00622 * macroblocks from the bitstream. 
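 *
 * A single bit selects between the motion-vector VLC and the fixed 6-bit
 * code. MODE_INTER_PLUS_MV and MODE_GOLDEN_MV read a new vector, FOURMV
 * reads one vector per coded luma block, LAST_MV and PRIOR_LAST reuse
 * previously decoded vectors, and the remaining modes imply a zero vector.
 * Chroma vectors are then derived from the luma vectors according to the
 * chroma subsampling of the picture.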
00623 */ 00624 static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb) 00625 { 00626 int j, k, sb_x, sb_y; 00627 int coding_mode; 00628 int motion_x[4]; 00629 int motion_y[4]; 00630 int last_motion_x = 0; 00631 int last_motion_y = 0; 00632 int prior_last_motion_x = 0; 00633 int prior_last_motion_y = 0; 00634 int current_macroblock; 00635 int current_fragment; 00636 int frag; 00637 00638 if (s->keyframe) 00639 return 0; 00640 00641 /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */ 00642 coding_mode = get_bits1(gb); 00643 00644 /* iterate through all of the macroblocks that contain 1 or more 00645 * coded fragments */ 00646 for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) { 00647 for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) { 00648 if (get_bits_left(gb) <= 0) 00649 return -1; 00650 00651 for (j = 0; j < 4; j++) { 00652 int mb_x = 2*sb_x + (j>>1); 00653 int mb_y = 2*sb_y + (((j>>1)+j)&1); 00654 current_macroblock = mb_y * s->macroblock_width + mb_x; 00655 00656 if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height || 00657 (s->macroblock_coding[current_macroblock] == MODE_COPY)) 00658 continue; 00659 00660 switch (s->macroblock_coding[current_macroblock]) { 00661 00662 case MODE_INTER_PLUS_MV: 00663 case MODE_GOLDEN_MV: 00664 /* all 6 fragments use the same motion vector */ 00665 if (coding_mode == 0) { 00666 motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)]; 00667 motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)]; 00668 } else { 00669 motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)]; 00670 motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)]; 00671 } 00672 00673 /* vector maintenance, only on MODE_INTER_PLUS_MV */ 00674 if (s->macroblock_coding[current_macroblock] == 00675 MODE_INTER_PLUS_MV) { 00676 prior_last_motion_x = last_motion_x; 00677 prior_last_motion_y = last_motion_y; 00678 last_motion_x = motion_x[0]; 00679 last_motion_y = motion_y[0]; 00680 } 00681 break; 00682 00683 case MODE_INTER_FOURMV: 00684 /* vector maintenance */ 00685 prior_last_motion_x = last_motion_x; 00686 prior_last_motion_y = last_motion_y; 00687 00688 /* fetch 4 vectors from the bitstream, one for each 00689 * Y fragment, then average for the C fragment vectors */ 00690 for (k = 0; k < 4; k++) { 00691 current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X; 00692 if (s->all_fragments[current_fragment].coding_method != MODE_COPY) { 00693 if (coding_mode == 0) { 00694 motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)]; 00695 motion_y[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)]; 00696 } else { 00697 motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)]; 00698 motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)]; 00699 } 00700 last_motion_x = motion_x[k]; 00701 last_motion_y = motion_y[k]; 00702 } else { 00703 motion_x[k] = 0; 00704 motion_y[k] = 0; 00705 } 00706 } 00707 break; 00708 00709 case MODE_INTER_LAST_MV: 00710 /* all 6 fragments use the last motion vector */ 00711 motion_x[0] = last_motion_x; 00712 motion_y[0] = last_motion_y; 00713 00714 /* no vector maintenance (last vector remains the 00715 * last vector) */ 00716 break; 00717 00718 case MODE_INTER_PRIOR_LAST: 00719 /* all 6 fragments use the motion vector prior to the 00720 * last motion vector */ 00721 motion_x[0] = prior_last_motion_x; 00722 motion_y[0] = prior_last_motion_y; 00723 00724 /* vector maintenance */ 00725 prior_last_motion_x = last_motion_x; 00726 
prior_last_motion_y = last_motion_y; 00727 last_motion_x = motion_x[0]; 00728 last_motion_y = motion_y[0]; 00729 break; 00730 00731 default: 00732 /* covers intra, inter without MV, golden without MV */ 00733 motion_x[0] = 0; 00734 motion_y[0] = 0; 00735 00736 /* no vector maintenance */ 00737 break; 00738 } 00739 00740 /* assign the motion vectors to the correct fragments */ 00741 for (k = 0; k < 4; k++) { 00742 current_fragment = 00743 BLOCK_Y*s->fragment_width[0] + BLOCK_X; 00744 if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) { 00745 s->motion_val[0][current_fragment][0] = motion_x[k]; 00746 s->motion_val[0][current_fragment][1] = motion_y[k]; 00747 } else { 00748 s->motion_val[0][current_fragment][0] = motion_x[0]; 00749 s->motion_val[0][current_fragment][1] = motion_y[0]; 00750 } 00751 } 00752 00753 if (s->chroma_y_shift) { 00754 if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) { 00755 motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] + motion_x[2] + motion_x[3], 2); 00756 motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] + motion_y[2] + motion_y[3], 2); 00757 } 00758 motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1); 00759 motion_y[0] = (motion_y[0]>>1) | (motion_y[0]&1); 00760 frag = mb_y*s->fragment_width[1] + mb_x; 00761 s->motion_val[1][frag][0] = motion_x[0]; 00762 s->motion_val[1][frag][1] = motion_y[0]; 00763 } else if (s->chroma_x_shift) { 00764 if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) { 00765 motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1); 00766 motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1); 00767 motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1); 00768 motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1); 00769 } else { 00770 motion_x[1] = motion_x[0]; 00771 motion_y[1] = motion_y[0]; 00772 } 00773 motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1); 00774 motion_x[1] = (motion_x[1]>>1) | (motion_x[1]&1); 00775 00776 frag = 2*mb_y*s->fragment_width[1] + mb_x; 00777 for (k = 0; k < 2; k++) { 00778 s->motion_val[1][frag][0] = motion_x[k]; 00779 s->motion_val[1][frag][1] = motion_y[k]; 00780 frag += s->fragment_width[1]; 00781 } 00782 } else { 00783 for (k = 0; k < 4; k++) { 00784 frag = BLOCK_Y*s->fragment_width[1] + BLOCK_X; 00785 if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) { 00786 s->motion_val[1][frag][0] = motion_x[k]; 00787 s->motion_val[1][frag][1] = motion_y[k]; 00788 } else { 00789 s->motion_val[1][frag][0] = motion_x[0]; 00790 s->motion_val[1][frag][1] = motion_y[0]; 00791 } 00792 } 00793 } 00794 } 00795 } 00796 } 00797 00798 return 0; 00799 } 00800 00801 static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb) 00802 { 00803 int qpi, i, j, bit, run_length, blocks_decoded, num_blocks_at_qpi; 00804 int num_blocks = s->total_num_coded_frags; 00805 00806 for (qpi = 0; qpi < s->nqps-1 && num_blocks > 0; qpi++) { 00807 i = blocks_decoded = num_blocks_at_qpi = 0; 00808 00809 bit = get_bits1(gb) ^ 1; 00810 run_length = 0; 00811 00812 do { 00813 if (run_length == MAXIMUM_LONG_BIT_RUN) 00814 bit = get_bits1(gb); 00815 else 00816 bit ^= 1; 00817 00818 run_length = get_vlc2(gb, s->superblock_run_length_vlc.table, 6, 2) + 1; 00819 if (run_length == 34) 00820 run_length += get_bits(gb, 12); 00821 blocks_decoded += run_length; 00822 00823 if (!bit) 00824 num_blocks_at_qpi += run_length; 00825 00826 for (j = 0; j < run_length; i++) { 00827 if (i >= s->total_num_coded_frags) 00828 return -1; 00829 00830 if (s->all_fragments[s->coded_fragment_list[0][i]].qpi == qpi) { 00831 
s->all_fragments[s->coded_fragment_list[0][i]].qpi += bit; 00832 j++; 00833 } 00834 } 00835 } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0); 00836 00837 num_blocks -= num_blocks_at_qpi; 00838 } 00839 00840 return 0; 00841 } 00842 00843 /* 00844 * This function is called by unpack_dct_coeffs() to extract the VLCs from 00845 * the bitstream. The VLCs encode tokens which are used to unpack DCT 00846 * data. This function unpacks all the VLCs for either the Y plane or both 00847 * C planes, and is called for DC coefficients or different AC coefficient 00848 * levels (since different coefficient types require different VLC tables. 00849 * 00850 * This function returns a residual eob run. E.g, if a particular token gave 00851 * instructions to EOB the next 5 fragments and there were only 2 fragments 00852 * left in the current fragment range, 3 would be returned so that it could 00853 * be passed into the next call to this same function. 00854 */ 00855 static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, 00856 VLC *table, int coeff_index, 00857 int plane, 00858 int eob_run) 00859 { 00860 int i, j = 0; 00861 int token; 00862 int zero_run = 0; 00863 DCTELEM coeff = 0; 00864 int bits_to_get; 00865 int blocks_ended; 00866 int coeff_i = 0; 00867 int num_coeffs = s->num_coded_frags[plane][coeff_index]; 00868 int16_t *dct_tokens = s->dct_tokens[plane][coeff_index]; 00869 00870 /* local references to structure members to avoid repeated deferences */ 00871 int *coded_fragment_list = s->coded_fragment_list[plane]; 00872 Vp3Fragment *all_fragments = s->all_fragments; 00873 VLC_TYPE (*vlc_table)[2] = table->table; 00874 00875 if (num_coeffs < 0) 00876 av_log(s->avctx, AV_LOG_ERROR, "Invalid number of coefficents at level %d\n", coeff_index); 00877 00878 if (eob_run > num_coeffs) { 00879 coeff_i = blocks_ended = num_coeffs; 00880 eob_run -= num_coeffs; 00881 } else { 00882 coeff_i = blocks_ended = eob_run; 00883 eob_run = 0; 00884 } 00885 00886 // insert fake EOB token to cover the split between planes or zzi 00887 if (blocks_ended) 00888 dct_tokens[j++] = blocks_ended << 2; 00889 00890 while (coeff_i < num_coeffs && get_bits_left(gb) > 0) { 00891 /* decode a VLC into a token */ 00892 token = get_vlc2(gb, vlc_table, 11, 3); 00893 /* use the token to get a zero run, a coefficient, and an eob run */ 00894 if ((unsigned) token <= 6U) { 00895 eob_run = eob_run_base[token]; 00896 if (eob_run_get_bits[token]) 00897 eob_run += get_bits(gb, eob_run_get_bits[token]); 00898 00899 // record only the number of blocks ended in this plane, 00900 // any spill will be recorded in the next plane. 00901 if (eob_run > num_coeffs - coeff_i) { 00902 dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i); 00903 blocks_ended += num_coeffs - coeff_i; 00904 eob_run -= num_coeffs - coeff_i; 00905 coeff_i = num_coeffs; 00906 } else { 00907 dct_tokens[j++] = TOKEN_EOB(eob_run); 00908 blocks_ended += eob_run; 00909 coeff_i += eob_run; 00910 eob_run = 0; 00911 } 00912 } else if (token >= 0) { 00913 bits_to_get = coeff_get_bits[token]; 00914 if (bits_to_get) 00915 bits_to_get = get_bits(gb, bits_to_get); 00916 coeff = coeff_tables[token][bits_to_get]; 00917 00918 zero_run = zero_run_base[token]; 00919 if (zero_run_get_bits[token]) 00920 zero_run += get_bits(gb, zero_run_get_bits[token]); 00921 00922 if (zero_run) { 00923 dct_tokens[j++] = TOKEN_ZERO_RUN(coeff, zero_run); 00924 } else { 00925 // Save DC into the fragment structure. 
DC prediction is 00926 // done in raster order, so the actual DC can't be in with 00927 // other tokens. We still need the token in dct_tokens[] 00928 // however, or else the structure collapses on itself. 00929 if (!coeff_index) 00930 all_fragments[coded_fragment_list[coeff_i]].dc = coeff; 00931 00932 dct_tokens[j++] = TOKEN_COEFF(coeff); 00933 } 00934 00935 if (coeff_index + zero_run > 64) { 00936 av_log(s->avctx, AV_LOG_DEBUG, "Invalid zero run of %d with" 00937 " %d coeffs left\n", zero_run, 64-coeff_index); 00938 zero_run = 64 - coeff_index; 00939 } 00940 00941 // zero runs code multiple coefficients, 00942 // so don't try to decode coeffs for those higher levels 00943 for (i = coeff_index+1; i <= coeff_index+zero_run; i++) 00944 s->num_coded_frags[plane][i]--; 00945 coeff_i++; 00946 } else { 00947 av_log(s->avctx, AV_LOG_ERROR, 00948 "Invalid token %d\n", token); 00949 return -1; 00950 } 00951 } 00952 00953 if (blocks_ended > s->num_coded_frags[plane][coeff_index]) 00954 av_log(s->avctx, AV_LOG_ERROR, "More blocks ended than coded!\n"); 00955 00956 // decrement the number of blocks that have higher coeffecients for each 00957 // EOB run at this level 00958 if (blocks_ended) 00959 for (i = coeff_index+1; i < 64; i++) 00960 s->num_coded_frags[plane][i] -= blocks_ended; 00961 00962 // setup the next buffer 00963 if (plane < 2) 00964 s->dct_tokens[plane+1][coeff_index] = dct_tokens + j; 00965 else if (coeff_index < 63) 00966 s->dct_tokens[0][coeff_index+1] = dct_tokens + j; 00967 00968 return eob_run; 00969 } 00970 00971 static void reverse_dc_prediction(Vp3DecodeContext *s, 00972 int first_fragment, 00973 int fragment_width, 00974 int fragment_height); 00975 /* 00976 * This function unpacks all of the DCT coefficient data from the 00977 * bitstream. 
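 *
 * DC coefficients are decoded first (Y plane, then both C planes) and their
 * prediction is reversed per plane; AC coefficients follow for zig-zag
 * indices 1-63, where the index groups 1-5, 6-14, 15-27 and 28-63 each use
 * a different set of AC VLC tables.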
00978 */ 00979 static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb) 00980 { 00981 int i; 00982 int dc_y_table; 00983 int dc_c_table; 00984 int ac_y_table; 00985 int ac_c_table; 00986 int residual_eob_run = 0; 00987 VLC *y_tables[64]; 00988 VLC *c_tables[64]; 00989 00990 s->dct_tokens[0][0] = s->dct_tokens_base; 00991 00992 /* fetch the DC table indexes */ 00993 dc_y_table = get_bits(gb, 4); 00994 dc_c_table = get_bits(gb, 4); 00995 00996 /* unpack the Y plane DC coefficients */ 00997 residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0, 00998 0, residual_eob_run); 00999 if (residual_eob_run < 0) 01000 return residual_eob_run; 01001 01002 /* reverse prediction of the Y-plane DC coefficients */ 01003 reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]); 01004 01005 /* unpack the C plane DC coefficients */ 01006 residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0, 01007 1, residual_eob_run); 01008 if (residual_eob_run < 0) 01009 return residual_eob_run; 01010 residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0, 01011 2, residual_eob_run); 01012 if (residual_eob_run < 0) 01013 return residual_eob_run; 01014 01015 /* reverse prediction of the C-plane DC coefficients */ 01016 if (!(s->avctx->flags & CODEC_FLAG_GRAY)) 01017 { 01018 reverse_dc_prediction(s, s->fragment_start[1], 01019 s->fragment_width[1], s->fragment_height[1]); 01020 reverse_dc_prediction(s, s->fragment_start[2], 01021 s->fragment_width[1], s->fragment_height[1]); 01022 } 01023 01024 /* fetch the AC table indexes */ 01025 ac_y_table = get_bits(gb, 4); 01026 ac_c_table = get_bits(gb, 4); 01027 01028 /* build tables of AC VLC tables */ 01029 for (i = 1; i <= 5; i++) { 01030 y_tables[i] = &s->ac_vlc_1[ac_y_table]; 01031 c_tables[i] = &s->ac_vlc_1[ac_c_table]; 01032 } 01033 for (i = 6; i <= 14; i++) { 01034 y_tables[i] = &s->ac_vlc_2[ac_y_table]; 01035 c_tables[i] = &s->ac_vlc_2[ac_c_table]; 01036 } 01037 for (i = 15; i <= 27; i++) { 01038 y_tables[i] = &s->ac_vlc_3[ac_y_table]; 01039 c_tables[i] = &s->ac_vlc_3[ac_c_table]; 01040 } 01041 for (i = 28; i <= 63; i++) { 01042 y_tables[i] = &s->ac_vlc_4[ac_y_table]; 01043 c_tables[i] = &s->ac_vlc_4[ac_c_table]; 01044 } 01045 01046 /* decode all AC coefficents */ 01047 for (i = 1; i <= 63; i++) { 01048 residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i, 01049 0, residual_eob_run); 01050 if (residual_eob_run < 0) 01051 return residual_eob_run; 01052 01053 residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i, 01054 1, residual_eob_run); 01055 if (residual_eob_run < 0) 01056 return residual_eob_run; 01057 residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i, 01058 2, residual_eob_run); 01059 if (residual_eob_run < 0) 01060 return residual_eob_run; 01061 } 01062 01063 return 0; 01064 } 01065 01066 /* 01067 * This function reverses the DC prediction for each coded fragment in 01068 * the frame. Much of this function is adapted directly from the original 01069 * VP3 source code. 
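 *
 * Each fragment's DC is predicted from its left, up-left, up and up-right
 * neighbours, but only from neighbours coded against the same reference
 * frame (intra, inter or golden). The weights come from
 * predictor_transform[] and the sum is divided by 128; if no usable
 * neighbour exists, the last DC seen for that frame type is used instead.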
01070 */ 01071 #define COMPATIBLE_FRAME(x) \ 01072 (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type) 01073 #define DC_COEFF(u) s->all_fragments[u].dc 01074 01075 static void reverse_dc_prediction(Vp3DecodeContext *s, 01076 int first_fragment, 01077 int fragment_width, 01078 int fragment_height) 01079 { 01080 01081 #define PUL 8 01082 #define PU 4 01083 #define PUR 2 01084 #define PL 1 01085 01086 int x, y; 01087 int i = first_fragment; 01088 01089 int predicted_dc; 01090 01091 /* DC values for the left, up-left, up, and up-right fragments */ 01092 int vl, vul, vu, vur; 01093 01094 /* indexes for the left, up-left, up, and up-right fragments */ 01095 int l, ul, u, ur; 01096 01097 /* 01098 * The 6 fields mean: 01099 * 0: up-left multiplier 01100 * 1: up multiplier 01101 * 2: up-right multiplier 01102 * 3: left multiplier 01103 */ 01104 static const int predictor_transform[16][4] = { 01105 { 0, 0, 0, 0}, 01106 { 0, 0, 0,128}, // PL 01107 { 0, 0,128, 0}, // PUR 01108 { 0, 0, 53, 75}, // PUR|PL 01109 { 0,128, 0, 0}, // PU 01110 { 0, 64, 0, 64}, // PU|PL 01111 { 0,128, 0, 0}, // PU|PUR 01112 { 0, 0, 53, 75}, // PU|PUR|PL 01113 {128, 0, 0, 0}, // PUL 01114 { 0, 0, 0,128}, // PUL|PL 01115 { 64, 0, 64, 0}, // PUL|PUR 01116 { 0, 0, 53, 75}, // PUL|PUR|PL 01117 { 0,128, 0, 0}, // PUL|PU 01118 {-104,116, 0,116}, // PUL|PU|PL 01119 { 24, 80, 24, 0}, // PUL|PU|PUR 01120 {-104,116, 0,116} // PUL|PU|PUR|PL 01121 }; 01122 01123 /* This table shows which types of blocks can use other blocks for 01124 * prediction. For example, INTRA is the only mode in this table to 01125 * have a frame number of 0. That means INTRA blocks can only predict 01126 * from other INTRA blocks. There are 2 golden frame coding types; 01127 * blocks encoding in these modes can only predict from other blocks 01128 * that were encoded with these 1 of these 2 modes. */ 01129 static const unsigned char compatible_frame[9] = { 01130 1, /* MODE_INTER_NO_MV */ 01131 0, /* MODE_INTRA */ 01132 1, /* MODE_INTER_PLUS_MV */ 01133 1, /* MODE_INTER_LAST_MV */ 01134 1, /* MODE_INTER_PRIOR_MV */ 01135 2, /* MODE_USING_GOLDEN */ 01136 2, /* MODE_GOLDEN_MV */ 01137 1, /* MODE_INTER_FOUR_MV */ 01138 3 /* MODE_COPY */ 01139 }; 01140 int current_frame_type; 01141 01142 /* there is a last DC predictor for each of the 3 frame types */ 01143 short last_dc[3]; 01144 01145 int transform = 0; 01146 01147 vul = vu = vur = vl = 0; 01148 last_dc[0] = last_dc[1] = last_dc[2] = 0; 01149 01150 /* for each fragment row... */ 01151 for (y = 0; y < fragment_height; y++) { 01152 01153 /* for each fragment in a row... 
*/ 01154 for (x = 0; x < fragment_width; x++, i++) { 01155 01156 /* reverse prediction if this block was coded */ 01157 if (s->all_fragments[i].coding_method != MODE_COPY) { 01158 01159 current_frame_type = 01160 compatible_frame[s->all_fragments[i].coding_method]; 01161 01162 transform= 0; 01163 if(x){ 01164 l= i-1; 01165 vl = DC_COEFF(l); 01166 if(COMPATIBLE_FRAME(l)) 01167 transform |= PL; 01168 } 01169 if(y){ 01170 u= i-fragment_width; 01171 vu = DC_COEFF(u); 01172 if(COMPATIBLE_FRAME(u)) 01173 transform |= PU; 01174 if(x){ 01175 ul= i-fragment_width-1; 01176 vul = DC_COEFF(ul); 01177 if(COMPATIBLE_FRAME(ul)) 01178 transform |= PUL; 01179 } 01180 if(x + 1 < fragment_width){ 01181 ur= i-fragment_width+1; 01182 vur = DC_COEFF(ur); 01183 if(COMPATIBLE_FRAME(ur)) 01184 transform |= PUR; 01185 } 01186 } 01187 01188 if (transform == 0) { 01189 01190 /* if there were no fragments to predict from, use last 01191 * DC saved */ 01192 predicted_dc = last_dc[current_frame_type]; 01193 } else { 01194 01195 /* apply the appropriate predictor transform */ 01196 predicted_dc = 01197 (predictor_transform[transform][0] * vul) + 01198 (predictor_transform[transform][1] * vu) + 01199 (predictor_transform[transform][2] * vur) + 01200 (predictor_transform[transform][3] * vl); 01201 01202 predicted_dc /= 128; 01203 01204 /* check for outranging on the [ul u l] and 01205 * [ul u ur l] predictors */ 01206 if ((transform == 15) || (transform == 13)) { 01207 if (FFABS(predicted_dc - vu) > 128) 01208 predicted_dc = vu; 01209 else if (FFABS(predicted_dc - vl) > 128) 01210 predicted_dc = vl; 01211 else if (FFABS(predicted_dc - vul) > 128) 01212 predicted_dc = vul; 01213 } 01214 } 01215 01216 /* at long last, apply the predictor */ 01217 DC_COEFF(i) += predicted_dc; 01218 /* save the DC */ 01219 last_dc[current_frame_type] = DC_COEFF(i); 01220 } 01221 } 01222 } 01223 } 01224 01225 static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend) 01226 { 01227 int x, y; 01228 int *bounding_values= s->bounding_values_array+127; 01229 01230 int width = s->fragment_width[!!plane]; 01231 int height = s->fragment_height[!!plane]; 01232 int fragment = s->fragment_start [plane] + ystart * width; 01233 int stride = s->current_frame.linesize[plane]; 01234 uint8_t *plane_data = s->current_frame.data [plane]; 01235 if (!s->flipped_image) stride = -stride; 01236 plane_data += s->data_offset[plane] + 8*ystart*stride; 01237 01238 for (y = ystart; y < yend; y++) { 01239 01240 for (x = 0; x < width; x++) { 01241 /* This code basically just deblocks on the edges of coded blocks. 01242 * However, it has to be much more complicated because of the 01243 * braindamaged deblock ordering used in VP3/Theora. Order matters 01244 * because some pixels get filtered twice. 
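             * For each coded fragment the left and top edges are always
             * filtered (except on the frame border); the right and bottom
             * edges are filtered here only when the neighbouring fragment
             * is not coded, since a coded neighbour will filter that edge
             * itself on a later iteration.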
*/ 01245 if( s->all_fragments[fragment].coding_method != MODE_COPY ) 01246 { 01247 /* do not perform left edge filter for left columns frags */ 01248 if (x > 0) { 01249 s->dsp.vp3_h_loop_filter( 01250 plane_data + 8*x, 01251 stride, bounding_values); 01252 } 01253 01254 /* do not perform top edge filter for top row fragments */ 01255 if (y > 0) { 01256 s->dsp.vp3_v_loop_filter( 01257 plane_data + 8*x, 01258 stride, bounding_values); 01259 } 01260 01261 /* do not perform right edge filter for right column 01262 * fragments or if right fragment neighbor is also coded 01263 * in this frame (it will be filtered in next iteration) */ 01264 if ((x < width - 1) && 01265 (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) { 01266 s->dsp.vp3_h_loop_filter( 01267 plane_data + 8*x + 8, 01268 stride, bounding_values); 01269 } 01270 01271 /* do not perform bottom edge filter for bottom row 01272 * fragments or if bottom fragment neighbor is also coded 01273 * in this frame (it will be filtered in the next row) */ 01274 if ((y < height - 1) && 01275 (s->all_fragments[fragment + width].coding_method == MODE_COPY)) { 01276 s->dsp.vp3_v_loop_filter( 01277 plane_data + 8*x + 8*stride, 01278 stride, bounding_values); 01279 } 01280 } 01281 01282 fragment++; 01283 } 01284 plane_data += 8*stride; 01285 } 01286 } 01287 01292 static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag, 01293 int plane, int inter, DCTELEM block[64]) 01294 { 01295 int16_t *dequantizer = s->qmat[frag->qpi][inter][plane]; 01296 uint8_t *perm = s->scantable.permutated; 01297 int i = 0; 01298 01299 do { 01300 int token = *s->dct_tokens[plane][i]; 01301 switch (token & 3) { 01302 case 0: // EOB 01303 if (--token < 4) // 0-3 are token types, so the EOB run must now be 0 01304 s->dct_tokens[plane][i]++; 01305 else 01306 *s->dct_tokens[plane][i] = token & ~3; 01307 goto end; 01308 case 1: // zero run 01309 s->dct_tokens[plane][i]++; 01310 i += (token >> 2) & 0x7f; 01311 if (i > 63) { 01312 av_log(s->avctx, AV_LOG_ERROR, "Coefficient index overflow\n"); 01313 return i; 01314 } 01315 block[perm[i]] = (token >> 9) * dequantizer[perm[i]]; 01316 i++; 01317 break; 01318 case 2: // coeff 01319 block[perm[i]] = (token >> 2) * dequantizer[perm[i]]; 01320 s->dct_tokens[plane][i++]++; 01321 break; 01322 default: // shouldn't happen 01323 return i; 01324 } 01325 } while (i < 64); 01326 // return value is expected to be a valid level 01327 i--; 01328 end: 01329 // the actual DC+prediction is in the fragment structure 01330 block[0] = frag->dc * s->qmat[0][inter][plane][0]; 01331 return i; 01332 } 01333 01337 static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y) 01338 { 01339 int h, cy; 01340 int offset[4]; 01341 01342 if (HAVE_PTHREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) { 01343 int y_flipped = s->flipped_image ? s->avctx->height-y : y; 01344 01345 // At the end of the frame, report INT_MAX instead of the height of the frame. 01346 // This makes the other threads' ff_thread_await_progress() calls cheaper, because 01347 // they don't have to clip their values. 01348 ff_thread_report_progress(&s->current_frame, y_flipped==s->avctx->height ? 
INT_MAX : y_flipped-1, 0); 01349 } 01350 01351 if(s->avctx->draw_horiz_band==NULL) 01352 return; 01353 01354 h= y - s->last_slice_end; 01355 s->last_slice_end= y; 01356 y -= h; 01357 01358 if (!s->flipped_image) { 01359 y = s->avctx->height - y - h; 01360 } 01361 01362 cy = y >> s->chroma_y_shift; 01363 offset[0] = s->current_frame.linesize[0]*y; 01364 offset[1] = s->current_frame.linesize[1]*cy; 01365 offset[2] = s->current_frame.linesize[2]*cy; 01366 offset[3] = 0; 01367 01368 emms_c(); 01369 s->avctx->draw_horiz_band(s->avctx, &s->current_frame, offset, y, 3, h); 01370 } 01371 01376 static void await_reference_row(Vp3DecodeContext *s, Vp3Fragment *fragment, int motion_y, int y) 01377 { 01378 AVFrame *ref_frame; 01379 int ref_row; 01380 int border = motion_y&1; 01381 01382 if (fragment->coding_method == MODE_USING_GOLDEN || 01383 fragment->coding_method == MODE_GOLDEN_MV) 01384 ref_frame = &s->golden_frame; 01385 else 01386 ref_frame = &s->last_frame; 01387 01388 ref_row = y + (motion_y>>1); 01389 ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border); 01390 01391 ff_thread_await_progress(ref_frame, ref_row, 0); 01392 } 01393 01394 /* 01395 * Perform the final rendering for a particular slice of data. 01396 * The slice number ranges from 0..(c_superblock_height - 1). 01397 */ 01398 static void render_slice(Vp3DecodeContext *s, int slice) 01399 { 01400 int x, y, i, j, fragment; 01401 LOCAL_ALIGNED_16(DCTELEM, block, [64]); 01402 int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef; 01403 int motion_halfpel_index; 01404 uint8_t *motion_source; 01405 int plane, first_pixel; 01406 01407 if (slice >= s->c_superblock_height) 01408 return; 01409 01410 for (plane = 0; plane < 3; plane++) { 01411 uint8_t *output_plane = s->current_frame.data [plane] + s->data_offset[plane]; 01412 uint8_t * last_plane = s-> last_frame.data [plane] + s->data_offset[plane]; 01413 uint8_t *golden_plane = s-> golden_frame.data [plane] + s->data_offset[plane]; 01414 int stride = s->current_frame.linesize[plane]; 01415 int plane_width = s->width >> (plane && s->chroma_x_shift); 01416 int plane_height = s->height >> (plane && s->chroma_y_shift); 01417 int8_t (*motion_val)[2] = s->motion_val[!!plane]; 01418 01419 int sb_x, sb_y = slice << (!plane && s->chroma_y_shift); 01420 int slice_height = sb_y + 1 + (!plane && s->chroma_y_shift); 01421 int slice_width = plane ? s->c_superblock_width : s->y_superblock_width; 01422 01423 int fragment_width = s->fragment_width[!!plane]; 01424 int fragment_height = s->fragment_height[!!plane]; 01425 int fragment_start = s->fragment_start[plane]; 01426 int do_await = !plane && HAVE_PTHREADS && (s->avctx->active_thread_type&FF_THREAD_FRAME); 01427 01428 if (!s->flipped_image) stride = -stride; 01429 if (CONFIG_GRAY && plane && (s->avctx->flags & CODEC_FLAG_GRAY)) 01430 continue; 01431 01432 /* for each superblock row in the slice (both of them)... */ 01433 for (; sb_y < slice_height; sb_y++) { 01434 01435 /* for each superblock in a row... */ 01436 for (sb_x = 0; sb_x < slice_width; sb_x++) { 01437 01438 /* for each block in a superblock... 
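                 * (the 16 blocks are visited in Hilbert-curve order via
                 *  hilbert_offset[])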
*/ 01439 for (j = 0; j < 16; j++) { 01440 x = 4*sb_x + hilbert_offset[j][0]; 01441 y = 4*sb_y + hilbert_offset[j][1]; 01442 fragment = y*fragment_width + x; 01443 01444 i = fragment_start + fragment; 01445 01446 // bounds check 01447 if (x >= fragment_width || y >= fragment_height) 01448 continue; 01449 01450 first_pixel = 8*y*stride + 8*x; 01451 01452 if (do_await && s->all_fragments[i].coding_method != MODE_INTRA) 01453 await_reference_row(s, &s->all_fragments[i], motion_val[fragment][1], (16*y) >> s->chroma_y_shift); 01454 01455 /* transform if this block was coded */ 01456 if (s->all_fragments[i].coding_method != MODE_COPY) { 01457 if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) || 01458 (s->all_fragments[i].coding_method == MODE_GOLDEN_MV)) 01459 motion_source= golden_plane; 01460 else 01461 motion_source= last_plane; 01462 01463 motion_source += first_pixel; 01464 motion_halfpel_index = 0; 01465 01466 /* sort out the motion vector if this fragment is coded 01467 * using a motion vector method */ 01468 if ((s->all_fragments[i].coding_method > MODE_INTRA) && 01469 (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) { 01470 int src_x, src_y; 01471 motion_x = motion_val[fragment][0]; 01472 motion_y = motion_val[fragment][1]; 01473 01474 src_x= (motion_x>>1) + 8*x; 01475 src_y= (motion_y>>1) + 8*y; 01476 01477 motion_halfpel_index = motion_x & 0x01; 01478 motion_source += (motion_x >> 1); 01479 01480 motion_halfpel_index |= (motion_y & 0x01) << 1; 01481 motion_source += ((motion_y >> 1) * stride); 01482 01483 if(src_x<0 || src_y<0 || src_x + 9 >= plane_width || src_y + 9 >= plane_height){ 01484 uint8_t *temp= s->edge_emu_buffer; 01485 if(stride<0) temp -= 8*stride; 01486 01487 s->dsp.emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, plane_width, plane_height); 01488 motion_source= temp; 01489 } 01490 } 01491 01492 01493 /* first, take care of copying a block from either the 01494 * previous or the golden frame */ 01495 if (s->all_fragments[i].coding_method != MODE_INTRA) { 01496 /* Note, it is possible to implement all MC cases with 01497 put_no_rnd_pixels_l2 which would look more like the 01498 VP3 source but this would be slower as 01499 put_no_rnd_pixels_tab is better optimzed */ 01500 if(motion_halfpel_index != 3){ 01501 s->dsp.put_no_rnd_pixels_tab[1][motion_halfpel_index]( 01502 output_plane + first_pixel, 01503 motion_source, stride, 8); 01504 }else{ 01505 int d= (motion_x ^ motion_y)>>31; // d is 0 if motion_x and _y have the same sign, else -1 01506 s->dsp.put_no_rnd_pixels_l2[1]( 01507 output_plane + first_pixel, 01508 motion_source - d, 01509 motion_source + stride + 1 + d, 01510 stride, 8); 01511 } 01512 } 01513 01514 s->dsp.clear_block(block); 01515 01516 /* invert DCT and place (or add) in final output */ 01517 01518 if (s->all_fragments[i].coding_method == MODE_INTRA) { 01519 int index; 01520 index = vp3_dequant(s, s->all_fragments + i, plane, 0, block); 01521 if (index > 63) 01522 continue; 01523 if(s->avctx->idct_algo!=FF_IDCT_VP3) 01524 block[0] += 128<<3; 01525 s->dsp.idct_put( 01526 output_plane + first_pixel, 01527 stride, 01528 block); 01529 } else { 01530 int index = vp3_dequant(s, s->all_fragments + i, plane, 1, block); 01531 if (index > 63) 01532 continue; 01533 if (index > 0) { 01534 s->dsp.idct_add( 01535 output_plane + first_pixel, 01536 stride, 01537 block); 01538 } else { 01539 s->dsp.vp3_idct_dc_add(output_plane + first_pixel, stride, block); 01540 } 01541 } 01542 } else { 01543 01544 /* copy directly from the previous 
frame */ 01545 s->dsp.put_pixels_tab[1][0]( 01546 output_plane + first_pixel, 01547 last_plane + first_pixel, 01548 stride, 8); 01549 01550 } 01551 } 01552 } 01553 01554 // Filter up to the last row in the superblock row 01555 if (!s->skip_loop_filter) 01556 apply_loop_filter(s, plane, 4*sb_y - !!sb_y, FFMIN(4*sb_y+3, fragment_height-1)); 01557 } 01558 } 01559 01560 /* this looks like a good place for slice dispatch... */ 01561 /* algorithm: 01562 * if (slice == s->macroblock_height - 1) 01563 * dispatch (both last slice & 2nd-to-last slice); 01564 * else if (slice > 0) 01565 * dispatch (slice - 1); 01566 */ 01567 01568 vp3_draw_horiz_band(s, FFMIN((32 << s->chroma_y_shift) * (slice + 1) -16, s->height-16)); 01569 } 01570 01572 static av_cold int allocate_tables(AVCodecContext *avctx) 01573 { 01574 Vp3DecodeContext *s = avctx->priv_data; 01575 int y_fragment_count, c_fragment_count; 01576 01577 y_fragment_count = s->fragment_width[0] * s->fragment_height[0]; 01578 c_fragment_count = s->fragment_width[1] * s->fragment_height[1]; 01579 01580 s->superblock_coding = av_malloc(s->superblock_count); 01581 s->all_fragments = av_malloc(s->fragment_count * sizeof(Vp3Fragment)); 01582 s->coded_fragment_list[0] = av_malloc(s->fragment_count * sizeof(int)); 01583 s->dct_tokens_base = av_malloc(64*s->fragment_count * sizeof(*s->dct_tokens_base)); 01584 s->motion_val[0] = av_malloc(y_fragment_count * sizeof(*s->motion_val[0])); 01585 s->motion_val[1] = av_malloc(c_fragment_count * sizeof(*s->motion_val[1])); 01586 01587 /* work out the block mapping tables */ 01588 s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int)); 01589 s->macroblock_coding = av_malloc(s->macroblock_count + 1); 01590 01591 if (!s->superblock_coding || !s->all_fragments || !s->dct_tokens_base || 01592 !s->coded_fragment_list[0] || !s->superblock_fragments || !s->macroblock_coding || 01593 !s->motion_val[0] || !s->motion_val[1]) { 01594 vp3_decode_end(avctx); 01595 return -1; 01596 } 01597 01598 init_block_mapping(s); 01599 01600 return 0; 01601 } 01602 01603 /* 01604 * This is the ffmpeg/libavcodec API init function. 
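 *
 * Superblock, macroblock and fragment geometry is derived from the frame
 * size aligned up to 16. When no Theora-supplied tables are present
 * (s->theora_tables is 0) the built-in VP3.1 quantizer and filter-limit
 * constants are used; otherwise the DC/AC VLCs are built from s->huffman_table
 * as filled in by the Theora header parser.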
01605 */ 01606 static av_cold int vp3_decode_init(AVCodecContext *avctx) 01607 { 01608 Vp3DecodeContext *s = avctx->priv_data; 01609 int i, inter, plane; 01610 int c_width; 01611 int c_height; 01612 int y_fragment_count, c_fragment_count; 01613 01614 if (avctx->codec_tag == MKTAG('V','P','3','0')) 01615 s->version = 0; 01616 else 01617 s->version = 1; 01618 01619 s->avctx = avctx; 01620 s->width = FFALIGN(avctx->width, 16); 01621 s->height = FFALIGN(avctx->height, 16); 01622 if (avctx->pix_fmt == PIX_FMT_NONE) 01623 avctx->pix_fmt = PIX_FMT_YUV420P; 01624 avctx->chroma_sample_location = AVCHROMA_LOC_CENTER; 01625 if(avctx->idct_algo==FF_IDCT_AUTO) 01626 avctx->idct_algo=FF_IDCT_VP3; 01627 dsputil_init(&s->dsp, avctx); 01628 01629 ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct); 01630 01631 /* initialize to an impossible value which will force a recalculation 01632 * in the first frame decode */ 01633 for (i = 0; i < 3; i++) 01634 s->qps[i] = -1; 01635 01636 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift); 01637 01638 s->y_superblock_width = (s->width + 31) / 32; 01639 s->y_superblock_height = (s->height + 31) / 32; 01640 s->y_superblock_count = s->y_superblock_width * s->y_superblock_height; 01641 01642 /* work out the dimensions for the C planes */ 01643 c_width = s->width >> s->chroma_x_shift; 01644 c_height = s->height >> s->chroma_y_shift; 01645 s->c_superblock_width = (c_width + 31) / 32; 01646 s->c_superblock_height = (c_height + 31) / 32; 01647 s->c_superblock_count = s->c_superblock_width * s->c_superblock_height; 01648 01649 s->superblock_count = s->y_superblock_count + (s->c_superblock_count * 2); 01650 s->u_superblock_start = s->y_superblock_count; 01651 s->v_superblock_start = s->u_superblock_start + s->c_superblock_count; 01652 01653 s->macroblock_width = (s->width + 15) / 16; 01654 s->macroblock_height = (s->height + 15) / 16; 01655 s->macroblock_count = s->macroblock_width * s->macroblock_height; 01656 01657 s->fragment_width[0] = s->width / FRAGMENT_PIXELS; 01658 s->fragment_height[0] = s->height / FRAGMENT_PIXELS; 01659 s->fragment_width[1] = s->fragment_width[0] >> s->chroma_x_shift; 01660 s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift; 01661 01662 /* fragment count covers all 8x8 blocks for all 3 planes */ 01663 y_fragment_count = s->fragment_width[0] * s->fragment_height[0]; 01664 c_fragment_count = s->fragment_width[1] * s->fragment_height[1]; 01665 s->fragment_count = y_fragment_count + 2*c_fragment_count; 01666 s->fragment_start[1] = y_fragment_count; 01667 s->fragment_start[2] = y_fragment_count + c_fragment_count; 01668 01669 if (!s->theora_tables) 01670 { 01671 for (i = 0; i < 64; i++) { 01672 s->coded_dc_scale_factor[i] = vp31_dc_scale_factor[i]; 01673 s->coded_ac_scale_factor[i] = vp31_ac_scale_factor[i]; 01674 s->base_matrix[0][i] = vp31_intra_y_dequant[i]; 01675 s->base_matrix[1][i] = vp31_intra_c_dequant[i]; 01676 s->base_matrix[2][i] = vp31_inter_dequant[i]; 01677 s->filter_limit_values[i] = vp31_filter_limit_values[i]; 01678 } 01679 01680 for(inter=0; inter<2; inter++){ 01681 for(plane=0; plane<3; plane++){ 01682 s->qr_count[inter][plane]= 1; 01683 s->qr_size [inter][plane][0]= 63; 01684 s->qr_base [inter][plane][0]= 01685 s->qr_base [inter][plane][1]= 2*inter + (!!plane)*!inter; 01686 } 01687 } 01688 01689 /* init VLC tables */ 01690 for (i = 0; i < 16; i++) { 01691 01692 /* DC histograms */ 01693 init_vlc(&s->dc_vlc[i], 11, 32, 01694 &dc_bias[i][0][1], 4, 2, 01695 
&dc_bias[i][0][0], 4, 2, 0); 01696 01697 /* group 1 AC histograms */ 01698 init_vlc(&s->ac_vlc_1[i], 11, 32, 01699 &ac_bias_0[i][0][1], 4, 2, 01700 &ac_bias_0[i][0][0], 4, 2, 0); 01701 01702 /* group 2 AC histograms */ 01703 init_vlc(&s->ac_vlc_2[i], 11, 32, 01704 &ac_bias_1[i][0][1], 4, 2, 01705 &ac_bias_1[i][0][0], 4, 2, 0); 01706 01707 /* group 3 AC histograms */ 01708 init_vlc(&s->ac_vlc_3[i], 11, 32, 01709 &ac_bias_2[i][0][1], 4, 2, 01710 &ac_bias_2[i][0][0], 4, 2, 0); 01711 01712 /* group 4 AC histograms */ 01713 init_vlc(&s->ac_vlc_4[i], 11, 32, 01714 &ac_bias_3[i][0][1], 4, 2, 01715 &ac_bias_3[i][0][0], 4, 2, 0); 01716 } 01717 } else { 01718 01719 for (i = 0; i < 16; i++) { 01720 /* DC histograms */ 01721 if (init_vlc(&s->dc_vlc[i], 11, 32, 01722 &s->huffman_table[i][0][1], 8, 4, 01723 &s->huffman_table[i][0][0], 8, 4, 0) < 0) 01724 goto vlc_fail; 01725 01726 /* group 1 AC histograms */ 01727 if (init_vlc(&s->ac_vlc_1[i], 11, 32, 01728 &s->huffman_table[i+16][0][1], 8, 4, 01729 &s->huffman_table[i+16][0][0], 8, 4, 0) < 0) 01730 goto vlc_fail; 01731 01732 /* group 2 AC histograms */ 01733 if (init_vlc(&s->ac_vlc_2[i], 11, 32, 01734 &s->huffman_table[i+16*2][0][1], 8, 4, 01735 &s->huffman_table[i+16*2][0][0], 8, 4, 0) < 0) 01736 goto vlc_fail; 01737 01738 /* group 3 AC histograms */ 01739 if (init_vlc(&s->ac_vlc_3[i], 11, 32, 01740 &s->huffman_table[i+16*3][0][1], 8, 4, 01741 &s->huffman_table[i+16*3][0][0], 8, 4, 0) < 0) 01742 goto vlc_fail; 01743 01744 /* group 4 AC histograms */ 01745 if (init_vlc(&s->ac_vlc_4[i], 11, 32, 01746 &s->huffman_table[i+16*4][0][1], 8, 4, 01747 &s->huffman_table[i+16*4][0][0], 8, 4, 0) < 0) 01748 goto vlc_fail; 01749 } 01750 } 01751 01752 init_vlc(&s->superblock_run_length_vlc, 6, 34, 01753 &superblock_run_length_vlc_table[0][1], 4, 2, 01754 &superblock_run_length_vlc_table[0][0], 4, 2, 0); 01755 01756 init_vlc(&s->fragment_run_length_vlc, 5, 30, 01757 &fragment_run_length_vlc_table[0][1], 4, 2, 01758 &fragment_run_length_vlc_table[0][0], 4, 2, 0); 01759 01760 init_vlc(&s->mode_code_vlc, 3, 8, 01761 &mode_code_vlc_table[0][1], 2, 1, 01762 &mode_code_vlc_table[0][0], 2, 1, 0); 01763 01764 init_vlc(&s->motion_vector_vlc, 6, 63, 01765 &motion_vector_vlc_table[0][1], 2, 1, 01766 &motion_vector_vlc_table[0][0], 2, 1, 0); 01767 01768 for (i = 0; i < 3; i++) { 01769 s->current_frame.data[i] = NULL; 01770 s->last_frame.data[i] = NULL; 01771 s->golden_frame.data[i] = NULL; 01772 } 01773 01774 return allocate_tables(avctx); 01775 01776 vlc_fail: 01777 av_log(avctx, AV_LOG_FATAL, "Invalid huffman table\n"); 01778 return -1; 01779 } 01780 01782 static void update_frames(AVCodecContext *avctx) 01783 { 01784 Vp3DecodeContext *s = avctx->priv_data; 01785 01786 /* release the last frame, if it is allocated and if it is not the 01787 * golden frame */ 01788 if (s->last_frame.data[0] && s->last_frame.type != FF_BUFFER_TYPE_COPY) 01789 ff_thread_release_buffer(avctx, &s->last_frame); 01790 01791 /* shuffle frames (last = current) */ 01792 s->last_frame= s->current_frame; 01793 01794 if (s->keyframe) { 01795 if (s->golden_frame.data[0]) 01796 ff_thread_release_buffer(avctx, &s->golden_frame); 01797 s->golden_frame = s->current_frame; 01798 s->last_frame.type = FF_BUFFER_TYPE_COPY; 01799 } 01800 01801 s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */ 01802 } 01803 01804 static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src) 01805 { 01806 Vp3DecodeContext *s = dst->priv_data, *s1 = src->priv_data; 01807 
static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    Vp3DecodeContext *s = dst->priv_data, *s1 = src->priv_data;
    int qps_changed = 0, i, err;

#define copy_fields(to, from, start_field, end_field) memcpy(&to->start_field, &from->start_field, (char*)&to->end_field - (char*)&to->start_field)

    if (!s1->current_frame.data[0]
        || s->width  != s1->width
        || s->height != s1->height) {
        if (s != s1)
            copy_fields(s, s1, golden_frame, current_frame);
        return -1;
    }

    if (s != s1) {
        // init tables if the first frame hasn't been decoded
        if (!s->current_frame.data[0]) {
            int y_fragment_count, c_fragment_count;
            s->avctx = dst;
            err = allocate_tables(dst);
            if (err)
                return err;
            y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
            c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
            memcpy(s->motion_val[0], s1->motion_val[0], y_fragment_count * sizeof(*s->motion_val[0]));
            memcpy(s->motion_val[1], s1->motion_val[1], c_fragment_count * sizeof(*s->motion_val[1]));
        }

        // copy previous frame data
        copy_fields(s, s1, golden_frame, dsp);

        // copy qscale data if necessary
        for (i = 0; i < 3; i++) {
            if (s->qps[i] != s1->qps[i]) {
                qps_changed = 1;
                memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i]));
            }
        }

        if (s->qps[0] != s1->qps[0])
            memcpy(&s->bounding_values_array, &s1->bounding_values_array, sizeof(s->bounding_values_array));

        if (qps_changed)
            copy_fields(s, s1, qps, superblock_count);
#undef copy_fields
    }

    update_frames(dst);

    return 0;
}

/*
 * This is the ffmpeg/libavcodec API frame decode function.
 */
static int vp3_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size,
                            AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    Vp3DecodeContext *s = avctx->priv_data;
    GetBitContext gb;
    int i;

    init_get_bits(&gb, buf, buf_size * 8);

    if (s->theora && get_bits1(&gb)) {
        av_log(avctx, AV_LOG_ERROR, "Header packet passed to frame decoder, skipping\n");
        return -1;
    }

    s->keyframe = !get_bits1(&gb);
    if (!s->theora)
        skip_bits(&gb, 1);
    for (i = 0; i < 3; i++)
        s->last_qps[i] = s->qps[i];

    s->nqps = 0;
    do {
        s->qps[s->nqps++] = get_bits(&gb, 6);
    } while (s->theora >= 0x030200 && s->nqps < 3 && get_bits1(&gb));
    for (i = s->nqps; i < 3; i++)
        s->qps[i] = -1;
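
    /*
     * Worked example (illustrative bit values): a Theora >= 3.2.0 frame codes
     * its quantiser indices as a 6-bit index followed by a continuation bit,
     * up to three times:
     *   011010 0              -> nqps = 1, qps = {26, -1, -1}
     *   011010 1 000011 0     -> nqps = 2, qps = {26,  3, -1}
     * Plain VP3 and pre-3.2.0 Theora streams carry only the first index.
     */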

    if (s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
               s->keyframe ? "key" : "", avctx->frame_number+1, s->qps[0]);

    s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] ||
        avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL : AVDISCARD_NONKEY);

    if (s->qps[0] != s->last_qps[0])
        init_loop_filter(s);

    for (i = 0; i < s->nqps; i++)
        // reinit all dequantizers if the first one changed, because
        // the DC of the first quantizer must be used for all matrices
        if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0])
            init_dequantizer(s, i);

    if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
        return buf_size;

    s->current_frame.reference = 3;
    s->current_frame.pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
    if (ff_thread_get_buffer(avctx, &s->current_frame) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        goto error;
    }

    if (!s->edge_emu_buffer)
        s->edge_emu_buffer = av_malloc(9*FFABS(s->current_frame.linesize[0]));

    if (s->keyframe) {
        if (!s->theora) {
            skip_bits(&gb, 4); /* width code */
            skip_bits(&gb, 4); /* height code */
            if (s->version) {
                s->version = get_bits(&gb, 5);
                if (avctx->frame_number == 0)
                    av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version);
            }
        }
        if (s->version || s->theora) {
            if (get_bits1(&gb))
                av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
            skip_bits(&gb, 2); /* reserved? */
        }
    } else {
        if (!s->golden_frame.data[0]) {
            av_log(s->avctx, AV_LOG_WARNING, "vp3: first frame not a keyframe\n");

            s->golden_frame.reference = 3;
            s->golden_frame.pict_type = AV_PICTURE_TYPE_I;
            if (ff_thread_get_buffer(avctx, &s->golden_frame) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                goto error;
            }
            s->last_frame = s->golden_frame;
            s->last_frame.type = FF_BUFFER_TYPE_COPY;
            ff_thread_report_progress(&s->last_frame, INT_MAX, 0);
        }
    }

    memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));
    ff_thread_finish_setup(avctx);
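
    /*
     * A frame's payload is unpacked in five passes before any pixels are
     * reconstructed: superblock coding status, macroblock coding modes,
     * motion vectors, per-block qp indices and finally the DCT coefficients.
     * Rendering then proceeds one superblock row at a time in render_slice().
     */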

    if (unpack_superblocks(s, &gb)) {
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
        goto error;
    }
    if (unpack_modes(s, &gb)) {
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
        goto error;
    }
    if (unpack_vectors(s, &gb)) {
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
        goto error;
    }
    if (unpack_block_qpis(s, &gb)) {
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
        goto error;
    }
    if (unpack_dct_coeffs(s, &gb)) {
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
        goto error;
    }

    for (i = 0; i < 3; i++) {
        int height = s->height >> (i && s->chroma_y_shift);
        if (s->flipped_image)
            s->data_offset[i] = 0;
        else
            s->data_offset[i] = (height-1) * s->current_frame.linesize[i];
    }

    s->last_slice_end = 0;
    for (i = 0; i < s->c_superblock_height; i++)
        render_slice(s, i);

    // filter the last row
    for (i = 0; i < 3; i++) {
        int row = (s->height >> (3+(i && s->chroma_y_shift))) - 1;
        apply_loop_filter(s, i, row, row+1);
    }
    vp3_draw_horiz_band(s, s->avctx->height);

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->current_frame;

    if (!HAVE_PTHREADS || !(s->avctx->active_thread_type & FF_THREAD_FRAME))
        update_frames(avctx);

    return buf_size;

error:
    ff_thread_report_progress(&s->current_frame, INT_MAX, 0);

    if (!HAVE_PTHREADS || !(s->avctx->active_thread_type & FF_THREAD_FRAME))
        avctx->release_buffer(avctx, &s->current_frame);

    return -1;
}

/*
 * This is the ffmpeg/libavcodec API module cleanup function.
 */
static av_cold int vp3_decode_end(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int i;

    av_free(s->superblock_coding);
    av_free(s->all_fragments);
    av_free(s->coded_fragment_list[0]);
    av_free(s->dct_tokens_base);
    av_free(s->superblock_fragments);
    av_free(s->macroblock_coding);
    av_free(s->motion_val[0]);
    av_free(s->motion_val[1]);
    av_free(s->edge_emu_buffer);

    if (avctx->is_copy)
        return 0;

    for (i = 0; i < 16; i++) {
        free_vlc(&s->dc_vlc[i]);
        free_vlc(&s->ac_vlc_1[i]);
        free_vlc(&s->ac_vlc_2[i]);
        free_vlc(&s->ac_vlc_3[i]);
        free_vlc(&s->ac_vlc_4[i]);
    }

    free_vlc(&s->superblock_run_length_vlc);
    free_vlc(&s->fragment_run_length_vlc);
    free_vlc(&s->mode_code_vlc);
    free_vlc(&s->motion_vector_vlc);

    /* release all frames */
    vp3_decode_flush(avctx);

    return 0;
}

static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;

    if (get_bits1(gb)) {
        int token;
        if (s->entries >= 32) { /* overflow */
            av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
            return -1;
        }
        token = get_bits(gb, 5);
        //av_log(avctx, AV_LOG_DEBUG, "hti %d hbits %x token %d entry : %d size %d\n", s->hti, s->hbits, token, s->entries, s->huff_code_size);
        s->huffman_table[s->hti][token][0] = s->hbits;
        s->huffman_table[s->hti][token][1] = s->huff_code_size;
        s->entries++;
    } else {
        if (s->huff_code_size >= 32) { /* overflow */
            av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
            return -1;
        }
        s->huff_code_size++;
        s->hbits <<= 1;
        if (read_huffman_tree(avctx, gb))
            return -1;
        s->hbits |= 1;
        if (read_huffman_tree(avctx, gb))
            return -1;
        s->hbits >>= 1;
        s->huff_code_size--;
    }
    return 0;
}
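
/*
 * read_huffman_tree() above is a depth-first parse of one Huffman code tree
 * from the Theora setup header: a 1 bit introduces a leaf carrying a 5-bit
 * token index, a 0 bit introduces an internal node whose two children are
 * read recursively (appending 0, then 1, to the accumulated code).  Starting
 * from the per-table driver loop in theora_decode_tables() below, the
 * illustrative bit sequence
 *
 *   0   0 1 <tokA> 1 <tokB>   1 <tokC>
 *
 * assigns code 00 to tokA, 01 to tokB and 1 to tokC.
 */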

#if CONFIG_THEORA_DECODER
static const enum PixelFormat theora_pix_fmts[4] = {
    PIX_FMT_YUV420P, PIX_FMT_NONE, PIX_FMT_YUV422P, PIX_FMT_YUV444P
};

static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int visible_width, visible_height, colorspace;
    int offset_x = 0, offset_y = 0;
    AVRational fps, aspect;

    s->theora = get_bits_long(gb, 24);
    av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);

    /* 3.2.0 aka alpha3 has the same frame orientation as original vp3 */
    /* but previous versions have the image flipped relative to vp3 */
    if (s->theora < 0x030200) {
        s->flipped_image = 1;
        av_log(avctx, AV_LOG_DEBUG, "Old (<alpha3) Theora bitstream, flipped image\n");
    }

    visible_width  = s->width  = get_bits(gb, 16) << 4;
    visible_height = s->height = get_bits(gb, 16) << 4;

    if (av_image_check_size(s->width, s->height, 0, avctx)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n", s->width, s->height);
        s->width = s->height = 0;
        return -1;
    }

    if (s->theora >= 0x030200) {
        visible_width  = get_bits_long(gb, 24);
        visible_height = get_bits_long(gb, 24);

        offset_x = get_bits(gb, 8); /* offset x */
        offset_y = get_bits(gb, 8); /* offset y, from bottom */
    }

    fps.num = get_bits_long(gb, 32);
    fps.den = get_bits_long(gb, 32);
    if (fps.num && fps.den) {
        av_reduce(&avctx->time_base.num, &avctx->time_base.den,
                  fps.den, fps.num, 1<<30);
    }

    aspect.num = get_bits_long(gb, 24);
    aspect.den = get_bits_long(gb, 24);
    if (aspect.num && aspect.den) {
        av_reduce(&avctx->sample_aspect_ratio.num,
                  &avctx->sample_aspect_ratio.den,
                  aspect.num, aspect.den, 1<<30);
    }

    if (s->theora < 0x030200)
        skip_bits(gb, 5); /* keyframe frequency force */
    colorspace = get_bits(gb, 8);
    skip_bits(gb, 24); /* bitrate */

    skip_bits(gb, 6); /* quality hint */

    if (s->theora >= 0x030200) {
        skip_bits(gb, 5); /* keyframe frequency force */
        avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
        skip_bits(gb, 3); /* reserved */
    }

    // align_get_bits(gb);

    if (   visible_width  <= s->width  && visible_width  > s->width-16
        && visible_height <= s->height && visible_height > s->height-16
        && !offset_x && (offset_y == s->height - visible_height))
        avcodec_set_dimensions(avctx, visible_width, visible_height);
    else
        avcodec_set_dimensions(avctx, s->width, s->height);

    if (colorspace == 1) {
        avctx->color_primaries = AVCOL_PRI_BT470M;
    } else if (colorspace == 2) {
        avctx->color_primaries = AVCOL_PRI_BT470BG;
    }
    if (colorspace == 1 || colorspace == 2) {
        avctx->colorspace = AVCOL_SPC_BT470BG;
        avctx->color_trc  = AVCOL_TRC_BT709;
    }

    return 0;
}

static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int i, n, matrices, inter, plane;

    if (s->theora >= 0x030200) {
        n = get_bits(gb, 3);
        /* loop filter limit values table */
        if (n)
            for (i = 0; i < 64; i++)
                s->filter_limit_values[i] = get_bits(gb, n);
    }

    if (s->theora >= 0x030200)
        n = get_bits(gb, 4) + 1;
    else
        n = 16;
    /* quality threshold table */
    for (i = 0; i < 64; i++)
        s->coded_ac_scale_factor[i] = get_bits(gb, n);

    if (s->theora >= 0x030200)
        n = get_bits(gb, 4) + 1;
    else
        n = 16;
    /* dc scale factor table */
    for (i = 0; i < 64; i++)
        s->coded_dc_scale_factor[i] = get_bits(gb, n);

    if (s->theora >= 0x030200)
        matrices = get_bits(gb, 9) + 1;
    else
        matrices = 3;

    if (matrices > 384) {
        av_log(avctx, AV_LOG_ERROR, "invalid number of base matrices\n");
        return -1;
    }

    for (n = 0; n < matrices; n++) {
        for (i = 0; i < 64; i++)
            s->base_matrix[n][i] = get_bits(gb, 8);
    }

    for (inter = 0; inter <= 1; inter++) {
        for (plane = 0; plane <= 2; plane++) {
            int newqr = 1;
            if (inter || plane > 0)
                newqr = get_bits1(gb);
            if (!newqr) {
                int qtj, plj;
                if (inter && get_bits1(gb)) {
                    qtj = 0;
                    plj = plane;
                } else {
                    qtj = (3*inter + plane - 1) / 3;
                    plj = (plane + 2) % 3;
                }
                s->qr_count[inter][plane] = s->qr_count[qtj][plj];
                memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj], sizeof(s->qr_size[0][0]));
                memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj], sizeof(s->qr_base[0][0]));
            } else {
                int qri = 0;
                int qi  = 0;
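
                /*
                 * Read the run-length list that partitions qi = 0..63 into
                 * ranges, with one base-matrix index at each range boundary
                 * (fence-post: qr_count range sizes, qr_count + 1 base
                 * indices).  Illustrative values: sizes {16, 47} with base
                 * indices {b0, b1, b2} cover qi 0..16 and 16..63.
                 */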
"invalid base matrix index\n"); 02247 return -1; 02248 } 02249 s->qr_base[inter][plane][qri]= i; 02250 if(qi >= 63) 02251 break; 02252 i = get_bits(gb, av_log2(63-qi)+1) + 1; 02253 s->qr_size[inter][plane][qri++]= i; 02254 qi += i; 02255 } 02256 02257 if (qi > 63) { 02258 av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi); 02259 return -1; 02260 } 02261 s->qr_count[inter][plane]= qri; 02262 } 02263 } 02264 } 02265 02266 /* Huffman tables */ 02267 for (s->hti = 0; s->hti < 80; s->hti++) { 02268 s->entries = 0; 02269 s->huff_code_size = 1; 02270 if (!get_bits1(gb)) { 02271 s->hbits = 0; 02272 if(read_huffman_tree(avctx, gb)) 02273 return -1; 02274 s->hbits = 1; 02275 if(read_huffman_tree(avctx, gb)) 02276 return -1; 02277 } 02278 } 02279 02280 s->theora_tables = 1; 02281 02282 return 0; 02283 } 02284 02285 static av_cold int theora_decode_init(AVCodecContext *avctx) 02286 { 02287 Vp3DecodeContext *s = avctx->priv_data; 02288 GetBitContext gb; 02289 int ptype; 02290 uint8_t *header_start[3]; 02291 int header_len[3]; 02292 int i; 02293 02294 s->theora = 1; 02295 02296 if (!avctx->extradata_size) 02297 { 02298 av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n"); 02299 return -1; 02300 } 02301 02302 if (ff_split_xiph_headers(avctx->extradata, avctx->extradata_size, 02303 42, header_start, header_len) < 0) { 02304 av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n"); 02305 return -1; 02306 } 02307 02308 for(i=0;i<3;i++) { 02309 init_get_bits(&gb, header_start[i], header_len[i] * 8); 02310 02311 ptype = get_bits(&gb, 8); 02312 02313 if (!(ptype & 0x80)) 02314 { 02315 av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n"); 02316 // return -1; 02317 } 02318 02319 // FIXME: Check for this as well. 02320 skip_bits_long(&gb, 6*8); /* "theora" */ 02321 02322 switch(ptype) 02323 { 02324 case 0x80: 02325 theora_decode_header(avctx, &gb); 02326 break; 02327 case 0x81: 02328 // FIXME: is this needed? 
static av_cold int theora_decode_init(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;
    GetBitContext gb;
    int ptype;
    uint8_t *header_start[3];
    int header_len[3];
    int i;

    s->theora = 1;

    if (!avctx->extradata_size) {
        av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
        return -1;
    }

    if (ff_split_xiph_headers(avctx->extradata, avctx->extradata_size,
                              42, header_start, header_len) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
        return -1;
    }

    for (i = 0; i < 3; i++) {
        init_get_bits(&gb, header_start[i], header_len[i] * 8);

        ptype = get_bits(&gb, 8);

        if (!(ptype & 0x80)) {
            av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
            // return -1;
        }

        // FIXME: Check for this as well.
        skip_bits_long(&gb, 6*8); /* "theora" */

        switch (ptype) {
        case 0x80:
            theora_decode_header(avctx, &gb);
            break;
        case 0x81:
            // FIXME: is this needed? it breaks sometimes
            // theora_decode_comments(avctx, gb);
            break;
        case 0x82:
            if (theora_decode_tables(avctx, &gb))
                return -1;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype & ~0x80);
            break;
        }
        if (ptype != 0x81 && 8*header_len[i] != get_bits_count(&gb))
            av_log(avctx, AV_LOG_WARNING, "%d bits left in packet %X\n",
                   8*header_len[i] - get_bits_count(&gb), ptype);
        if (s->theora < 0x030200)
            break;
    }

    return vp3_decode_init(avctx);
}

static void vp3_decode_flush(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;

    if (s->golden_frame.data[0]) {
        if (s->golden_frame.data[0] == s->last_frame.data[0])
            memset(&s->last_frame, 0, sizeof(AVFrame));
        if (s->current_frame.data[0] == s->golden_frame.data[0])
            memset(&s->current_frame, 0, sizeof(AVFrame));
        ff_thread_release_buffer(avctx, &s->golden_frame);
    }
    if (s->last_frame.data[0]) {
        if (s->current_frame.data[0] == s->last_frame.data[0])
            memset(&s->current_frame, 0, sizeof(AVFrame));
        ff_thread_release_buffer(avctx, &s->last_frame);
    }
    if (s->current_frame.data[0])
        ff_thread_release_buffer(avctx, &s->current_frame);
}

static int vp3_init_thread_copy(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;

    s->superblock_coding      = NULL;
    s->all_fragments          = NULL;
    s->coded_fragment_list[0] = NULL;
    s->dct_tokens_base        = NULL;
    s->superblock_fragments   = NULL;
    s->macroblock_coding      = NULL;
    s->motion_val[0]          = NULL;
    s->motion_val[1]          = NULL;
    s->edge_emu_buffer        = NULL;

    return 0;
}

AVCodec ff_theora_decoder = {
    "theora",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_THEORA,
    sizeof(Vp3DecodeContext),
    theora_decode_init,
    NULL,
    vp3_decode_end,
    vp3_decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
    NULL,
    .flush = vp3_decode_flush,
    .long_name = NULL_IF_CONFIG_SMALL("Theora"),
    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context)
};
#endif

AVCodec ff_vp3_decoder = {
    "vp3",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_VP3,
    sizeof(Vp3DecodeContext),
    vp3_decode_init,
    NULL,
    vp3_decode_end,
    vp3_decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
    NULL,
    .flush = vp3_decode_flush,
    .long_name = NULL_IF_CONFIG_SMALL("On2 VP3"),
    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context)
};