/*
 * huffyuv codec for libavcodec
 *
 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
 * the algorithm used
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "get_bits.h"
#include "put_bits.h"
#include "dsputil.h"

#define VLC_BITS 11

#if HAVE_BIGENDIAN
#define B 3
#define G 2
#define R 1
#define A 0
#else
#define B 0
#define G 1
#define R 2
#define A 3
#endif

typedef enum Predictor{
    LEFT= 0,
    PLANE,
    MEDIAN,
} Predictor;

typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;
    GetBitContext gb;
    PutBitContext pb;
    int interlaced;
    int decorrelate;
    int bitstream_bpp;
    int version;
    int yuy2;                     //use yuy2 instead of 422P
    int bgr32;                    //use bgr32 instead of bgr24
    int width, height;
    int flags;
    int context;
    int picture_number;
    int last_slice_end;
    uint8_t *temp[3];
    uint64_t stats[3][256];
    uint8_t len[3][256];
    uint32_t bits[3][256];
    uint32_t pix_bgr_map[1<<VLC_BITS];
    VLC vlc[6];                   //Y,U,V,YY,YU,YV
    AVFrame picture;
    uint8_t *bitstream_buffer;
    unsigned int bitstream_buffer_size;
    DSPContext dsp;
}HYuvContext;

static const unsigned char classic_shift_luma[] = {
  34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
  16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
  69,68, 0
};

static const unsigned char classic_shift_chroma[] = {
  66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
  56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
  214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
};

static const unsigned char classic_add_luma[256] = {
     3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
    73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
    68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
    35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
    37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
    35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
    27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
    15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
    12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
    12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
    18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
    28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
    28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
    62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
    54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
    46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
};

static const unsigned char classic_add_chroma[256] = {
      3,   1,   2,   2,   2,   2,   3,   3,   7,   5,   7,   5,   8,   6,  11,   9,
      7,  13,  11,  10,   9,   8,   7,   5,   9,   7,   6,   4,   7,   5,   8,   7,
     11,   8,  13,  11,  19,  15,  22,  23,  20,  33,  32,  28,  27,  29,  51,  77,
     43,  45,  76,  81,  46,  82,  75,  55,  56, 144,  58,  80,  60,  74, 147,  63,
    143,  65,  66,  67,  68,  69,  70,  71,  72,  73,  74,  75,  76,  77,  78,  79,
     80,  81,  82,  83,  84,  85,  86,  87,  88,  89,  90,  91,  27,  30,  21,  22,
     17,  14,   5,   6, 100,  54,  47,  50,  51,  53, 106, 107, 108, 109, 110, 111,
    112, 113, 114, 115,   4, 117, 118,  92,  94, 121, 122,   3, 124, 103,   2,   1,
      0, 129, 130, 131, 120, 119, 126, 125, 136, 137, 138, 139, 140, 141, 142, 134,
    135, 132, 133, 104,  64, 101,  62,  57, 102,  95,  93,  59,  61,  28,  97,  96,
     52,  49,  48,  29,  32,  25,  24,  46,  23,  98,  45,  44,  43,  20,  42,  41,
     19,  18,  99,  40,  15,  39,  38,  16,  13,  12,  11,  37,  10,   9,   8,  36,
      7, 128, 127, 105, 123, 116,  35,  34,  33, 145,  31,  79,  42, 146,  78,  26,
     83,  48,  49,  50,  44,  47,  26,  31,  30,  18,  17,  19,  21,  24,  25,  13,
     14,  16,  17,  18,  20,  21,  12,  14,  15,   9,  10,   6,   9,   6,   5,   8,
      6,  12,   8,  10,   7,   9,   6,   4,   6,   2,   2,   3,   3,   3,   3,   2,
};

static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
    int i;
    if(w<32){
        for(i=0; i<w; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        return left;
    }else{
        for(i=0; i<16; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
        return src[w-1];
    }
}

static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
    int i;
    int r,g,b;
    r= *red;
    g= *green;
    b= *blue;
    for(i=0; i<FFMIN(w,4); i++){
        const int rt= src[i*4+R];
        const int gt= src[i*4+G];
        const int bt= src[i*4+B];
        dst[i*4+R]= rt - r;
        dst[i*4+G]= gt - g;
        dst[i*4+B]= bt - b;
        r = rt;
        g = gt;
        b = bt;
    }
    s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
    *red=   src[(w-1)*4+R];
    *green= src[(w-1)*4+G];
    *blue=  src[(w-1)*4+B];
}

static int read_len_table(uint8_t *dst, GetBitContext *gb){
    int i, val, repeat;

    for(i=0; i<256;){
        repeat= get_bits(gb, 3);
        val   = get_bits(gb, 5);
        if(repeat==0)
            repeat= get_bits(gb, 8);
//printf("%d %d\n", val, repeat);
        if(i+repeat > 256) {
            av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
            return -1;
        }
        while (repeat--)
            dst[i++] = val;
    }
    return 0;
}

static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
    int len, index;
    uint32_t bits=0;

    for(len=32; len>0; len--){
        for(index=0; index<256; index++){
            if(len_table[index]==len)
                dst[index]= bits++;
        }
        if(bits & 1){
            av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
            return -1;
        }
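        /* codes were handed out consecutively for the current (longest remaining)
         * length; an odd count could not pair up into valid shorter prefixes.
         * Halving the running value yields the first code of the next shorter length. */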
        bits >>= 1;
    }
    return 0;
}

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
typedef struct {
    uint64_t val;
    int name;
} HeapElem;

static void heap_sift(HeapElem *h, int root, int size)
{
    while(root*2+1 < size) {
        int child = root*2+1;
        if(child < size-1 && h[child].val > h[child+1].val)
            child++;
        if(h[root].val > h[child].val) {
            FFSWAP(HeapElem, h[root], h[child]);
            root = child;
        } else
            break;
    }
}

static void generate_len_table(uint8_t *dst, const uint64_t *stats, int size){
    HeapElem h[size];
    int up[2*size];
    int len[2*size];
    int offset, i, next;

    for(offset=1; ; offset<<=1){
        for(i=0; i<size; i++){
            h[i].name = i;
            h[i].val = (stats[i] << 8) + offset;
        }
        for(i=size/2-1; i>=0; i--)
            heap_sift(h, i, size);

        for(next=size; next<size*2-1; next++){
            // merge the two smallest entries, and put the result back in the heap
            uint64_t min1v = h[0].val;
            up[h[0].name] = next;
            h[0].val = INT64_MAX;
            heap_sift(h, 0, size);
            up[h[0].name] = next;
            h[0].name = next;
            h[0].val += min1v;
            heap_sift(h, 0, size);
        }

        len[2*size-2] = 0;
        for(i=2*size-3; i>=size; i--)
            len[i] = len[up[i]] + 1;
        for(i=0; i<size; i++) {
            dst[i] = len[up[i]] + 1;
            if(dst[i] >= 32) break;
        }
        if(i==size) break;
    }
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

static void generate_joint_tables(HYuvContext *s){
    uint16_t symbols[1<<VLC_BITS];
    uint16_t bits[1<<VLC_BITS];
    uint8_t len[1<<VLC_BITS];
    if(s->bitstream_bpp < 24){
        int p, i, y, u;
        for(p=0; p<3; p++){
            for(i=y=0; y<256; y++){
                int len0 = s->len[0][y];
                int limit = VLC_BITS - len0;
                if(limit <= 0)
                    continue;
                for(u=0; u<256; u++){
                    int len1 = s->len[p][u];
                    if(len1 > limit)
                        continue;
                    len[i] = len0 + len1;
                    bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
                    symbols[i] = (y<<8) + u;
                    if(symbols[i] != 0xffff) // reserved to mean "invalid"
                        i++;
                }
            }
            free_vlc(&s->vlc[3+p]);
            init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
        }
    }else{
        uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
        int i, b, g, r, code;
        int p0 = s->decorrelate;
        int p1 = !s->decorrelate;
        // restrict the range to +/-16 because that's pretty much guaranteed to
        // cover all the combinations that fit in 11 bits total, and it doesn't
        // matter if we miss a few rare codes.
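        // enumerate the (G,B,R) triplets whose combined code length fits in one
        // VLC_BITS lookup, so a whole pixel can be decoded in a single table step.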
        for(i=0, g=-16; g<16; g++){
            int len0 = s->len[p0][g&255];
            int limit0 = VLC_BITS - len0;
            if(limit0 < 2)
                continue;
            for(b=-16; b<16; b++){
                int len1 = s->len[p1][b&255];
                int limit1 = limit0 - len1;
                if(limit1 < 1)
                    continue;
                code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
                for(r=-16; r<16; r++){
                    int len2 = s->len[2][r&255];
                    if(len2 > limit1)
                        continue;
                    len[i] = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r&255];
                    if(s->decorrelate){
                        map[i][G] = g;
                        map[i][B] = g+b;
                        map[i][R] = g+r;
                    }else{
                        map[i][B] = g;
                        map[i][G] = b;
                        map[i][R] = r;
                    }
                    i++;
                }
            }
        }
        free_vlc(&s->vlc[3]);
        init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
    }
}

static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
    GetBitContext gb;
    int i;

    init_get_bits(&gb, src, length*8);

    for(i=0; i<3; i++){
        if(read_len_table(s->len[i], &gb)<0)
            return -1;
        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }
#if 0
        for(j=0; j<256; j++){
            printf("%6X, %2d,  %3d\n", s->bits[i][j], s->len[i][j], j);
        }
#endif
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return (get_bits_count(&gb)+7)/8;
}

static int read_old_huffman_tables(HYuvContext *s){
#if 1
    GetBitContext gb;
    int i;

    init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
    if(read_len_table(s->len[0], &gb)<0)
        return -1;
    init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
    if(read_len_table(s->len[1], &gb)<0)
        return -1;

    for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma  [i];
    for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];

    if(s->bitstream_bpp >= 24){
        memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
        memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
    }
    memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
    memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));

    for(i=0; i<3; i++){
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return 0;
#else
    av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
    return -1;
#endif
}

static av_cold void alloc_temp(HYuvContext *s){
    int i;

    if(s->bitstream_bpp<24){
        for(i=0; i<3; i++){
            s->temp[i]= av_malloc(s->width + 16);
        }
    }else{
        s->temp[0]= av_mallocz(4*s->width + 16);
    }
}

static av_cold int common_init(AVCodecContext *avctx){
    HYuvContext *s = avctx->priv_data;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx);

    s->width= avctx->width;
    s->height= avctx->height;
    assert(s->width>0 && s->height>0);

    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static av_cold int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_init(avctx);
    memset(s->vlc, 0, 3*sizeof(VLC));

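    /* the stream version is guessed below from the extradata: version 2 carries
     * method, bpp, interlace/context flags and the huffman tables there, while
     * older files fall back to the hardcoded "classic" tables. */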
    avctx->coded_frame= &s->picture;
    s->interlaced= s->height > 288;

    s->bgr32=1;
//if(avctx->extradata)
//  printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
    if(avctx->extradata_size){
        if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
            s->version=1; // do such files exist at all?
        else
            s->version=2;
    }else
        s->version=0;

    if(s->version==2){
        int method, interlace;

        if (avctx->extradata_size < 4)
            return -1;

        method= ((uint8_t*)avctx->extradata)[0];
        s->decorrelate= method&64 ? 1 : 0;
        s->predictor= method&63;
        s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
        if(s->bitstream_bpp==0)
            s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
        interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
        s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
        s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;

        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
            return -1;
    }else{
        switch(avctx->bits_per_coded_sample&7){
        case 1:
            s->predictor= LEFT;
            s->decorrelate= 0;
            break;
        case 2:
            s->predictor= LEFT;
            s->decorrelate= 1;
            break;
        case 3:
            s->predictor= PLANE;
            s->decorrelate= avctx->bits_per_coded_sample >= 24;
            break;
        case 4:
            s->predictor= MEDIAN;
            s->decorrelate= 0;
            break;
        default:
            s->predictor= LEFT; //OLD
            s->decorrelate= 0;
            break;
        }
        s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
        s->context= 0;

        if(read_old_huffman_tables(s) < 0)
            return -1;
    }

    switch(s->bitstream_bpp){
    case 12:
        avctx->pix_fmt = PIX_FMT_YUV420P;
        break;
    case 16:
        if(s->yuy2){
            avctx->pix_fmt = PIX_FMT_YUYV422;
        }else{
            avctx->pix_fmt = PIX_FMT_YUV422P;
        }
        break;
    case 24:
    case 32:
        if(s->bgr32){
            avctx->pix_fmt = PIX_FMT_RGB32;
        }else{
            avctx->pix_fmt = PIX_FMT_BGR24;
        }
        break;
    default:
        assert(0);
    }

    alloc_temp(s);

//    av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    return 0;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
    int i;
    int index= 0;

    for(i=0; i<256;){
        int val= len[i];
        int repeat=0;

        for(; i<256 && len[i]==val && repeat<255; i++)
            repeat++;

        assert(val < 32 && val >0 && repeat<256 && repeat>0);
        if(repeat>7){
            buf[index++]= val;
            buf[index++]= repeat;
        }else{
            buf[index++]= val | (repeat<<5);
        }
    }

    return index;
}

static av_cold int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;

    common_init(avctx);

    avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
    avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
    s->version=2;

    avctx->coded_frame= &s->picture;

    switch(avctx->pix_fmt){
    case PIX_FMT_YUV420P:
        s->bitstream_bpp= 12;
        break;
    case PIX_FMT_YUV422P:
        s->bitstream_bpp= 16;
        break;
    case PIX_FMT_RGB32:
        s->bitstream_bpp= 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return -1;
    }
    avctx->bits_per_coded_sample= s->bitstream_bpp;
    s->decorrelate= s->bitstream_bpp >= 24;
    s->predictor= avctx->prediction_method;
    s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if(avctx->context_model==1){
        s->context= avctx->context_model;
        if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
            av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
            return -1;
        }
    }else s->context= 0;

    if(avctx->codec->id==CODEC_ID_HUFFYUV){
        if(avctx->pix_fmt==PIX_FMT_YUV420P){
            av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
            return -1;
        }
        if(avctx->context_model){
            av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
            return -1;
        }
        if(s->interlaced != ( s->height > 288 ))
            av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
        av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
        return -1;
    }

    ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
    if(s->context)
        ((uint8_t*)avctx->extradata)[2]|= 0x40;
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;

    if(avctx->stats_in){
        char *p= avctx->stats_in;

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 1;

        for(;;){
            for(i=0; i<3; i++){
                char *next;

                for(j=0; j<256; j++){
                    s->stats[i][j]+= strtol(p, &next, 0);
                    if(next==p) return -1;
                    p=next;
                }
            }
            if(p[0]==0 || p[1]==0 || p[2]==0) break;
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);

                s->stats[i][j]= 100000000/(d+1);
            }
    }

    for(i=0; i<3; i++){
        generate_len_table(s->len[i], s->stats[i], 256);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }

        s->avctx->extradata_size+=
            store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if(s->context){
        for(i=0; i<3; i++){
            int pels = s->width*s->height / (i?40:10);
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);
                s->stats[i][j]= pels/(d+1);
            }
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 0;
    }

//    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    alloc_temp(s);

    s->picture_number=0;

    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

/* TODO instead of restarting the read when the code isn't in the first level
 * of the joint table, jump into the 2nd level of the individual table.
 */
#define READ_2PIX(dst0, dst1, plane1){\
    uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
    if(code != 0xffff){\
        dst0 = code>>8;\
        dst1 = code;\
    }else{\
        dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
        dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
    }\
}

static void decode_422_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    if(count >= (get_bits_left(&s->gb))/(31*4)){
        for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
        }
    }else{
        for(i=0; i<count; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
        }
    }
}

static void decode_gray_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    if(count >= (get_bits_left(&s->gb))/(31*2)){
        for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
        }
    }else{
        for(i=0; i<count; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
        }
    }
}

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
static int encode_422_bitstream(HYuvContext *s, int offset, int count){
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset/2;
    const uint8_t *v = s->temp[2] + offset/2;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4\
            int y0 = y[2*i];\
            int y1 = y[2*i+1];\
            int u0 = u[i];\
            int v0 = v[i];

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if(s->context){
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }else{
        for(i=0; i<count; i++){
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}

static int encode_gray_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2\
            int y0 = s->temp[0][2*i];\
            int y1 = s->temp[0][2*i+1];
#define STAT2\
            s->stats[0][y0]++;\
            s->stats[0][y1]++;
#define WRITE2\
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
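        /* first pass of two-pass encoding: only accumulate symbol statistics here;
         * any actual bitstream output is written by the loops further below */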
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if(s->context){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
            WRITE2;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
    int i;
    for(i=0; i<count; i++){
        int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
        if(code != -1){
            *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
        }else if(decorrelate){
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
        }else{
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
        }
        if(alpha)
            s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
    }
}

static void decode_bgr_bitstream(HYuvContext *s, int count){
    if(s->decorrelate){
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 1, 0);
        else
            decode_bgr_1(s, count, 1, 1);
    }else{
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 0, 0);
        else
            decode_bgr_1(s, count, 0, 1);
    }
}

static int encode_bgr_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD3\
            int g= s->temp[0][4*i+G];\
            int b= (s->temp[0][4*i+B] - g) & 0xff;\
            int r= (s->temp[0][4*i+R] - g) & 0xff;
#define STAT3\
            s->stats[0][b]++;\
            s->stats[1][g]++;\
            s->stats[2][r]++;
#define WRITE3\
            put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
            put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
            put_bits(&s->pb, s->len[2][r], s->bits[2][r]);

    if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
        }
    }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
            WRITE3;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD3;
            WRITE3;
        }
    }
    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static void draw_slice(HYuvContext *s, int y){
    int h, cy;
    int offset[4];

    if(s->avctx->draw_horiz_band==NULL)
        return;

    h= y - s->last_slice_end;
    y -= h;

    if(s->bitstream_bpp==12){
        cy= y>>1;
    }else{
        cy= y;
    }

    offset[0] = s->picture.linesize[0]*y;
    offset[1] = s->picture.linesize[1]*cy;
    offset[2] = s->picture.linesize[2]*cy;
    offset[3] = 0;
    emms_c();

    s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);

    s->last_slice_end= y + h;
}

static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
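    /* byteswap the packet into bitstream_buffer, optionally read per-frame
     * huffman tables (context mode), then undo the chosen predictor line by line */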
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    HYuvContext *s = avctx->priv_data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    int fake_ystride, fake_ustride, fake_vstride;
    AVFrame * const p= &s->picture;
    int table_size= 0;

    AVFrame *picture = data;

    av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!s->bitstream_buffer)
        return AVERROR(ENOMEM);

    memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if(s->context){
        table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
        if(table_size < 0)
            return -1;
    }

    if((unsigned)(buf_size-table_size) >= INT_MAX/8)
        return -1;

    init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);

    fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
    fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
    fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];

    s->last_slice_end= 0;

    if(s->bitstream_bpp<24){
        int y, cy;
        int lefty, leftu, leftv;
        int lefttopy, lefttopu, lefttopv;

        if(s->yuy2){
            p->data[0][3]= get_bits(&s->gb, 8);
            p->data[0][2]= get_bits(&s->gb, 8);
            p->data[0][1]= get_bits(&s->gb, 8);
            p->data[0][0]= get_bits(&s->gb, 8);

            av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
            return -1;
        }else{

            leftv= p->data[2][0]= get_bits(&s->gb, 8);
            lefty= p->data[0][1]= get_bits(&s->gb, 8);
            leftu= p->data[1][0]= get_bits(&s->gb, 8);
                   p->data[0][0]= get_bits(&s->gb, 8);

            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_422_bitstream(s, width-2);
                lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                for(cy=y=1; y<s->height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        decode_gray_bitstream(s, width);

                        ydst= p->data[0] + p->linesize[0]*y;

                        lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
                        if(s->predictor == PLANE){
                            if(y>s->interlaced)
                                s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                        }
                        y++;
                        if(y>=s->height) break;
                    }

                    draw_slice(s, y);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    decode_422_bitstream(s, width);
                    lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
                        leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
                    }
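                    /* for the PLANE predictor the line above (or the field above
                     * when interlaced, via fake_ystride) is added back in */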
                    if(s->predictor == PLANE){
                        if(cy>s->interlaced){
                            s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                            if(!(s->flags&CODEC_FLAG_GRAY)){
                                s->dsp.add_bytes(udst, udst - fake_ustride, width2);
                                s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
                            }
                        }
                    }
                }
                draw_slice(s, height);

                break;
            case MEDIAN:
                /* first line except first 2 pixels is left predicted */
                decode_422_bitstream(s, width-2);
                lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                cy=y=1;

                /* second line is left predicted for interlaced case */
                if(s->interlaced){
                    decode_422_bitstream(s, width);
                    lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
                        leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
                    }
                    y++; cy++;
                }

                /* next 4 pixels are left predicted too */
                decode_422_bitstream(s, 4);
                lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
                    leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
                }

                /* next line except the first 4 pixels is median predicted */
                lefttopy= p->data[0][3];
                decode_422_bitstream(s, width-4);
                s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    lefttopu= p->data[1][1];
                    lefttopv= p->data[2][1];
                    s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
                    s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
                }
                y++; cy++;

                for(; y<height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        while(2*cy > y){
                            decode_gray_bitstream(s, width);
                            ydst= p->data[0] + p->linesize[0]*y;
                            s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                            y++;
                        }
                        if(y>=height) break;
                    }
                    draw_slice(s, y);

                    decode_422_bitstream(s, width);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
                        s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
                    }
                }

                draw_slice(s, height);
                break;
            }
        }
    }else{
        int y;
        int leftr, leftg, leftb, lefta;
        const int last_line= (height-1)*p->linesize[0];

        if(s->bitstream_bpp==32){
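            /* RGB huffyuv stores the picture bottom-up, so the first (raw coded)
             * pixel belongs to the last line of the output frame */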
            lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
        }else{
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
            lefta= p->data[0][last_line+A]= 255;
            skip_bits(&s->gb, 8);
        }

        if(s->bgr32){
            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_bgr_bitstream(s, width-1);
                s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);

                for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
                    decode_bgr_bitstream(s, width);

                    s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
                    if(s->predictor == PLANE){
                        if(s->bitstream_bpp!=32) lefta=0;
                        if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
                            s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
                                             p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
                        }
                    }
                }
                draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
            }
        }else{

            av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
            return -1;
        }
    }
    emms_c();

    *picture= *p;
    *data_size = sizeof(AVFrame);

    return (get_bits_count(&s->gb)+31)/32*4 + table_size;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

static int common_end(HYuvContext *s){
    int i;

    for(i=0; i<3; i++){
        av_freep(&s->temp[i]);
    }
    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static av_cold int decode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i;

    if (s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);

    common_end(s);
    av_freep(&s->bitstream_buffer);

    for(i=0; i<6; i++){
        free_vlc(&s->vlc[i]);
    }

    return 0;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    HYuvContext *s = avctx->priv_data;
    AVFrame *pict = data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    AVFrame * const p= &s->picture;
    int i, j, size=0;

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    if(s->context){
        for(i=0; i<3; i++){
            generate_len_table(s->len[i], s->stats[i], 256);
            if(generate_bits_table(s->bits[i], s->len[i])<0)
                return -1;
            size+= store_table(s, s->len[i], &buf[size]);
        }

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, buf+size, buf_size-size);

    if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
        int lefty, leftu, leftv, y, cy;

        put_bits(&s->pb, 8, leftv= p->data[2][0]);
        put_bits(&s->pb, 8, lefty= p->data[0][1]);
        put_bits(&s->pb, 8, leftu= p->data[1][0]);
        put_bits(&s->pb, 8,        p->data[0][0]);

        lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
        leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);

        encode_422_bitstream(s, 2, width-2);

        if(s->predictor==MEDIAN){
            int lefttopy, lefttopu, lefttopv;
            cy=y=1;
            if(s->interlaced){
                lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
                leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
                leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);

                encode_422_bitstream(s, 0, width);
                y++; cy++;
            }

            lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
            leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
            leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);

            encode_422_bitstream(s, 0, 4);

            lefttopy= p->data[0][3];
            lefttopu= p->data[1][1];
            lefttopv= p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width-4);
            y++; cy++;

            for(; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                if(s->bitstream_bpp==12){
                    while(2*cy > y){
                        ydst= p->data[0] + p->linesize[0]*y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if(y>=height) break;
                }
                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, 0, width);
            }
        }else{
            for(cy=y=1; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if(s->bitstream_bpp==12){
                    ydst= p->data[0] + p->linesize[0]*y;

                    if(s->predictor == PLANE && s->interlaced < y){
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    }else{
                        lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if(y>=height) break;
                }

                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                if(s->predictor == PLANE && s->interlaced < cy){
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                }else{
                    lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, 0, width);
            }
        }
    }else if(avctx->pix_fmt == PIX_FMT_RGB32){
        uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr= data[R]);
        put_bits(&s->pb, 8, leftg= data[G]);
        put_bits(&s->pb, 8, leftb= data[B]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
        encode_bgr_bitstream(s, width-1);

        for(y=1; y<s->height; y++){
            uint8_t *dst = data + y*stride;
            if(s->predictor == PLANE && s->interlaced < y){
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
            }else{
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            }
            encode_bgr_bitstream(s, width);
        }
    }else{
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    size+= (put_bits_count(&s->pb)+31)/8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size/= 4;

    if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
        int j;
        char *p= avctx->stats_out;
        char *end= p + 1024*30;
        for(i=0; i<3; i++){
            for(j=0; j<256; j++){
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p+= strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';
    if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
        flush_put_bits(&s->pb);
        s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
    }

    s->picture_number++;

    return size*4;
}

static av_cold int encode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_end(s);

    av_freep(&avctx->extradata);
    av_freep(&avctx->stats_out);

    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

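/* AVCodec registrations for the huffyuv decoder/encoder and the ffvhuff
 * variant; they share the same context and callbacks, the ffvhuff encoder
 * additionally accepts PIX_FMT_YUV420P */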
#if CONFIG_HUFFYUV_DECODER
AVCodec huffyuv_decoder = {
    "huffyuv",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_DECODER
AVCodec ffvhuff_decoder = {
    "ffvhuff",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif

#if CONFIG_HUFFYUV_ENCODER
AVCodec huffyuv_encoder = {
    "huffyuv",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_ENCODER
AVCodec ffvhuff_encoder = {
    "ffvhuff",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif