Libav 0.7.1: libavcodec/vp8dsp.c
#include "dsputil.h"
#include "vp8dsp.h"

// TODO: Maybe add dequant
static void vp8_luma_dc_wht_c(DCTELEM block[4][4][16], DCTELEM dc[16])
{
    int i, t0, t1, t2, t3;

    for (i = 0; i < 4; i++) {
        t0 = dc[0*4+i] + dc[3*4+i];
        t1 = dc[1*4+i] + dc[2*4+i];
        t2 = dc[1*4+i] - dc[2*4+i];
        t3 = dc[0*4+i] - dc[3*4+i];

        dc[0*4+i] = t0 + t1;
        dc[1*4+i] = t3 + t2;
        dc[2*4+i] = t0 - t1;
        dc[3*4+i] = t3 - t2;
    }

    for (i = 0; i < 4; i++) {
        t0 = dc[i*4+0] + dc[i*4+3] + 3; // rounding
        t1 = dc[i*4+1] + dc[i*4+2];
        t2 = dc[i*4+1] - dc[i*4+2];
        t3 = dc[i*4+0] - dc[i*4+3] + 3; // rounding
        dc[i*4+0] = 0;
        dc[i*4+1] = 0;
        dc[i*4+2] = 0;
        dc[i*4+3] = 0;

        block[i][0][0] = (t0 + t1) >> 3;
        block[i][1][0] = (t3 + t2) >> 3;
        block[i][2][0] = (t0 - t1) >> 3;
        block[i][3][0] = (t3 - t2) >> 3;
    }
}

static void vp8_luma_dc_wht_dc_c(DCTELEM block[4][4][16], DCTELEM dc[16])
{
    int i, val = (dc[0] + 3) >> 3;
    dc[0] = 0;

    for (i = 0; i < 4; i++) {
        block[i][0][0] = val;
        block[i][1][0] = val;
        block[i][2][0] = val;
        block[i][3][0] = val;
    }
}

// 20091/65536 ~= sqrt(2)*cos(pi/8) - 1, 35468/65536 ~= sqrt(2)*sin(pi/8)
#define MUL_20091(a) ((((a)*20091) >> 16) + (a))
#define MUL_35468(a) (((a)*35468) >> 16)

static void vp8_idct_add_c(uint8_t *dst, DCTELEM block[16], int stride)
{
    int i, t0, t1, t2, t3;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    DCTELEM tmp[16];

    for (i = 0; i < 4; i++) {
        t0 = block[0*4+i] + block[2*4+i];
        t1 = block[0*4+i] - block[2*4+i];
        t2 = MUL_35468(block[1*4+i]) - MUL_20091(block[3*4+i]);
        t3 = MUL_20091(block[1*4+i]) + MUL_35468(block[3*4+i]);
        block[0*4+i] = 0;
        block[1*4+i] = 0;
        block[2*4+i] = 0;
        block[3*4+i] = 0;

        tmp[i*4+0] = t0 + t3;
        tmp[i*4+1] = t1 + t2;
        tmp[i*4+2] = t1 - t2;
        tmp[i*4+3] = t0 - t3;
    }

    for (i = 0; i < 4; i++) {
        t0 = tmp[0*4+i] + tmp[2*4+i];
        t1 = tmp[0*4+i] - tmp[2*4+i];
        t2 = MUL_35468(tmp[1*4+i]) - MUL_20091(tmp[3*4+i]);
        t3 = MUL_20091(tmp[1*4+i]) + MUL_35468(tmp[3*4+i]);

        dst[0] = cm[dst[0] + ((t0 + t3 + 4) >> 3)];
        dst[1] = cm[dst[1] + ((t1 + t2 + 4) >> 3)];
        dst[2] = cm[dst[2] + ((t1 - t2 + 4) >> 3)];
        dst[3] = cm[dst[3] + ((t0 - t3 + 4) >> 3)];
        dst += stride;
    }
}

static void vp8_idct_dc_add_c(uint8_t *dst, DCTELEM block[16], int stride)
{
    int i, dc = (block[0] + 4) >> 3;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP + dc;
    block[0] = 0;

    for (i = 0; i < 4; i++) {
        dst[0] = cm[dst[0]];
        dst[1] = cm[dst[1]];
        dst[2] = cm[dst[2]];
        dst[3] = cm[dst[3]];
        dst += stride;
    }
}

static void vp8_idct_dc_add4uv_c(uint8_t *dst, DCTELEM block[4][16], int stride)
{
    vp8_idct_dc_add_c(dst+stride*0+0, block[0], stride);
    vp8_idct_dc_add_c(dst+stride*0+4, block[1], stride);
    vp8_idct_dc_add_c(dst+stride*4+0, block[2], stride);
    vp8_idct_dc_add_c(dst+stride*4+4, block[3], stride);
}

static void vp8_idct_dc_add4y_c(uint8_t *dst, DCTELEM block[4][16], int stride)
{
    vp8_idct_dc_add_c(dst+ 0, block[0], stride);
    vp8_idct_dc_add_c(dst+ 4, block[1], stride);
    vp8_idct_dc_add_c(dst+ 8, block[2], stride);
    vp8_idct_dc_add_c(dst+12, block[3], stride);
}
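/* In-loop deblocking filter.
 *
 * LOAD_PIXELS reads the four pixels on each side of the edge being filtered
 * (p3..p0 before the edge, q0..q3 after it); 'stride' is either 1 or the row
 * stride depending on the filter direction. filter_common() applies the
 * normal adjustment to the pixels closest to the edge, filter_mbedge() the
 * stronger filter used on macroblock edges. The E limit bounds the difference
 * across the edge itself, the I limit the differences between interior
 * pixels, and hev() detects "high edge variance".
 */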
// because I like only having two parameters to pass functions...
#define LOAD_PIXELS\
    int av_unused p3 = p[-4*stride];\
    int av_unused p2 = p[-3*stride];\
    int av_unused p1 = p[-2*stride];\
    int av_unused p0 = p[-1*stride];\
    int av_unused q0 = p[ 0*stride];\
    int av_unused q1 = p[ 1*stride];\
    int av_unused q2 = p[ 2*stride];\
    int av_unused q3 = p[ 3*stride];

#define clip_int8(n) (cm[n+0x80]-0x80)

static av_always_inline void filter_common(uint8_t *p, int stride, int is4tap)
{
    LOAD_PIXELS
    int a, f1, f2;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    a = 3*(q0 - p0);

    if (is4tap)
        a += clip_int8(p1 - q1);

    a = clip_int8(a);

    // We deviate from the spec here with c(a+3) >> 3
    // since that's what libvpx does.
    f1 = FFMIN(a+4, 127) >> 3;
    f2 = FFMIN(a+3, 127) >> 3;

    // Despite what the spec says, we do need to clamp here to
    // be bitexact with libvpx.
    p[-1*stride] = cm[p0 + f2];
    p[ 0*stride] = cm[q0 - f1];

    // only used for _inner on blocks without high edge variance
    if (!is4tap) {
        a = (f1+1)>>1;
        p[-2*stride] = cm[p1 + a];
        p[ 1*stride] = cm[q1 - a];
    }
}

static av_always_inline int simple_limit(uint8_t *p, int stride, int flim)
{
    LOAD_PIXELS
    return 2*FFABS(p0-q0) + (FFABS(p1-q1) >> 1) <= flim;
}

static av_always_inline int normal_limit(uint8_t *p, int stride, int E, int I)
{
    LOAD_PIXELS
    return simple_limit(p, stride, E)
        && FFABS(p3-p2) <= I && FFABS(p2-p1) <= I && FFABS(p1-p0) <= I
        && FFABS(q3-q2) <= I && FFABS(q2-q1) <= I && FFABS(q1-q0) <= I;
}

// high edge variance
static av_always_inline int hev(uint8_t *p, int stride, int thresh)
{
    LOAD_PIXELS
    return FFABS(p1-p0) > thresh || FFABS(q1-q0) > thresh;
}

static av_always_inline void filter_mbedge(uint8_t *p, int stride)
{
    int a0, a1, a2, w;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    LOAD_PIXELS

    w = clip_int8(p1-q1);
    w = clip_int8(w + 3*(q0-p0));

    a0 = (27*w + 63) >> 7;
    a1 = (18*w + 63) >> 7;
    a2 = ( 9*w + 63) >> 7;

    p[-3*stride] = cm[p2 + a2];
    p[-2*stride] = cm[p1 + a1];
    p[-1*stride] = cm[p0 + a0];
    p[ 0*stride] = cm[q0 - a0];
    p[ 1*stride] = cm[q1 - a1];
    p[ 2*stride] = cm[q2 - a2];
}

#define LOOP_FILTER(dir, size, stridea, strideb, maybe_inline) \
static maybe_inline void vp8_ ## dir ## _loop_filter ## size ## _c(uint8_t *dst, int stride,\
                                                                   int flim_E, int flim_I, int hev_thresh)\
{\
    int i;\
\
    for (i = 0; i < size; i++)\
        if (normal_limit(dst+i*stridea, strideb, flim_E, flim_I)) {\
            if (hev(dst+i*stridea, strideb, hev_thresh))\
                filter_common(dst+i*stridea, strideb, 1);\
            else\
                filter_mbedge(dst+i*stridea, strideb);\
        }\
}\
\
static maybe_inline void vp8_ ## dir ## _loop_filter ## size ## _inner_c(uint8_t *dst, int stride,\
                                                                         int flim_E, int flim_I, int hev_thresh)\
{\
    int i;\
\
    for (i = 0; i < size; i++)\
        if (normal_limit(dst+i*stridea, strideb, flim_E, flim_I)) {\
            int hv = hev(dst+i*stridea, strideb, hev_thresh);\
            if (hv) \
                filter_common(dst+i*stridea, strideb, 1);\
            else \
                filter_common(dst+i*stridea, strideb, 0);\
        }\
}
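/* LOOP_FILTER() expands to an edge filter and an "_inner" variant; the inner
 * variant never applies filter_mbedge(). 'stridea'/'strideb' select the
 * direction: the "v" filters step along a row and filter across rows, the
 * "h" filters do the opposite. The instantiations below produce the 16-pixel
 * luma filters and, through UV_LOOP_FILTER, 8-pixel chroma filters that
 * process the U and V planes in a single call.
 */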
LOOP_FILTER(v, 16, 1, stride,)
LOOP_FILTER(h, 16, stride, 1,)

#define UV_LOOP_FILTER(dir, stridea, strideb) \
LOOP_FILTER(dir, 8, stridea, strideb, av_always_inline) \
static void vp8_ ## dir ## _loop_filter8uv_c(uint8_t *dstU, uint8_t *dstV, int stride,\
                                             int fE, int fI, int hev_thresh)\
{\
    vp8_ ## dir ## _loop_filter8_c(dstU, stride, fE, fI, hev_thresh);\
    vp8_ ## dir ## _loop_filter8_c(dstV, stride, fE, fI, hev_thresh);\
}\
static void vp8_ ## dir ## _loop_filter8uv_inner_c(uint8_t *dstU, uint8_t *dstV, int stride,\
                                                   int fE, int fI, int hev_thresh)\
{\
    vp8_ ## dir ## _loop_filter8_inner_c(dstU, stride, fE, fI, hev_thresh);\
    vp8_ ## dir ## _loop_filter8_inner_c(dstV, stride, fE, fI, hev_thresh);\
}

UV_LOOP_FILTER(v, 1, stride)
UV_LOOP_FILTER(h, stride, 1)

static void vp8_v_loop_filter_simple_c(uint8_t *dst, int stride, int flim)
{
    int i;

    for (i = 0; i < 16; i++)
        if (simple_limit(dst+i, stride, flim))
            filter_common(dst+i, stride, 1);
}

static void vp8_h_loop_filter_simple_c(uint8_t *dst, int stride, int flim)
{
    int i;

    for (i = 0; i < 16; i++)
        if (simple_limit(dst+i*stride, 1, flim))
            filter_common(dst+i*stride, 1, 1);
}

static const uint8_t subpel_filters[7][6] = {
    { 0,  6, 123,  12,  1,  0 },
    { 2, 11, 108,  36,  8,  1 },
    { 0,  9,  93,  50,  6,  0 },
    { 3, 16,  77,  77, 16,  3 },
    { 0,  6,  50,  93,  9,  0 },
    { 1,  8,  36, 108, 11,  2 },
    { 0,  1,  12, 123,  6,  0 },
};

#define PUT_PIXELS(WIDTH) \
static void put_vp8_pixels ## WIDTH ##_c(uint8_t *dst, int dststride, uint8_t *src, int srcstride, int h, int x, int y) { \
    int i; \
    for (i = 0; i < h; i++, dst+= dststride, src+= srcstride) { \
        memcpy(dst, src, WIDTH); \
    } \
}

PUT_PIXELS(16)
PUT_PIXELS(8)
PUT_PIXELS(4)

#define FILTER_6TAP(src, F, stride) \
    cm[(F[2]*src[x+0*stride] - F[1]*src[x-1*stride] + F[0]*src[x-2*stride] + \
        F[3]*src[x+1*stride] - F[4]*src[x+2*stride] + F[5]*src[x+3*stride] + 64) >> 7]

#define FILTER_4TAP(src, F, stride) \
    cm[(F[2]*src[x+0*stride] - F[1]*src[x-1*stride] + \
        F[3]*src[x+1*stride] - F[4]*src[x+2*stride] + 64) >> 7]

#define VP8_EPEL_H(SIZE, TAPS) \
static void put_vp8_epel ## SIZE ## _h ## TAPS ## _c(uint8_t *dst, int dststride, uint8_t *src, int srcstride, int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[mx-1]; \
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## TAPS ## TAP(src, filter, 1); \
        dst += dststride; \
        src += srcstride; \
    } \
}
#define VP8_EPEL_V(SIZE, TAPS) \
static void put_vp8_epel ## SIZE ## _v ## TAPS ## _c(uint8_t *dst, int dststride, uint8_t *src, int srcstride, int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[my-1]; \
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## TAPS ## TAP(src, filter, srcstride); \
        dst += dststride; \
        src += srcstride; \
    } \
}
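/* Subpel motion compensation. subpel_filters[] above holds the six-tap
 * coefficients for the seven nonzero fractional positions (position 0 is a
 * plain copy handled by put_vp8_pixels*); the negative taps are stored as
 * positive values and subtracted in FILTER_6TAP, so each row's effective
 * coefficients sum to 128, matching the "+ 64) >> 7" normalization.
 * FILTER_4TAP is the same computation restricted to the middle four taps.
 * The two-dimensional case below filters horizontally into a temporary
 * buffer first, then vertically from that buffer.
 */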
#define VP8_EPEL_HV(SIZE, HTAPS, VTAPS) \
static void put_vp8_epel ## SIZE ## _h ## HTAPS ## v ## VTAPS ## _c(uint8_t *dst, int dststride, uint8_t *src, int srcstride, int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[mx-1]; \
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; \
    int x, y; \
    uint8_t tmp_array[(2*SIZE+VTAPS-1)*SIZE]; \
    uint8_t *tmp = tmp_array; \
    src -= (2-(VTAPS==4))*srcstride; \
\
    for (y = 0; y < h+VTAPS-1; y++) { \
        for (x = 0; x < SIZE; x++) \
            tmp[x] = FILTER_ ## HTAPS ## TAP(src, filter, 1); \
        tmp += SIZE; \
        src += srcstride; \
    } \
\
    tmp = tmp_array + (2-(VTAPS==4))*SIZE; \
    filter = subpel_filters[my-1]; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## VTAPS ## TAP(tmp, filter, SIZE); \
        dst += dststride; \
        tmp += SIZE; \
    } \
}

VP8_EPEL_H(16, 4)
VP8_EPEL_H(8,  4)
VP8_EPEL_H(4,  4)
VP8_EPEL_H(16, 6)
VP8_EPEL_H(8,  6)
VP8_EPEL_H(4,  6)
VP8_EPEL_V(16, 4)
VP8_EPEL_V(8,  4)
VP8_EPEL_V(4,  4)
VP8_EPEL_V(16, 6)
VP8_EPEL_V(8,  6)
VP8_EPEL_V(4,  6)
VP8_EPEL_HV(16, 4, 4)
VP8_EPEL_HV(8,  4, 4)
VP8_EPEL_HV(4,  4, 4)
VP8_EPEL_HV(16, 4, 6)
VP8_EPEL_HV(8,  4, 6)
VP8_EPEL_HV(4,  4, 6)
VP8_EPEL_HV(16, 6, 4)
VP8_EPEL_HV(8,  6, 4)
VP8_EPEL_HV(4,  6, 4)
VP8_EPEL_HV(16, 6, 6)
VP8_EPEL_HV(8,  6, 6)
VP8_EPEL_HV(4,  6, 6)

#define VP8_BILINEAR(SIZE) \
static void put_vp8_bilinear ## SIZE ## _h_c(uint8_t *dst, int stride, uint8_t *src, int s2, int h, int mx, int my) \
{ \
    int a = 8-mx, b = mx; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (a*src[x] + b*src[x+1] + 4) >> 3; \
        dst += stride; \
        src += stride; \
    } \
} \
static void put_vp8_bilinear ## SIZE ## _v_c(uint8_t *dst, int stride, uint8_t *src, int s2, int h, int mx, int my) \
{ \
    int c = 8-my, d = my; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (c*src[x] + d*src[x+stride] + 4) >> 3; \
        dst += stride; \
        src += stride; \
    } \
} \
\
static void put_vp8_bilinear ## SIZE ## _hv_c(uint8_t *dst, int stride, uint8_t *src, int s2, int h, int mx, int my) \
{ \
    int a = 8-mx, b = mx; \
    int c = 8-my, d = my; \
    int x, y; \
    uint8_t tmp_array[(2*SIZE+1)*SIZE]; \
    uint8_t *tmp = tmp_array; \
\
    for (y = 0; y < h+1; y++) { \
        for (x = 0; x < SIZE; x++) \
            tmp[x] = (a*src[x] + b*src[x+1] + 4) >> 3; \
        tmp += SIZE; \
        src += stride; \
    } \
\
    tmp = tmp_array; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (c*tmp[x] + d*tmp[x+SIZE] + 4) >> 3; \
        dst += stride; \
        tmp += SIZE; \
    } \
}

VP8_BILINEAR(16)
VP8_BILINEAR(8)
VP8_BILINEAR(4)
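/* The init macros below fill the 3x3 function pointer tables used by the
 * decoder: the first index selects the block width (0 = 16, 1 = 8, 2 = 4),
 * the second and third the vertical and horizontal filter variant
 * (0 = copy, then 4-tap and 6-tap for the epel table). The bilinear table
 * has the same shape, so the reduced-complexity motion compensation used by
 * some VP8 bitstream versions is interchangeable at the call site.
 */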
#define VP8_MC_FUNC(IDX, SIZE) \
    dsp->put_vp8_epel_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
    dsp->put_vp8_epel_pixels_tab[IDX][0][1] = put_vp8_epel ## SIZE ## _h4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][0][2] = put_vp8_epel ## SIZE ## _h6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][0] = put_vp8_epel ## SIZE ## _v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][1] = put_vp8_epel ## SIZE ## _h4v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][2] = put_vp8_epel ## SIZE ## _h6v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][0] = put_vp8_epel ## SIZE ## _v6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][1] = put_vp8_epel ## SIZE ## _h4v6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][2] = put_vp8_epel ## SIZE ## _h6v6_c

#define VP8_BILINEAR_MC_FUNC(IDX, SIZE) \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][1] = put_vp8_bilinear ## SIZE ## _h_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][2] = put_vp8_bilinear ## SIZE ## _h_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][0] = put_vp8_bilinear ## SIZE ## _v_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][1] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][2] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][0] = put_vp8_bilinear ## SIZE ## _v_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][1] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][2] = put_vp8_bilinear ## SIZE ## _hv_c

av_cold void ff_vp8dsp_init(VP8DSPContext *dsp)
{
    dsp->vp8_luma_dc_wht    = vp8_luma_dc_wht_c;
    dsp->vp8_luma_dc_wht_dc = vp8_luma_dc_wht_dc_c;
    dsp->vp8_idct_add       = vp8_idct_add_c;
    dsp->vp8_idct_dc_add    = vp8_idct_dc_add_c;
    dsp->vp8_idct_dc_add4y  = vp8_idct_dc_add4y_c;
    dsp->vp8_idct_dc_add4uv = vp8_idct_dc_add4uv_c;

    dsp->vp8_v_loop_filter16y = vp8_v_loop_filter16_c;
    dsp->vp8_h_loop_filter16y = vp8_h_loop_filter16_c;
    dsp->vp8_v_loop_filter8uv = vp8_v_loop_filter8uv_c;
    dsp->vp8_h_loop_filter8uv = vp8_h_loop_filter8uv_c;

    dsp->vp8_v_loop_filter16y_inner = vp8_v_loop_filter16_inner_c;
    dsp->vp8_h_loop_filter16y_inner = vp8_h_loop_filter16_inner_c;
    dsp->vp8_v_loop_filter8uv_inner = vp8_v_loop_filter8uv_inner_c;
    dsp->vp8_h_loop_filter8uv_inner = vp8_h_loop_filter8uv_inner_c;

    dsp->vp8_v_loop_filter_simple = vp8_v_loop_filter_simple_c;
    dsp->vp8_h_loop_filter_simple = vp8_h_loop_filter_simple_c;

    VP8_MC_FUNC(0, 16);
    VP8_MC_FUNC(1, 8);
    VP8_MC_FUNC(2, 4);

    VP8_BILINEAR_MC_FUNC(0, 16);
    VP8_BILINEAR_MC_FUNC(1, 8);
    VP8_BILINEAR_MC_FUNC(2, 4);

    if (HAVE_MMX)
        ff_vp8dsp_init_x86(dsp);
    if (HAVE_ALTIVEC)
        ff_vp8dsp_init_altivec(dsp);
    if (ARCH_ARM)
        ff_vp8dsp_init_arm(dsp);
}
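/* Typical use (sketch): the VP8 decoder fills a VP8DSPContext once and then
 * calls everything through the function pointers, e.g.
 *
 *     VP8DSPContext dsp;
 *     ff_vp8dsp_init(&dsp);
 *     dsp.vp8_idct_dc_add(dst, block, stride);
 *
 * Because the C versions are installed first, ff_vp8dsp_init_x86(),
 * ff_vp8dsp_init_altivec() and ff_vp8dsp_init_arm() only need to override
 * the pointers they actually accelerate.
 */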