Libav
|
00001 /* 00002 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder. 00003 * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de> 00004 * 00005 * MMX-optimized DSP functions, based on H.264 optimizations by 00006 * Michael Niedermayer and Loren Merritt 00007 * 00008 * This file is part of FFmpeg. 00009 * 00010 * FFmpeg is free software; you can redistribute it and/or 00011 * modify it under the terms of the GNU Lesser General Public 00012 * License as published by the Free Software Foundation; either 00013 * version 2.1 of the License, or (at your option) any later version. 00014 * 00015 * FFmpeg is distributed in the hope that it will be useful, 00016 * but WITHOUT ANY WARRANTY; without even the implied warranty of 00017 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 00018 * Lesser General Public License for more details. 00019 * 00020 * You should have received a copy of the GNU Lesser General Public 00021 * License along with FFmpeg; if not, write to the Free Software 00022 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 00023 */ 00024 00025 #include "libavutil/common.h" 00026 #include "libavutil/x86_cpu.h" 00027 #include "libavcodec/dsputil.h" 00028 #include "dsputil_mmx.h" 00029 00030 /***************************************************************************** 00031 * 00032 * inverse transform 00033 * 00034 ****************************************************************************/ 00035 00036 static inline void cavs_idct8_1d(int16_t *block, uint64_t bias) 00037 { 00038 __asm__ volatile( 00039 "movq 112(%0), %%mm4 \n\t" /* mm4 = src7 */ 00040 "movq 16(%0), %%mm5 \n\t" /* mm5 = src1 */ 00041 "movq 80(%0), %%mm2 \n\t" /* mm2 = src5 */ 00042 "movq 48(%0), %%mm7 \n\t" /* mm7 = src3 */ 00043 "movq %%mm4, %%mm0 \n\t" 00044 "movq %%mm5, %%mm3 \n\t" 00045 "movq %%mm2, %%mm6 \n\t" 00046 "movq %%mm7, %%mm1 \n\t" 00047 00048 "paddw %%mm4, %%mm4 \n\t" /* mm4 = 2*src7 */ 00049 "paddw %%mm3, %%mm3 \n\t" /* mm3 = 2*src1 */ 00050 "paddw %%mm6, %%mm6 \n\t" /* mm6 = 2*src5 */ 00051 "paddw %%mm1, %%mm1 \n\t" /* mm1 = 2*src3 */ 00052 "paddw %%mm4, %%mm0 \n\t" /* mm0 = 3*src7 */ 00053 "paddw %%mm3, %%mm5 \n\t" /* mm5 = 3*src1 */ 00054 "paddw %%mm6, %%mm2 \n\t" /* mm2 = 3*src5 */ 00055 "paddw %%mm1, %%mm7 \n\t" /* mm7 = 3*src3 */ 00056 "psubw %%mm4, %%mm5 \n\t" /* mm5 = 3*src1 - 2*src7 = a0 */ 00057 "paddw %%mm6, %%mm7 \n\t" /* mm7 = 3*src3 + 2*src5 = a1 */ 00058 "psubw %%mm2, %%mm1 \n\t" /* mm1 = 2*src3 - 3*src5 = a2 */ 00059 "paddw %%mm0, %%mm3 \n\t" /* mm3 = 2*src1 + 3*src7 = a3 */ 00060 00061 "movq %%mm5, %%mm4 \n\t" 00062 "movq %%mm7, %%mm6 \n\t" 00063 "movq %%mm3, %%mm0 \n\t" 00064 "movq %%mm1, %%mm2 \n\t" 00065 SUMSUB_BA( %%mm7, %%mm5 ) /* mm7 = a0 + a1 mm5 = a0 - a1 */ 00066 "paddw %%mm3, %%mm7 \n\t" /* mm7 = a0 + a1 + a3 */ 00067 "paddw %%mm1, %%mm5 \n\t" /* mm5 = a0 - a1 + a2 */ 00068 "paddw %%mm7, %%mm7 \n\t" 00069 "paddw %%mm5, %%mm5 \n\t" 00070 "paddw %%mm6, %%mm7 \n\t" /* mm7 = b4 */ 00071 "paddw %%mm4, %%mm5 \n\t" /* mm5 = b5 */ 00072 00073 SUMSUB_BA( %%mm1, %%mm3 ) /* mm1 = a3 + a2 mm3 = a3 - a2 */ 00074 "psubw %%mm1, %%mm4 \n\t" /* mm4 = a0 - a2 - a3 */ 00075 "movq %%mm4, %%mm1 \n\t" /* mm1 = a0 - a2 - a3 */ 00076 "psubw %%mm6, %%mm3 \n\t" /* mm3 = a3 - a2 - a1 */ 00077 "paddw %%mm1, %%mm1 \n\t" 00078 "paddw %%mm3, %%mm3 \n\t" 00079 "psubw %%mm2, %%mm1 \n\t" /* mm1 = b7 */ 00080 "paddw %%mm0, %%mm3 \n\t" /* mm3 = b6 */ 00081 00082 "movq 32(%0), %%mm2 \n\t" /* mm2 = src2 */ 00083 "movq 96(%0), %%mm6 \n\t" /* mm6 = src6 */ 00084 "movq %%mm2, %%mm4 \n\t" 00085 "movq %%mm6, %%mm0 \n\t" 00086 "psllw $2, %%mm4 \n\t" /* mm4 = 4*src2 */ 00087 "psllw $2, %%mm6 \n\t" /* mm6 = 4*src6 */ 00088 "paddw %%mm4, %%mm2 \n\t" /* mm2 = 5*src2 */ 00089 "paddw %%mm6, %%mm0 \n\t" /* mm0 = 5*src6 */ 00090 "paddw %%mm2, %%mm2 \n\t" 00091 "paddw %%mm0, %%mm0 \n\t" 00092 "psubw %%mm0, %%mm4 \n\t" /* mm4 = 4*src2 - 10*src6 = a7 */ 00093 "paddw %%mm2, %%mm6 \n\t" /* mm6 = 4*src6 + 10*src2 = a6 */ 00094 00095 "movq (%0), %%mm2 \n\t" /* mm2 = src0 */ 00096 "movq 64(%0), %%mm0 \n\t" /* mm0 = src4 */ 00097 SUMSUB_BA( %%mm0, %%mm2 ) /* mm0 = src0+src4 mm2 = src0-src4 */ 00098 "psllw $3, %%mm0 \n\t" 00099 "psllw $3, %%mm2 \n\t" 00100 "paddw %1, %%mm0 \n\t" /* add rounding bias */ 00101 "paddw %1, %%mm2 \n\t" /* add rounding bias */ 00102 00103 SUMSUB_BA( %%mm6, %%mm0 ) /* mm6 = a4 + a6 mm0 = a4 - a6 */ 00104 SUMSUB_BA( %%mm4, %%mm2 ) /* mm4 = a5 + a7 mm2 = a5 - a7 */ 00105 SUMSUB_BA( %%mm7, %%mm6 ) /* mm7 = dst0 mm6 = dst7 */ 00106 SUMSUB_BA( %%mm5, %%mm4 ) /* mm5 = dst1 mm4 = dst6 */ 00107 SUMSUB_BA( %%mm3, %%mm2 ) /* mm3 = dst2 mm2 = dst5 */ 00108 SUMSUB_BA( %%mm1, %%mm0 ) /* mm1 = dst3 mm0 = dst4 */ 00109 :: "r"(block), "m"(bias) 00110 ); 00111 } 00112 00113 static void cavs_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride) 00114 { 00115 int i; 00116 DECLARE_ALIGNED(8, int16_t, b2)[64]; 00117 00118 for(i=0; i<2; i++){ 00119 DECLARE_ALIGNED(8, uint64_t, tmp); 00120 00121 cavs_idct8_1d(block+4*i, ff_pw_4); 00122 00123 __asm__ volatile( 00124 "psraw $3, %%mm7 \n\t" 00125 "psraw $3, %%mm6 \n\t" 00126 "psraw $3, %%mm5 \n\t" 00127 "psraw $3, %%mm4 \n\t" 00128 "psraw $3, %%mm3 \n\t" 00129 "psraw $3, %%mm2 \n\t" 00130 "psraw $3, %%mm1 \n\t" 00131 "psraw $3, %%mm0 \n\t" 00132 "movq %%mm7, %0 \n\t" 00133 TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 ) 00134 "movq %%mm0, 8(%1) \n\t" 00135 "movq %%mm6, 24(%1) \n\t" 00136 "movq %%mm7, 40(%1) \n\t" 00137 "movq %%mm4, 56(%1) \n\t" 00138 "movq %0, %%mm7 \n\t" 00139 TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 ) 00140 "movq %%mm7, (%1) \n\t" 00141 "movq %%mm1, 16(%1) \n\t" 00142 "movq %%mm0, 32(%1) \n\t" 00143 "movq %%mm3, 48(%1) \n\t" 00144 : "=m"(tmp) 00145 : "r"(b2+32*i) 00146 : "memory" 00147 ); 00148 } 00149 00150 for(i=0; i<2; i++){ 00151 cavs_idct8_1d(b2+4*i, ff_pw_64.a); 00152 00153 __asm__ volatile( 00154 "psraw $7, %%mm7 \n\t" 00155 "psraw $7, %%mm6 \n\t" 00156 "psraw $7, %%mm5 \n\t" 00157 "psraw $7, %%mm4 \n\t" 00158 "psraw $7, %%mm3 \n\t" 00159 "psraw $7, %%mm2 \n\t" 00160 "psraw $7, %%mm1 \n\t" 00161 "psraw $7, %%mm0 \n\t" 00162 "movq %%mm7, (%0) \n\t" 00163 "movq %%mm5, 16(%0) \n\t" 00164 "movq %%mm3, 32(%0) \n\t" 00165 "movq %%mm1, 48(%0) \n\t" 00166 "movq %%mm0, 64(%0) \n\t" 00167 "movq %%mm2, 80(%0) \n\t" 00168 "movq %%mm4, 96(%0) \n\t" 00169 "movq %%mm6, 112(%0) \n\t" 00170 :: "r"(b2+4*i) 00171 : "memory" 00172 ); 00173 } 00174 00175 add_pixels_clamped_mmx(b2, dst, stride); 00176 } 00177 00178 /***************************************************************************** 00179 * 00180 * motion compensation 00181 * 00182 ****************************************************************************/ 00183 00184 /* vertical filter [-1 -2 96 42 -7 0] */ 00185 #define QPEL_CAVSV1(A,B,C,D,E,F,OP,MUL2) \ 00186 "movd (%0), "#F" \n\t"\ 00187 "movq "#C", %%mm6 \n\t"\ 00188 "pmullw %5, %%mm6 \n\t"\ 00189 "movq "#D", %%mm7 \n\t"\ 00190 "pmullw "MANGLE(MUL2)", %%mm7\n\t"\ 00191 "psllw $3, "#E" \n\t"\ 00192 "psubw "#E", %%mm6 \n\t"\ 00193 "psraw $3, "#E" \n\t"\ 00194 "paddw %%mm7, %%mm6 \n\t"\ 00195 "paddw "#E", %%mm6 \n\t"\ 00196 "paddw "#B", "#B" \n\t"\ 00197 "pxor %%mm7, %%mm7 \n\t"\ 00198 "add %2, %0 \n\t"\ 00199 "punpcklbw %%mm7, "#F" \n\t"\ 00200 "psubw "#B", %%mm6 \n\t"\ 00201 "psraw $1, "#B" \n\t"\ 00202 "psubw "#A", %%mm6 \n\t"\ 00203 "paddw %4, %%mm6 \n\t"\ 00204 "psraw $7, %%mm6 \n\t"\ 00205 "packuswb %%mm6, %%mm6 \n\t"\ 00206 OP(%%mm6, (%1), A, d) \ 00207 "add %3, %1 \n\t" 00208 00209 /* vertical filter [ 0 -1 5 5 -1 0] */ 00210 #define QPEL_CAVSV2(A,B,C,D,E,F,OP,MUL2) \ 00211 "movd (%0), "#F" \n\t"\ 00212 "movq "#C", %%mm6 \n\t"\ 00213 "paddw "#D", %%mm6 \n\t"\ 00214 "pmullw %5, %%mm6 \n\t"\ 00215 "add %2, %0 \n\t"\ 00216 "punpcklbw %%mm7, "#F" \n\t"\ 00217 "psubw "#B", %%mm6 \n\t"\ 00218 "psubw "#E", %%mm6 \n\t"\ 00219 "paddw %4, %%mm6 \n\t"\ 00220 "psraw $3, %%mm6 \n\t"\ 00221 "packuswb %%mm6, %%mm6 \n\t"\ 00222 OP(%%mm6, (%1), A, d) \ 00223 "add %3, %1 \n\t" 00224 00225 /* vertical filter [ 0 -7 42 96 -2 -1] */ 00226 #define QPEL_CAVSV3(A,B,C,D,E,F,OP,MUL2) \ 00227 "movd (%0), "#F" \n\t"\ 00228 "movq "#C", %%mm6 \n\t"\ 00229 "pmullw "MANGLE(MUL2)", %%mm6\n\t"\ 00230 "movq "#D", %%mm7 \n\t"\ 00231 "pmullw %5, %%mm7 \n\t"\ 00232 "psllw $3, "#B" \n\t"\ 00233 "psubw "#B", %%mm6 \n\t"\ 00234 "psraw $3, "#B" \n\t"\ 00235 "paddw %%mm7, %%mm6 \n\t"\ 00236 "paddw "#B", %%mm6 \n\t"\ 00237 "paddw "#E", "#E" \n\t"\ 00238 "pxor %%mm7, %%mm7 \n\t"\ 00239 "add %2, %0 \n\t"\ 00240 "punpcklbw %%mm7, "#F" \n\t"\ 00241 "psubw "#E", %%mm6 \n\t"\ 00242 "psraw $1, "#E" \n\t"\ 00243 "psubw "#F", %%mm6 \n\t"\ 00244 "paddw %4, %%mm6 \n\t"\ 00245 "psraw $7, %%mm6 \n\t"\ 00246 "packuswb %%mm6, %%mm6 \n\t"\ 00247 OP(%%mm6, (%1), A, d) \ 00248 "add %3, %1 \n\t" 00249 00250 00251 #define QPEL_CAVSVNUM(VOP,OP,ADD,MUL1,MUL2)\ 00252 int w= 2;\ 00253 src -= 2*srcStride;\ 00254 \ 00255 while(w--){\ 00256 __asm__ volatile(\ 00257 "pxor %%mm7, %%mm7 \n\t"\ 00258 "movd (%0), %%mm0 \n\t"\ 00259 "add %2, %0 \n\t"\ 00260 "movd (%0), %%mm1 \n\t"\ 00261 "add %2, %0 \n\t"\ 00262 "movd (%0), %%mm2 \n\t"\ 00263 "add %2, %0 \n\t"\ 00264 "movd (%0), %%mm3 \n\t"\ 00265 "add %2, %0 \n\t"\ 00266 "movd (%0), %%mm4 \n\t"\ 00267 "add %2, %0 \n\t"\ 00268 "punpcklbw %%mm7, %%mm0 \n\t"\ 00269 "punpcklbw %%mm7, %%mm1 \n\t"\ 00270 "punpcklbw %%mm7, %%mm2 \n\t"\ 00271 "punpcklbw %%mm7, %%mm3 \n\t"\ 00272 "punpcklbw %%mm7, %%mm4 \n\t"\ 00273 VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)\ 00274 VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)\ 00275 VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)\ 00276 VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)\ 00277 VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, MUL2)\ 00278 VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, MUL2)\ 00279 VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)\ 00280 VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)\ 00281 \ 00282 : "+a"(src), "+c"(dst)\ 00283 : "S"((x86_reg)srcStride), "r"((x86_reg)dstStride), "m"(ADD), "m"(MUL1)\ 00284 : "memory"\ 00285 );\ 00286 if(h==16){\ 00287 __asm__ volatile(\ 00288 VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)\ 00289 VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)\ 00290 VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, MUL2)\ 00291 VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, MUL2)\ 00292 VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)\ 00293 VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)\ 00294 VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)\ 00295 VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)\ 00296 \ 00297 : "+a"(src), "+c"(dst)\ 00298 : "S"((x86_reg)srcStride), "r"((x86_reg)dstStride), "m"(ADD), "m"(MUL1)\ 00299 : "memory"\ 00300 );\ 00301 }\ 00302 src += 4-(h+5)*srcStride;\ 00303 dst += 4-h*dstStride;\ 00304 } 00305 00306 #define QPEL_CAVS(OPNAME, OP, MMX)\ 00307 static void OPNAME ## cavs_qpel8_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ 00308 int h=8;\ 00309 __asm__ volatile(\ 00310 "pxor %%mm7, %%mm7 \n\t"\ 00311 "movq %5, %%mm6 \n\t"\ 00312 "1: \n\t"\ 00313 "movq (%0), %%mm0 \n\t"\ 00314 "movq 1(%0), %%mm2 \n\t"\ 00315 "movq %%mm0, %%mm1 \n\t"\ 00316 "movq %%mm2, %%mm3 \n\t"\ 00317 "punpcklbw %%mm7, %%mm0 \n\t"\ 00318 "punpckhbw %%mm7, %%mm1 \n\t"\ 00319 "punpcklbw %%mm7, %%mm2 \n\t"\ 00320 "punpckhbw %%mm7, %%mm3 \n\t"\ 00321 "paddw %%mm2, %%mm0 \n\t"\ 00322 "paddw %%mm3, %%mm1 \n\t"\ 00323 "pmullw %%mm6, %%mm0 \n\t"\ 00324 "pmullw %%mm6, %%mm1 \n\t"\ 00325 "movq -1(%0), %%mm2 \n\t"\ 00326 "movq 2(%0), %%mm4 \n\t"\ 00327 "movq %%mm2, %%mm3 \n\t"\ 00328 "movq %%mm4, %%mm5 \n\t"\ 00329 "punpcklbw %%mm7, %%mm2 \n\t"\ 00330 "punpckhbw %%mm7, %%mm3 \n\t"\ 00331 "punpcklbw %%mm7, %%mm4 \n\t"\ 00332 "punpckhbw %%mm7, %%mm5 \n\t"\ 00333 "paddw %%mm4, %%mm2 \n\t"\ 00334 "paddw %%mm3, %%mm5 \n\t"\ 00335 "psubw %%mm2, %%mm0 \n\t"\ 00336 "psubw %%mm5, %%mm1 \n\t"\ 00337 "movq %6, %%mm5 \n\t"\ 00338 "paddw %%mm5, %%mm0 \n\t"\ 00339 "paddw %%mm5, %%mm1 \n\t"\ 00340 "psraw $3, %%mm0 \n\t"\ 00341 "psraw $3, %%mm1 \n\t"\ 00342 "packuswb %%mm1, %%mm0 \n\t"\ 00343 OP(%%mm0, (%1),%%mm5, q) \ 00344 "add %3, %0 \n\t"\ 00345 "add %4, %1 \n\t"\ 00346 "decl %2 \n\t"\ 00347 " jnz 1b \n\t"\ 00348 : "+a"(src), "+c"(dst), "+m"(h)\ 00349 : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_4)\ 00350 : "memory"\ 00351 );\ 00352 }\ 00353 \ 00354 static inline void OPNAME ## cavs_qpel8or16_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ 00355 QPEL_CAVSVNUM(QPEL_CAVSV1,OP,ff_pw_64,ff_pw_96,ff_pw_42) \ 00356 }\ 00357 \ 00358 static inline void OPNAME ## cavs_qpel8or16_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ 00359 QPEL_CAVSVNUM(QPEL_CAVSV2,OP,ff_pw_4,ff_pw_5,ff_pw_5) \ 00360 }\ 00361 \ 00362 static inline void OPNAME ## cavs_qpel8or16_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ 00363 QPEL_CAVSVNUM(QPEL_CAVSV3,OP,ff_pw_64,ff_pw_96,ff_pw_42) \ 00364 }\ 00365 \ 00366 static void OPNAME ## cavs_qpel8_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ 00367 OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst , src , dstStride, srcStride, 8);\ 00368 }\ 00369 static void OPNAME ## cavs_qpel16_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ 00370 OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst , src , dstStride, srcStride, 16);\ 00371 OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\ 00372 }\ 00373 \ 00374 static void OPNAME ## cavs_qpel8_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ 00375 OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst , src , dstStride, srcStride, 8);\ 00376 }\ 00377 static void OPNAME ## cavs_qpel16_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ 00378 OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst , src , dstStride, srcStride, 16);\ 00379 OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\ 00380 }\ 00381 \ 00382 static void OPNAME ## cavs_qpel8_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ 00383 OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst , src , dstStride, srcStride, 8);\ 00384 }\ 00385 static void OPNAME ## cavs_qpel16_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ 00386 OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst , src , dstStride, srcStride, 16);\ 00387 OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\ 00388 }\ 00389 \ 00390 static void OPNAME ## cavs_qpel16_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ 00391 OPNAME ## cavs_qpel8_h_ ## MMX(dst , src , dstStride, srcStride);\ 00392 OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\ 00393 src += 8*srcStride;\ 00394 dst += 8*dstStride;\ 00395 OPNAME ## cavs_qpel8_h_ ## MMX(dst , src , dstStride, srcStride);\ 00396 OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\ 00397 }\ 00398 00399 #define CAVS_MC(OPNAME, SIZE, MMX) \ 00400 static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ 00401 OPNAME ## cavs_qpel ## SIZE ## _h_ ## MMX(dst, src, stride, stride);\ 00402 }\ 00403 \ 00404 static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ 00405 OPNAME ## cavs_qpel ## SIZE ## _v1_ ## MMX(dst, src, stride, stride);\ 00406 }\ 00407 \ 00408 static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ 00409 OPNAME ## cavs_qpel ## SIZE ## _v2_ ## MMX(dst, src, stride, stride);\ 00410 }\ 00411 \ 00412 static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ 00413 OPNAME ## cavs_qpel ## SIZE ## _v3_ ## MMX(dst, src, stride, stride);\ 00414 }\ 00415 00416 #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t" 00417 #define AVG_3DNOW_OP(a,b,temp, size) \ 00418 "mov" #size " " #b ", " #temp " \n\t"\ 00419 "pavgusb " #temp ", " #a " \n\t"\ 00420 "mov" #size " " #a ", " #b " \n\t" 00421 #define AVG_MMX2_OP(a,b,temp, size) \ 00422 "mov" #size " " #b ", " #temp " \n\t"\ 00423 "pavgb " #temp ", " #a " \n\t"\ 00424 "mov" #size " " #a ", " #b " \n\t" 00425 00426 QPEL_CAVS(put_, PUT_OP, 3dnow) 00427 QPEL_CAVS(avg_, AVG_3DNOW_OP, 3dnow) 00428 QPEL_CAVS(put_, PUT_OP, mmx2) 00429 QPEL_CAVS(avg_, AVG_MMX2_OP, mmx2) 00430 00431 CAVS_MC(put_, 8, 3dnow) 00432 CAVS_MC(put_, 16,3dnow) 00433 CAVS_MC(avg_, 8, 3dnow) 00434 CAVS_MC(avg_, 16,3dnow) 00435 CAVS_MC(put_, 8, mmx2) 00436 CAVS_MC(put_, 16,mmx2) 00437 CAVS_MC(avg_, 8, mmx2) 00438 CAVS_MC(avg_, 16,mmx2) 00439 00440 void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx) { 00441 #define dspfunc(PFX, IDX, NUM) \ 00442 c->PFX ## _pixels_tab[IDX][ 0] = ff_ ## PFX ## NUM ## _mc00_mmx2; \ 00443 c->PFX ## _pixels_tab[IDX][ 2] = ff_ ## PFX ## NUM ## _mc20_mmx2; \ 00444 c->PFX ## _pixels_tab[IDX][ 4] = ff_ ## PFX ## NUM ## _mc01_mmx2; \ 00445 c->PFX ## _pixels_tab[IDX][ 8] = ff_ ## PFX ## NUM ## _mc02_mmx2; \ 00446 c->PFX ## _pixels_tab[IDX][12] = ff_ ## PFX ## NUM ## _mc03_mmx2; \ 00447 00448 dspfunc(put_cavs_qpel, 0, 16); 00449 dspfunc(put_cavs_qpel, 1, 8); 00450 dspfunc(avg_cavs_qpel, 0, 16); 00451 dspfunc(avg_cavs_qpel, 1, 8); 00452 #undef dspfunc 00453 c->cavs_idct8_add = cavs_idct8_add_mmx; 00454 } 00455 00456 void ff_cavsdsp_init_3dnow(DSPContext* c, AVCodecContext *avctx) { 00457 #define dspfunc(PFX, IDX, NUM) \ 00458 c->PFX ## _pixels_tab[IDX][ 0] = ff_ ## PFX ## NUM ## _mc00_mmx2; \ 00459 c->PFX ## _pixels_tab[IDX][ 2] = ff_ ## PFX ## NUM ## _mc20_3dnow; \ 00460 c->PFX ## _pixels_tab[IDX][ 4] = ff_ ## PFX ## NUM ## _mc01_3dnow; \ 00461 c->PFX ## _pixels_tab[IDX][ 8] = ff_ ## PFX ## NUM ## _mc02_3dnow; \ 00462 c->PFX ## _pixels_tab[IDX][12] = ff_ ## PFX ## NUM ## _mc03_3dnow; \ 00463 00464 dspfunc(put_cavs_qpel, 0, 16); 00465 dspfunc(put_cavs_qpel, 1, 8); 00466 dspfunc(avg_cavs_qpel, 0, 16); 00467 dspfunc(avg_cavs_qpel, 1, 8); 00468 #undef dspfunc 00469 c->cavs_idct8_add = cavs_idct8_add_mmx; 00470 }