libavcodec/ppc/snow_altivec.c

/*
 * AltiVec-optimized snow DSP utils
 * Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
#include "libavcodec/snow.h"

#include "gcc_fixes.h"
#include "dsputil_altivec.h"

#undef NDEBUG
#include <assert.h>



//FIXME remove this replication
#define slice_buffer_get_line(slice_buf, line_num) ((slice_buf)->line[line_num] ? (slice_buf)->line[line_num] : slice_buffer_load_line((slice_buf), (line_num)))

static DWTELEM * slice_buffer_load_line(slice_buffer * buf, int line)
{
    int offset;
    DWTELEM * buffer;

//  av_log(NULL, AV_LOG_DEBUG, "Cache hit: %d\n", line);

    assert(buf->data_stack_top >= 0);
//  assert(!buf->line[line]);
    if (buf->line[line])
        return buf->line[line];

    offset = buf->line_width * line;
    buffer = buf->data_stack[buf->data_stack_top];
    buf->data_stack_top--;
    buf->line[line] = buffer;

//  av_log(NULL, AV_LOG_DEBUG, "slice_buffer_load_line: line: %d remaining: %d\n", line, buf->data_stack_top + 1);

    return buffer;
}
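
/* Illustrative sketch (not part of the original source): slice_buffer_get_line()
 * is the intended entry point.  It returns the cached pointer when the line is
 * already resident and otherwise falls back to slice_buffer_load_line(), which
 * pops a spare buffer off the slice buffer's data stack and installs it for that
 * line.  A caller walking a block row by row would use it roughly as follows
 * (hypothetical helper, assuming only the slice_buffer fields used above):
 */
#if 0
static void touch_rows(slice_buffer *sb, int src_y, int b_h)
{
    int y;
    for (y = 0; y < b_h; y++) {
        DWTELEM *line = slice_buffer_get_line(sb, src_y + y);
        /* line now points at line_width coefficients for row src_y + y */
    }
}
#endif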


//altivec code

void ff_snow_horizontal_compose97i_altivec(IDWTELEM *b, int width)
{
#if 0
    const int w2= (width+1)>>1;
    DECLARE_ALIGNED_16(IDWTELEM, temp[(width>>1)]);
    const int w_l= (width>>1);
    const int w_r= w2 - 1;
    int i;
    vector signed short t1, t2, x, y, tmp1, tmp2;
    vector signed short *vbuf, *vtmp;
    vector unsigned char align;

    { // Lift 0
        IDWTELEM * const ref = b + w2 - 1;
        IDWTELEM b_0 = b[0];
        vector signed short v7 = vec_splat_s16(7);
        vbuf = (vector signed short *)b;

        tmp1 = vec_ld (0, ref);
        align = vec_lvsl (0, ref);
        tmp2 = vec_ld (15, ref);
        t1 = vec_perm(tmp1, tmp2, align);

        for (i=0; i<w_l-15; i+=16) {
#if 0
/*        b[i+0] = b[i+0] - ((3 * (ref[i+0] + ref[i+1]) + 4) >> 3);
        b[i+1] = b[i+1] - ((3 * (ref[i+1] + ref[i+2]) + 4) >> 3);
        b[i+2] = b[i+2] - ((3 * (ref[i+2] + ref[i+3]) + 4) >> 3);
        b[i+3] = b[i+3] - ((3 * (ref[i+3] + ref[i+4]) + 4) >> 3);*/
        b[i+0] = b[i+0] + ((7 * (ref[i+0] + ref[i+1])-1) >> 8);
#else

        tmp1 = vec_ld (0, ref+8+i);
        tmp2 = vec_ld (15, ref+8+i);

        t2 = vec_perm(tmp1, tmp2, align);

        y = vec_add(t1, vec_sld(t1,t2,2));
//        y = vec_add(vec_add(y,y),y);

        tmp1 = vec_ld (0, ref+12+i);

        y = vec_add(y, vec_splat_s32(4));
        y = vec_sra(y, vec_splat_u32(3));

        tmp2 = vec_ld (15, ref+12+i);

        *vbuf = vec_sub(*vbuf, y);

        t1 = t2;

        vbuf++;

        t2 = vec_perm(tmp1, tmp2, align);

        y = vec_add(t1,vec_sld(t1,t2,4));
        y = vec_add(vec_add(y,y),y);

        tmp1 = vec_ld (0, ref+12+i);

        y = vec_add(y, vec_splat_s32(4));
        y = vec_sra(y, vec_splat_u32(3));

        tmp2 = vec_ld (15, ref+12+i);

        *vbuf = vec_sub(*vbuf, y);

        t1=t2;

        vbuf++;

        t2 = vec_perm(tmp1, tmp2, align);

        y = vec_add(t1,vec_sld(t1,t2,4));
        y = vec_add(vec_add(y,y),y);

        tmp1 = vec_ld (0, ref+16+i);

        y = vec_add(y, vec_splat_s32(4));
        y = vec_sra(y, vec_splat_u32(3));

        tmp2 = vec_ld (15, ref+16+i);

        *vbuf = vec_sub(*vbuf, y);

        t1=t2;

        t2 = vec_perm(tmp1, tmp2, align);

        y = vec_add(t1,vec_sld(t1,t2,4));
        y = vec_add(vec_add(y,y),y);

        vbuf++;

        y = vec_add(y, vec_splat_s32(4));
        y = vec_sra(y, vec_splat_u32(3));
        *vbuf = vec_sub(*vbuf, y);

        t1=t2;

        vbuf++;

#endif

        }

        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
        b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
    }

    { // Lift 1
        DWTELEM * const dst = b+w2;

        i = 0;
        for(; (((long)&dst[i]) & 0xF) && i<w_r; i++){
            dst[i] = dst[i] - (b[i] + b[i + 1]);
        }

        align = vec_lvsl(0, b+i);
        tmp1 = vec_ld(0, b+i);
        vbuf = (vector signed int*) (dst + i);
        tmp2 = vec_ld(15, b+i);

        t1 = vec_perm(tmp1, tmp2, align);

        for (; i<w_r-3; i+=4) {

#if 0
            dst[i]   = dst[i]   - (b[i]   + b[i + 1]);
            dst[i+1] = dst[i+1] - (b[i+1] + b[i + 2]);
            dst[i+2] = dst[i+2] - (b[i+2] + b[i + 3]);
            dst[i+3] = dst[i+3] - (b[i+3] + b[i + 4]);
#else

        tmp1 = vec_ld(0, b+4+i);
        tmp2 = vec_ld(15, b+4+i);

        t2 = vec_perm(tmp1, tmp2, align);

        y = vec_add(t1, vec_sld(t1,t2,4));
        *vbuf = vec_sub (*vbuf, y);

        vbuf++;

        t1 = t2;

#endif

        }

        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
    }

    { // Lift 2
        DWTELEM * const ref = b+w2 - 1;
        DWTELEM b_0 = b[0];
        vbuf= (vector signed int *) b;

        tmp1 = vec_ld (0, ref);
        align = vec_lvsl (0, ref);
        tmp2 = vec_ld (15, ref);
        t1= vec_perm(tmp1, tmp2, align);

        i = 0;
        for (; i<w_l-15; i+=16) {
#if 0
            b[i]   = b[i]   - (((8 -(ref[i]   + ref[i+1])) - (b[i]  <<2)) >> 4);
            b[i+1] = b[i+1] - (((8 -(ref[i+1] + ref[i+2])) - (b[i+1]<<2)) >> 4);
            b[i+2] = b[i+2] - (((8 -(ref[i+2] + ref[i+3])) - (b[i+2]<<2)) >> 4);
            b[i+3] = b[i+3] - (((8 -(ref[i+3] + ref[i+4])) - (b[i+3]<<2)) >> 4);
#else
            tmp1 = vec_ld (0, ref+4+i);
            tmp2 = vec_ld (15, ref+4+i);

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_sub(vec_splat_s32(8),y);

            tmp1 = vec_ld (0, ref+8+i);

            x = vec_sl(*vbuf,vec_splat_u32(2));
            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));

            tmp2 = vec_ld (15, ref+8+i);

            *vbuf = vec_sub( *vbuf, y);

            t1 = t2;

            vbuf++;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_sub(vec_splat_s32(8),y);

            tmp1 = vec_ld (0, ref+12+i);

            x = vec_sl(*vbuf,vec_splat_u32(2));
            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));

            tmp2 = vec_ld (15, ref+12+i);

            *vbuf = vec_sub( *vbuf, y);

            t1 = t2;

            vbuf++;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_sub(vec_splat_s32(8),y);

            tmp1 = vec_ld (0, ref+16+i);

            x = vec_sl(*vbuf,vec_splat_u32(2));
            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));

            tmp2 = vec_ld (15, ref+16+i);

            *vbuf = vec_sub( *vbuf, y);

            t1 = t2;

            vbuf++;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_sub(vec_splat_s32(8),y);

            t1 = t2;

            x = vec_sl(*vbuf,vec_splat_u32(2));
            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
            *vbuf = vec_sub( *vbuf, y);

            vbuf++;

#endif
        }

        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
        b[0] = b_0 - (((-2 * ref[1] + W_BO) - 4 * b_0) >> W_BS);
    }

    { // Lift 3
        DWTELEM * const src = b+w2;

        vbuf = (vector signed int *)b;
        vtmp = (vector signed int *)temp;

        i = 0;
        align = vec_lvsl(0, src);

        for (; i<w_r-3; i+=4) {
#if 0
            temp[i] = src[i] - ((-3*(b[i] + b[i+1]))>>1);
            temp[i+1] = src[i+1] - ((-3*(b[i+1] + b[i+2]))>>1);
            temp[i+2] = src[i+2] - ((-3*(b[i+2] + b[i+3]))>>1);
            temp[i+3] = src[i+3] - ((-3*(b[i+3] + b[i+4]))>>1);
#else
            tmp1 = vec_ld(0,src+i);
            t1 = vec_add(vbuf[0],vec_sld(vbuf[0],vbuf[1],4));
            tmp2 = vec_ld(15,src+i);
            t1 = vec_sub(vec_splat_s32(0),t1); //bad!
            t1 = vec_add(t1,vec_add(t1,t1));
            t2 = vec_perm(tmp1 ,tmp2 ,align);
            t1 = vec_sra(t1,vec_splat_u32(1));
            vbuf++;
            *vtmp = vec_sub(t2,t1);
            vtmp++;

#endif

        }

        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -3, 0, 1);
    }

    {
    //Interleave
        int a;
        vector signed int *t = (vector signed int *)temp,
                          *v = (vector signed int *)b;

        snow_interleave_line_header(&i, width, b, temp);

        for (; (i & 0xE) != 0xE; i-=2){
            b[i+1] = temp[i>>1];
            b[i] = b[i>>1];
        }
        for (i-=14; i>=0; i-=16){
           a=i/4;

           v[a+3]=vec_mergel(v[(a>>1)+1],t[(a>>1)+1]);
           v[a+2]=vec_mergeh(v[(a>>1)+1],t[(a>>1)+1]);
           v[a+1]=vec_mergel(v[a>>1],t[a>>1]);
           v[a]=vec_mergeh(v[a>>1],t[a>>1]);

        }

    }
#endif
}
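
/* Note: the entire body above is compiled out with #if 0.  The function
 * signature now takes 16-bit IDWTELEM coefficients, while the lifting code
 * still mixes 16-bit and 32-bit vector types and DWTELEM locals from the older
 * 32-bit layout, so it cannot simply be re-enabled; the plain C horizontal
 * compose remains in use.
 */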

void ff_snow_vertical_compose97i_altivec(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width)
{
    int i, w4 = width/4;
    vector signed int *v0, *v1,*v2,*v3,*v4,*v5;
    vector signed int t1, t2;

    v0=(vector signed int *)b0;
    v1=(vector signed int *)b1;
    v2=(vector signed int *)b2;
    v3=(vector signed int *)b3;
    v4=(vector signed int *)b4;
    v5=(vector signed int *)b5;

    for (i=0; i< w4;i++) {

    #if 0
        b4[i] -= (3*(b3[i] + b5[i])+4)>>3;
        b3[i] -= ((b2[i] + b4[i]));
        b2[i] += ((b1[i] + b3[i])+4*b2[i]+8)>>4;
        b1[i] += (3*(b0[i] + b2[i]))>>1;
    #else
        t1 = vec_add(v3[i], v5[i]);
        t2 = vec_add(t1, vec_add(t1,t1));
        t1 = vec_add(t2, vec_splat_s32(4));
        v4[i] = vec_sub(v4[i], vec_sra(t1,vec_splat_u32(3)));

        v3[i] = vec_sub(v3[i], vec_add(v2[i], v4[i]));

        t1 = vec_add(vec_splat_s32(8), vec_add(v1[i], v3[i]));
        t2 = vec_sl(v2[i], vec_splat_u32(2));
        v2[i] = vec_add(v2[i], vec_sra(vec_add(t1,t2),vec_splat_u32(4)));
        t1 = vec_add(v0[i], v2[i]);
        t2 = vec_add(t1, vec_add(t1,t1));
        v1[i] = vec_add(v1[i], vec_sra(t2,vec_splat_u32(1)));

    #endif
    }

    for(i*=4; i < width; i++)
    {
        b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
        b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
        b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
        b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
    }
}
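
/* For reference, a plain C sketch of what the vector loop above computes per
 * element; it simply mirrors the commented scalar code in the loop body and
 * the lead-out loop.  The vector loop forms the *3 products as t + (t + t)
 * and builds the rounding offsets with vec_splat constants instead of using a
 * 32-bit multiply.  (Illustrative only, not compiled.)
 */
#if 0
static void vertical_compose97i_scalar(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2,
                                       DWTELEM *b3, DWTELEM *b4, DWTELEM *b5,
                                       int width)
{
    int i;
    for (i = 0; i < width; i++) {
        b4[i] -= (W_DM*(b3[i] + b5[i]) + W_DO) >> W_DS;            /* (3*(..)+4)>>3 */
        b3[i] -= (W_CM*(b2[i] + b4[i]) + W_CO) >> W_CS;            /* b2[i]+b4[i]   */
        b2[i] += (W_BM*(b1[i] + b3[i]) + 4*b2[i] + W_BO) >> W_BS;  /* (..+4*b2+8)>>4 */
        b1[i] += (W_AM*(b0[i] + b2[i]) + W_AO) >> W_AS;            /* (3*(..))>>1   */
    }
}
#endif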

#define LOAD_BLOCKS \
            tmp1 = vec_ld(0, &block[3][y*src_stride]);\
            align = vec_lvsl(0, &block[3][y*src_stride]);\
            tmp2 = vec_ld(15, &block[3][y*src_stride]);\
\
            b3 = vec_perm(tmp1,tmp2,align);\
\
            tmp1 = vec_ld(0, &block[2][y*src_stride]);\
            align = vec_lvsl(0, &block[2][y*src_stride]);\
            tmp2 = vec_ld(15, &block[2][y*src_stride]);\
\
            b2 = vec_perm(tmp1,tmp2,align);\
\
            tmp1 = vec_ld(0, &block[1][y*src_stride]);\
            align = vec_lvsl(0, &block[1][y*src_stride]);\
            tmp2 = vec_ld(15, &block[1][y*src_stride]);\
\
            b1 = vec_perm(tmp1,tmp2,align);\
\
            tmp1 = vec_ld(0, &block[0][y*src_stride]);\
            align = vec_lvsl(0, &block[0][y*src_stride]);\
            tmp2 = vec_ld(15, &block[0][y*src_stride]);\
\
            b0 = vec_perm(tmp1,tmp2,align);

#define LOAD_OBMCS \
            tmp1 = vec_ld(0, obmc1);\
            align = vec_lvsl(0, obmc1);\
            tmp2 = vec_ld(15, obmc1);\
\
            ob1 = vec_perm(tmp1,tmp2,align);\
\
            tmp1 = vec_ld(0, obmc2);\
            align = vec_lvsl(0, obmc2);\
            tmp2 = vec_ld(15, obmc2);\
\
            ob2 = vec_perm(tmp1,tmp2,align);\
\
            tmp1 = vec_ld(0, obmc3);\
            align = vec_lvsl(0, obmc3);\
            tmp2 = vec_ld(15, obmc3);\
\
            ob3 = vec_perm(tmp1,tmp2,align);\
\
            tmp1 = vec_ld(0, obmc4);\
            align = vec_lvsl(0, obmc4);\
            tmp2 = vec_ld(15, obmc4);\
\
            ob4 = vec_perm(tmp1,tmp2,align);

/* interleave logic
 * h1 <- [ a,b,a,b, a,b,a,b, a,b,a,b, a,b,a,b ]
 * h2 <- [ c,d,c,d, c,d,c,d, c,d,c,d, c,d,c,d ]
 * h  <- [ a,b,c,d, a,b,c,d, a,b,c,d, a,b,c,d ]
 */

#define STEPS_0_1\
            h1 = (vector unsigned short)\
                 vec_mergeh(ob1, ob2);\
\
            h2 = (vector unsigned short)\
                 vec_mergeh(ob3, ob4);\
\
            ih = (vector unsigned char)\
                 vec_mergeh(h1,h2);\
\
            l1 = (vector unsigned short) vec_mergeh(b3, b2);\
\
            ih1 = (vector unsigned char) vec_mergel(h1, h2);\
\
            l2 = (vector unsigned short) vec_mergeh(b1, b0);\
\
            il = (vector unsigned char) vec_mergeh(l1, l2);\
\
            v[0] = (vector signed int) vec_msum(ih, il, vec_splat_u32(0));\
\
            il1 = (vector unsigned char) vec_mergel(l1, l2);\
\
            v[1] = (vector signed int) vec_msum(ih1, il1, vec_splat_u32(0));
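
/* What STEPS_0_1 produces, spelled out (illustrative): after the byte/short
 * merges, each group of four adjacent bytes of ih holds the four OBMC weights
 * for one output pixel and the matching four bytes of il hold the corresponding
 * source pixels, so vec_msum(ih, il, vec_splat_u32(0)) yields per 32-bit lane
 *
 *     obmc1[x]*block[3][y*src_stride + x] + obmc2[x]*block[2][y*src_stride + x] +
 *     obmc3[x]*block[1][y*src_stride + x] + obmc4[x]*block[0][y*src_stride + x]
 *
 * i.e. the overlapped-block weighted sum, for pixels 0..3 in v[0] and 4..7 in v[1].
 */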

#define FINAL_STEP_SCALAR\
        for(x=0; x<b_w; x++)\
            if(add){\
                vbuf[x] += dst[x + src_x];\
                vbuf[x] = (vbuf[x] + (1<<(FRAC_BITS-1))) >> FRAC_BITS;\
                if(vbuf[x]&(~255)) vbuf[x]= ~(vbuf[x]>>31);\
                dst8[x + y*src_stride] = vbuf[x];\
            }else{\
                dst[x + src_x] -= vbuf[x];\
            }
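
/* FINAL_STEP_SCALAR, annotated (illustrative): in the "add" path the weighted
 * sum is combined with the coefficient line, rounded by adding 1<<(FRAC_BITS-1)
 * before the >>FRAC_BITS shift, and clamped to 0..255 without a branch:
 *
 *     vbuf[x] & ~255       is nonzero only when the value is out of range;
 *     ~(vbuf[x] >> 31)     is 0 when the value was negative and 0xffffffff
 *                          (255 in the low byte) when it overflowed,
 *
 * before the low byte is stored to dst8.  In the non-add path the prediction is
 * simply subtracted from the coefficient line.
 */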

static void inner_add_yblock_bw_8_obmc_16_altivec(uint8_t *obmc,
                                             const int obmc_stride,
                                             uint8_t * * block, int b_w,
                                             int b_h, int src_x, int src_y,
                                             int src_stride, slice_buffer * sb,
                                             int add, uint8_t * dst8)
{
    int y, x;
    DWTELEM * dst;
    vector unsigned short h1, h2, l1, l2;
    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
    vector unsigned char b0,b1,b2,b3;
    vector unsigned char ob1,ob2,ob3,ob4;

    DECLARE_ALIGNED_16(int, vbuf[16]);
    vector signed int *v = (vector signed int *)vbuf, *d;

    for(y=0; y<b_h; y++){
        //FIXME ugly misuse of obmc_stride

        uint8_t *obmc1= obmc + y*obmc_stride;
        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
        uint8_t *obmc4= obmc3+ (obmc_stride>>1);

        dst = slice_buffer_get_line(sb, src_y + y);
        d = (vector signed int *)(dst + src_x);

//FIXME i could avoid some loads!

        // load blocks
        LOAD_BLOCKS

        // load obmcs
        LOAD_OBMCS

        // steps 0 1
        STEPS_0_1

        FINAL_STEP_SCALAR

       }

}

#define STEPS_2_3\
            h1 = (vector unsigned short) vec_mergel(ob1, ob2);\
\
            h2 = (vector unsigned short) vec_mergel(ob3, ob4);\
\
            ih = (vector unsigned char) vec_mergeh(h1,h2);\
\
            l1 = (vector unsigned short) vec_mergel(b3, b2);\
\
            l2 = (vector unsigned short) vec_mergel(b1, b0);\
\
            ih1 = (vector unsigned char) vec_mergel(h1,h2);\
\
            il = (vector unsigned char) vec_mergeh(l1,l2);\
\
            v[2] = (vector signed int) vec_msum(ih, il, vec_splat_u32(0));\
\
            il1 = (vector unsigned char) vec_mergel(l1,l2);\
\
            v[3] = (vector signed int) vec_msum(ih1, il1, vec_splat_u32(0));


static void inner_add_yblock_bw_16_obmc_32_altivec(uint8_t *obmc,
                                             const int obmc_stride,
                                             uint8_t * * block, int b_w,
                                             int b_h, int src_x, int src_y,
                                             int src_stride, slice_buffer * sb,
                                             int add, uint8_t * dst8)
{
    int y, x;
    DWTELEM * dst;
    vector unsigned short h1, h2, l1, l2;
    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
    vector unsigned char b0,b1,b2,b3;
    vector unsigned char ob1,ob2,ob3,ob4;
    DECLARE_ALIGNED_16(int, vbuf[b_w]);
    vector signed int *v = (vector signed int *)vbuf, *d;

    for(y=0; y<b_h; y++){
        //FIXME ugly misuse of obmc_stride

        uint8_t *obmc1= obmc + y*obmc_stride;
        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
        uint8_t *obmc4= obmc3+ (obmc_stride>>1);

        dst = slice_buffer_get_line(sb, src_y + y);
        d = (vector signed int *)(dst + src_x);

        // load blocks
        LOAD_BLOCKS

        // load obmcs
        LOAD_OBMCS

        // steps 0 1 2 3
        STEPS_0_1

        STEPS_2_3

        FINAL_STEP_SCALAR

    }
}

#define FINAL_STEP_VEC \
\
    if(add)\
        {\
            for(x=0; x<b_w/4; x++)\
            {\
                v[x] = vec_add(v[x], d[x]);\
                v[x] = vec_sra(vec_add(v[x],\
                                       vec_sl( vec_splat_s32(1),\
                                               vec_splat_u32(7))),\
                               vec_splat_u32(8));\
\
                mask = (vector bool int) vec_sl((vector signed int)\
                        vec_cmpeq(v[x],v[x]),vec_splat_u32(8));\
                mask = (vector bool int) vec_and(v[x],vec_nor(mask,mask));\
\
                mask = (vector bool int)\
                        vec_cmpeq((vector signed int)mask,\
                                  (vector signed int)vec_splat_u32(0));\
\
                vs = vec_sra(v[x],vec_splat_u32(8));\
                vs = vec_sra(v[x],vec_splat_u32(8));\
                vs = vec_sra(v[x],vec_splat_u32(15));\
\
                vs = vec_nor(vs,vs);\
\
                v[x]= vec_sel(v[x],vs,mask);\
            }\
\
            for(x=0; x<b_w; x++)\
                dst8[x + y*src_stride] = vbuf[x];\
\
        }\
         else\
            for(x=0; x<b_w/4; x++)\
                d[x] = vec_sub(d[x], v[x]);
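
/* FINAL_STEP_VEC is the vector counterpart of FINAL_STEP_SCALAR, used on the
 * path where src_x is a multiple of 16: d[x] is added directly to the weighted
 * sums, the rounding constant 1<<(FRAC_BITS-1) is built with
 * vec_sl(vec_splat_s32(1), vec_splat_u32(7)) and removed with vec_sra() by
 * FRAC_BITS, and a select mask plus saturation value clamp each lane to 0..255
 * before the scalar loop stores the bytes to dst8.  (Only the last of the three
 * vs = vec_sra(...) assignments takes effect; the first two are overwritten.)
 * In the non-add path the update stays fully vectorized via vec_sub().
 */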

static void inner_add_yblock_a_bw_8_obmc_16_altivec(uint8_t *obmc,
                                             const int obmc_stride,
                                             uint8_t * * block, int b_w,
                                             int b_h, int src_x, int src_y,
                                             int src_stride, slice_buffer * sb,
                                             int add, uint8_t * dst8)
{
    int y, x;
    DWTELEM * dst;
    vector bool int mask;
    vector signed int vs;
    vector unsigned short h1, h2, l1, l2;
    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
    vector unsigned char b0,b1,b2,b3;
    vector unsigned char ob1,ob2,ob3,ob4;

    DECLARE_ALIGNED_16(int, vbuf[16]);
    vector signed int *v = (vector signed int *)vbuf, *d;

    for(y=0; y<b_h; y++){
        //FIXME ugly misuse of obmc_stride

        uint8_t *obmc1= obmc + y*obmc_stride;
        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
        uint8_t *obmc4= obmc3+ (obmc_stride>>1);

        dst = slice_buffer_get_line(sb, src_y + y);
        d = (vector signed int *)(dst + src_x);

//FIXME i could avoid some loads!

        // load blocks
        LOAD_BLOCKS

        // load obmcs
        LOAD_OBMCS

        // steps 0 1
        STEPS_0_1

        FINAL_STEP_VEC

       }

}

static void inner_add_yblock_a_bw_16_obmc_32_altivec(uint8_t *obmc,
                                             const int obmc_stride,
                                             uint8_t * * block, int b_w,
                                             int b_h, int src_x, int src_y,
                                             int src_stride, slice_buffer * sb,
                                             int add, uint8_t * dst8)
{
    int y, x;
    DWTELEM * dst;
    vector bool int mask;
    vector signed int vs;
    vector unsigned short h1, h2, l1, l2;
    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
    vector unsigned char b0,b1,b2,b3;
    vector unsigned char ob1,ob2,ob3,ob4;
    DECLARE_ALIGNED_16(int, vbuf[b_w]);
    vector signed int *v = (vector signed int *)vbuf, *d;

    for(y=0; y<b_h; y++){
        //FIXME ugly misuse of obmc_stride

        uint8_t *obmc1= obmc + y*obmc_stride;
        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
        uint8_t *obmc4= obmc3+ (obmc_stride>>1);

        dst = slice_buffer_get_line(sb, src_y + y);
        d = (vector signed int *)(dst + src_x);

        // load blocks
        LOAD_BLOCKS

        // load obmcs
        LOAD_OBMCS

        // steps 0 1 2 3
        STEPS_0_1

        STEPS_2_3

        FINAL_STEP_VEC

    }
}


void ff_snow_inner_add_yblock_altivec(uint8_t *obmc, const int obmc_stride,
                                      uint8_t * * block, int b_w, int b_h,
                                      int src_x, int src_y, int src_stride,
                                      slice_buffer * sb, int add,
                                      uint8_t * dst8)
{
    if (src_x&15) {
        if (b_w == 16)
            inner_add_yblock_bw_16_obmc_32_altivec(obmc, obmc_stride, block,
                                                   b_w, b_h, src_x, src_y,
                                                   src_stride, sb, add, dst8);
        else if (b_w == 8)
            inner_add_yblock_bw_8_obmc_16_altivec(obmc, obmc_stride, block,
                                                  b_w, b_h, src_x, src_y,
                                                  src_stride, sb, add, dst8);
        else
            ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,
                                     src_y, src_stride, sb, add, dst8);
    } else {
        if (b_w == 16)
            inner_add_yblock_a_bw_16_obmc_32_altivec(obmc, obmc_stride, block,
                                                     b_w, b_h, src_x, src_y,
                                                     src_stride, sb, add, dst8);
        else if (b_w == 8)
            inner_add_yblock_a_bw_8_obmc_16_altivec(obmc, obmc_stride, block,
                                                    b_w, b_h, src_x, src_y,
                                                    src_stride, sb, add, dst8);
        else
            ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,
                                     src_y, src_stride, sb, add, dst8);
    }
}
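
/* Dispatch summary: when src_x is not a multiple of 16 the variants with the
 * scalar final step are used; when it is, the *_a_* variants with the fully
 * vectorized final step are chosen.  Block widths other than 8 and 16 fall back
 * to the generic C ff_snow_inner_add_yblock().
 */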


void snow_init_altivec(DSPContext* c, AVCodecContext *avctx)
{
#if 0 /* not currently registered; the default C implementations stay in effect */
    c->horizontal_compose97i = ff_snow_horizontal_compose97i_altivec;
    c->vertical_compose97i = ff_snow_vertical_compose97i_altivec;
    c->inner_add_yblock = ff_snow_inner_add_yblock_altivec;
#endif
}
