libavcodec/ppc/dsputil_altivec.c

/*
 * Copyright (c) 2002 Brian Foley
 * Copyright (c) 2002 Dieter Shirley
 * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"

#include "gcc_fixes.h"

#include "dsputil_ppc.h"
#include "util_altivec.h"
#include "types_altivec.h"

int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix2iv, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);
    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix2v: pix2[0]-pix2[15]      pix2iv: pix2[1]-pix2[16] */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix2[0];
        pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

        tv = (vector unsigned char *) &pix2[1];
        pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix2iv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
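
/* For reference, each row processed above is the vector form of the scalar
   horizontal half-pel SAD (a sketch; vec_avg rounds up, hence the +1):
       for (j = 0; j < 16; j++)
           s += abs(pix1[j] - ((pix2[j] + pix2[j + 1] + 1) >> 1));
*/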

int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;
    uint8_t *pix3 = pix2 + line_size;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);

    /* Since pix3 = pix2 + line_size, the pix3 of one iteration becomes
       pix2 in the next iteration. We can use this fact to avoid a
       potentially expensive unaligned read each time around the loop.
       Read unaligned pixels into our vector:
       pix2v: pix2[0]-pix2[15] */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15] */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix3v);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2v = pix3v;
        pix3 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);
    return s;
}

int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    uint8_t *pix3 = pix2 + line_size;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    const vector unsigned short two = (const vector unsigned short)vec_splat_u16(2);
    vector unsigned char *tv, avgv, t5;
    vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
    vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
    vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
    vector unsigned short avghv, avglv;
    vector unsigned short t1, t2, t3, t4;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    s = 0;

    /* Since pix3 = pix2 + line_size, the pix3 of one iteration becomes
       pix2 in the next. We can use this fact to avoid a potentially
       expensive unaligned read, as well as some splitting and vector
       addition, each time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0]-pix2[15]  pix2iv: pix2[1]-pix2[16]
       Split the pixel vectors into shorts. */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    tv = (vector unsigned char *) &pix2[1];
    pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

    pix2hv  = (vector unsigned short) vec_mergeh(zero, pix2v);
    pix2lv  = (vector unsigned short) vec_mergel(zero, pix2v);
    pix2ihv = (vector unsigned short) vec_mergeh(zero, pix2iv);
    pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
    t1 = vec_add(pix2hv, pix2ihv);
    t2 = vec_add(pix2lv, pix2ilv);

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15]      pix3iv: pix3[1]-pix3[16] */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        tv = (vector unsigned char *) &pix3[1];
        pix3iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[1]));

        /* Note that AltiVec does have vec_avg, but it works on vector pairs
           and rounds up. We could do avg(avg(a,b), avg(c,d)), but the
           rounding would mean that, for example, avg(3,0,0,1) = 2, when it
           should be 1. Instead, we have to split the pixel vectors into
           vectors of shorts and do the averaging by hand. */
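        /* Concretely: the correctly rounded average of (3,0,0,1) is
           (3+0+0+1+2)>>2 = 1, while avg(avg(3,0), avg(0,1)) = avg(2,1) = 2,
           because vec_avg rounds up at every step. */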

        /* Split the pixel vectors into shorts */
        pix3hv  = (vector unsigned short) vec_mergeh(zero, pix3v);
        pix3lv  = (vector unsigned short) vec_mergel(zero, pix3v);
        pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
        pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);

        /* Do the averaging on them */
        t3 = vec_add(pix3hv, pix3ihv);
        t4 = vec_add(pix3lv, pix3ilv);

        avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
        avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);

        /* Pack the shorts back into a result */
        avgv = vec_pack(avghv, avglv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix3 += line_size;
        /* Transfer the calculated values for pix3 into pix2 */
        t1 = t3;
        t2 = t4;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
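
/* For reference, the loop above compares pix1 against the correctly rounded
   bilinear half-pel average (a scalar sketch of the same computation):
       avg = (pix2[j] + pix2[j + 1] + pix3[j] + pix3[j + 1] + 2) >> 2;
       s  += abs(pix1[j] - avg);
*/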

int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
           Since we're reading 16 pixels but actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

int pix_norm1_altivec(uint8_t *pix, int line_size)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char *tv;
    vector unsigned char pixv;
    vector unsigned int sv;
    vector signed int sum;

    sv = (vector unsigned int)vec_splat_u32(0);

    s = 0;
    for (i = 0; i < 16; i++) {
        /* Read in the potentially unaligned pixels */
        tv = (vector unsigned char *) pix;
        pixv = vec_perm(tv[0], tv[1], vec_lvsl(0, pix));

        /* Square the values, and add them to our sum */
        sv = vec_msum(pixv, pixv, sv);

        pix += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sum = vec_sums((vector signed int) sv, (vector signed int) zero);
    sum = vec_splat(sum, 3);
    vec_ste(sum, 0, &s);

    return s;
}

int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
           Since we're reading 16 pixels but actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /* Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a-b)^2 = (a-b)^2. */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}

int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /* Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a-b)^2 = (a-b)^2. */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}

int pix_sum_altivec(uint8_t * pix, int line_size)
{
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm, *pixv;
    vector unsigned char t1;
    vector unsigned int sad;
    vector signed int sumdiffs;

    int i;
    DECLARE_ALIGNED_16(int, s);

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < 16; i++) {
        /* Read the potentially unaligned 16 pixels into t1 */
        perm = vec_lvsl(0, pix);
        pixv = (vector unsigned char *) pix;
        t1 = vec_perm(pixv[0], pixv[1], perm);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t1, sad);

        pix += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

void get_pixels_altivec(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts;

    for (i = 0; i < 8; i++) {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, pixels);
        pixv = (vector unsigned char *) pixels;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // Convert the bytes into shorts.
        shorts = (vector signed short)vec_mergeh(zero, bytes);

        // Save the data to the block; we assume the block is 16-byte aligned.
        vec_st(shorts, i*16, (vector signed short*)block);

        pixels += line_size;
    }
}

void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1,
        const uint8_t *s2, int stride)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts1, shorts2;

    for (i = 0; i < 4; i++) {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // Convert the bytes into shorts.
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels.
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // Convert the bytes into shorts.
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction.
        shorts1 = vec_sub(shorts1, shorts2);

        // Save the data to the block; we assume the block is 16-byte aligned.
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;

        // The code below is a copy of the code above; this is a manual unroll.

        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // Convert the bytes into shorts.
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels.
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // Convert the bytes into shorts.
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction.
        shorts1 = vec_sub(shorts1, shorts2);

        // Save the data to the block; we assume the block is 16-byte aligned.
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;
    }
}
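/* Zero the 64 DCTELEM (int16) coefficients of an 8x8 block, i.e. 128 bytes,
   with eight aligned 16-byte stores; block must be 16-byte aligned. */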
static void clear_block_altivec(DCTELEM *block) {
    LOAD_ZERO;
    vec_st(zero_s16v,   0, block);
    vec_st(zero_s16v,  16, block);
    vec_st(zero_s16v,  32, block);
    vec_st(zero_s16v,  48, block);
    vec_st(zero_s16v,  64, block);
    vec_st(zero_s16v,  80, block);
    vec_st(zero_s16v,  96, block);
    vec_st(zero_s16v, 112, block);
}

void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
    register int i;
    register vector unsigned char vdst, vsrc;

    /* dst and src are 16-byte aligned (guaranteed) */
    for (i = 0; (i + 15) < w; i += 16) {
        vdst = vec_ld(i, (unsigned char*)dst);
        vsrc = vec_ld(i, (unsigned char*)src);
        vdst = vec_add(vsrc, vdst);
        vec_st(vdst, i, (unsigned char*)dst);
    }
    /* handle the tail if w is not a multiple of 16; this must add,
       matching the vec_add above, not merely copy */
    for (; i < w; i++) {
        dst[i] += src[i];
    }
}

/* next one assumes that ((line_size % 16) == 0) */
void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_pixels16_num, 1);
    register vector unsigned char pixelsv1, pixelsv2;
    register vector unsigned char pixelsv1B, pixelsv2B;
    register vector unsigned char pixelsv1C, pixelsv2C;
    register vector unsigned char pixelsv1D, pixelsv2D;

    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;
    register int line_size_2 = line_size << 1;
    register int line_size_3 = line_size + line_size_2;
    register int line_size_4 = line_size << 2;

POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);
// Hand-unrolling the loop by 4 gains about 15%:
// minimum execution time goes from 74 to 60 cycles.
// It's faster than -funroll-loops, but using
// -funroll-loops with this is bad - 74 cycles again.
// All this is on a 7450, tuning for the 7450.
#if 0
    for (i = 0; i < h; i++) {
        pixelsv1 = vec_ld(0, (unsigned char*)pixels);
        pixelsv2 = vec_ld(16, (unsigned char*)pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, (unsigned char*)block);
        pixels += line_size;
        block  += line_size;
    }
#else
    for (i = 0; i < h; i += 4) {
        pixelsv1 = vec_ld(0, (unsigned char*)pixels);
        pixelsv2 = vec_ld(15, (unsigned char*)pixels);
        pixelsv1B = vec_ld(line_size, (unsigned char*)pixels);
        pixelsv2B = vec_ld(15 + line_size, (unsigned char*)pixels);
        pixelsv1C = vec_ld(line_size_2, (unsigned char*)pixels);
        pixelsv2C = vec_ld(15 + line_size_2, (unsigned char*)pixels);
        pixelsv1D = vec_ld(line_size_3, (unsigned char*)pixels);
        pixelsv2D = vec_ld(15 + line_size_3, (unsigned char*)pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
               line_size, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
               line_size_2, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
               line_size_3, (unsigned char*)block);
        pixels += line_size_4;
        block  += line_size_4;
    }
#endif
POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_num, 1);
}

/* next one assumes that ((line_size % 16) == 0) */
#define op_avg(a,b)  a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
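/* op_avg above is the classic SWAR rounded-up byte average: per byte,
   (a + b + 1) >> 1 == (a | b) - (((a ^ b) & 0xFE) >> 1); the 0xFEFEFEFE
   mask keeps the shifted bits from crossing the four byte lanes. */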
void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_avg_pixels16_num, 1);
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;

POWERPC_PERF_START_COUNT(altivec_avg_pixels16_num, 1);

    for (i = 0; i < h; i++) {
        pixelsv1 = vec_ld(0, (unsigned char*)pixels);
        pixelsv2 = vec_ld(16, (unsigned char*)pixels);
        blockv = vec_ld(0, block);
        pixelsv = vec_perm(pixelsv1, pixelsv2, perm);
        blockv = vec_avg(blockv, pixelsv);
        vec_st(blockv, 0, (unsigned char*)block);
        pixels += line_size;
        block  += line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_avg_pixels16_num, 1);
}

/* next one assumes that ((line_size % 8) == 0) */
void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_avg_pixels8_num, 1);
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    int i;

POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1);

    for (i = 0; i < h; i++) {
        /* block is 8-byte aligned, so we're either in the left half
           of a 16-byte-aligned quadword or in the right half (unaligned) */
        int rightside = ((unsigned long)block & 0x0000000F);

        blockv = vec_ld(0, block);
        pixelsv1 = vec_ld(0, (unsigned char*)pixels);
        pixelsv2 = vec_ld(16, (unsigned char*)pixels);
        pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));

        if (rightside) {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1));
        } else {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(s0,s1,2,3));
        }

        blockv = vec_avg(blockv, pixelsv);

        vec_st(blockv, 0, block);

        pixels += line_size;
        block  += line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1);
}

/* next one assumes that ((line_size % 8) == 0) */
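/* Per output pixel, this computes the rounded bilinear half-pel value
   (p being the current source line and q the next one):
       block[j] = (p[j] + p[j + 1] + q[j] + q[j + 1] + 2) >> 2
   The +2 rounding bias is folded into pixelssum1 once per iteration. */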
void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_pixels8_xy2_num, 1);
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
    for (i = 0; i < h; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
}

/* next one assumes that ((line_size % 8) == 0) */
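/* Identical to put_pixels8_xy2 above except for a +1 bias (vcone) instead
   of +2, i.e. block[j] = (p[j] + p[j + 1] + q[j] + q[j + 1] + 1) >> 2,
   the "no rounding" flavor of the half-pel interpolation. */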
void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels8_xy2_num, 1);
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
    for (i = 0; i < h; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vcone);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
}

/* next one assumes that ((line_size % 16) == 0) */
void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_pixels16_xy2_num, 1);
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vctwo);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);

        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
}

/* next one assumes that ((line_size % 16) == 0) */
void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels16_xy2_num, 1);
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vcone);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);

        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vcone);
        pixelssum1 = vec_add(pixelssum2, vcone);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
}

int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
POWERPC_PERF_DECLARE(altivec_hadamard8_diff8x8_num, 1);
    int sum;
    register const vector unsigned char vzero =
                            (const vector unsigned char)vec_splat_u8(0);
    register vector signed short temp0, temp1, temp2, temp3, temp4,
                                 temp5, temp6, temp7;
POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1);
    {
    register const vector signed short vprod1 = (const vector signed short)
                                                { 1,-1, 1,-1, 1,-1, 1,-1 };
    register const vector signed short vprod2 = (const vector signed short)
                                                { 1, 1,-1,-1, 1, 1,-1,-1 };
    register const vector signed short vprod3 = (const vector signed short)
                                                { 1, 1, 1, 1,-1,-1,-1,-1 };
    register const vector unsigned char perm1 = (const vector unsigned char)
        {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
         0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
    register const vector unsigned char perm2 = (const vector unsigned char)
        {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
         0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
    register const vector unsigned char perm3 = (const vector unsigned char)
        {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
         0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};

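    /* ONEITERBUTTERFLY loads row i of src and dst, takes the 8 signed
       differences, and runs them through a 3-stage butterfly: each
       perm/vec_mladd pair swaps lanes and applies the +/- pattern of one
       stage, i.e. an 8-point (unnormalized) Hadamard transform of the row,
       leaving the transformed row in res. */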
#define ONEITERBUTTERFLY(i, res)                                          \
    {                                                                     \
    register vector unsigned char src1, src2, srcO;                   \
    register vector unsigned char dst1, dst2, dstO;                   \
    register vector signed short srcV, dstV;                          \
    register vector signed short but0, but1, but2, op1, op2, op3;     \
    src1 = vec_ld(stride * i, src);                                   \
    src2 = vec_ld((stride * i) + 15, src);                            \
    srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src));           \
    dst1 = vec_ld(stride * i, dst);                                   \
    dst2 = vec_ld((stride * i) + 15, dst);                            \
    dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst));           \
    /* promote the unsigned chars to signed shorts */                 \
    /* we're in the 8x8 function; we only care about the first 8 */   \
    srcV = (vector signed short)vec_mergeh((vector signed char)vzero, \
           (vector signed char)srcO);                                 \
    dstV = (vector signed short)vec_mergeh((vector signed char)vzero, \
           (vector signed char)dstO);                                 \
    /* subtractions inside the first butterfly */                     \
    but0 = vec_sub(srcV, dstV);                                       \
    op1  = vec_perm(but0, but0, perm1);                               \
    but1 = vec_mladd(but0, vprod1, op1);                              \
    op2  = vec_perm(but1, but1, perm2);                               \
    but2 = vec_mladd(but1, vprod2, op2);                              \
    op3  = vec_perm(but2, but2, perm3);                               \
    res  = vec_mladd(but2, vprod3, op3);                              \
    }
    ONEITERBUTTERFLY(0, temp0);
    ONEITERBUTTERFLY(1, temp1);
    ONEITERBUTTERFLY(2, temp2);
    ONEITERBUTTERFLY(3, temp3);
    ONEITERBUTTERFLY(4, temp4);
    ONEITERBUTTERFLY(5, temp5);
    ONEITERBUTTERFLY(6, temp6);
    ONEITERBUTTERFLY(7, temp7);
    }
#undef ONEITERBUTTERFLY
    {
    register vector signed int vsum;
    register vector signed short line0 = vec_add(temp0, temp1);
    register vector signed short line1 = vec_sub(temp0, temp1);
    register vector signed short line2 = vec_add(temp2, temp3);
    register vector signed short line3 = vec_sub(temp2, temp3);
    register vector signed short line4 = vec_add(temp4, temp5);
    register vector signed short line5 = vec_sub(temp4, temp5);
    register vector signed short line6 = vec_add(temp6, temp7);
    register vector signed short line7 = vec_sub(temp6, temp7);

    register vector signed short line0B = vec_add(line0, line2);
    register vector signed short line2B = vec_sub(line0, line2);
    register vector signed short line1B = vec_add(line1, line3);
    register vector signed short line3B = vec_sub(line1, line3);
    register vector signed short line4B = vec_add(line4, line6);
    register vector signed short line6B = vec_sub(line4, line6);
    register vector signed short line5B = vec_add(line5, line7);
    register vector signed short line7B = vec_sub(line5, line7);

    register vector signed short line0C = vec_add(line0B, line4B);
    register vector signed short line4C = vec_sub(line0B, line4B);
    register vector signed short line1C = vec_add(line1B, line5B);
    register vector signed short line5C = vec_sub(line1B, line5B);
    register vector signed short line2C = vec_add(line2B, line6B);
    register vector signed short line6C = vec_sub(line2B, line6B);
    register vector signed short line3C = vec_add(line3B, line7B);
    register vector signed short line7C = vec_sub(line3B, line7B);

    vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
    vsum = vec_sum4s(vec_abs(line1C), vsum);
    vsum = vec_sum4s(vec_abs(line2C), vsum);
    vsum = vec_sum4s(vec_abs(line3C), vsum);
    vsum = vec_sum4s(vec_abs(line4C), vsum);
    vsum = vec_sum4s(vec_abs(line5C), vsum);
    vsum = vec_sum4s(vec_abs(line6C), vsum);
    vsum = vec_sum4s(vec_abs(line7C), vsum);
    vsum = vec_sums(vsum, (vector signed int)vzero);
    vsum = vec_splat(vsum, 3);
    vec_ste(vsum, 0, &sum);
    }
POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff8x8_num, 1);
    return sum;
}

/*
16x8 works with 16 elements: it avoids replicating loads and gives the
compiler more room for scheduling. It is only used from inside
hadamard8_diff16_altivec.

Unfortunately, it seems gcc-3.3 is a bit dumb, and the compiled code has a LOT
of spill code; it seems gcc (unlike xlc) cannot keep everything in registers
by itself. The following code includes hand-made register allocation. It's not
clean, but on a 7450 the resulting code is much faster (the best case falls
from 700+ cycles to 550).

xlc doesn't add spill code, but it doesn't know how to schedule for the 7450,
and its code isn't much faster than gcc-3.3's on the 7450 (but it uses 25%
fewer instructions...).

On the 970, the hand-made RA is still a win (around 690 vs. around 780), but
xlc gets to around 660 on the regular C code...
*/
01123 
01124 static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h) {
01125     int sum;
01126     register vector signed short
01127         temp0 REG_v(v0),
01128         temp1 REG_v(v1),
01129         temp2 REG_v(v2),
01130         temp3 REG_v(v3),
01131         temp4 REG_v(v4),
01132         temp5 REG_v(v5),
01133         temp6 REG_v(v6),
01134         temp7 REG_v(v7);
01135     register vector signed short
01136         temp0S REG_v(v8),
01137         temp1S REG_v(v9),
01138         temp2S REG_v(v10),
01139         temp3S REG_v(v11),
01140         temp4S REG_v(v12),
01141         temp5S REG_v(v13),
01142         temp6S REG_v(v14),
01143         temp7S REG_v(v15);
01144     register const vector unsigned char vzero REG_v(v31)=
01145         (const vector unsigned char)vec_splat_u8(0);
01146     {
01147     register const vector signed short vprod1 REG_v(v16)=
01148         (const vector signed short){ 1,-1, 1,-1, 1,-1, 1,-1 };
01149     register const vector signed short vprod2 REG_v(v17)=
01150         (const vector signed short){ 1, 1,-1,-1, 1, 1,-1,-1 };
01151     register const vector signed short vprod3 REG_v(v18)=
01152         (const vector signed short){ 1, 1, 1, 1,-1,-1,-1,-1 };
01153     register const vector unsigned char perm1 REG_v(v19)=
01154         (const vector unsigned char)
01155         {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
01156          0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
01157     register const vector unsigned char perm2 REG_v(v20)=
01158         (const vector unsigned char)
01159         {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
01160          0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
01161     register const vector unsigned char perm3 REG_v(v21)=
01162         (const vector unsigned char)
01163         {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
01164          0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};
01165 
01166 #define ONEITERBUTTERFLY(i, res1, res2)                                   \
01167     {                                                                     \
01168     register vector unsigned char src1 REG_v(v22),                    \
01169                                   src2 REG_v(v23),                    \
01170                                   dst1 REG_v(v24),                    \
01171                                   dst2 REG_v(v25),                    \
01172                                   srcO REG_v(v22),                    \
01173                                   dstO REG_v(v23);                    \
01174                                                                       \
01175     register vector signed short  srcV REG_v(v24),                    \
01176                                   dstV REG_v(v25),                    \
01177                                   srcW REG_v(v26),                    \
01178                                   dstW REG_v(v27),                    \
01179                                   but0 REG_v(v28),                    \
01180                                   but0S REG_v(v29),                   \
01181                                   op1 REG_v(v30),                     \
01182                                   but1 REG_v(v22),                    \
01183                                   op1S REG_v(v23),                    \
01184                                   but1S REG_v(v24),                   \
01185                                   op2 REG_v(v25),                     \
01186                                   but2 REG_v(v26),                    \
01187                                   op2S REG_v(v27),                    \
01188                                   but2S REG_v(v28),                   \
01189                                   op3 REG_v(v29),                     \
01190                                   op3S REG_v(v30);                    \
01191                                                                       \
01192     src1 = vec_ld(stride * i, src);                                   \
01193     src2 = vec_ld((stride * i) + 16, src);                            \
01194     srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src));           \
01195     dst1 = vec_ld(stride * i, dst);                                   \
01196     dst2 = vec_ld((stride * i) + 16, dst);                            \
01197     dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst));           \
01198     /* promote the unsigned chars to signed shorts */                 \
01199     srcV = (vector signed short)vec_mergeh((vector signed char)vzero, \
01200            (vector signed char)srcO);                                 \
01201     dstV = (vector signed short)vec_mergeh((vector signed char)vzero, \
01202            (vector signed char)dstO);                                 \
01203     srcW = (vector signed short)vec_mergel((vector signed char)vzero, \
01204            (vector signed char)srcO);                                 \
01205     dstW = (vector signed short)vec_mergel((vector signed char)vzero, \
01206            (vector signed char)dstO);                                 \
01207     /* subtractions inside the first butterfly */                     \
01208     but0 = vec_sub(srcV, dstV);                                       \
01209     but0S = vec_sub(srcW, dstW);                                      \
01210     op1 = vec_perm(but0, but0, perm1);                                \
01211     but1 = vec_mladd(but0, vprod1, op1);                              \
01212     op1S = vec_perm(but0S, but0S, perm1);                             \
01213     but1S = vec_mladd(but0S, vprod1, op1S);                           \
01214     op2 = vec_perm(but1, but1, perm2);                                \
01215     but2 = vec_mladd(but1, vprod2, op2);                              \
01216     op2S = vec_perm(but1S, but1S, perm2);                             \
01217     but2S = vec_mladd(but1S, vprod2, op2S);                           \
01218     op3 = vec_perm(but2, but2, perm3);                                \
01219     res1 = vec_mladd(but2, vprod3, op3);                              \
01220     op3S = vec_perm(but2S, but2S, perm3);                             \
01221     res2 = vec_mladd(but2S, vprod3, op3S);                            \
01222     }
01223     ONEITERBUTTERFLY(0, temp0, temp0S);
01224     ONEITERBUTTERFLY(1, temp1, temp1S);
01225     ONEITERBUTTERFLY(2, temp2, temp2S);
01226     ONEITERBUTTERFLY(3, temp3, temp3S);
01227     ONEITERBUTTERFLY(4, temp4, temp4S);
01228     ONEITERBUTTERFLY(5, temp5, temp5S);
01229     ONEITERBUTTERFLY(6, temp6, temp6S);
01230     ONEITERBUTTERFLY(7, temp7, temp7S);
01231     }
01232 #undef ONEITERBUTTERFLY
01233     {
01234     register vector signed int vsum;
01235     register vector signed short line0S, line1S, line2S, line3S, line4S,
01236                                  line5S, line6S, line7S, line0BS,line2BS,
01237                                  line1BS,line3BS,line4BS,line6BS,line5BS,
01238                                  line7BS,line0CS,line4CS,line1CS,line5CS,
01239                                  line2CS,line6CS,line3CS,line7CS;
01240 
01241     register vector signed short line0 = vec_add(temp0, temp1);
01242     register vector signed short line1 = vec_sub(temp0, temp1);
01243     register vector signed short line2 = vec_add(temp2, temp3);
01244     register vector signed short line3 = vec_sub(temp2, temp3);
01245     register vector signed short line4 = vec_add(temp4, temp5);
01246     register vector signed short line5 = vec_sub(temp4, temp5);
01247     register vector signed short line6 = vec_add(temp6, temp7);
01248     register vector signed short line7 = vec_sub(temp6, temp7);
01249 
01250     register vector signed short line0B = vec_add(line0, line2);
01251     register vector signed short line2B = vec_sub(line0, line2);
01252     register vector signed short line1B = vec_add(line1, line3);
01253     register vector signed short line3B = vec_sub(line1, line3);
01254     register vector signed short line4B = vec_add(line4, line6);
01255     register vector signed short line6B = vec_sub(line4, line6);
01256     register vector signed short line5B = vec_add(line5, line7);
01257     register vector signed short line7B = vec_sub(line5, line7);
01258 
01259     register vector signed short line0C = vec_add(line0B, line4B);
01260     register vector signed short line4C = vec_sub(line0B, line4B);
01261     register vector signed short line1C = vec_add(line1B, line5B);
01262     register vector signed short line5C = vec_sub(line1B, line5B);
01263     register vector signed short line2C = vec_add(line2B, line6B);
01264     register vector signed short line6C = vec_sub(line2B, line6B);
01265     register vector signed short line3C = vec_add(line3B, line7B);
01266     register vector signed short line7C = vec_sub(line3B, line7B);
01267 
01268     vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
01269     vsum = vec_sum4s(vec_abs(line1C), vsum);
01270     vsum = vec_sum4s(vec_abs(line2C), vsum);
01271     vsum = vec_sum4s(vec_abs(line3C), vsum);
01272     vsum = vec_sum4s(vec_abs(line4C), vsum);
01273     vsum = vec_sum4s(vec_abs(line5C), vsum);
01274     vsum = vec_sum4s(vec_abs(line6C), vsum);
01275     vsum = vec_sum4s(vec_abs(line7C), vsum);
01276 
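    /* Second 8x8 half (the S-suffixed vectors, covering pixels 8-15 of
       the 16-pixel-wide rows): the same three vertical butterfly stages,
       accumulated into the same vsum. */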
01277     line0S = vec_add(temp0S, temp1S);
01278     line1S = vec_sub(temp0S, temp1S);
01279     line2S = vec_add(temp2S, temp3S);
01280     line3S = vec_sub(temp2S, temp3S);
01281     line4S = vec_add(temp4S, temp5S);
01282     line5S = vec_sub(temp4S, temp5S);
01283     line6S = vec_add(temp6S, temp7S);
01284     line7S = vec_sub(temp6S, temp7S);
01285 
01286     line0BS = vec_add(line0S, line2S);
01287     line2BS = vec_sub(line0S, line2S);
01288     line1BS = vec_add(line1S, line3S);
01289     line3BS = vec_sub(line1S, line3S);
01290     line4BS = vec_add(line4S, line6S);
01291     line6BS = vec_sub(line4S, line6S);
01292     line5BS = vec_add(line5S, line7S);
01293     line7BS = vec_sub(line5S, line7S);
01294 
01295     line0CS = vec_add(line0BS, line4BS);
01296     line4CS = vec_sub(line0BS, line4BS);
01297     line1CS = vec_add(line1BS, line5BS);
01298     line5CS = vec_sub(line1BS, line5BS);
01299     line2CS = vec_add(line2BS, line6BS);
01300     line6CS = vec_sub(line2BS, line6BS);
01301     line3CS = vec_add(line3BS, line7BS);
01302     line7CS = vec_sub(line3BS, line7BS);
01303 
01304     vsum = vec_sum4s(vec_abs(line0CS), vsum);
01305     vsum = vec_sum4s(vec_abs(line1CS), vsum);
01306     vsum = vec_sum4s(vec_abs(line2CS), vsum);
01307     vsum = vec_sum4s(vec_abs(line3CS), vsum);
01308     vsum = vec_sum4s(vec_abs(line4CS), vsum);
01309     vsum = vec_sum4s(vec_abs(line5CS), vsum);
01310     vsum = vec_sum4s(vec_abs(line6CS), vsum);
01311     vsum = vec_sum4s(vec_abs(line7CS), vsum);
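    /* fold the four partial 32-bit sums into one and store the scalar result */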
01312     vsum = vec_sums(vsum, (vector signed int)vzero);
01313     vsum = vec_splat(vsum, 3);
01314     vec_ste(vsum, 0, &sum);
01315     }
01316     return sum;
01317 }
01318 
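/* For reference, a hypothetical scalar version of one 8x8 half of the
 * score computed above (names and layout are illustrative, not part of
 * the original file).  The sum of absolute values of the 2-D Hadamard
 * transform is invariant to the order in which butterflies are applied,
 * so this straightforward iterative form yields the same score as the
 * permuted vector butterflies. */
static int hadamard8_diff8x8_scalar(const uint8_t *dst, const uint8_t *src,
                                    int stride)
{
    int16_t d[8][8];
    int x, y, step, k, sum = 0;

    /* difference block, promoted to 16 bits */
    for (y = 0; y < 8; y++)
        for (x = 0; x < 8; x++)
            d[y][x] = src[y * stride + x] - dst[y * stride + x];

    /* horizontal 8-point Hadamard: three butterfly stages per row */
    for (y = 0; y < 8; y++)
        for (step = 1; step < 8; step <<= 1)
            for (x = 0; x < 8; x += 2 * step)
                for (k = 0; k < step; k++) {
                    int16_t a = d[y][x + k], b = d[y][x + k + step];
                    d[y][x + k]        = a + b;
                    d[y][x + k + step] = a - b;
                }

    /* vertical 8-point Hadamard: the same three stages down the columns */
    for (x = 0; x < 8; x++)
        for (step = 1; step < 8; step <<= 1)
            for (y = 0; y < 8; y += 2 * step)
                for (k = 0; k < step; k++) {
                    int16_t a = d[y + k][x], b = d[y + k + step][x];
                    d[y + k][x]        = a + b;
                    d[y + k + step][x] = a - b;
                }

    /* sum of absolute transform coefficients; the worst case is
       255 * 8 * 8, which still fits in int16_t per coefficient */
    for (y = 0; y < 8; y++)
        for (x = 0; x < 8; x++)
            sum += FFABS(d[y][x]);
    return sum;
}
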
01319 int hadamard8_diff16_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
01320 POWERPC_PERF_DECLARE(altivec_hadamard8_diff16_num, 1);
01321     int score;
01322 POWERPC_PERF_START_COUNT(altivec_hadamard8_diff16_num, 1);
01323     score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
01324     if (h==16) {
01325         dst += 8*stride;
01326         src += 8*stride;
01327         score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
01328     }
01329 POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff16_num, 1);
01330     return score;
01331 }
01332 
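/* Hypothetical usage sketch (not part of the original file): after
 * dsputil_init_altivec() below has filled in the table, a caller scores
 * a 16x16 block through the DSPContext pointer like this. */
static int score_block16_sketch(DSPContext *c, void *s,
                                uint8_t *dst, uint8_t *src, int stride)
{
    /* index [0] selects the 16-pixel-wide variant; h = 16 makes the
       implementation above score two 16x8 halves */
    return c->hadamard8_diff[0](s, dst, src, stride, 16);
}
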
01333 static void vorbis_inverse_coupling_altivec(float *mag, float *ang,
01334                                             int blocksize)
01335 {
01336     int i;
01337     vector float m, a;
01338     vector bool int t0, t1;
01339     const vector unsigned int v_31 = /* 31 = 15+15+1: splat immediates only span -16..15 */
01340         vec_add(vec_add(vec_splat_u32(15),vec_splat_u32(15)),vec_splat_u32(1));
01341     for (i = 0; i < blocksize; i += 4) {
01342         m = vec_ld(0, mag+i);
01343         a = vec_ld(0, ang+i);
01344         t0 = vec_cmple(m, (vector float)vec_splat_u32(0));
01345         t1 = vec_cmple(a, (vector float)vec_splat_u32(0));
01346         a = vec_xor(a, (vector float) vec_sl((vector unsigned int)t0, v_31));
01347         t0 = (vector bool int)vec_and(a, t1);
01348         t1 = (vector bool int)vec_andc(a, t1);
01349         a = vec_sub(m, (vector float)t1);
01350         m = vec_add(m, (vector float)t0);
01351         vec_stl(a, 0, ang+i);
01352         vec_stl(m, 0, mag+i);
01353     }
01354 }
01355 
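/* A scalar sketch of the same coupling, for reference (illustrative;
 * restructured from the branch logic above, not copied from the decoder's
 * generic path).  The vector loop above is the branch-free form: t0/t1
 * are "<= 0" masks, the vec_xor flips ang's sign wherever mag <= 0, and
 * the masked add/sub then selects the right combination per element. */
static void vorbis_inverse_coupling_scalar(float *mag, float *ang,
                                           int blocksize)
{
    int i;
    for (i = 0; i < blocksize; i++) {
        float m  = mag[i];
        float a  = ang[i];
        float af = (m <= 0.0f) ? -a : a; /* the sign-bit XOR */
        if (a > 0.0f) {                  /* mask on the original ang */
            ang[i] = m - af;
        } else {
            ang[i] = m;
            mag[i] = m + af;
        }
    }
}
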
01356 /* The next routine assumes that ((line_size % 8) == 0). */
01357 void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
01358 {
01359 POWERPC_PERF_DECLARE(altivec_avg_pixels8_xy2_num, 1);
01360     register int i;
01361     register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
01362     register vector unsigned char blockv, temp1, temp2, blocktemp;
01363     register vector unsigned short pixelssum1, pixelssum2, temp3;
01364 
01365     register const vector unsigned char vczero = (const vector unsigned char)
01366                                         vec_splat_u8(0);
01367     register const vector unsigned short vctwo = (const vector unsigned short)
01368                                         vec_splat_u16(2);
01369 
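    /* Prologue: load row 0 unaligned.  The (addr & 15) == 15 special case
       below exists because vec_lvsl(1, pixels) would then wrap to the
       identity permute and select the wrong 16 bytes; at that alignment
       the bytes starting at pixels+1 are exactly temp2. */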
01370     temp1 = vec_ld(0, pixels);
01371     temp2 = vec_ld(16, pixels);
01372     pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
01373     if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F) {
01374         pixelsv2 = temp2;
01375     } else {
01376         pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
01377     }
01378     pixelsv1 = vec_mergeh(vczero, pixelsv1);
01379     pixelsv2 = vec_mergeh(vczero, pixelsv2);
01380     pixelssum1 = vec_add((vector unsigned short)pixelsv1,
01381                          (vector unsigned short)pixelsv2);
01382     pixelssum1 = vec_add(pixelssum1, vctwo);
01383 
01384 POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1);
01385     for (i = 0; i < h ; i++) {
01386         int rightside = ((unsigned long)block & 0x0000000F);
01387         blockv = vec_ld(0, block);
01388 
01389         temp1 = vec_ld(line_size, pixels);
01390         temp2 = vec_ld(line_size + 16, pixels);
01391         pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
01392         if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F) {
01393             pixelsv2 = temp2;
01394         } else {
01395             pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
01396         }
01397 
01398         pixelsv1 = vec_mergeh(vczero, pixelsv1);
01399         pixelsv2 = vec_mergeh(vczero, pixelsv2);
01400         pixelssum2 = vec_add((vector unsigned short)pixelsv1,
01401                              (vector unsigned short)pixelsv2);
01402         temp3 = vec_add(pixelssum1, pixelssum2);
01403         temp3 = vec_sra(temp3, vctwo);
01404         pixelssum1 = vec_add(pixelssum2, vctwo);
01405         pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
01406 
01407         if (rightside) {
01408             blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
01409         } else {
01410             blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
01411         }
01412 
01413         blockv = vec_avg(blocktemp, blockv);
01414         vec_st(blockv, 0, block);
01415 
01416         block += line_size;
01417         pixels += line_size;
01418     }
01419 
01420 POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1);
01421 }
01422 
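/* A scalar sketch of what the loop above computes per pixel (illustrative,
 * not part of the original file): a 2x2 half-pel interpolation with +2
 * rounding, then a round-up average with the existing block contents,
 * matching vec_avg's (a + b + 1) >> 1. */
static void avg_pixels8_xy2_scalar(uint8_t *block, const uint8_t *pixels,
                                   int line_size, int h)
{
    int i, j;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++) {
            int t = (pixels[j] + pixels[j + 1] +
                     pixels[j + line_size] + pixels[j + line_size + 1] + 2) >> 2;
            block[j] = (block[j] + t + 1) >> 1;
        }
        block  += line_size;
        pixels += line_size;
    }
}
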
01423 void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx)
01424 {
01425     c->pix_abs[0][1] = sad16_x2_altivec;
01426     c->pix_abs[0][2] = sad16_y2_altivec;
01427     c->pix_abs[0][3] = sad16_xy2_altivec;
01428     c->pix_abs[0][0] = sad16_altivec;
01429     c->pix_abs[1][0] = sad8_altivec;
01430     c->sad[0]= sad16_altivec;
01431     c->sad[1]= sad8_altivec;
01432     c->pix_norm1 = pix_norm1_altivec;
01433     c->sse[1]= sse8_altivec;
01434     c->sse[0]= sse16_altivec;
01435     c->pix_sum = pix_sum_altivec;
01436     c->diff_pixels = diff_pixels_altivec;
01437     c->get_pixels = get_pixels_altivec;
01438     c->clear_block = clear_block_altivec;
01439     c->add_bytes= add_bytes_altivec;
01440     c->put_pixels_tab[0][0] = put_pixels16_altivec;
01441     /* put_pixels16 and put_no_rnd_pixels16 are identical for full-pel, so share one implementation */
01442     c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec;
01443     c->avg_pixels_tab[0][0] = avg_pixels16_altivec;
01444     c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
01445     c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
01446     c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
01447     c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
01448     c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
01449     c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
01450 
01451     c->hadamard8_diff[0] = hadamard8_diff16_altivec;
01452     c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
01453     if (CONFIG_VORBIS_DECODER)
01454         c->vorbis_inverse_coupling = vorbis_inverse_coupling_altivec;
01455 }
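
/* Illustrative wiring (not part of the original file): the generic PPC
 * init path is expected to install these pointers only after a runtime
 * AltiVec check, along the lines of the sketch below; has_altivec() is
 * assumed here to be the runtime CPU probe. */
static void dsputil_init_ppc_sketch(DSPContext *c, AVCodecContext *avctx)
{
    if (has_altivec())              /* assumed runtime CPU probe */
        dsputil_init_altivec(c, avctx);
}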
