libavcodec/ppc/gmc_altivec.c

/*
 * GMC (Global Motion Compensation)
 * AltiVec-enabled
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"

#include "gcc_fixes.h"

#include "dsputil_ppc.h"
#include "util_altivec.h"

/*
  AltiVec-enhanced gmc1. At the moment this code assumes that the stride
  is a multiple of 8, to preserve proper dst alignment.
*/
#define GMC1_PERF_COND (h==8)
void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */, int stride, int h, int x16, int y16, int rounder)
{
POWERPC_PERF_DECLARE(altivec_gmc1_num, GMC1_PERF_COND);
    const DECLARE_ALIGNED_16(unsigned short, rounder_a[8]) =
        {rounder, rounder, rounder, rounder,
         rounder, rounder, rounder, rounder};
    const DECLARE_ALIGNED_16(unsigned short, ABCD[8]) =
        {
            (16-x16)*(16-y16), /* A */
            (   x16)*(16-y16), /* B */
            (16-x16)*(   y16), /* C */
            (   x16)*(   y16), /* D */
            0, 0, 0, 0         /* padding */
        };
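    // A, B, C and D above are the bilinear interpolation weights for the
    // (x16/16, y16/16) fractional position; they always sum to 16*16 = 256,
    // which is why the final result is renormalized with a shift right by 8.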
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcsr8 = (const vector unsigned short)vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
    register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
    int i;
    unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long)src & 0x0000000F;
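    // dst is guaranteed 8-byte aligned, so dst & 0xF is either 0 or 8;
    // src may have any alignment, hence the permute-based unaligned loads below.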

POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);

    tempA = vec_ld(0, (unsigned short*)ABCD);
    Av = vec_splat(tempA, 0);
    Bv = vec_splat(tempA, 1);
    Cv = vec_splat(tempA, 2);
    Dv = vec_splat(tempA, 3);

    rounderV = vec_ld(0, (unsigned short*)rounder_a);

    // we'll be able to pick up our 9 char elements at src
    // from those 32 bytes.
    // We load the first batch here, as inside the loop we can
    // reuse 'src+stride' from one iteration as the 'src' of the next.
    src_0 = vec_ld(0, src);
    src_1 = vec_ld(16, src);
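    // vec_lvsl(0, src) produces the permute mask that shifts the two aligned
    // 16-byte loads so the 16 bytes starting at the (possibly unaligned)
    // src address land in srcvA.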
    srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));

    if (src_really_odd != 0x0000000F) {
        // if src & 0xF == 0xF, then (src+1) is properly aligned
        // on the second vector.
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    } else {
        srcvB = src_1;
    }
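    // merging with a zero vector widens the 8 low bytes of each source
    // vector to unsigned 16-bit values for the multiply-accumulates below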
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);

    for(i=0; i<h; i++) {
        dst_odd = (unsigned long)dst & 0x0000000F;
        src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);

        // we'll be able to pick up our 9 char elements at src + stride
        // from those 32 bytes, then reuse the resulting two vectors
        // srcvC and srcvD as the next srcvA and srcvB
        src_0 = vec_ld(stride + 0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

        if (src_really_odd != 0x0000000F) {
            // if src & 0xF == 0xF, then (src+1) is properly aligned
            // on the second vector.
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        } else {
            srcvD = src_1;
        }

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);

        // OK, now we (finally) do the math :-)
        // these four instructions replace 32 int muls & 32 int adds.
        // isn't AltiVec nice?
        tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);
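        // each 16-bit lane of tempD now holds
        // A*srcvA + B*srcvB + C*srcvC + D*srcvD + rounder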

        srcvA = srcvC;
        srcvB = srcvD;

        tempD = vec_sr(tempD, vcsr8);

        dstv2 = vec_pack(tempD, (vector unsigned short)vczero);

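        // vec_st can only store to a 16-byte aligned address, so merge the
        // 8 freshly computed bytes into the correct half of the previously
        // loaded destination vector before storing all 16 bytes back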
        if (dst_odd) {
            dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
        } else {
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
        }

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }

POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
}
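For reference, here is a minimal scalar sketch of the bilinear interpolation that the AltiVec routine above vectorizes. It is written from the weight formulas in the comments and is illustrative only: the function name gmc1_scalar_ref and the fixed 8-pixel block width are assumptions, not part of this file.

#include <stdint.h>

/* Scalar sketch (illustrative, not part of gmc_altivec.c). */
static void gmc1_scalar_ref(uint8_t *dst, uint8_t *src, int stride, int h,
                            int x16, int y16, int rounder)
{
    const int A = (16 - x16) * (16 - y16);
    const int B = (     x16) * (16 - y16);
    const int C = (16 - x16) * (     y16);
    const int D = (     x16) * (     y16);
    int i, j;

    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++) {
            /* weighted sum of the four neighboring source pixels;
             * A + B + C + D == 256, so the >> 8 renormalizes */
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] +
                      rounder) >> 8;
        }
        dst += stride;
        src += stride;
    }
}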
