Actual source code: dgefa4.c
/*$Id: dgefa4.c,v 1.21 2001/06/22 19:50:38 buschelm Exp $*/
/*
   Inverts a 4 by 4 matrix using partial pivoting.

   Used by the sparse factorization routines in
   src/mat/impls/baij/seq and src/mat/impls/bdiag/seq

   See also src/inline/ilu.h

   This is a combination of the Linpack routines
   dgefa() and dgedi() specialized for a size of 4.
*/
#include "petsc.h"

int Kernel_A_gets_inverse_A_4(MatScalar *a)
{
  int       i__2,i__3,kp1,j,k,l,ll,i,ipvt[4],kb,k3;
  int       k4,j3;
  MatScalar *aa,*ax,*ay,work[16],stmp;
  MatReal   tmp,max;

  /* gaussian elimination with partial pivoting */

  /* Parameter adjustments */
  a -= 5;
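  /* After this shift, a[i + 4*j] with i,j = 1..4 addresses entry (i,j) of the
     column-major 4x4 block, i.e. the 1-based Fortran-style indexing that the
     Linpack-derived loops below assume. */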

  for (k = 1; k <= 3; ++k) {
    kp1 = k + 1;
    k3  = 4*k;
    k4  = k3 + k;

    /* find l = pivot index */
    i__2 = 4 - k;
    aa   = &a[k4];
    max  = PetscAbsScalar(aa[0]);
    l    = 1;
    for (ll=1; ll<i__2; ll++) {
      tmp = PetscAbsScalar(aa[ll]);
      if (tmp > max) { max = tmp; l = ll+1; }
    }
    l        += k - 1;
    ipvt[k-1] = l;

    if (a[l + k3] == 0.) {
      SETERRQ(k,"Zero pivot");
    }

    /* interchange if necessary */
    if (l != k) {
      stmp      = a[l + k3];
      a[l + k3] = a[k4];
      a[k4]     = stmp;
    }

    /* compute multipliers */
    stmp = -1. / a[k4];
    i__2 = 4 - k;
    aa   = &a[1 + k4];
    for (ll=0; ll<i__2; ll++) {
      aa[ll] *= stmp;
    }

    /* row elimination with column indexing */
    ax = &a[k4+1];
    for (j = kp1; j <= 4; ++j) {
      j3   = 4*j;
      stmp = a[l + j3];
      if (l != k) {
        a[l + j3] = a[k + j3];
        a[k + j3] = stmp;
      }

      i__3 = 4 - k;
      ay   = &a[1+k+j3];
      for (ll=0; ll<i__3; ll++) {
        ay[ll] += stmp*ax[ll];
      }
    }
  }
  ipvt[3] = 4;
  if (a[20] == 0.) {
    SETERRQ(3,"Zero pivot,final row");
  }

  /*
     Now form the inverse
  */

  /* compute inverse(u) */
  for (k = 1; k <= 4; ++k) {
    k3    = 4*k;
    k4    = k3 + k;
    a[k4] = 1.0 / a[k4];
    stmp  = -a[k4];
    i__2  = k - 1;
    aa    = &a[k3 + 1];
    for (ll=0; ll<i__2; ll++) aa[ll] *= stmp;
    kp1 = k + 1;
    if (4 < kp1) continue;
    ax = aa;
    for (j = kp1; j <= 4; ++j) {
      j3        = 4*j;
      stmp      = a[k + j3];
      a[k + j3] = 0.0;
      ay        = &a[j3 + 1];
      for (ll=0; ll<k; ll++) {
        ay[ll] += stmp*ax[ll];
      }
    }
  }

  /* form inverse(u)*inverse(l) */
  for (kb = 1; kb <= 3; ++kb) {
    k   = 4 - kb;
    k3  = 4*k;
    kp1 = k + 1;
    aa  = a + k3;
    for (i = kp1; i <= 4; ++i) {
      work[i-1] = aa[i];
      aa[i]     = 0.0;
    }
    for (j = kp1; j <= 4; ++j) {
      stmp   = work[j-1];
      ax     = &a[4*j + 1];
      ay     = &a[k3 + 1];
      ay[0] += stmp*ax[0];
      ay[1] += stmp*ax[1];
      ay[2] += stmp*ax[2];
      ay[3] += stmp*ax[3];
    }
    l = ipvt[k-1];
    if (l != k) {
      ax   = &a[k3 + 1];
      ay   = &a[4*l + 1];
      stmp = ax[0]; ax[0] = ay[0]; ay[0] = stmp;
      stmp = ax[1]; ax[1] = ay[1]; ay[1] = stmp;
      stmp = ax[2]; ax[2] = ay[2]; ay[2] = stmp;
      stmp = ax[3]; ax[3] = ay[3]; ay[3] = stmp;
    }
  }
  return(0);
}

#if defined(PETSC_HAVE_SSE)
#include PETSC_HAVE_SSE

int Kernel_A_gets_inverse_A_4_SSE(float *a)
{
  /*
     This routine is converted from Intel's Small Matrix Library.
     See: Streaming SIMD Extensions -- Inverse of 4x4 Matrix
     Order Number: 245043-001
     March 1999
     http://www.intel.com

     Inverse of a 4x4 matrix via Cramer's Rule:
     bool Invert4x4(SMLXMatrix &);
  */
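  /*
     Sketch of the steps performed by the SSE macros below: the 16 floats of
     the block are loaded and rearranged with shuffles, the cofactor (adjoint)
     entries are accumulated from products of 2x2 sub-blocks, the determinant
     is formed as a dot product, its reciprocal is obtained with RECIP_SS and
     refined by one Newton-Raphson step, and every cofactor row is scaled by
     that reciprocal to give the inverse in place.
  */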

  SSE_SCOPE_BEGIN;
  SSE_INLINE_BEGIN_1(a)

  /* ----------------------------------------------- */

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_0,XMM0)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_4,XMM0)

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_8,XMM5)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_12,XMM5)

  SSE_COPY_PS(XMM3,XMM0)
  SSE_SHUFFLE(XMM3,XMM5,0x88)

  SSE_SHUFFLE(XMM5,XMM0,0xDD)

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_2,XMM0)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_6,XMM0)

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_10,XMM6)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_14,XMM6)

  SSE_COPY_PS(XMM4,XMM0)
  SSE_SHUFFLE(XMM4,XMM6,0x88)

  SSE_SHUFFLE(XMM6,XMM0,0xDD)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM7,XMM4)
  SSE_MULT_PS(XMM7,XMM6)

  SSE_SHUFFLE(XMM7,XMM7,0xB1)

  SSE_COPY_PS(XMM0,XMM5)
  SSE_MULT_PS(XMM0,XMM7)

  SSE_COPY_PS(XMM2,XMM3)
  SSE_MULT_PS(XMM2,XMM7)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)

  SSE_COPY_PS(XMM1,XMM5)
  SSE_MULT_PS(XMM1,XMM7)
  SSE_SUB_PS(XMM1,XMM0)

  SSE_MULT_PS(XMM7,XMM3)
  SSE_SUB_PS(XMM7,XMM2)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_4,XMM7)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM0,XMM5)
  SSE_MULT_PS(XMM0,XMM4)

  SSE_SHUFFLE(XMM0,XMM0,0xB1)

  SSE_COPY_PS(XMM2,XMM6)
  SSE_MULT_PS(XMM2,XMM0)
  SSE_ADD_PS(XMM2,XMM1)

  SSE_COPY_PS(XMM7,XMM3)
  SSE_MULT_PS(XMM7,XMM0)

  SSE_SHUFFLE(XMM0,XMM0,0x4E)

  SSE_COPY_PS(XMM1,XMM6)
  SSE_MULT_PS(XMM1,XMM0)
  SSE_SUB_PS(XMM2,XMM1)

  SSE_MULT_PS(XMM0,XMM3)
  SSE_SUB_PS(XMM0,XMM7)

  SSE_SHUFFLE(XMM0,XMM0,0x4E)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_12,XMM0)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM7,XMM5)
  SSE_SHUFFLE(XMM7,XMM5,0x4E)
  SSE_MULT_PS(XMM7,XMM6)

  SSE_SHUFFLE(XMM7,XMM7,0xB1)

  SSE_SHUFFLE(XMM4,XMM4,0x4E)

  SSE_COPY_PS(XMM0,XMM4)
  SSE_MULT_PS(XMM0,XMM7)
  SSE_ADD_PS(XMM0,XMM2)

  SSE_COPY_PS(XMM2,XMM3)
  SSE_MULT_PS(XMM2,XMM7)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)

  SSE_COPY_PS(XMM1,XMM4)
  SSE_MULT_PS(XMM1,XMM7)
  SSE_SUB_PS(XMM0,XMM1)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_0,XMM0)

  SSE_MULT_PS(XMM7,XMM3)
  SSE_SUB_PS(XMM7,XMM2)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1,XMM3)
  SSE_MULT_PS(XMM1,XMM5)

  SSE_SHUFFLE(XMM1,XMM1,0xB1)

  SSE_COPY_PS(XMM0,XMM6)
  SSE_MULT_PS(XMM0,XMM1)
  SSE_ADD_PS(XMM0,XMM7)

  SSE_COPY_PS(XMM2,XMM4)
  SSE_MULT_PS(XMM2,XMM1)
  SSE_SUB_PS_M(XMM2,SSE_ARG_1,FLOAT_12)

  SSE_SHUFFLE(XMM1,XMM1,0x4E)

  SSE_COPY_PS(XMM7,XMM6)
  SSE_MULT_PS(XMM7,XMM1)
  SSE_SUB_PS(XMM7,XMM0)

  SSE_MULT_PS(XMM1,XMM4)
  SSE_SUB_PS(XMM2,XMM1)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_12,XMM2)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1,XMM3)
  SSE_MULT_PS(XMM1,XMM6)

  SSE_SHUFFLE(XMM1,XMM1,0xB1)

  SSE_COPY_PS(XMM2,XMM4)
  SSE_MULT_PS(XMM2,XMM1)
  SSE_LOAD_PS(SSE_ARG_1,FLOAT_4,XMM0)
  SSE_SUB_PS(XMM0,XMM2)

  SSE_COPY_PS(XMM2,XMM5)
  SSE_MULT_PS(XMM2,XMM1)
  SSE_ADD_PS(XMM2,XMM7)

  SSE_SHUFFLE(XMM1,XMM1,0x4E)

  SSE_COPY_PS(XMM7,XMM4)
  SSE_MULT_PS(XMM7,XMM1)
  SSE_ADD_PS(XMM7,XMM0)

  SSE_MULT_PS(XMM1,XMM5)
  SSE_SUB_PS(XMM2,XMM1)

  /* ----------------------------------------------- */

  SSE_MULT_PS(XMM4,XMM3)

  SSE_SHUFFLE(XMM4,XMM4,0xB1)

  SSE_COPY_PS(XMM1,XMM6)
  SSE_MULT_PS(XMM1,XMM4)
  SSE_ADD_PS(XMM1,XMM7)

  SSE_COPY_PS(XMM0,XMM5)
  SSE_MULT_PS(XMM0,XMM4)
  SSE_LOAD_PS(SSE_ARG_1,FLOAT_12,XMM7)
  SSE_SUB_PS(XMM7,XMM0)

  SSE_SHUFFLE(XMM4,XMM4,0x4E)

  SSE_MULT_PS(XMM6,XMM4)
  SSE_SUB_PS(XMM1,XMM6)

  SSE_MULT_PS(XMM5,XMM4)
  SSE_ADD_PS(XMM5,XMM7)

  /* ----------------------------------------------- */
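  /* Determinant and scaling: the cofactor row stored at FLOAT_0 above is
     reloaded and multiplied elementwise with XMM3; the four products are
     summed with shuffles to give the determinant.  RECIP_SS supplies an
     approximate reciprocal, refined by one Newton-Raphson step
     (x1 = 2*x0 - det*x0*x0), which is then broadcast and used to scale the
     four cofactor rows written back as the inverse. */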

  SSE_LOAD_PS(SSE_ARG_1,FLOAT_0,XMM0)
  SSE_MULT_PS(XMM3,XMM0)

  SSE_COPY_PS(XMM4,XMM3)
  SSE_SHUFFLE(XMM4,XMM3,0x4E)
  SSE_ADD_PS(XMM4,XMM3)

  SSE_COPY_PS(XMM6,XMM4)
  SSE_SHUFFLE(XMM6,XMM4,0xB1)
  SSE_ADD_SS(XMM6,XMM4)

  SSE_COPY_PS(XMM3,XMM6)
  SSE_RECIP_SS(XMM3,XMM6)
  SSE_COPY_SS(XMM4,XMM3)
  SSE_ADD_SS(XMM4,XMM3)
  SSE_MULT_SS(XMM3,XMM3)
  SSE_MULT_SS(XMM6,XMM3)
  SSE_SUB_SS(XMM4,XMM6)

  SSE_SHUFFLE(XMM4,XMM4,0x00)

  SSE_MULT_PS(XMM0,XMM4)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_0,XMM0)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_2,XMM0)

  SSE_MULT_PS(XMM1,XMM4)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_4,XMM1)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_6,XMM1)

  SSE_MULT_PS(XMM2,XMM4)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_8,XMM2)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_10,XMM2)

  SSE_MULT_PS(XMM4,XMM5)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_12,XMM4)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_14,XMM4)

  /* ----------------------------------------------- */

  SSE_INLINE_END_1;
  SSE_SCOPE_END;

  return(0);
}

#endif