Inverse_SSE.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2001 Intel Corporation
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.

// The SSE code for the 4x4 float and double matrix inverse in this file
// comes from the following Intel library:
// http://software.intel.com/en-us/articles/optimized-matrix-library-for-use-with-the-intel-pentiumr-4-processors-sse2-instructions/
//
// Here is the respective copyright and license statement:
//
// Copyright (c) 2001 Intel Corporation.
//
// Permission is granted to use, copy, distribute and prepare derivative works
// of this library for any purpose and without fee, provided that the above
// copyright notice and this statement appear in all copies.
// Intel makes no representations about the suitability of this software for
// any purpose, and specifically disclaims all warranties.
// See LEGAL.TXT for all the legal information.

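// Usage sketch (illustrative, not part of the original code): these kernels
// back the fixed-size 4x4 matrix inverse when Eigen is compiled with SSE
// support, e.g.
//
//   #include <Eigen/Dense>
//   Eigen::Matrix4f m  = Eigen::Matrix4f::Random();
//   Eigen::Matrix4f mi = m.inverse();   // dispatched to run() below
//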
#ifndef EIGEN_INVERSE_SSE_H
#define EIGEN_INVERSE_SSE_H

namespace Eigen {

namespace internal {

template<typename MatrixType, typename ResultType>
struct compute_inverse_size4<Architecture::SSE, float, MatrixType, ResultType>
{
  enum {
    MatrixAlignment    = bool(MatrixType::Flags&AlignedBit),
    ResultAlignment    = bool(ResultType::Flags&AlignedBit),
    StorageOrdersMatch = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
  };

  static void run(const MatrixType& matrix, ResultType& result)
  {
    EIGEN_ALIGN16 const unsigned int _Sign_PNNP[4] = { 0x00000000, 0x80000000, 0x80000000, 0x00000000 };

    // Load the full matrix into registers
    __m128 _L1 = matrix.template packet<MatrixAlignment>( 0);
    __m128 _L2 = matrix.template packet<MatrixAlignment>( 4);
    __m128 _L3 = matrix.template packet<MatrixAlignment>( 8);
    __m128 _L4 = matrix.template packet<MatrixAlignment>(12);

    // The inverse is calculated using the "divide and conquer" technique. The
    // original matrix is divided into four 2x2 sub-matrices. Since each
    // register holds four matrix elements, each sub-matrix fits in a single
    // register. Hence we get better locality for the calculations.

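    // For reference, this is the blockwise identity the original Intel code is
    // based on. Writing the matrix as M = [A B; C D] in 2x2 blocks, and X# for
    // the adjugate of a 2x2 block X (so that X*X# = |X|*I):
    //
    //   M^-1 = 1/det * [ (|D|*A - B*D#*C)#   (|B|*C - D*B#*A)# ]
    //                  [ (|C|*B - A*C#*D)#   (|A|*D - C*A#*B)# ]
    //
    //   det  = |A|*|D| + |B|*|C| - trace((A#*B)*(D#*C))
    //
    // where |X| denotes the 2x2 determinant of X.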
    __m128 A, B, C, D; // the four sub-matrices
    if(!StorageOrdersMatch)
    {
      A = _mm_unpacklo_ps(_L1, _L2);
      B = _mm_unpacklo_ps(_L3, _L4);
      C = _mm_unpackhi_ps(_L1, _L2);
      D = _mm_unpackhi_ps(_L3, _L4);
    }
    else
    {
      A = _mm_movelh_ps(_L1, _L2);
      B = _mm_movehl_ps(_L2, _L1);
      C = _mm_movelh_ps(_L3, _L4);
      D = _mm_movehl_ps(_L4, _L3);
    }

    __m128 iA, iB, iC, iD,       // partial inverse of the sub-matrices
           DC, AB;
    __m128 dA, dB, dC, dD;       // determinant of the sub-matrices
    __m128 det, d, d1, d2;
    __m128 rd;                   // reciprocal of the determinant

    // AB = A# * B
    AB = _mm_mul_ps(_mm_shuffle_ps(A,A,0x0F), B);
    AB = _mm_sub_ps(AB,_mm_mul_ps(_mm_shuffle_ps(A,A,0xA5), _mm_shuffle_ps(B,B,0x4E)));
    // DC = D# * C
    DC = _mm_mul_ps(_mm_shuffle_ps(D,D,0x0F), C);
    DC = _mm_sub_ps(DC,_mm_mul_ps(_mm_shuffle_ps(D,D,0xA5), _mm_shuffle_ps(C,C,0x4E)));

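    // In scalar terms, with a register X = {x0,x1,x2,x3} holding a 2x2 block
    // row by row, X# = [x3 -x1; -x2 x0]. The shuffle masks 0x0F, 0xA5 and 0x4E
    // broadcast and swap the needed lanes so that, for example,
    //   AB = { a3*b0 - a1*b2,  a3*b1 - a1*b3,
    //          a0*b2 - a2*b0,  a0*b3 - a2*b1 }   (= A# * B, row by row)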
    // dA = |A|
    dA = _mm_mul_ps(_mm_shuffle_ps(A, A, 0x5F),A);
    dA = _mm_sub_ss(dA, _mm_movehl_ps(dA,dA));
    // dB = |B|
    dB = _mm_mul_ps(_mm_shuffle_ps(B, B, 0x5F),B);
    dB = _mm_sub_ss(dB, _mm_movehl_ps(dB,dB));

    // dC = |C|
    dC = _mm_mul_ps(_mm_shuffle_ps(C, C, 0x5F),C);
    dC = _mm_sub_ss(dC, _mm_movehl_ps(dC,dC));
    // dD = |D|
    dD = _mm_mul_ps(_mm_shuffle_ps(D, D, 0x5F),D);
    dD = _mm_sub_ss(dD, _mm_movehl_ps(dD,dD));

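    // Each dX holds the plain 2x2 determinant in its low lane: the 0x5F
    // shuffle lines x3 up with x0 (lane 0) and x1 up with x2 (lane 2), and the
    // sub_ss leaves
    //   dX[0] = x0*x3 - x1*x2
    // (the upper lanes are don't-cares from here on).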
    // d = trace(AB*DC) = trace(A#*B*D#*C)
    d = _mm_mul_ps(_mm_shuffle_ps(DC,DC,0xD8),AB);

    // iD = C*A#*B
    iD = _mm_mul_ps(_mm_shuffle_ps(C,C,0xA0), _mm_movelh_ps(AB,AB));
    iD = _mm_add_ps(iD,_mm_mul_ps(_mm_shuffle_ps(C,C,0xF5), _mm_movehl_ps(AB,AB)));
    // iA = B*D#*C
    iA = _mm_mul_ps(_mm_shuffle_ps(B,B,0xA0), _mm_movelh_ps(DC,DC));
    iA = _mm_add_ps(iA,_mm_mul_ps(_mm_shuffle_ps(B,B,0xF5), _mm_movehl_ps(DC,DC)));

    // d = trace(AB*DC) = trace(A#*B*D#*C) [continued]
    d = _mm_add_ps(d, _mm_movehl_ps(d, d));
    d = _mm_add_ss(d, _mm_shuffle_ps(d, d, 1));
    d1 = _mm_mul_ss(dA,dD);
    d2 = _mm_mul_ss(dB,dC);

    // iD = D*|A| - C*A#*B
    iD = _mm_sub_ps(_mm_mul_ps(D,_mm_shuffle_ps(dA,dA,0)), iD);

    // iA = A*|D| - B*D#*C;
    iA = _mm_sub_ps(_mm_mul_ps(A,_mm_shuffle_ps(dD,dD,0)), iA);

    // det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)
    det = _mm_sub_ss(_mm_add_ss(d1,d2),d);
    rd = _mm_div_ss(_mm_set_ss(1.0f), det);

// #ifdef ZERO_SINGULAR
// rd = _mm_and_ps(_mm_cmpneq_ss(det,_mm_setzero_ps()), rd);
// #endif

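    // Note that with the ZERO_SINGULAR guard above disabled, a singular input
    // (det == 0) is not detected: the division produces +/-inf and the result
    // is filled with inf/NaN values.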
    // iB = D * (A#B)# = D*B#*A
    iB = _mm_mul_ps(D, _mm_shuffle_ps(AB,AB,0x33));
    iB = _mm_sub_ps(iB, _mm_mul_ps(_mm_shuffle_ps(D,D,0xB1), _mm_shuffle_ps(AB,AB,0x66)));
    // iC = A * (D#C)# = A*C#*D
    iC = _mm_mul_ps(A, _mm_shuffle_ps(DC,DC,0x33));
    iC = _mm_sub_ps(iC, _mm_mul_ps(_mm_shuffle_ps(A,A,0xB1), _mm_shuffle_ps(DC,DC,0x66)));

    rd = _mm_shuffle_ps(rd,rd,0);
    rd = _mm_xor_ps(rd, _mm_load_ps((float*)_Sign_PNNP));

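    // rd now holds { 1/det, -1/det, -1/det, 1/det }: the alternating signs are
    // the cofactor signs needed when the final shuffles pick the adjugate
    // entries of iA..iD.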
    // iB = C*|B| - D*B#*A
    iB = _mm_sub_ps(_mm_mul_ps(C,_mm_shuffle_ps(dB,dB,0)), iB);

    // iC = B*|C| - A*C#*D;
    iC = _mm_sub_ps(_mm_mul_ps(B,_mm_shuffle_ps(dC,dC,0)), iC);

    // iX = iX / det
    iA = _mm_mul_ps(rd,iA);
    iB = _mm_mul_ps(rd,iB);
    iC = _mm_mul_ps(rd,iC);
    iD = _mm_mul_ps(rd,iD);

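    // Each output packet below gathers lanes {3,1} or {2,0} of two scaled
    // blocks, i.e. the rows of iA#..iD# divided by det.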
    result.template writePacket<ResultAlignment>( 0, _mm_shuffle_ps(iA,iB,0x77));
    result.template writePacket<ResultAlignment>( 4, _mm_shuffle_ps(iA,iB,0x22));
    result.template writePacket<ResultAlignment>( 8, _mm_shuffle_ps(iC,iD,0x77));
    result.template writePacket<ResultAlignment>(12, _mm_shuffle_ps(iC,iD,0x22));
  }

};

template<typename MatrixType, typename ResultType>
struct compute_inverse_size4<Architecture::SSE, double, MatrixType, ResultType>
{
  enum {
    MatrixAlignment    = bool(MatrixType::Flags&AlignedBit),
    ResultAlignment    = bool(ResultType::Flags&AlignedBit),
    StorageOrdersMatch = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
  };
  static void run(const MatrixType& matrix, ResultType& result)
  {
    const __m128d _Sign_NP = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
    const __m128d _Sign_PN = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));

    // The inverse is calculated using the "divide and conquer" technique. The
    // original matrix is divided into four 2x2 sub-matrices. Since each
    // register holds only two matrix elements, each sub-matrix spans two
    // registers. Hence we get better locality for the calculations.

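    // The blockwise identity is the same as in the float specialization above;
    // here each 2x2 block X is split across two __m128d registers, X1 holding
    // its first row and X2 its second row.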
    // the four sub-matrices
    __m128d A1, A2, B1, B2, C1, C2, D1, D2;

    if(StorageOrdersMatch)
    {
      A1 = matrix.template packet<MatrixAlignment>( 0); B1 = matrix.template packet<MatrixAlignment>( 2);
      A2 = matrix.template packet<MatrixAlignment>( 4); B2 = matrix.template packet<MatrixAlignment>( 6);
      C1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);
      C2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);
    }
    else
    {
      __m128d tmp;
      A1 = matrix.template packet<MatrixAlignment>( 0); C1 = matrix.template packet<MatrixAlignment>( 2);
      A2 = matrix.template packet<MatrixAlignment>( 4); C2 = matrix.template packet<MatrixAlignment>( 6);
      tmp = A1;
      A1 = _mm_unpacklo_pd(A1,A2);
      A2 = _mm_unpackhi_pd(tmp,A2);
      tmp = C1;
      C1 = _mm_unpacklo_pd(C1,C2);
      C2 = _mm_unpackhi_pd(tmp,C2);

      B1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);
      B2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);
      tmp = B1;
      B1 = _mm_unpacklo_pd(B1,B2);
      B2 = _mm_unpackhi_pd(tmp,B2);
      tmp = D1;
      D1 = _mm_unpacklo_pd(D1,D2);
      D2 = _mm_unpackhi_pd(tmp,D2);
    }

    __m128d iA1, iA2, iB1, iB2, iC1, iC2, iD1, iD2,   // partial inverse of the sub-matrices
            DC1, DC2, AB1, AB2;
    __m128d dA, dB, dC, dD;                           // determinant of the sub-matrices
    __m128d det, d1, d2, rd;

    // dA = |A|
    dA = _mm_shuffle_pd(A2, A2, 1);
    dA = _mm_mul_pd(A1, dA);
    dA = _mm_sub_sd(dA, _mm_shuffle_pd(dA,dA,3));
    // dB = |B|
    dB = _mm_shuffle_pd(B2, B2, 1);
    dB = _mm_mul_pd(B1, dB);
    dB = _mm_sub_sd(dB, _mm_shuffle_pd(dB,dB,3));

    // AB = A# * B
    AB1 = _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,3));
    AB2 = _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,0));
    AB1 = _mm_sub_pd(AB1, _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,3)));
    AB2 = _mm_sub_pd(AB2, _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,0)));

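    // In scalar terms (block A = [a0 a1; a2 a3], block B = [b0 b1; b2 b3]),
    // the A#*B product above is:
    //   AB1 = { a3*b0 - a1*b2,  a3*b1 - a1*b3 }   // first row of A# * B
    //   AB2 = { a0*b2 - a2*b0,  a0*b3 - a2*b1 }   // second row of A# * B
    // D# * C below is computed the same way.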
    // dC = |C|
    dC = _mm_shuffle_pd(C2, C2, 1);
    dC = _mm_mul_pd(C1, dC);
    dC = _mm_sub_sd(dC, _mm_shuffle_pd(dC,dC,3));
    // dD = |D|
    dD = _mm_shuffle_pd(D2, D2, 1);
    dD = _mm_mul_pd(D1, dD);
    dD = _mm_sub_sd(dD, _mm_shuffle_pd(dD,dD,3));

    // DC = D# * C
    DC1 = _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,3));
    DC2 = _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,0));
    DC1 = _mm_sub_pd(DC1, _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,3)));
    DC2 = _mm_sub_pd(DC2, _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,0)));

    // rd = trace(AB*DC) = trace(A#*B*D#*C)
    d1 = _mm_mul_pd(AB1, _mm_shuffle_pd(DC1, DC2, 0));
    d2 = _mm_mul_pd(AB2, _mm_shuffle_pd(DC1, DC2, 3));
    rd = _mm_add_pd(d1, d2);
    rd = _mm_add_sd(rd, _mm_shuffle_pd(rd, rd,3));

    // iD = C*A#*B
    iD1 = _mm_mul_pd(AB1, _mm_shuffle_pd(C1,C1,0));
    iD2 = _mm_mul_pd(AB1, _mm_shuffle_pd(C2,C2,0));
    iD1 = _mm_add_pd(iD1, _mm_mul_pd(AB2, _mm_shuffle_pd(C1,C1,3)));
    iD2 = _mm_add_pd(iD2, _mm_mul_pd(AB2, _mm_shuffle_pd(C2,C2,3)));

    // iA = B*D#*C
    iA1 = _mm_mul_pd(DC1, _mm_shuffle_pd(B1,B1,0));
    iA2 = _mm_mul_pd(DC1, _mm_shuffle_pd(B2,B2,0));
    iA1 = _mm_add_pd(iA1, _mm_mul_pd(DC2, _mm_shuffle_pd(B1,B1,3)));
    iA2 = _mm_add_pd(iA2, _mm_mul_pd(DC2, _mm_shuffle_pd(B2,B2,3)));

    // iD = D*|A| - C*A#*B
    dA = _mm_shuffle_pd(dA,dA,0);
    iD1 = _mm_sub_pd(_mm_mul_pd(D1, dA), iD1);
    iD2 = _mm_sub_pd(_mm_mul_pd(D2, dA), iD2);

    // iA = A*|D| - B*D#*C;
    dD = _mm_shuffle_pd(dD,dD,0);
    iA1 = _mm_sub_pd(_mm_mul_pd(A1, dD), iA1);
    iA2 = _mm_sub_pd(_mm_mul_pd(A2, dD), iA2);

    d1 = _mm_mul_sd(dA, dD);
    d2 = _mm_mul_sd(dB, dC);

    // iB = D * (A#B)# = D*B#*A
    iB1 = _mm_mul_pd(D1, _mm_shuffle_pd(AB2,AB1,1));
    iB2 = _mm_mul_pd(D2, _mm_shuffle_pd(AB2,AB1,1));
    iB1 = _mm_sub_pd(iB1, _mm_mul_pd(_mm_shuffle_pd(D1,D1,1), _mm_shuffle_pd(AB2,AB1,2)));
    iB2 = _mm_sub_pd(iB2, _mm_mul_pd(_mm_shuffle_pd(D2,D2,1), _mm_shuffle_pd(AB2,AB1,2)));

    // det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)
    det = _mm_add_sd(d1, d2);
    det = _mm_sub_sd(det, rd);

    // iC = A * (D#C)# = A*C#*D
    iC1 = _mm_mul_pd(A1, _mm_shuffle_pd(DC2,DC1,1));
    iC2 = _mm_mul_pd(A2, _mm_shuffle_pd(DC2,DC1,1));
    iC1 = _mm_sub_pd(iC1, _mm_mul_pd(_mm_shuffle_pd(A1,A1,1), _mm_shuffle_pd(DC2,DC1,2)));
    iC2 = _mm_sub_pd(iC2, _mm_mul_pd(_mm_shuffle_pd(A2,A2,1), _mm_shuffle_pd(DC2,DC1,2)));

    rd = _mm_div_sd(_mm_set_sd(1.0), det);
// #ifdef ZERO_SINGULAR
// rd = _mm_and_pd(_mm_cmpneq_sd(det,_mm_setzero_pd()), rd);
// #endif
    rd = _mm_shuffle_pd(rd,rd,0);

    // iB = C*|B| - D*B#*A
    dB = _mm_shuffle_pd(dB,dB,0);
    iB1 = _mm_sub_pd(_mm_mul_pd(C1, dB), iB1);
    iB2 = _mm_sub_pd(_mm_mul_pd(C2, dB), iB2);

    d1 = _mm_xor_pd(rd, _Sign_PN);
    d2 = _mm_xor_pd(rd, _Sign_NP);

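    // d1 = { 1/det, -1/det } and d2 = { -1/det, 1/det }: as in the float path,
    // these sign patterns supply the cofactor signs of the adjugates in the
    // final writes below.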
    // iC = B*|C| - A*C#*D;
    dC = _mm_shuffle_pd(dC,dC,0);
    iC1 = _mm_sub_pd(_mm_mul_pd(B1, dC), iC1);
    iC2 = _mm_sub_pd(_mm_mul_pd(B2, dC), iC2);

    result.template writePacket<ResultAlignment>( 0, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 3), d1)); // iA# / det
    result.template writePacket<ResultAlignment>( 4, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 0), d2));
    result.template writePacket<ResultAlignment>( 2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 3), d1)); // iB# / det
    result.template writePacket<ResultAlignment>( 6, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 0), d2));
    result.template writePacket<ResultAlignment>( 8, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 3), d1)); // iC# / det
    result.template writePacket<ResultAlignment>(12, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 0), d2));
    result.template writePacket<ResultAlignment>(10, _mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 3), d1)); // iD# / det
    result.template writePacket<ResultAlignment>(14, _mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 0), d2));
  }
};


} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_INVERSE_SSE_H