Eigen  3.2.93
SSE/PacketMath.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PACKET_MATH_SSE_H
#define EIGEN_PACKET_MATH_SSE_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
#endif

#ifdef __FMA__
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD 1
#endif
#endif

#if (defined EIGEN_VECTORIZE_AVX) && EIGEN_COMP_GNUC_STRICT && (__GXX_ABI_VERSION < 1004)
// With GCC's default ABI version, __m128 and __m256 are the same type, and therefore we cannot
// have overloads for both types without a linker error.
// One solution is to increase the ABI version using -fabi-version=4 (or greater).
// Otherwise, we work around this inconvenience by wrapping 128-bit types in the following helper
// structure:
template<typename T>
struct eigen_packet_wrapper
{
  EIGEN_ALWAYS_INLINE operator T&() { return m_val; }
  EIGEN_ALWAYS_INLINE operator const T&() const { return m_val; }
  EIGEN_ALWAYS_INLINE eigen_packet_wrapper() {}
  EIGEN_ALWAYS_INLINE eigen_packet_wrapper(const T &v) : m_val(v) {}
  EIGEN_ALWAYS_INLINE eigen_packet_wrapper& operator=(const T &v) {
    m_val = v;
    return *this;
  }

  T m_val;
};
typedef eigen_packet_wrapper<__m128>  Packet4f;
typedef eigen_packet_wrapper<__m128i> Packet4i;
typedef eigen_packet_wrapper<__m128d> Packet2d;
#else
typedef __m128  Packet4f;
typedef __m128i Packet4i;
typedef __m128d Packet2d;
#endif
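
// Illustrative sketch of the conflict described above (comment only): with the
// affected GCC ABI versions, the two overloads below mangle to the same symbol
// and therefore cannot coexist at link time:
//   void foo(__m128 a);  // 128-bit packet
//   void foo(__m256 a);  // 256-bit packet -- same mangled name under the old ABI
// Wrapping __m128 in eigen_packet_wrapper<__m128> gives the packet a distinct
// C++ type, restoring distinct mangled names at no runtime cost.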

template<> struct is_arithmetic<__m128>  { enum { value = true }; };
template<> struct is_arithmetic<__m128i> { enum { value = true }; };
template<> struct is_arithmetic<__m128d> { enum { value = true }; };

#define vec4f_swizzle1(v,p,q,r,s) \
  (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), ((s)<<6|(r)<<4|(q)<<2|(p)))))

#define vec4i_swizzle1(v,p,q,r,s) \
  (_mm_shuffle_epi32( v, ((s)<<6|(r)<<4|(q)<<2|(p))))

#define vec2d_swizzle1(v,p,q) \
  (_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), (((q)*2+1)<<6|((q)*2)<<4|((p)*2+1)<<2|((p)*2)))))

#define vec4f_swizzle2(a,b,p,q,r,s) \
  (_mm_shuffle_ps( (a), (b), ((s)<<6|(r)<<4|(q)<<2|(p))))

#define vec4i_swizzle2(a,b,p,q,r,s) \
  (_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), ((s)<<6|(r)<<4|(q)<<2|(p))))))

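// Usage sketch for the swizzle macros (illustrative comment only): the index
// arguments select source lanes, least-significant lane first, e.g.
//   Packet4f v = _mm_setr_ps(a0,a1,a2,a3);
//   vec4f_swizzle1(v,3,2,1,0);    // -> [a3 a2 a1 a0] (lane reversal)
//   vec4f_swizzle1(v,0,0,0,0);    // -> [a0 a0 a0 a0] (broadcast lane 0)
//   vec4f_swizzle2(v,w,0,1,0,1);  // -> [v0 v1 w0 w1] (low halves of v and w)
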
#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
  const Packet4f p4f_##NAME = pset1<Packet4f>(X)

#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \
  const Packet2d p2d_##NAME = pset1<Packet2d>(X)

#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
  const Packet4f p4f_##NAME = _mm_castsi128_ps(pset1<Packet4i>(X))

#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
  const Packet4i p4i_##NAME = pset1<Packet4i>(X)


// Use the packet_traits defined in AVX/PacketMath.h instead if we're going
// to leverage AVX instructions.
#ifndef EIGEN_VECTORIZE_AVX
template<> struct packet_traits<float> : default_packet_traits
{
  typedef Packet4f type;
  typedef Packet4f half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=4,
    HasHalfPacket = 0,

    HasDiv = 1,
    HasSin = EIGEN_FAST_MATH,
    HasCos = EIGEN_FAST_MATH,
    HasLog = 1,
    HasExp = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasTanh = EIGEN_FAST_MATH,
    HasBlend = 1

#ifdef EIGEN_VECTORIZE_SSE4_1
    ,
    HasRound = 1,
    HasFloor = 1,
    HasCeil = 1
#endif
  };
};
template<> struct packet_traits<double> : default_packet_traits
{
  typedef Packet2d type;
  typedef Packet2d half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=2,
    HasHalfPacket = 0,

    HasDiv = 1,
    HasExp = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasBlend = 1

#ifdef EIGEN_VECTORIZE_SSE4_1
    ,
    HasRound = 1,
    HasFloor = 1,
    HasCeil = 1
#endif
  };
};
#endif
template<> struct packet_traits<int> : default_packet_traits
{
  typedef Packet4i type;
  typedef Packet4i half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=4,

    HasBlend = 1
  };
};

template<> struct unpacket_traits<Packet4f> { typedef float  type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };
template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; };
template<> struct unpacket_traits<Packet4i> { typedef int    type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };

#if EIGEN_COMP_MSVC==1500
// Workaround for an MSVC 9 internal compiler error.
// TODO: It has been detected with win64 builds (amd64), so let's check whether it also happens in 32bits+SSE mode.
// TODO: check whether a better fix exists, like adding a pset0() function. (it crashed on pset1(0))
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps(from,from,from,from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set_pd(from,from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set_epi32(from,from,from,from); }
#else
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps1(from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set1_epi32(from); }
#endif

// GCC generates a shufps instruction for _mm_set1_ps/_mm_load1_ps instead of the more efficient pshufd instruction.
// However, using intrinsics for pset1 makes gcc generate poor code in some cases (see bug 203).
// Using inline assembly is not an option either, because then gcc fails to reorder the instructions properly.
// Therefore, we introduced the pload1 functions to be used in product kernels for which bug 203 does not apply.
// Also note that with AVX, we want it to generate a vbroadcastss.
#if EIGEN_COMP_GNUC_STRICT && (!defined __AVX__)
template<> EIGEN_STRONG_INLINE Packet4f pload1<Packet4f>(const float *from) {
  return vec4f_swizzle1(_mm_load_ss(from),0,0,0,0);
}
#endif
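
// Hedged usage sketch (comment only): pset1 broadcasts a scalar value, while
// pload1 broadcasts a value read from memory; both yield [x x x x]:
//   float x = 3.f;
//   Packet4f a = pset1<Packet4f>(x);   // broadcast from a register
//   Packet4f b = pload1<Packet4f>(&x); // broadcast from memory (movss + shuffle here)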

template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a)  { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }
template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a)    { return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }

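// Worked example (illustrative): plset produces the arithmetic sequence
// a, a+1, ... in lane order; _mm_set_ps lists lanes most-significant first:
//   plset<Packet4f>(10.f);  // -> [10 11 12 13]
//   plset<Packet2d>(0.5);   // -> [0.5 1.5]
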
template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
{
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
  return _mm_xor_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a)
{
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000));
  return _mm_xor_pd(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
{
  return psub(Packet4i(_mm_setr_epi32(0,0,0,0)), a);
}

template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_mullo_epi32(a,b);
#else
  // this version is slightly faster than 4 scalar products
  return vec4i_swizzle1(
            vec4i_swizzle2(
              _mm_mul_epu32(a,b),
              _mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2),
                            vec4i_swizzle1(b,1,0,3,2)),
              0,2,0,2),
            0,2,1,3);
#endif
}
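
// How the SSE2 fallback above works (explanatory comment, following the
// documented intrinsic semantics): _mm_mul_epu32 multiplies only the even
// lanes (0 and 2) of each operand, producing two 64-bit products. The first
// call computes a0*b0 and a2*b2; swizzling both inputs by (1,0,3,2) moves the
// odd lanes into even positions so the second call computes a1*b1 and a3*b3.
// vec4i_swizzle2(...,0,2,0,2) picks the low 32 bits of all four products, and
// the final vec4i_swizzle1(...,0,2,1,3) restores the original lane order.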

template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); }

// for some obscure reason, pmadd has to be overloaded for packets of integers
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
#ifdef __FMA__
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return _mm_fmadd_ps(a,b,c); }
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return _mm_fmadd_pd(a,b,c); }
#endif

template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_min_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_min_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_min_epi32(a,b);
#else
  // after some benchmarking, this version *is* faster than a scalar implementation
  Packet4i mask = _mm_cmplt_epi32(a,b);
  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
#endif
}

template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_max_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_max_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_max_epi32(a,b);
#else
  // after some benchmarking, this version *is* faster than a scalar implementation
  Packet4i mask = _mm_cmpgt_epi32(a,b);
  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
#endif
}

#ifdef EIGEN_VECTORIZE_SSE4_1
template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a) { return _mm_round_ps(a, 0); }
template<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a) { return _mm_round_pd(a, 0); }

template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a) { return _mm_ceil_ps(a); }
template<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a) { return _mm_ceil_pd(a); }

template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a) { return _mm_floor_ps(a); }
template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) { return _mm_floor_pd(a); }
#endif

template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float*  from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }
template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int*    from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }

#if EIGEN_COMP_MSVC
  template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) {
    EIGEN_DEBUG_UNALIGNED_LOAD
    #if (EIGEN_COMP_MSVC==1600)
    // NOTE Some versions of MSVC10 generate bad code when using _mm_loadu_ps
    // (i.e., they do not generate an unaligned load!)
    __m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from));
    res = _mm_loadh_pi(res, (const __m64*)(from+2));
    return res;
    #else
    return _mm_loadu_ps(from);
    #endif
  }
#else
// NOTE: with the code below, MSVC's compiler crashes!

template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
  return _mm_loadu_ps(from);
}
#endif

template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
  return _mm_loadu_pd(from);
}
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
  return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
}


template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
{
  return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd(reinterpret_cast<const double*>(from))), 0, 0, 1, 1);
}
template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
{ return pset1<Packet2d>(from[0]); }
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
{
  Packet4i tmp;
  tmp = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(from));
  return vec4i_swizzle1(tmp, 0, 0, 1, 1);
}
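
// Illustrative note (comment only): ploaddup reads size/2 scalars and
// duplicates each of them, e.g. for from = {x, y}:
//   ploaddup<Packet4f>(from);  // -> [x x y y]
//   ploaddup<Packet2d>(from);  // -> [x x]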

template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }

template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*   to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*       to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); }

template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
{
  return _mm_set_ps(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
{
  return _mm_set_pd(from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
{
  return _mm_set_epi32(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}

template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
{
  to[stride*0] = _mm_cvtss_f32(from);
  to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 1));
  to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 2));
  to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 3));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
{
  to[stride*0] = _mm_cvtsd_f64(from);
  to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(from, from, 1));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
{
  to[stride*0] = _mm_cvtsi128_si32(from);
  to[stride*1] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 1));
  to[stride*2] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 2));
  to[stride*3] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 3));
}
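
// Hedged usage sketch (comment only): pgather/pscatter move a packet between
// registers and strided memory, e.g. a column of a row-major matrix whose
// rows are N floats apart:
//   Packet4f col = pgather<float, Packet4f>(data, N);  // [data[0] data[N] data[2N] data[3N]]
//   pscatter<float, Packet4f>(data, col, N);           // writes the lanes back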

// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet4f>(float* to, const float& a)
{
  Packet4f pa = _mm_set_ss(a);
  pstore(to, Packet4f(vec4f_swizzle1(pa,0,0,0,0)));
}
// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double& a)
{
  Packet2d pa = _mm_set_sd(a);
  pstore(to, Packet2d(vec2d_swizzle1(pa,0,0)));
}

#ifndef EIGEN_VECTORIZE_AVX
template<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
#endif

#if EIGEN_COMP_MSVC_STRICT && EIGEN_OS_WIN64
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010.
// Direct access to the struct members fixed bug #62.
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return a.m128_f32[0]; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return a.m128d_f64[0]; }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#elif EIGEN_COMP_MSVC_STRICT
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010.
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#else
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return _mm_cvtss_f32(a); }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }
#endif

template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
{ return _mm_shuffle_ps(a,a,0x1B); }
template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
{ return _mm_shuffle_pd(a,a,0x1); }
template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
{ return _mm_shuffle_epi32(a,0x1B); }

template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
{
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
  return _mm_and_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
{
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
  return _mm_and_pd(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
{
  #ifdef EIGEN_VECTORIZE_SSSE3
  return _mm_abs_epi32(a);
  #else
  Packet4i aux = _mm_srai_epi32(a,31);
  return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
  #endif
}

// with AVX, the default implementations based on pload1 are faster
#ifndef __AVX__
template<> EIGEN_STRONG_INLINE void
pbroadcast4<Packet4f>(const float *a,
                      Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
{
  a3 = pload<Packet4f>(a);
  a0 = vec4f_swizzle1(a3, 0,0,0,0);
  a1 = vec4f_swizzle1(a3, 1,1,1,1);
  a2 = vec4f_swizzle1(a3, 2,2,2,2);
  a3 = vec4f_swizzle1(a3, 3,3,3,3);
}
template<> EIGEN_STRONG_INLINE void
pbroadcast4<Packet2d>(const double *a,
                      Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3)
{
#ifdef EIGEN_VECTORIZE_SSE3
  a0 = _mm_loaddup_pd(a+0);
  a1 = _mm_loaddup_pd(a+1);
  a2 = _mm_loaddup_pd(a+2);
  a3 = _mm_loaddup_pd(a+3);
#else
  a1 = pload<Packet2d>(a);
  a0 = vec2d_swizzle1(a1, 0,0);
  a1 = vec2d_swizzle1(a1, 1,1);
  a3 = pload<Packet2d>(a+2);
  a2 = vec2d_swizzle1(a3, 0,0);
  a3 = vec2d_swizzle1(a3, 1,1);
#endif
}
#endif

EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs)
{
  vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55));
  vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA));
  vecs[3] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xFF));
  vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
}

#ifdef EIGEN_VECTORIZE_SSE3
template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3]));
}
template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
  return _mm_hadd_pd(vecs[0], vecs[1]);
}

template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  Packet4f tmp0 = _mm_hadd_ps(a,a);
  return pfirst<Packet4f>(_mm_hadd_ps(tmp0, tmp0));
}

template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return pfirst<Packet2d>(_mm_hadd_pd(a, a)); }
#else
// SSE2 versions
template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
{
  return pfirst<Packet2d>(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
}

template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  Packet4f tmp0, tmp1, tmp2;
  tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);
  tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);
  tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);
  tmp0 = _mm_add_ps(tmp0, tmp1);
  tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);
  tmp1 = _mm_add_ps(tmp1, tmp2);
  tmp2 = _mm_movehl_ps(tmp1, tmp0);
  tmp0 = _mm_movelh_ps(tmp0, tmp1);
  return _mm_add_ps(tmp0, tmp2);
}

template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
  return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
}
#endif // SSE3
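
// Worked example (illustrative): on SSE2, predux<Packet4f> reduces in two steps:
//   a                   = [a0 a1 a2 a3]
//   _mm_movehl_ps(a,a)  = [a2 a3 a2 a3]
//   tmp = a + movehl    = [a0+a2  a1+a3  ...]
//   lane0(tmp) + lane1(tmp) = a0+a2+a1+a3  -> the horizontal sum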


#ifdef EIGEN_VECTORIZE_SSSE3
template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
{
  return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3]));
}
template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
  Packet4i tmp0 = _mm_hadd_epi32(a,a);
  return pfirst<Packet4i>(_mm_hadd_epi32(tmp0,tmp0));
}
#else
template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
  Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
  return pfirst(tmp) + pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1));
}

template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
{
  Packet4i tmp0, tmp1, tmp2;
  tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
  tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
  tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
  tmp0 = _mm_add_epi32(tmp0, tmp1);
  tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
  tmp1 = _mm_add_epi32(tmp1, tmp2);
  tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);
  tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);
  return _mm_add_epi32(tmp0, tmp2);
}
#endif
// Other reduction functions:

// mul
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
{
  return pfirst<Packet2d>(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
{
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., reusing pmul is very slow!)
  // TODO try to call _mm_mul_epu32 directly
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  return (aux[0] * aux[1]) * (aux[2] * aux[3]);
}

// min
template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
{
  return pfirst<Packet2d>(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  Packet4i tmp = _mm_min_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
  return pfirst<Packet4i>(_mm_min_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));
#else
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., it does not like using std::min after the pstore!)
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
  int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
  return aux0<aux2 ? aux0 : aux2;
#endif // EIGEN_VECTORIZE_SSE4_1
}

// max
template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
{
  return pfirst<Packet2d>(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  Packet4i tmp = _mm_max_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
  return pfirst<Packet4i>(_mm_max_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));
#else
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., it does not like using std::max after the pstore!)
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
  int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
  return aux0>aux2 ? aux0 : aux2;
#endif // EIGEN_VECTORIZE_SSE4_1
}

#if EIGEN_COMP_GNUC
// template <> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c)
// {
//   Packet4f res = b;
//   asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c));
//   return res;
// }
// EIGEN_STRONG_INLINE Packet4i _mm_alignr_epi8(const Packet4i& a, const Packet4i& b, const int i)
// {
//   Packet4i res = a;
//   asm("palignr %[i], %[a], %[b] " : [b] "+x" (res) : [a] "x" (a), [i] "i" (i));
//   return res;
// }
#endif

#ifdef EIGEN_VECTORIZE_SSSE3
// SSSE3 versions
template<int Offset>
struct palign_impl<Offset,Packet4f>
{
  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
  {
    if (Offset!=0)
      first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4));
  }
};

template<int Offset>
struct palign_impl<Offset,Packet4i>
{
  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
  {
    if (Offset!=0)
      first = _mm_alignr_epi8(second,first, Offset*4);
  }
};

template<int Offset>
struct palign_impl<Offset,Packet2d>
{
  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
  {
    if (Offset==1)
      first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8));
  }
};
#else
// SSE2 versions
template<int Offset>
struct palign_impl<Offset,Packet4f>
{
  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
  {
    if (Offset==1)
    {
      first = _mm_move_ss(first,second);
      first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39));
    }
    else if (Offset==2)
    {
      first = _mm_movehl_ps(first,first);
      first = _mm_movelh_ps(first,second);
    }
    else if (Offset==3)
    {
      first = _mm_move_ss(first,second);
      first = _mm_shuffle_ps(first,second,0x93);
    }
  }
};

template<int Offset>
struct palign_impl<Offset,Packet4i>
{
  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
  {
    if (Offset==1)
    {
      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
      first = _mm_shuffle_epi32(first,0x39);
    }
    else if (Offset==2)
    {
      first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first)));
      first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
    }
    else if (Offset==3)
    {
      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
      first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93));
    }
  }
};

template<int Offset>
struct palign_impl<Offset,Packet2d>
{
  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
  {
    if (Offset==1)
    {
      first = _mm_castps_pd(_mm_movehl_ps(_mm_castpd_ps(first),_mm_castpd_ps(first)));
      first = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(first),_mm_castpd_ps(second)));
    }
  }
};
#endif
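
// Illustrative note (comment only): palign_impl shifts a two-packet window by
// Offset lanes, e.g. with first=[f0 f1 f2 f3] and second=[s0 s1 s2 s3]:
//   palign_impl<1,Packet4f>::run(first, second);  // first becomes [f1 f2 f3 s0]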

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4f,4>& kernel) {
  _MM_TRANSPOSE4_PS(kernel.packet[0], kernel.packet[1], kernel.packet[2], kernel.packet[3]);
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet2d,2>& kernel) {
  __m128d tmp = _mm_unpackhi_pd(kernel.packet[0], kernel.packet[1]);
  kernel.packet[0] = _mm_unpacklo_pd(kernel.packet[0], kernel.packet[1]);
  kernel.packet[1] = tmp;
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4i,4>& kernel) {
  __m128i T0 = _mm_unpacklo_epi32(kernel.packet[0], kernel.packet[1]);
  __m128i T1 = _mm_unpacklo_epi32(kernel.packet[2], kernel.packet[3]);
  __m128i T2 = _mm_unpackhi_epi32(kernel.packet[0], kernel.packet[1]);
  __m128i T3 = _mm_unpackhi_epi32(kernel.packet[2], kernel.packet[3]);

  kernel.packet[0] = _mm_unpacklo_epi64(T0, T1);
  kernel.packet[1] = _mm_unpackhi_epi64(T0, T1);
  kernel.packet[2] = _mm_unpacklo_epi64(T2, T3);
  kernel.packet[3] = _mm_unpackhi_epi64(T2, T3);
}
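
// Worked example (illustrative): treating the four packets as the rows of a
// 4x4 matrix, ptranspose swaps rows and columns in registers:
//   packet[0]=[a0 a1 a2 a3] ... packet[3]=[d0 d1 d2 d3]
// becomes
//   packet[0]=[a0 b0 c0 d0] ... packet[3]=[a3 b3 c3 d3]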

template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i select = _mm_set_epi32(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m128i false_mask = _mm_cmpeq_epi32(select, zero);
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blendv_epi8(thenPacket, elsePacket, false_mask);
#else
  return _mm_or_si128(_mm_andnot_si128(false_mask, thenPacket), _mm_and_si128(false_mask, elsePacket));
#endif
}
template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) {
  const __m128 zero = _mm_setzero_ps();
  const __m128 select = _mm_set_ps(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m128 false_mask = _mm_cmpeq_ps(select, zero);
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blendv_ps(thenPacket, elsePacket, false_mask);
#else
  return _mm_or_ps(_mm_andnot_ps(false_mask, thenPacket), _mm_and_ps(false_mask, elsePacket));
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) {
  const __m128d zero = _mm_setzero_pd();
  const __m128d select = _mm_set_pd(ifPacket.select[1], ifPacket.select[0]);
  __m128d false_mask = _mm_cmpeq_pd(select, zero);
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blendv_pd(thenPacket, elsePacket, false_mask);
#else
  return _mm_or_pd(_mm_andnot_pd(false_mask, thenPacket), _mm_and_pd(false_mask, elsePacket));
#endif
}
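
// Hedged usage sketch (comment only, assuming the Selector definition from
// GenericPacketMath.h): pblend performs a per-lane select, keeping thenPacket
// lanes where the selector is true:
//   Selector<4> s = {{true, false, false, true}};
//   pblend(s, t, e);  // -> [t0 e1 e2 t3]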

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PACKET_MATH_SSE_H