#ifndef EIGEN_PACKET_MATH_NEON_H
#define EIGEN_PACKET_MATH_NEON_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif

#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
#endif

#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
#endif

typedef float32x2_t Packet2f;
typedef float32x4_t Packet4f;
typedef int32x4_t   Packet4i;
typedef int32x2_t   Packet2i;
typedef uint32x4_t  Packet4ui;
#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
  const Packet4f p4f_##NAME = pset1<Packet4f>(X)

#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
  const Packet4f p4f_##NAME = vreinterpretq_f32_s32(pset1<Packet4i>(X))

#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
  const Packet4i p4i_##NAME = pset1<Packet4i>(X)

#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC
  #define EIGEN_ARM_PREFETCH(ADDR) __builtin_prefetch(ADDR);
#elif defined __pld
  #define EIGEN_ARM_PREFETCH(ADDR) __pld(ADDR)
#elif !EIGEN_ARCH_ARM64
  #define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__ ( "   pld [%[addr]]\n" :: [addr] "r" (ADDR) : "cc" );
#else
  // by default no explicit prefetching
  #define EIGEN_ARM_PREFETCH(ADDR)
#endif

template<> struct packet_traits<float> : default_packet_traits
{
  typedef Packet4f type;
  typedef Packet4f half; // Packet2f intrinsics not implemented yet
  enum
  {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,
    HasHalfPacket = 0, // Packet2f intrinsics not implemented yet
    HasDiv = 1
  };
};

template<> struct packet_traits<int> : default_packet_traits
{
  typedef Packet4i type;
  typedef Packet4i half; // Packet2i intrinsics not implemented yet
  enum
  {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,
    HasHalfPacket = 0
  };
};
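
// packet_traits<T> advertises this backend to Eigen's generic vectorization
// layer: `type` names the SIMD register type used for T, `size` its lane
// count, and the Has* flags mark which coefficient-wise operations have a
// vectorized path here.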
#if EIGEN_GNUC_AT_MOST(4,4) && !EIGEN_COMP_LLVM
// workaround for gcc 4.2, 4.3 and 4.4, whose NEON load/store intrinsics are
// declared in terms of float32_t rather than float
EIGEN_STRONG_INLINE float32x4_t vld1q_f32(const float* x) { return ::vld1q_f32((const float32_t*)x); }
EIGEN_STRONG_INLINE float32x2_t vld1_f32(const float* x) { return ::vld1_f32((const float32_t*)x); }
EIGEN_STRONG_INLINE float32x2_t vld1_dup_f32(const float* x) { return ::vld1_dup_f32((const float32_t*)x); }
EIGEN_STRONG_INLINE void vst1q_f32(float* to, float32x4_t from) { ::vst1q_f32((float32_t*)to,from); }
EIGEN_STRONG_INLINE void vst1_f32(float* to, float32x2_t from) { ::vst1_f32((float32_t*)to,from); }
#endif
template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };
template<> struct unpacket_traits<Packet4i> { typedef int   type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return vdupq_n_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&   from) { return vdupq_n_s32(from); }
template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a)
{
  const float32_t f[] = {0, 1, 2, 3};
  Packet4f countdown = vld1q_f32(f);
  return vaddq_f32(pset1<Packet4f>(a), countdown);
}

template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a)
{
  const int32_t i[] = {0, 1, 2, 3};
  Packet4i countdown = vld1q_s32(i);
  return vaddq_s32(pset1<Packet4i>(a), countdown);
}
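
// plset produces an arithmetic sequence starting at `a` with one step per
// lane, e.g. plset<Packet4f>(10.f) == {10, 11, 12, 13}; Eigen relies on this
// for vectorizing LinSpaced-style expressions.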
template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return vaddq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return vaddq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return vsubq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return vsubq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return vnegq_f32(a); }
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return vnegq_s32(a); }

template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmulq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmulq_s32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
{
#if EIGEN_ARCH_ARM64
  // ARMv8 provides a native vector divide.
  return vdivq_f32(a,b);
#else
  Packet4f inv, restep, div;

  // NEON does not offer a divide instruction; we have to use a reciprocal
  // approximation. However, in contrast to other SIMD engines (AltiVec/SSE),
  // NEON offers both a reciprocal estimate AND a reciprocal step, which saves
  // a few instructions. vrecpeq_f32() returns an estimate of 1/b, which we
  // refine with one Newton-Raphson iteration via vrecpsq_f32().
  inv = vrecpeq_f32(b);

  // vrecpsq_f32() returns the correction factor by which we multiply inv to
  // get a better approximation of 1/b.
  restep = vrecpsq_f32(b, inv);
  inv = vmulq_f32(restep, inv);

  // Finally, multiply a by 1/b to obtain the quotient.
  div = vmulq_f32(a, inv);

  return div;
#endif
}

template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
{
  eigen_assert(false && "packet integer division is not supported by NEON");
  return pset1<Packet4i>(0);
}
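
// Note on the reciprocal refinement in pdiv above: vrecpsq_f32(b, x) computes
// the correction term (2 - b*x), so
//   x1 = x0 * (2 - b*x0)
// is one Newton-Raphson iteration on f(x) = 1/x - b, starting from the ~8-bit
// estimate x0 = vrecpeq_f32(b) and roughly doubling the number of correct
// bits; a second vrecpsq/vmulq pair could be appended if more accuracy were
// required.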
// Clang/ARM wrongly advertises __ARM_FEATURE_FMA even when the target has no
// fused multiply-add, in which case MLA is not fused, i.e. it performs two
// roundings.
#if (defined __ARM_FEATURE_FMA) && !(EIGEN_COMP_CLANG && EIGEN_ARCH_ARM)
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vfmaq_f32(c,a,b); }
#else
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) {
#if EIGEN_COMP_CLANG && EIGEN_ARCH_ARM
  // Clang/ARM may lower vmlaq_f32 to a separate VMUL+VADD for some -mcpu
  // values, so emit the multiply-accumulate directly as inline assembly.
  Packet4f r = c;
  asm volatile(
    "vmla.f32 %q[r], %q[a], %q[b]"
    : [r] "+w" (r)
    : [a] "w" (a),
      [b] "w" (b)
    : );
  return r;
#else
  return vmlaq_f32(c,a,b);
#endif
}
#endif

// No FMA instruction for int, so use MLA unconditionally.
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return vmlaq_s32(c,a,b); }
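
// pmadd(a,b,c) computes a*b + c per lane. With a true FMA (vfmaq_f32) the
// product is not rounded before the addition, which is both faster and more
// accurate than the separate multiply and add that a non-fused MLA can decay
// into; hence the careful compiler-feature gating above.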
template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vminq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmaxq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmaxq_s32(a,b); }
// NEON has no bitwise operations on float vectors, so reinterpret-cast to and
// from the unsigned integer domain.
template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
}
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vandq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
}
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vorrq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
}
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return veorq_s32(a,b); }

// pandnot(a,b) = a & ~b, which maps directly onto NEON's bit-clear instruction.
template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
}
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vbicq_s32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int*   from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); }

template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int*   from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); }
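
// The aligned and unaligned paths use the same intrinsic: vld1q only requires
// element alignment, so NEON needs no dedicated unaligned-load instruction.
// The EIGEN_DEBUG_*_LOAD markers are instrumentation hooks that expand to
// nothing by default.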
template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
{
  float32x2_t lo, hi;
  lo = vld1_dup_f32(from);
  hi = vld1_dup_f32(from+1);
  return vcombine_f32(lo, hi);
}

template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
{
  int32x2_t lo, hi;
  lo = vld1_dup_s32(from);
  hi = vld1_dup_s32(from+1);
  return vcombine_s32(lo, hi);
}
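
// ploaddup reads two scalars and duplicates each into adjacent lanes:
// ploaddup<Packet4f>(p) == {p[0], p[0], p[1], p[1]}.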
template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>(int*     to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to, from); }

template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*     to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }
template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
{
  Packet4f res = pset1<Packet4f>(0.f);
  res = vsetq_lane_f32(from[0*stride], res, 0);
  res = vsetq_lane_f32(from[1*stride], res, 1);
  res = vsetq_lane_f32(from[2*stride], res, 2);
  res = vsetq_lane_f32(from[3*stride], res, 3);
  return res;
}

template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
{
  Packet4i res = pset1<Packet4i>(0);
  res = vsetq_lane_s32(from[0*stride], res, 0);
  res = vsetq_lane_s32(from[1*stride], res, 1);
  res = vsetq_lane_s32(from[2*stride], res, 2);
  res = vsetq_lane_s32(from[3*stride], res, 3);
  return res;
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
{
  to[stride*0] = vgetq_lane_f32(from, 0);
  to[stride*1] = vgetq_lane_f32(from, 1);
  to[stride*2] = vgetq_lane_f32(from, 2);
  to[stride*3] = vgetq_lane_f32(from, 3);
}

template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
{
  to[stride*0] = vgetq_lane_s32(from, 0);
  to[stride*1] = vgetq_lane_s32(from, 1);
  to[stride*2] = vgetq_lane_s32(from, 2);
  to[stride*3] = vgetq_lane_s32(from, 3);
}
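
// pgather/pscatter provide strided access, e.g. for walking a column of a
// row-major matrix. A hypothetical example with a row-major 4x4 array m:
//   Packet4f col1 = pgather<float, Packet4f>(&m[1], 4); // {m[1], m[5], m[9], m[13]}
// The lane-by-lane vsetq_lane/vgetq_lane sequences are the natural encoding,
// since NEON has no native gather or scatter instructions.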
template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { EIGEN_ARM_PREFETCH(addr); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*     addr) { EIGEN_ARM_PREFETCH(addr); }

template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; }
template<> EIGEN_STRONG_INLINE int   pfirst<Packet4i>(const Packet4i& a) { int   EIGEN_ALIGN16 x[4]; vst1q_s32(x, a); return x[0]; }
template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) {
  float32x2_t a_lo, a_hi;
  Packet4f a_r64;

  a_r64 = vrev64q_f32(a);
  a_lo = vget_low_f32(a_r64);
  a_hi = vget_high_f32(a_r64);
  return vcombine_f32(a_hi, a_lo);
}
template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) {
  int32x2_t a_lo, a_hi;
  Packet4i a_r64;

  a_r64 = vrev64q_s32(a);
  a_lo = vget_low_s32(a_r64);
  a_hi = vget_high_s32(a_r64);
  return vcombine_s32(a_hi, a_lo);
}
template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vabsq_f32(a); }
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vabsq_s32(a); }
template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  float32x2_t a_lo, a_hi, sum;

  a_lo = vget_low_f32(a);
  a_hi = vget_high_f32(a);
  sum = vpadd_f32(a_lo, a_hi);
  sum = vpadd_f32(sum, sum);
  return vget_lane_f32(sum, 0);
}
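
// The sum reduction relies on the pairwise-add semantics of vpadd:
//   vpadd_f32(a_lo, a_hi) == {a0+a1, a2+a3}
//   vpadd_f32(sum, sum)   == {a0+a1+a2+a3, a0+a1+a2+a3}
// after which lane 0 holds the total.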
template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  float32x4x2_t vtrn1, vtrn2, res1, res2;
  Packet4f sum1, sum2, sum;

  // NEON zip performs interleaving of the supplied vectors; two rounds of
  // interleaving yield the transposed vectors.
  vtrn1 = vzipq_f32(vecs[0], vecs[2]);
  vtrn2 = vzipq_f32(vecs[1], vecs[3]);
  res1 = vzipq_f32(vtrn1.val[0], vtrn2.val[0]);
  res2 = vzipq_f32(vtrn1.val[1], vtrn2.val[1]);

  // Add the resulting vectors.
  sum1 = vaddq_f32(res1.val[0], res1.val[1]);
  sum2 = vaddq_f32(res2.val[0], res2.val[1]);
  sum = vaddq_f32(sum1, sum2);

  return sum;
}
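
// The two rounds of vzipq above amount to a 4x4 transpose of vecs[0..3];
// summing the four transposed rows then leaves in lane i the total of
// vecs[i], which is exactly preduxp's contract:
//   preduxp(vecs) == {predux(vecs[0]), ..., predux(vecs[3])}.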
template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
  int32x2_t a_lo, a_hi, sum;

  a_lo = vget_low_s32(a);
  a_hi = vget_high_s32(a);
  sum = vpadd_s32(a_lo, a_hi);
  sum = vpadd_s32(sum, sum);
  return vget_lane_s32(sum, 0);
}

template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
{
  int32x4x2_t vtrn1, vtrn2, res1, res2;
  Packet4i sum1, sum2, sum;

  // Same zip-based transpose as in the float case above.
  vtrn1 = vzipq_s32(vecs[0], vecs[2]);
  vtrn2 = vzipq_s32(vecs[1], vecs[3]);
  res1 = vzipq_s32(vtrn1.val[0], vtrn2.val[0]);
  res2 = vzipq_s32(vtrn1.val[1], vtrn2.val[1]);

  sum1 = vaddq_s32(res1.val[0], res1.val[1]);
  sum2 = vaddq_s32(res2.val[0], res2.val[1]);
  sum = vaddq_s32(sum1, sum2);

  return sum;
}
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{
  float32x2_t a_lo, a_hi, prod;

  a_lo = vget_low_f32(a);
  a_hi = vget_high_f32(a);
  prod = vmul_f32(a_lo, a_hi);
  prod = vmul_f32(prod, vrev64_f32(prod));

  return vget_lane_f32(prod, 0);
}

template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
{
  int32x2_t a_lo, a_hi, prod;

  a_lo = vget_low_s32(a);
  a_hi = vget_high_s32(a);
  prod = vmul_s32(a_lo, a_hi);
  prod = vmul_s32(prod, vrev64_s32(prod));

  return vget_lane_s32(prod, 0);
}
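
// Product reduction in two multiplies: prod == {a0*a2, a1*a3}, and
// multiplying by its own 64-bit reversal {a1*a3, a0*a2} leaves a0*a1*a2*a3 in
// both lanes. NEON has no pairwise-multiply counterpart to vpadd, hence the
// vrev64 trick instead of a second pairwise step.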
template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
{
  float32x2_t a_lo, a_hi, min;

  a_lo = vget_low_f32(a);
  a_hi = vget_high_f32(a);
  min = vpmin_f32(a_lo, a_hi);
  min = vpmin_f32(min, min);

  return vget_lane_f32(min, 0);
}

template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
{
  int32x2_t a_lo, a_hi, min;

  a_lo = vget_low_s32(a);
  a_hi = vget_high_s32(a);
  min = vpmin_s32(a_lo, a_hi);
  min = vpmin_s32(min, min);

  return vget_lane_s32(min, 0);
}

template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
{
  float32x2_t a_lo, a_hi, max;

  a_lo = vget_low_f32(a);
  a_hi = vget_high_f32(a);
  max = vpmax_f32(a_lo, a_hi);
  max = vpmax_f32(max, max);

  return vget_lane_f32(max, 0);
}

template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
{
  int32x2_t a_lo, a_hi, max;

  a_lo = vget_low_s32(a);
  a_hi = vget_high_s32(a);
  max = vpmax_s32(a_lo, a_hi);
  max = vpmax_s32(max, max);

  return vget_lane_s32(max, 0);
}
#define PALIGN_NEON(Offset,Type,Command) \
template<>\
struct palign_impl<Offset,Type>\
{\
    EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\
    {\
        if (Offset!=0)\
            first = Command(first, second, Offset);\
    }\
};\

PALIGN_NEON(0,Packet4f,vextq_f32)
PALIGN_NEON(1,Packet4f,vextq_f32)
PALIGN_NEON(2,Packet4f,vextq_f32)
PALIGN_NEON(3,Packet4f,vextq_f32)
PALIGN_NEON(0,Packet4i,vextq_s32)
PALIGN_NEON(1,Packet4i,vextq_s32)
PALIGN_NEON(2,Packet4i,vextq_s32)
PALIGN_NEON(3,Packet4i,vextq_s32)

#undef PALIGN_NEON
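
// palign_impl shifts a two-packet sliding window: vextq_f32(first, second, n)
// concatenates the eight lanes and extracts four starting at lane n, e.g.
// n==1 on {a0,a1,a2,a3},{b0,b1,b2,b3} yields {a1,a2,a3,b0}. Eigen uses this
// to realign packets when source and destination alignments differ.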
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4f,4>& kernel) {
  float32x4x2_t tmp1 = vzipq_f32(kernel.packet[0], kernel.packet[1]);
  float32x4x2_t tmp2 = vzipq_f32(kernel.packet[2], kernel.packet[3]);

  kernel.packet[0] = vcombine_f32(vget_low_f32(tmp1.val[0]),  vget_low_f32(tmp2.val[0]));
  kernel.packet[1] = vcombine_f32(vget_high_f32(tmp1.val[0]), vget_high_f32(tmp2.val[0]));
  kernel.packet[2] = vcombine_f32(vget_low_f32(tmp1.val[1]),  vget_low_f32(tmp2.val[1]));
  kernel.packet[3] = vcombine_f32(vget_high_f32(tmp1.val[1]), vget_high_f32(tmp2.val[1]));
}
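
// With input rows a,b,c,d: tmp1 == {{a0,b0,a1,b1},{a2,b2,a3,b3}} and tmp2
// likewise for c,d; recombining the low/high halves produces the columns
// {a0,b0,c0,d0} ... {a3,b3,c3,d3}, i.e. an in-register 4x4 transpose in four
// zips plus four combines.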
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4i,4>& kernel) {
  int32x4x2_t tmp1 = vzipq_s32(kernel.packet[0], kernel.packet[1]);
  int32x4x2_t tmp2 = vzipq_s32(kernel.packet[2], kernel.packet[3]);

  kernel.packet[0] = vcombine_s32(vget_low_s32(tmp1.val[0]),  vget_low_s32(tmp2.val[0]));
  kernel.packet[1] = vcombine_s32(vget_high_s32(tmp1.val[0]), vget_high_s32(tmp2.val[0]));
  kernel.packet[2] = vcombine_s32(vget_low_s32(tmp1.val[1]),  vget_low_s32(tmp2.val[1]));
  kernel.packet[3] = vcombine_s32(vget_high_s32(tmp1.val[1]), vget_high_s32(tmp2.val[1]));
}
//---------- double ----------

// Clang 3.5 in the iOS toolchain has an ICE triggered by NEON intrinsics for double.
// Confirmed at least with __apple_build_version__ = 6000054.
#ifdef __apple_build_version__
#define EIGEN_APPLE_DOUBLE_NEON_BUG (__apple_build_version__ < 6010000)
#else
#define EIGEN_APPLE_DOUBLE_NEON_BUG 0
#endif

#if EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG

// Defining these two reinterpret functions as templates ensures that if the
// intrinsics are already declared in arm_neon.h, our fallbacks have lower
// priority in overload resolution and do not conflict.
template <typename T>
uint64x2_t vreinterpretq_u64_f64(T a)
{
  return (uint64x2_t) a;
}

template <typename T>
float64x2_t vreinterpretq_f64_u64(T a)
{
  return (float64x2_t) a;
}

typedef float64x2_t Packet2d;
typedef float64x1_t Packet1d;
template<> struct packet_traits<double> : default_packet_traits
{
  typedef Packet2d type;
  typedef Packet2d half;
  enum
  {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 2,
    HasHalfPacket = 0,
    HasDiv = 1
  };
};
template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; };
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return vdupq_n_f64(from); }

template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a)
{
  const double countdown_raw[] = {0.0, 1.0};
  const Packet2d countdown = vld1q_f64(countdown_raw);
  return vaddq_f64(pset1<Packet2d>(a), countdown);
}
template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return vaddq_f64(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return vsubq_f64(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) { return vnegq_f64(a); }

template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return vmulq_f64(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return vdivq_f64(a,b); }
#ifdef __ARM_FEATURE_FMA
// See the comment above pmadd for Packet4f: prefer a true fused multiply-add
// when the target provides one.
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vfmaq_f64(c,a,b); }
#else
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vmlaq_f64(c,a,b); }
#endif
template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return vminq_f64(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return vmaxq_f64(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b)
{
  return vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
}

template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b)
{
  return vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
}

template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b)
{
  return vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
}

template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b)
{
  return vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
}
template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f64(from); }

template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f64(from); }

template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
{
  return vld1q_dup_f64(from);
}
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f64(to, from); }

template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f64(to, from); }
template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
{
  Packet2d res = pset1<Packet2d>(0.0);
  res = vsetq_lane_f64(from[0*stride], res, 0);
  res = vsetq_lane_f64(from[1*stride], res, 1);
  return res;
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
{
  to[stride*0] = vgetq_lane_f64(from, 0);
  to[stride*1] = vgetq_lane_f64(from, 1);
}
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { EIGEN_ARM_PREFETCH(addr); }

template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(a, 0); }

template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) { return vcombine_f64(vget_high_f64(a), vget_low_f64(a)); }

template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) { return vabsq_f64(a); }
#if EIGEN_COMP_CLANG && defined(__apple_build_version__)
// workaround ICE, see bug 907
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return (vget_low_f64(a) + vget_high_f64(a))[0]; }
#else
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return vget_lane_f64(vget_low_f64(a) + vget_high_f64(a), 0); }
#endif
template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
  float64x2_t trn1, trn2;

  // A 2x2 transpose via zip1/zip2, followed by a single addition.
  trn1 = vzip1q_f64(vecs[0], vecs[1]);
  trn2 = vzip2q_f64(vecs[0], vecs[1]);

  return vaddq_f64(trn1, trn2);
}
#if EIGEN_COMP_CLANG && defined(__apple_build_version__)
// workaround ICE, see bug 907
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a) { return (vget_low_f64(a) * vget_high_f64(a))[0]; }
#else
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a) { return vget_lane_f64(vget_low_f64(a) * vget_high_f64(a), 0); }
#endif
template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(vpminq_f64(a, a), 0); }

template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(vpmaxq_f64(a, a), 0); }
#define PALIGN_NEON(Offset,Type,Command) \
template<>\
struct palign_impl<Offset,Type>\
{\
    EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\
    {\
        if (Offset!=0)\
            first = Command(first, second, Offset);\
    }\
};\

PALIGN_NEON(0,Packet2d,vextq_f64)
PALIGN_NEON(1,Packet2d,vextq_f64)
#undef PALIGN_NEON
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet2d,2>& kernel) {
  float64x2_t trn1 = vzip1q_f64(kernel.packet[0], kernel.packet[1]);
  float64x2_t trn2 = vzip2q_f64(kernel.packet[0], kernel.packet[1]);

  kernel.packet[0] = trn1;
  kernel.packet[1] = trn2;
}
#endif // EIGEN_ARCH_ARM64

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PACKET_MATH_NEON_H