Medial Code Documentation
PacketMath.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner (benoit.steiner.goog@gmail.com)
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PACKET_MATH_AVX_H
#define EIGEN_PACKET_MATH_AVX_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#if !defined(EIGEN_VECTORIZE_AVX512) && !defined(EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS)
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
#endif

#ifdef EIGEN_VECTORIZE_FMA
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif
#endif

typedef __m256  Packet8f;
typedef __m256i Packet8i;
typedef __m256d Packet4d;
typedef eigen_packet_wrapper<__m128i, 2> Packet8h;
typedef eigen_packet_wrapper<__m128i, 3> Packet8bf;

template<> struct is_arithmetic<__m256>  { enum { value = true }; };
template<> struct is_arithmetic<__m256i> { enum { value = true }; };
template<> struct is_arithmetic<__m256d> { enum { value = true }; };
template<> struct is_arithmetic<Packet8h> { enum { value = true }; };
template<> struct is_arithmetic<Packet8bf> { enum { value = true }; };

#define _EIGEN_DECLARE_CONST_Packet8f(NAME,X) \
  const Packet8f p8f_##NAME = pset1<Packet8f>(X)

#define _EIGEN_DECLARE_CONST_Packet4d(NAME,X) \
  const Packet4d p4d_##NAME = pset1<Packet4d>(X)

#define _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(NAME,X) \
  const Packet8f p8f_##NAME = _mm256_castsi256_ps(pset1<Packet8i>(X))

#define _EIGEN_DECLARE_CONST_Packet8i(NAME,X) \
  const Packet8i p8i_##NAME = pset1<Packet8i>(X)

// Use the packet_traits defined in AVX512/PacketMath.h instead if we're going
// to leverage AVX512 instructions.
#ifndef EIGEN_VECTORIZE_AVX512
template<> struct packet_traits<float> : default_packet_traits
{
  typedef Packet8f type;
  typedef Packet4f half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 8,
    HasHalfPacket = 1,

    HasCmp = 1,
    HasDiv = 1,
    HasSin = EIGEN_FAST_MATH,
    HasCos = EIGEN_FAST_MATH,
    HasLog = 1,
    HasLog1p = 1,
    HasExpm1 = 1,
    HasExp = 1,
    HasNdtri = 1,
    HasBessel = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasTanh = EIGEN_FAST_MATH,
    HasErf = EIGEN_FAST_MATH,
    HasBlend = 1,
    HasRound = 1,
    HasFloor = 1,
    HasCeil = 1,
    HasRint = 1
  };
};
template<> struct packet_traits<double> : default_packet_traits
{
  typedef Packet4d type;
  typedef Packet2d half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=4,
    HasHalfPacket = 1,

    HasCmp = 1,
    HasDiv = 1,
    HasLog = 1,
    HasExp = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasBlend = 1,
    HasRound = 1,
    HasFloor = 1,
    HasCeil = 1,
    HasRint = 1
  };
};

template <>
struct packet_traits<Eigen::half> : default_packet_traits {
  typedef Packet8h type;
  // There is no half-size packet for Packet8h.
  typedef Packet8h half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 8,
    HasHalfPacket = 0,

    HasCmp = 1,
    HasAdd = 1,
    HasSub = 1,
    HasMul = 1,
    HasDiv = 1,
    HasSin = EIGEN_FAST_MATH,
    HasCos = EIGEN_FAST_MATH,
    HasNegate = 1,
    HasAbs = 1,
    HasAbs2 = 0,
    HasMin = 1,
    HasMax = 1,
    HasConj = 1,
    HasSetLinear = 0,
    HasLog = 1,
    HasLog1p = 1,
    HasExpm1 = 1,
    HasExp = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasTanh = EIGEN_FAST_MATH,
    HasErf = EIGEN_FAST_MATH,
    HasBlend = 0,
    HasRound = 1,
    HasFloor = 1,
    HasCeil = 1,
    HasRint = 1,
    HasBessel = 1,
    HasNdtri = 1
  };
};

template <>
struct packet_traits<bfloat16> : default_packet_traits {
  typedef Packet8bf type;
  // There is no half-size packet for current Packet8bf.
  // TODO: support as SSE path.
  typedef Packet8bf half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 8,
    HasHalfPacket = 0,

    HasCmp = 1,
    HasAdd = 1,
    HasSub = 1,
    HasMul = 1,
    HasDiv = 1,
    HasSin = EIGEN_FAST_MATH,
    HasCos = EIGEN_FAST_MATH,
    HasNegate = 1,
    HasAbs = 1,
    HasAbs2 = 0,
    HasMin = 1,
    HasMax = 1,
    HasConj = 1,
    HasSetLinear = 0,
    HasLog = 1,
    HasLog1p = 1,
    HasExpm1 = 1,
    HasExp = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasTanh = EIGEN_FAST_MATH,
    HasErf = EIGEN_FAST_MATH,
    HasBlend = 0,
    HasRound = 1,
    HasFloor = 1,
    HasCeil = 1,
    HasRint = 1,
    HasBessel = 1,
    HasNdtri = 1
  };
};
#endif

template<> struct scalar_div_cost<float,true> { enum { value = 14 }; };
template<> struct scalar_div_cost<double,true> { enum { value = 16 }; };

/* Proper support for integers is only provided by AVX2. In the meantime, we'll
   use SSE instructions and packets to deal with integers.
template<> struct packet_traits<int> : default_packet_traits
{
  typedef Packet8i type;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=8
  };
};
*/

template<> struct unpacket_traits<Packet8f> {
  typedef float     type;
  typedef Packet4f  half;
  typedef Packet8i  integer_packet;
  typedef uint8_t   mask_t;
  enum {size=8, alignment=Aligned32, vectorizable=true, masked_load_available=true, masked_store_available=true};
};
template<> struct unpacket_traits<Packet4d> {
  typedef double type;
  typedef Packet2d half;
  enum {size=4, alignment=Aligned32, vectorizable=true, masked_load_available=false, masked_store_available=false};
};
template<> struct unpacket_traits<Packet8i> { typedef int type; typedef Packet4i half; enum {size=8, alignment=Aligned32, vectorizable=false, masked_load_available=false, masked_store_available=false}; };
template<> struct unpacket_traits<Packet8bf> { typedef bfloat16 type; typedef Packet8bf half; enum {size=8, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; };

// Helper function for the bit-packing snippet used by the low-precision
// comparisons: it packs the 8x32-bit comparison flags down to 8x16 bits.
EIGEN_STRONG_INLINE __m128i Pack16To8(Packet8f rf) {
  return _mm_packs_epi32(_mm256_extractf128_si256(_mm256_castps_si256(rf), 0),
                         _mm256_extractf128_si256(_mm256_castps_si256(rf), 1));
}


template<> EIGEN_STRONG_INLINE Packet8f pset1<Packet8f>(const float& from) { return _mm256_set1_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d pset1<Packet4d>(const double& from) { return _mm256_set1_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i pset1<Packet8i>(const int& from) { return _mm256_set1_epi32(from); }

template<> EIGEN_STRONG_INLINE Packet8f pset1frombits<Packet8f>(unsigned int from) { return _mm256_castsi256_ps(pset1<Packet8i>(from)); }
template<> EIGEN_STRONG_INLINE Packet4d pset1frombits<Packet4d>(uint64_t from) { return _mm256_castsi256_pd(_mm256_set1_epi64x(from)); }

template<> EIGEN_STRONG_INLINE Packet8f pzero(const Packet8f& /*a*/) { return _mm256_setzero_ps(); }
template<> EIGEN_STRONG_INLINE Packet4d pzero(const Packet4d& /*a*/) { return _mm256_setzero_pd(); }
template<> EIGEN_STRONG_INLINE Packet8i pzero(const Packet8i& /*a*/) { return _mm256_setzero_si256(); }


template<> EIGEN_STRONG_INLINE Packet8f peven_mask(const Packet8f& /*a*/) { return _mm256_castsi256_ps(_mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1)); }
template<> EIGEN_STRONG_INLINE Packet8i peven_mask(const Packet8i& /*a*/) { return _mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1); }
template<> EIGEN_STRONG_INLINE Packet4d peven_mask(const Packet4d& /*a*/) { return _mm256_castsi256_pd(_mm256_set_epi32(0, 0, -1, -1, 0, 0, -1, -1)); }

template<> EIGEN_STRONG_INLINE Packet8f pload1<Packet8f>(const float* from) { return _mm256_broadcast_ss(from); }
template<> EIGEN_STRONG_INLINE Packet4d pload1<Packet4d>(const double* from) { return _mm256_broadcast_sd(from); }

template<> EIGEN_STRONG_INLINE Packet8f plset<Packet8f>(const float& a) { return _mm256_add_ps(_mm256_set1_ps(a), _mm256_set_ps(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)); }
template<> EIGEN_STRONG_INLINE Packet4d plset<Packet4d>(const double& a) { return _mm256_add_pd(_mm256_set1_pd(a), _mm256_set_pd(3.0,2.0,1.0,0.0)); }

template<> EIGEN_STRONG_INLINE Packet8f padd<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_add_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d padd<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_add_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8i padd<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_add_epi32(a,b);
#else
  __m128i lo = _mm_add_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_add_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template<> EIGEN_STRONG_INLINE Packet8f psub<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_sub_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d psub<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_sub_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8i psub<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_sub_epi32(a,b);
#else
  __m128i lo = _mm_sub_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_sub_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template<> EIGEN_STRONG_INLINE Packet8f pnegate(const Packet8f& a)
{
  const Packet8f mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x80000000));
  return _mm256_xor_ps(a, mask);
}
template<> EIGEN_STRONG_INLINE Packet4d pnegate(const Packet4d& a)
{
  const Packet4d mask = _mm256_castsi256_pd(_mm256_set1_epi64x(0x8000000000000000ULL));
  return _mm256_xor_pd(a, mask);
}

template<> EIGEN_STRONG_INLINE Packet8f pconj(const Packet8f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4d pconj(const Packet4d& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet8i pconj(const Packet8i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet8f pmul<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_mul_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pmul<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_mul_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8i pmul<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_mullo_epi32(a,b);
#else
  const __m128i lo = _mm_mullo_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  const __m128i hi = _mm_mullo_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template<> EIGEN_STRONG_INLINE Packet8f pdiv<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_div_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pdiv<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_div_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8i pdiv<Packet8i>(const Packet8i& /*a*/, const Packet8i& /*b*/)
{
  eigen_assert(false && "packet integer division is not supported by AVX");
  return pset1<Packet8i>(0);
}

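// Illustrative sketch, not from the Eigen sources: how the primitives above
// compose into a lane-wise kernel. `paxpy8f` is a hypothetical helper; the
// _EIGEN_DECLARE_CONST_Packet8f macro expands to
// `const Packet8f p8f_alpha = pset1<Packet8f>(alpha);`.
EIGEN_STRONG_INLINE Packet8f paxpy8f(float alpha, const Packet8f& x, const Packet8f& y) {
  _EIGEN_DECLARE_CONST_Packet8f(alpha, alpha);  // broadcast alpha to all 8 lanes
  return padd(pmul(p8f_alpha, x), y);           // alpha*x + y, element-wise
}
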
#ifdef EIGEN_VECTORIZE_FMA
template<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
#if ( (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<80) || (EIGEN_COMP_CLANG) )
  // Clang stupidly generates a vfmadd213ps instruction plus some vmovaps on registers,
  // and even register spilling with clang>=6.0 (bug 1637).
  // Gcc stupidly generates a vfmadd132ps instruction.
  // So let's enforce it to generate a vfmadd231ps instruction since the most common use
  // case is to accumulate the result of the product.
  Packet8f res = c;
  __asm__("vfmadd231ps %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  return _mm256_fmadd_ps(a,b,c);
#endif
}
template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
#if ( (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<80) || (EIGEN_COMP_CLANG) )
  // see above
  Packet4d res = c;
  __asm__("vfmadd231pd %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  return _mm256_fmadd_pd(a,b,c);
#endif
}
#endif

template<> EIGEN_STRONG_INLINE Packet8f pcmp_le(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_LE_OQ); }
template<> EIGEN_STRONG_INLINE Packet8f pcmp_lt(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_LT_OQ); }
template<> EIGEN_STRONG_INLINE Packet8f pcmp_lt_or_nan(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a, b, _CMP_NGE_UQ); }
template<> EIGEN_STRONG_INLINE Packet8f pcmp_eq(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_EQ_OQ); }

template<> EIGEN_STRONG_INLINE Packet4d pcmp_le(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a,b,_CMP_LE_OQ); }
template<> EIGEN_STRONG_INLINE Packet4d pcmp_lt(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a,b,_CMP_LT_OQ); }
template<> EIGEN_STRONG_INLINE Packet4d pcmp_lt_or_nan(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a, b, _CMP_NGE_UQ); }
template<> EIGEN_STRONG_INLINE Packet4d pcmp_eq(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a,b,_CMP_EQ_OQ); }


template<> EIGEN_STRONG_INLINE Packet8i pcmp_eq(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_cmpeq_epi32(a,b);
#else
  __m128i lo = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template<> EIGEN_STRONG_INLINE Packet8f pmin<Packet8f>(const Packet8f& a, const Packet8f& b) {
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
  // There appears to be a bug in GCC, by which the optimizer may flip
  // the argument order in calls to _mm_min_ps/_mm_max_ps, so we have to
  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
  Packet8f res;
  asm("vminps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  // Arguments are swapped to match NaN propagation behavior of std::min.
  return _mm256_min_ps(b,a);
#endif
}
template<> EIGEN_STRONG_INLINE Packet4d pmin<Packet4d>(const Packet4d& a, const Packet4d& b) {
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
  // See pmin above
  Packet4d res;
  asm("vminpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  // Arguments are swapped to match NaN propagation behavior of std::min.
  return _mm256_min_pd(b,a);
#endif
}

template<> EIGEN_STRONG_INLINE Packet8f pmax<Packet8f>(const Packet8f& a, const Packet8f& b) {
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
  // See pmin above
  Packet8f res;
  asm("vmaxps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  // Arguments are swapped to match NaN propagation behavior of std::max.
  return _mm256_max_ps(b,a);
#endif
}
template<> EIGEN_STRONG_INLINE Packet4d pmax<Packet4d>(const Packet4d& a, const Packet4d& b) {
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
  // See pmin above
  Packet4d res;
  asm("vmaxpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  // Arguments are swapped to match NaN propagation behavior of std::max.
  return _mm256_max_pd(b,a);
#endif
}
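
// Illustrative sketch, not from the Eigen sources: the scalar behavior the
// swapped arguments reproduce. vminps returns its *second* operand whenever
// either input is NaN, so _mm256_min_ps(b,a) yields `a` in that case, which is
// exactly what std::min(a,b) does:
inline float min_like_vminps_ba(float a, float b) {
  return (b < a) ? b : a;  // NaN compares false, so `a` is returned on any NaN
}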

// Add specializations for min/max with prescribed NaN propagation.
template<>
EIGEN_STRONG_INLINE Packet8f pmin<PropagateNumbers, Packet8f>(const Packet8f& a, const Packet8f& b) {
  return pminmax_propagate_numbers(a, b, pmin<Packet8f>);
}
template<>
EIGEN_STRONG_INLINE Packet4d pmin<PropagateNumbers, Packet4d>(const Packet4d& a, const Packet4d& b) {
  return pminmax_propagate_numbers(a, b, pmin<Packet4d>);
}
template<>
EIGEN_STRONG_INLINE Packet8f pmax<PropagateNumbers, Packet8f>(const Packet8f& a, const Packet8f& b) {
  return pminmax_propagate_numbers(a, b, pmax<Packet8f>);
}
template<>
EIGEN_STRONG_INLINE Packet4d pmax<PropagateNumbers, Packet4d>(const Packet4d& a, const Packet4d& b) {
  return pminmax_propagate_numbers(a, b, pmax<Packet4d>);
}
template<>
EIGEN_STRONG_INLINE Packet8f pmin<PropagateNaN, Packet8f>(const Packet8f& a, const Packet8f& b) {
  return pminmax_propagate_nan(a, b, pmin<Packet8f>);
}
template<>
EIGEN_STRONG_INLINE Packet4d pmin<PropagateNaN, Packet4d>(const Packet4d& a, const Packet4d& b) {
  return pminmax_propagate_nan(a, b, pmin<Packet4d>);
}
template<>
EIGEN_STRONG_INLINE Packet8f pmax<PropagateNaN, Packet8f>(const Packet8f& a, const Packet8f& b) {
  return pminmax_propagate_nan(a, b, pmax<Packet8f>);
}
template<>
EIGEN_STRONG_INLINE Packet4d pmax<PropagateNaN, Packet4d>(const Packet4d& a, const Packet4d& b) {
  return pminmax_propagate_nan(a, b, pmax<Packet4d>);
}

template<> EIGEN_STRONG_INLINE Packet8f print<Packet8f>(const Packet8f& a) { return _mm256_round_ps(a, _MM_FROUND_CUR_DIRECTION); }
template<> EIGEN_STRONG_INLINE Packet4d print<Packet4d>(const Packet4d& a) { return _mm256_round_pd(a, _MM_FROUND_CUR_DIRECTION); }

template<> EIGEN_STRONG_INLINE Packet8f pceil<Packet8f>(const Packet8f& a) { return _mm256_ceil_ps(a); }
template<> EIGEN_STRONG_INLINE Packet4d pceil<Packet4d>(const Packet4d& a) { return _mm256_ceil_pd(a); }

template<> EIGEN_STRONG_INLINE Packet8f pfloor<Packet8f>(const Packet8f& a) { return _mm256_floor_ps(a); }
template<> EIGEN_STRONG_INLINE Packet4d pfloor<Packet4d>(const Packet4d& a) { return _mm256_floor_pd(a); }


template<> EIGEN_STRONG_INLINE Packet8i ptrue<Packet8i>(const Packet8i& a) {
#ifdef EIGEN_VECTORIZE_AVX2
  // vpcmpeqd has lower latency than the more general vcmpps
  return _mm256_cmpeq_epi32(a,a);
#else
  const __m256 b = _mm256_castsi256_ps(a);
  return _mm256_castps_si256(_mm256_cmp_ps(b,b,_CMP_TRUE_UQ));
#endif
}

template<> EIGEN_STRONG_INLINE Packet8f ptrue<Packet8f>(const Packet8f& a) {
#ifdef EIGEN_VECTORIZE_AVX2
  // vpcmpeqd has lower latency than the more general vcmpps
  const __m256i b = _mm256_castps_si256(a);
  return _mm256_castsi256_ps(_mm256_cmpeq_epi32(b,b));
#else
  return _mm256_cmp_ps(a,a,_CMP_TRUE_UQ);
#endif
}

template<> EIGEN_STRONG_INLINE Packet4d ptrue<Packet4d>(const Packet4d& a) {
#ifdef EIGEN_VECTORIZE_AVX2
  // vpcmpeqq has lower latency than the more general vcmppd
  const __m256i b = _mm256_castpd_si256(a);
  return _mm256_castsi256_pd(_mm256_cmpeq_epi64(b,b));
#else
  return _mm256_cmp_pd(a,a,_CMP_TRUE_UQ);
#endif
}

template<> EIGEN_STRONG_INLINE Packet8f pand<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_and_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pand<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_and_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8i pand<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_and_si256(a,b);
#else
  return _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
#endif
}

template<> EIGEN_STRONG_INLINE Packet8f por<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_or_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d por<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_or_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8i por<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_or_si256(a,b);
#else
  return _mm256_castps_si256(_mm256_or_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
#endif
}

template<> EIGEN_STRONG_INLINE Packet8f pxor<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_xor_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pxor<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_xor_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8i pxor<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_xor_si256(a,b);
#else
  return _mm256_castps_si256(_mm256_xor_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
#endif
}

template<> EIGEN_STRONG_INLINE Packet8f pandnot<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_andnot_ps(b,a); }
template<> EIGEN_STRONG_INLINE Packet4d pandnot<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_andnot_pd(b,a); }
template<> EIGEN_STRONG_INLINE Packet8i pandnot<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_andnot_si256(b,a);
#else
  return _mm256_castps_si256(_mm256_andnot_ps(_mm256_castsi256_ps(b),_mm256_castsi256_ps(a)));
#endif
}

template<> EIGEN_STRONG_INLINE Packet8f pround<Packet8f>(const Packet8f& a)
{
  const Packet8f mask = pset1frombits<Packet8f>(static_cast<numext::uint32_t>(0x80000000u));
  const Packet8f prev0dot5 = pset1frombits<Packet8f>(static_cast<numext::uint32_t>(0x3EFFFFFFu));
  return _mm256_round_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
}
template<> EIGEN_STRONG_INLINE Packet4d pround<Packet4d>(const Packet4d& a)
{
  const Packet4d mask = pset1frombits<Packet4d>(static_cast<numext::uint64_t>(0x8000000000000000ull));
  const Packet4d prev0dot5 = pset1frombits<Packet4d>(static_cast<numext::uint64_t>(0x3FDFFFFFFFFFFFFFull));
  return _mm256_round_pd(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
}
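
// Illustrative sketch, not from the Eigen sources (assumes <cmath>): the scalar
// logic behind pround above. 0x3EFFFFFF is the largest float strictly below
// 0.5f; adding it with the sign of `a` and truncating toward zero implements
// round-half-away-from-zero, e.g. 2.5f -> 3.0f and -2.5f -> -3.0f.
inline float scalar_pround_ref(float a) {
  const float prev0dot5 = 0.49999997f;                 // bit pattern 0x3EFFFFFF
  return std::trunc(a + std::copysign(prev0dot5, a));  // por(pand(a,mask),prev0dot5), then _MM_FROUND_TO_ZERO
}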

template<> EIGEN_STRONG_INLINE Packet8f pselect<Packet8f>(const Packet8f& mask, const Packet8f& a, const Packet8f& b)
{ return _mm256_blendv_ps(b,a,mask); }
template<> EIGEN_STRONG_INLINE Packet4d pselect<Packet4d>(const Packet4d& mask, const Packet4d& a, const Packet4d& b)
{ return _mm256_blendv_pd(b,a,mask); }

template<int N> EIGEN_STRONG_INLINE Packet8i parithmetic_shift_right(Packet8i a) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_srai_epi32(a, N);
#else
  __m128i lo = _mm_srai_epi32(_mm256_extractf128_si256(a, 0), N);
  __m128i hi = _mm_srai_epi32(_mm256_extractf128_si256(a, 1), N);
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template<int N> EIGEN_STRONG_INLINE Packet8i plogical_shift_right(Packet8i a) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_srli_epi32(a, N);
#else
  __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(a, 0), N);
  __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(a, 1), N);
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template<int N> EIGEN_STRONG_INLINE Packet8i plogical_shift_left(Packet8i a) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_slli_epi32(a, N);
#else
  __m128i lo = _mm_slli_epi32(_mm256_extractf128_si256(a, 0), N);
  __m128i hi = _mm_slli_epi32(_mm256_extractf128_si256(a, 1), N);
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template<> EIGEN_STRONG_INLINE Packet8f pload<Packet8f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d pload<Packet4d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i pload<Packet8i>(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from)); }

template<> EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d ploadu<Packet4d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i ploadu<Packet8i>(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from)); }

template<> EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float* from, uint8_t umask) {
  Packet8i mask = _mm256_set1_epi8(static_cast<char>(umask));
  const Packet8i bit_mask = _mm256_set_epi32(0xffffff7f, 0xffffffbf, 0xffffffdf, 0xffffffef, 0xfffffff7, 0xfffffffb, 0xfffffffd, 0xfffffffe);
  mask = por<Packet8i>(mask, bit_mask);
  mask = pcmp_eq<Packet8i>(mask, _mm256_set1_epi32(0xffffffff));
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_maskload_ps(from, mask);
}
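
// Illustrative sketch, not from the Eigen sources: reference semantics of the
// masked load above. Bit i of `umask` guards lane i; broadcasting the byte and
// OR-ing with `bit_mask` leaves lane i equal to 0xffffffff iff bit i was set,
// and pcmp_eq turns that into the full lane mask _mm256_maskload_ps expects.
inline void masked_load_ref(float* dst, const float* src, uint8_t umask) {
  for (int i = 0; i < 8; ++i)
    dst[i] = ((umask >> i) & 1) ? src[i] : 0.0f;  // unselected lanes read as zero
}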

// Loads 4 floats from memory and returns the packet {a0, a0, a1, a1, a2, a2, a3, a3}
template<> EIGEN_STRONG_INLINE Packet8f ploaddup<Packet8f>(const float* from)
{
  // TODO try to find a way to avoid the need of a temporary register
//   Packet8f tmp  = _mm256_castps128_ps256(_mm_loadu_ps(from));
//   tmp = _mm256_insertf128_ps(tmp, _mm_movehl_ps(_mm256_castps256_ps128(tmp),_mm256_castps256_ps128(tmp)), 1);
//   return _mm256_unpacklo_ps(tmp,tmp);

  // _mm256_insertf128_ps is very slow on Haswell, thus:
  Packet8f tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);
  // mimic an "inplace" permutation of the lower 128bits using a blend
  tmp = _mm256_blend_ps(tmp,_mm256_castps128_ps256(_mm_permute_ps( _mm256_castps256_ps128(tmp), _MM_SHUFFLE(1,0,1,0))), 15);
  // then we can perform a consistent permutation on the global register to get everything in shape:
  return _mm256_permute_ps(tmp, _MM_SHUFFLE(3,3,2,2));
}
// Loads 2 doubles from memory and returns the packet {a0, a0, a1, a1}
template<> EIGEN_STRONG_INLINE Packet4d ploaddup<Packet4d>(const double* from)
{
  Packet4d tmp = _mm256_broadcast_pd((const __m128d*)(const void*)from);
  return _mm256_permute_pd(tmp, 3<<2);
}

// Loads 2 floats from memory and returns the packet {a0, a0, a0, a0, a1, a1, a1, a1}
template<> EIGEN_STRONG_INLINE Packet8f ploadquad<Packet8f>(const float* from)
{
  Packet8f tmp = _mm256_castps128_ps256(_mm_broadcast_ss(from));
  return _mm256_insertf128_ps(tmp, _mm_broadcast_ss(from+1), 1);
}

template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet8f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet8i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }

template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet8f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet8i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }

template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet8f& from, uint8_t umask) {
#ifdef EIGEN_VECTORIZE_AVX512
  __mmask16 mask = static_cast<__mmask16>(umask & 0x00FF);
  EIGEN_DEBUG_UNALIGNED_STORE _mm512_mask_storeu_ps(to, mask, _mm512_castps256_ps512(from));
#else
  Packet8i mask = _mm256_set1_epi8(static_cast<char>(umask));
  const Packet8i bit_mask = _mm256_set_epi32(0x7f7f7f7f, 0xbfbfbfbf, 0xdfdfdfdf, 0xefefefef, 0xf7f7f7f7, 0xfbfbfbfb, 0xfdfdfdfd, 0xfefefefe);
  mask = por<Packet8i>(mask, bit_mask);
  mask = pcmp_eq<Packet8i>(mask, _mm256_set1_epi32(0xffffffff));
#if EIGEN_COMP_MSVC
  // MSVC sometimes seems to use a bogus mask with maskstore.
  const __m256i ifrom = _mm256_castps_si256(from);
  EIGEN_DEBUG_UNALIGNED_STORE _mm_maskmoveu_si128(_mm256_extractf128_si256(ifrom, 0), _mm256_extractf128_si256(mask, 0), reinterpret_cast<char*>(to));
  EIGEN_DEBUG_UNALIGNED_STORE _mm_maskmoveu_si128(_mm256_extractf128_si256(ifrom, 1), _mm256_extractf128_si256(mask, 1), reinterpret_cast<char*>(to + 4));
#else
  EIGEN_DEBUG_UNALIGNED_STORE _mm256_maskstore_ps(to, mask, from);
#endif
#endif
}

// NOTE: leverage _mm256_i32gather_ps and _mm256_i32gather_pd if AVX2 instructions are available
// NOTE: for the record the following seems to be slower: return _mm256_i32gather_ps(from, _mm256_set1_epi32(stride), 4);
template<> EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, Index stride)
{
  return _mm256_set_ps(from[7*stride], from[6*stride], from[5*stride], from[4*stride],
                       from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline Packet4d pgather<double, Packet4d>(const double* from, Index stride)
{
  return _mm256_set_pd(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}

template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, const Packet8f& from, Index stride)
{
  __m128 low = _mm256_extractf128_ps(from, 0);
  to[stride*0] = _mm_cvtss_f32(low);
  to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1));
  to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 2));
  to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3));

  __m128 high = _mm256_extractf128_ps(from, 1);
  to[stride*4] = _mm_cvtss_f32(high);
  to[stride*5] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1));
  to[stride*6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2));
  to[stride*7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet4d>(double* to, const Packet4d& from, Index stride)
{
  __m128d low = _mm256_extractf128_pd(from, 0);
  to[stride*0] = _mm_cvtsd_f64(low);
  to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1));
  __m128d high = _mm256_extractf128_pd(from, 1);
  to[stride*2] = _mm_cvtsd_f64(high);
  to[stride*3] = _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1));
}
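
// Illustrative sketch, not from the Eigen sources: pgather/pscatter give
// strided packet access, e.g. reading one row of a column-major matrix whose
// leading dimension is `lda` (a hypothetical parameter of this helper).
inline void gather_row8(const float* row_start, Index lda, float* dst) {
  Packet8f p = pgather<float, Packet8f>(row_start, lda);  // row_start[0], row_start[lda], ...
  pstoreu<float>(dst, p);                                 // write the 8 values contiguously
}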

template<> EIGEN_STRONG_INLINE void pstore1<Packet8f>(float* to, const float& a)
{
  Packet8f pa = pset1<Packet8f>(a);
  pstore(to, pa);
}
template<> EIGEN_STRONG_INLINE void pstore1<Packet4d>(double* to, const double& a)
{
  Packet4d pa = pset1<Packet4d>(a);
  pstore(to, pa);
}
template<> EIGEN_STRONG_INLINE void pstore1<Packet8i>(int* to, const int& a)
{
  Packet8i pa = pset1<Packet8i>(a);
  pstore(to, pa);
}

#ifndef EIGEN_VECTORIZE_AVX512
template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
#endif

template<> EIGEN_STRONG_INLINE float pfirst<Packet8f>(const Packet8f& a) {
  return _mm_cvtss_f32(_mm256_castps256_ps128(a));
}
template<> EIGEN_STRONG_INLINE double pfirst<Packet4d>(const Packet4d& a) {
  return _mm_cvtsd_f64(_mm256_castpd256_pd128(a));
}
template<> EIGEN_STRONG_INLINE int pfirst<Packet8i>(const Packet8i& a) {
  return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
}


template<> EIGEN_STRONG_INLINE Packet8f preverse(const Packet8f& a)
{
  __m256 tmp = _mm256_shuffle_ps(a,a,0x1b);
  return _mm256_permute2f128_ps(tmp, tmp, 1);
}
template<> EIGEN_STRONG_INLINE Packet4d preverse(const Packet4d& a)
{
  __m256d tmp = _mm256_shuffle_pd(a,a,5);
  return _mm256_permute2f128_pd(tmp, tmp, 1);
  #if 0
  // This version is unlikely to be faster as _mm256_shuffle_ps and _mm256_permute_pd
  // exhibit the same latency/throughput, but it is here for future reference/benchmarking...
  __m256d swap_halves = _mm256_permute2f128_pd(a,a,1);
  return _mm256_permute_pd(swap_halves,5);
  #endif
}

// pabs should be ok
template<> EIGEN_STRONG_INLINE Packet8f pabs(const Packet8f& a)
{
  const Packet8f mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
  return _mm256_and_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4d pabs(const Packet4d& a)
{
  const Packet4d mask = _mm256_castsi256_pd(_mm256_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
  return _mm256_and_pd(a,mask);
}

template<> EIGEN_STRONG_INLINE Packet8f pfrexp<Packet8f>(const Packet8f& a, Packet8f& exponent) {
  return pfrexp_generic(a,exponent);
}

// Extract the biased exponent. There is no Packet4l (64-bit integer packet)
// on AVX, so this works on 128-bit halves instead.
template<>
EIGEN_STRONG_INLINE
Packet4d pfrexp_generic_get_biased_exponent(const Packet4d& a) {
  const Packet4d cst_exp_mask = pset1frombits<Packet4d>(static_cast<uint64_t>(0x7ff0000000000000ull));
  __m256i a_expo = _mm256_castpd_si256(pand(a, cst_exp_mask));
#ifdef EIGEN_VECTORIZE_AVX2
  a_expo = _mm256_srli_epi64(a_expo, 52);
  __m128i lo = _mm256_extractf128_si256(a_expo, 0);
  __m128i hi = _mm256_extractf128_si256(a_expo, 1);
#else
  __m128i lo = _mm256_extractf128_si256(a_expo, 0);
  __m128i hi = _mm256_extractf128_si256(a_expo, 1);
  lo = _mm_srli_epi64(lo, 52);
  hi = _mm_srli_epi64(hi, 52);
#endif
  Packet2d exponent_lo = _mm_cvtepi32_pd(vec4i_swizzle1(lo, 0, 2, 1, 3));
  Packet2d exponent_hi = _mm_cvtepi32_pd(vec4i_swizzle1(hi, 0, 2, 1, 3));
  Packet4d exponent = _mm256_insertf128_pd(_mm256_setzero_pd(), exponent_lo, 0);
  exponent = _mm256_insertf128_pd(exponent, exponent_hi, 1);
  return exponent;
}
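
// Illustrative sketch, not from the Eigen sources (assumes <cstring>): the
// scalar equivalent of the biased-exponent extraction above. pfrexp_generic
// then removes the bias and normalizes the mantissa into [0.5, 1).
inline double biased_exponent_ref(double a) {
  uint64_t bits;
  std::memcpy(&bits, &a, sizeof(bits));                              // bit-cast double -> uint64
  return static_cast<double>((bits & 0x7ff0000000000000ull) >> 52);  // pand + shift by 52
}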


template<> EIGEN_STRONG_INLINE Packet4d pfrexp<Packet4d>(const Packet4d& a, Packet4d& exponent) {
  return pfrexp_generic(a, exponent);
}

template<> EIGEN_STRONG_INLINE Packet8f pldexp<Packet8f>(const Packet8f& a, const Packet8f& exponent) {
  return pldexp_generic(a, exponent);
}

template<> EIGEN_STRONG_INLINE Packet4d pldexp<Packet4d>(const Packet4d& a, const Packet4d& exponent) {
  // Clamp exponent to [-2099, 2099]
  const Packet4d max_exponent = pset1<Packet4d>(2099.0);
  const Packet4i e = _mm256_cvtpd_epi32(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent));

  // Split 2^e into four factors and multiply.
  const Packet4i bias = pset1<Packet4i>(1023);
  Packet4i b = parithmetic_shift_right<2>(e);  // floor(e/4)

  // 2^b
  Packet4i hi = vec4i_swizzle1(padd(b, bias), 0, 2, 1, 3);
  Packet4i lo = _mm_slli_epi64(hi, 52);
  hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52);
  Packet4d c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1));
  Packet4d out = pmul(pmul(pmul(a, c), c), c);  // a * 2^(3b)

  // 2^(e - 3b)
  b = psub(psub(psub(e, b), b), b);  // e - 3b
  hi = vec4i_swizzle1(padd(b, bias), 0, 2, 1, 3);
  lo = _mm_slli_epi64(hi, 52);
  hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52);
  c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1));
  out = pmul(out, c);  // a * 2^e
  return out;
}
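
// Illustrative sketch, not from the Eigen sources (assumes <cmath>): the
// factor splitting used by pldexp<Packet4d> above, in scalar form. With
// b = floor(e/4), each partial exponent stays within the representable range
// of a double, and a * 2^e = (((a * 2^b) * 2^b) * 2^b) * 2^(e-3b).
inline double ldexp_split_ref(double a, int e) {
  const int b = e >> 2;                      // floor(e/4), as the arithmetic shift above
  const double c = std::ldexp(1.0, b);       // 2^b (built via exponent-field bit tricks above)
  double out = ((a * c) * c) * c;            // a * 2^(3b)
  return out * std::ldexp(1.0, e - 3 * b);   // times 2^(e-3b), giving a * 2^e
}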

template<> EIGEN_STRONG_INLINE float predux<Packet8f>(const Packet8f& a)
{
  return predux(Packet4f(_mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1))));
}
template<> EIGEN_STRONG_INLINE double predux<Packet4d>(const Packet4d& a)
{
  return predux(Packet2d(_mm_add_pd(_mm256_castpd256_pd128(a),_mm256_extractf128_pd(a,1))));
}

template<> EIGEN_STRONG_INLINE Packet4f predux_half_dowto4<Packet8f>(const Packet8f& a)
{
  return _mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1));
}

template<> EIGEN_STRONG_INLINE float predux_mul<Packet8f>(const Packet8f& a)
{
  Packet8f tmp;
  tmp = _mm256_mul_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE double predux_mul<Packet4d>(const Packet4d& a)
{
  Packet4d tmp;
  tmp = _mm256_mul_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_mul_pd(tmp, _mm256_shuffle_pd(tmp,tmp,1)));
}

template<> EIGEN_STRONG_INLINE float predux_min<Packet8f>(const Packet8f& a)
{
  Packet8f tmp = _mm256_min_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE double predux_min<Packet4d>(const Packet4d& a)
{
  Packet4d tmp = _mm256_min_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_min_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
}

template<> EIGEN_STRONG_INLINE float predux_max<Packet8f>(const Packet8f& a)
{
  Packet8f tmp = _mm256_max_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}

template<> EIGEN_STRONG_INLINE double predux_max<Packet4d>(const Packet4d& a)
{
  Packet4d tmp = _mm256_max_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_max_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
}

// not needed yet
// template<> EIGEN_STRONG_INLINE bool predux_all(const Packet8f& x)
// {
//   return _mm256_movemask_ps(x)==0xFF;
// }

template<> EIGEN_STRONG_INLINE bool predux_any(const Packet8f& x)
{
  return _mm256_movemask_ps(x)!=0;
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet8f,8>& kernel) {
  __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T4 = _mm256_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
  __m256 T5 = _mm256_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
  __m256 T6 = _mm256_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
  __m256 T7 = _mm256_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
  __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
  __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
  __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
  __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));
  __m256 S4 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(1,0,1,0));
  __m256 S5 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(3,2,3,2));
  __m256 S6 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(1,0,1,0));
  __m256 S7 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(3,2,3,2));
  kernel.packet[0] = _mm256_permute2f128_ps(S0, S4, 0x20);
  kernel.packet[1] = _mm256_permute2f128_ps(S1, S5, 0x20);
  kernel.packet[2] = _mm256_permute2f128_ps(S2, S6, 0x20);
  kernel.packet[3] = _mm256_permute2f128_ps(S3, S7, 0x20);
  kernel.packet[4] = _mm256_permute2f128_ps(S0, S4, 0x31);
  kernel.packet[5] = _mm256_permute2f128_ps(S1, S5, 0x31);
  kernel.packet[6] = _mm256_permute2f128_ps(S2, S6, 0x31);
  kernel.packet[7] = _mm256_permute2f128_ps(S3, S7, 0x31);
}
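
// Illustrative sketch, not from the Eigen sources: using the 8x8 kernel above
// to transpose a row-major 8x8 float tile in place. `buf` must hold 64 floats;
// the whole transpose happens in registers (24 shuffles, no scalar traffic).
inline void transpose_8x8_tile(float* buf) {
  PacketBlock<Packet8f, 8> block;
  for (int i = 0; i < 8; ++i) block.packet[i] = ploadu<Packet8f>(buf + 8 * i);
  ptranspose(block);
  for (int i = 0; i < 8; ++i) pstoreu<float>(buf + 8 * i, block.packet[i]);
}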

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet8f,4>& kernel) {
  __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);

  __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
  __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
  __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
  __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));

  kernel.packet[0] = _mm256_permute2f128_ps(S0, S1, 0x20);
  kernel.packet[1] = _mm256_permute2f128_ps(S2, S3, 0x20);
  kernel.packet[2] = _mm256_permute2f128_ps(S0, S1, 0x31);
  kernel.packet[3] = _mm256_permute2f128_ps(S2, S3, 0x31);
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4d,4>& kernel) {
  __m256d T0 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 15);
  __m256d T1 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
  __m256d T2 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 15);
  __m256d T3 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);

  kernel.packet[1] = _mm256_permute2f128_pd(T0, T2, 32);
  kernel.packet[3] = _mm256_permute2f128_pd(T0, T2, 49);
  kernel.packet[0] = _mm256_permute2f128_pd(T1, T3, 32);
  kernel.packet[2] = _mm256_permute2f128_pd(T1, T3, 49);
}

template<> EIGEN_STRONG_INLINE Packet8f pblend(const Selector<8>& ifPacket, const Packet8f& thenPacket, const Packet8f& elsePacket) {
  const __m256 zero = _mm256_setzero_ps();
  const __m256 select = _mm256_set_ps(ifPacket.select[7], ifPacket.select[6], ifPacket.select[5], ifPacket.select[4], ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m256 false_mask = _mm256_cmp_ps(select, zero, _CMP_EQ_UQ);
  return _mm256_blendv_ps(thenPacket, elsePacket, false_mask);
}
template<> EIGEN_STRONG_INLINE Packet4d pblend(const Selector<4>& ifPacket, const Packet4d& thenPacket, const Packet4d& elsePacket) {
  const __m256d zero = _mm256_setzero_pd();
  const __m256d select = _mm256_set_pd(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m256d false_mask = _mm256_cmp_pd(select, zero, _CMP_EQ_UQ);
  return _mm256_blendv_pd(thenPacket, elsePacket, false_mask);
}
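
// Illustrative sketch, not from the Eigen sources: Selector semantics for
// pblend above. A nonzero select[i] picks lane i of thenPacket, zero picks
// elsePacket; the _CMP_EQ_UQ comparison builds the *false* mask, which is why
// then/else appear swapped inside _mm256_blendv_ps.
inline Packet8f blend_even_from_a(const Packet8f& a, const Packet8f& b) {
  Selector<8> sel;
  for (int i = 0; i < 8; ++i) sel.select[i] = ((i % 2) == 0);  // even lanes from a, odd from b
  return pblend(sel, a, b);
}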

// Packet math for Eigen::half

template<> struct unpacket_traits<Packet8h> { typedef Eigen::half type; enum {size=8, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet8h half; };

template<> EIGEN_STRONG_INLINE Packet8h pset1<Packet8h>(const Eigen::half& from) {
  return _mm_set1_epi16(numext::bit_cast<numext::uint16_t>(from));
}

template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet8h>(const Packet8h& from) {
  return numext::bit_cast<Eigen::half>(static_cast<numext::uint16_t>(_mm_extract_epi16(from, 0)));
}

template<> EIGEN_STRONG_INLINE Packet8h pload<Packet8h>(const Eigen::half* from) {
  return _mm_load_si128(reinterpret_cast<const __m128i*>(from));
}

template<> EIGEN_STRONG_INLINE Packet8h ploadu<Packet8h>(const Eigen::half* from) {
  return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
}

template<> EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet8h& from) {
  _mm_store_si128(reinterpret_cast<__m128i*>(to), from);
}

template<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet8h& from) {
  _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from);
}

template<> EIGEN_STRONG_INLINE Packet8h
ploaddup<Packet8h>(const Eigen::half* from) {
  const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
  const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
  const numext::uint16_t c = numext::bit_cast<numext::uint16_t>(from[2]);
  const numext::uint16_t d = numext::bit_cast<numext::uint16_t>(from[3]);
  return _mm_set_epi16(d, d, c, c, b, b, a, a);
}

template<> EIGEN_STRONG_INLINE Packet8h
ploadquad<Packet8h>(const Eigen::half* from) {
  const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
  const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
  return _mm_set_epi16(b, b, b, b, a, a, a, a);
}

template<> EIGEN_STRONG_INLINE Packet8h ptrue(const Packet8h& a) {
  return _mm_cmpeq_epi32(a, a);
}

template <>
EIGEN_STRONG_INLINE Packet8h pabs(const Packet8h& a) {
  const __m128i sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
  return _mm_andnot_si128(sign_mask, a);
}

EIGEN_STRONG_INLINE Packet8f half2float(const Packet8h& a) {
#ifdef EIGEN_HAS_FP16_C
  return _mm256_cvtph_ps(a);
#else
  EIGEN_ALIGN32 Eigen::half aux[8];
  pstore(aux, a);
  float f0(aux[0]);
  float f1(aux[1]);
  float f2(aux[2]);
  float f3(aux[3]);
  float f4(aux[4]);
  float f5(aux[5]);
  float f6(aux[6]);
  float f7(aux[7]);

  return _mm256_set_ps(f7, f6, f5, f4, f3, f2, f1, f0);
#endif
}

EIGEN_STRONG_INLINE Packet8h float2half(const Packet8f& a) {
#ifdef EIGEN_HAS_FP16_C
  return _mm256_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT);
#else
  EIGEN_ALIGN32 float aux[8];
  pstore(aux, a);
  const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[0]));
  const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[1]));
  const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[2]));
  const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[3]));
  const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[4]));
  const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[5]));
  const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[6]));
  const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[7]));
  return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8h pmin<Packet8h>(const Packet8h& a,
                                            const Packet8h& b) {
  return float2half(pmin<Packet8f>(half2float(a), half2float(b)));
}

template <>
EIGEN_STRONG_INLINE Packet8h pmax<Packet8h>(const Packet8h& a,
                                            const Packet8h& b) {
  return float2half(pmax<Packet8f>(half2float(a), half2float(b)));
}

template <>
EIGEN_STRONG_INLINE Packet8h plset<Packet8h>(const half& a) {
  return float2half(plset<Packet8f>(static_cast<float>(a)));
}

template<> EIGEN_STRONG_INLINE Packet8h por(const Packet8h& a,const Packet8h& b) {
  // Packet8h is a thin wrapper around __m128i, so the integer intrinsics can
  // be called on it directly:
  return _mm_or_si128(a,b);
}
template<> EIGEN_STRONG_INLINE Packet8h pxor(const Packet8h& a,const Packet8h& b) {
  return _mm_xor_si128(a,b);
}
template<> EIGEN_STRONG_INLINE Packet8h pand(const Packet8h& a,const Packet8h& b) {
  return _mm_and_si128(a,b);
}
template<> EIGEN_STRONG_INLINE Packet8h pandnot(const Packet8h& a,const Packet8h& b) {
  return _mm_andnot_si128(b,a);
}

template<> EIGEN_STRONG_INLINE Packet8h pselect(const Packet8h& mask, const Packet8h& a, const Packet8h& b) {
  return _mm_blendv_epi8(b, a, mask);
}

template<> EIGEN_STRONG_INLINE Packet8h pround<Packet8h>(const Packet8h& a) {
  return float2half(pround<Packet8f>(half2float(a)));
}

template<> EIGEN_STRONG_INLINE Packet8h print<Packet8h>(const Packet8h& a) {
  return float2half(print<Packet8f>(half2float(a)));
}

template<> EIGEN_STRONG_INLINE Packet8h pceil<Packet8h>(const Packet8h& a) {
  return float2half(pceil<Packet8f>(half2float(a)));
}

template<> EIGEN_STRONG_INLINE Packet8h pfloor<Packet8h>(const Packet8h& a) {
  return float2half(pfloor<Packet8f>(half2float(a)));
}

template<> EIGEN_STRONG_INLINE Packet8h pcmp_eq(const Packet8h& a,const Packet8h& b) {
  return Pack16To8(pcmp_eq(half2float(a), half2float(b)));
}

template<> EIGEN_STRONG_INLINE Packet8h pcmp_le(const Packet8h& a,const Packet8h& b) {
  return Pack16To8(pcmp_le(half2float(a), half2float(b)));
}

template<> EIGEN_STRONG_INLINE Packet8h pcmp_lt(const Packet8h& a,const Packet8h& b) {
  return Pack16To8(pcmp_lt(half2float(a), half2float(b)));
}

template<> EIGEN_STRONG_INLINE Packet8h pcmp_lt_or_nan(const Packet8h& a,const Packet8h& b) {
  return Pack16To8(pcmp_lt_or_nan(half2float(a), half2float(b)));
}

template<> EIGEN_STRONG_INLINE Packet8h pconj(const Packet8h& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet8h pnegate(const Packet8h& a) {
  Packet8h sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
  return _mm_xor_si128(a, sign_mask);
}

template<> EIGEN_STRONG_INLINE Packet8h padd<Packet8h>(const Packet8h& a, const Packet8h& b) {
  Packet8f af = half2float(a);
  Packet8f bf = half2float(b);
  Packet8f rf = padd(af, bf);
  return float2half(rf);
}

template<> EIGEN_STRONG_INLINE Packet8h psub<Packet8h>(const Packet8h& a, const Packet8h& b) {
  Packet8f af = half2float(a);
  Packet8f bf = half2float(b);
  Packet8f rf = psub(af, bf);
  return float2half(rf);
}

template<> EIGEN_STRONG_INLINE Packet8h pmul<Packet8h>(const Packet8h& a, const Packet8h& b) {
  Packet8f af = half2float(a);
  Packet8f bf = half2float(b);
  Packet8f rf = pmul(af, bf);
  return float2half(rf);
}

template<> EIGEN_STRONG_INLINE Packet8h pdiv<Packet8h>(const Packet8h& a, const Packet8h& b) {
  Packet8f af = half2float(a);
  Packet8f bf = half2float(b);
  Packet8f rf = pdiv(af, bf);
  return float2half(rf);
}

template<> EIGEN_STRONG_INLINE Packet8h pgather<Eigen::half, Packet8h>(const Eigen::half* from, Index stride)
{
  const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(from[0*stride]);
  const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(from[1*stride]);
  const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(from[2*stride]);
  const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(from[3*stride]);
  const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(from[4*stride]);
  const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(from[5*stride]);
  const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(from[6*stride]);
  const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(from[7*stride]);
  return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
}

template<> EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet8h>(Eigen::half* to, const Packet8h& from, Index stride)
{
  EIGEN_ALIGN32 Eigen::half aux[8];
  pstore(aux, from);
  to[stride*0] = aux[0];
  to[stride*1] = aux[1];
  to[stride*2] = aux[2];
  to[stride*3] = aux[3];
  to[stride*4] = aux[4];
  to[stride*5] = aux[5];
  to[stride*6] = aux[6];
  to[stride*7] = aux[7];
}

template<> EIGEN_STRONG_INLINE Eigen::half predux<Packet8h>(const Packet8h& a) {
  Packet8f af = half2float(a);
  float reduced = predux<Packet8f>(af);
  return Eigen::half(reduced);
}

template<> EIGEN_STRONG_INLINE Eigen::half predux_max<Packet8h>(const Packet8h& a) {
  Packet8f af = half2float(a);
  float reduced = predux_max<Packet8f>(af);
  return Eigen::half(reduced);
}

template<> EIGEN_STRONG_INLINE Eigen::half predux_min<Packet8h>(const Packet8h& a) {
  Packet8f af = half2float(a);
  float reduced = predux_min<Packet8f>(af);
  return Eigen::half(reduced);
}

template<> EIGEN_STRONG_INLINE Eigen::half predux_mul<Packet8h>(const Packet8h& a) {
  Packet8f af = half2float(a);
  float reduced = predux_mul<Packet8f>(af);
  return Eigen::half(reduced);
}

template<> EIGEN_STRONG_INLINE Packet8h preverse(const Packet8h& a)
{
  __m128i m = _mm_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);
  return _mm_shuffle_epi8(a,m);
}

EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<Packet8h,8>& kernel) {
  __m128i a = kernel.packet[0];
  __m128i b = kernel.packet[1];
  __m128i c = kernel.packet[2];
  __m128i d = kernel.packet[3];
  __m128i e = kernel.packet[4];
  __m128i f = kernel.packet[5];
  __m128i g = kernel.packet[6];
  __m128i h = kernel.packet[7];

  __m128i a03b03 = _mm_unpacklo_epi16(a, b);
  __m128i c03d03 = _mm_unpacklo_epi16(c, d);
  __m128i e03f03 = _mm_unpacklo_epi16(e, f);
  __m128i g03h03 = _mm_unpacklo_epi16(g, h);
  __m128i a47b47 = _mm_unpackhi_epi16(a, b);
  __m128i c47d47 = _mm_unpackhi_epi16(c, d);
  __m128i e47f47 = _mm_unpackhi_epi16(e, f);
  __m128i g47h47 = _mm_unpackhi_epi16(g, h);

  __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
  __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
  __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
  __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
  __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
  __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
  __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
  __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);

  __m128i a0b0c0d0e0f0g0h0 = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
  __m128i a1b1c1d1e1f1g1h1 = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
  __m128i a2b2c2d2e2f2g2h2 = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
  __m128i a3b3c3d3e3f3g3h3 = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
  __m128i a4b4c4d4e4f4g4h4 = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
  __m128i a5b5c5d5e5f5g5h5 = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
  __m128i a6b6c6d6e6f6g6h6 = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
  __m128i a7b7c7d7e7f7g7h7 = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);

  kernel.packet[0] = a0b0c0d0e0f0g0h0;
  kernel.packet[1] = a1b1c1d1e1f1g1h1;
  kernel.packet[2] = a2b2c2d2e2f2g2h2;
  kernel.packet[3] = a3b3c3d3e3f3g3h3;
  kernel.packet[4] = a4b4c4d4e4f4g4h4;
  kernel.packet[5] = a5b5c5d5e5f5g5h5;
  kernel.packet[6] = a6b6c6d6e6f6g6h6;
  kernel.packet[7] = a7b7c7d7e7f7g7h7;
}

EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<Packet8h,4>& kernel) {
  EIGEN_ALIGN32 Eigen::half in[4][8];
  pstore<Eigen::half>(in[0], kernel.packet[0]);
  pstore<Eigen::half>(in[1], kernel.packet[1]);
  pstore<Eigen::half>(in[2], kernel.packet[2]);
  pstore<Eigen::half>(in[3], kernel.packet[3]);

  EIGEN_ALIGN32 Eigen::half out[4][8];

  for (int i = 0; i < 4; ++i) {
    for (int j = 0; j < 4; ++j) {
      out[i][j] = in[j][2*i];
    }
    for (int j = 0; j < 4; ++j) {
      out[i][j+4] = in[j][2*i+1];
    }
  }

  kernel.packet[0] = pload<Packet8h>(out[0]);
  kernel.packet[1] = pload<Packet8h>(out[1]);
  kernel.packet[2] = pload<Packet8h>(out[2]);
  kernel.packet[3] = pload<Packet8h>(out[3]);
}

// BFloat16 implementation.

EIGEN_STRONG_INLINE Packet8f Bf16ToF32(const Packet8bf& a) {
#ifdef EIGEN_VECTORIZE_AVX2
  __m256i extend = _mm256_cvtepu16_epi32(a);
  return _mm256_castsi256_ps(_mm256_slli_epi32(extend, 16));
#else
  __m128i lo = _mm_cvtepu16_epi32(a);
  __m128i hi = _mm_cvtepu16_epi32(_mm_srli_si128(a, 8));
  __m128i lo_shift = _mm_slli_epi32(lo, 16);
  __m128i hi_shift = _mm_slli_epi32(hi, 16);
  return _mm256_castsi256_ps(_mm256_insertf128_si256(_mm256_castsi128_si256(lo_shift), hi_shift, 1));
#endif
}
1286
1287// Convert float to bfloat16 according to round-to-nearest-even/denormals algorithm.
1288EIGEN_STRONG_INLINE Packet8bf F32ToBf16(const Packet8f& a) {
1289 Packet8bf r;
1290
1291 __m256i input = _mm256_castps_si256(a);
1292
1293#ifdef EIGEN_VECTORIZE_AVX2
1294 // uint32_t lsb = (input >> 16);
1295 __m256i t = _mm256_srli_epi32(input, 16);
1296 // uint32_t lsb = lsb & 1;
1297 t = _mm256_and_si256(t, _mm256_set1_epi32(1));
1298 // uint32_t rounding_bias = 0x7fff + lsb;
1299 t = _mm256_add_epi32(t, _mm256_set1_epi32(0x7fff));
1300 // input += rounding_bias;
1301 t = _mm256_add_epi32(t, input);
1302 // input = input >> 16;
1303 t = _mm256_srli_epi32(t, 16);
1304 // Replace NaN lanes with the quiet-NaN pattern before packing back to bf16.
1305 __m256 mask = _mm256_cmp_ps(a, a, _CMP_ORD_Q);
1306 __m256i nan = _mm256_set1_epi32(0x7fc0);
1307 t = _mm256_blendv_epi8(nan, t, _mm256_castps_si256(mask));
1308 // output = numext::bit_cast<uint16_t>(input);
1309 return _mm_packus_epi32(_mm256_extractf128_si256(t, 0),
1310 _mm256_extractf128_si256(t, 1));
1311#else
1312 // uint32_t lsb = (input >> 16);
1313 __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(input, 0), 16);
1314 __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(input, 1), 16);
1315 // lsb = lsb & 1;
1316 lo = _mm_and_si128(lo, _mm_set1_epi32(1));
1317 hi = _mm_and_si128(hi, _mm_set1_epi32(1));
1318 // uint32_t rounding_bias = 0x7fff + lsb;
1319 lo = _mm_add_epi32(lo, _mm_set1_epi32(0x7fff));
1320 hi = _mm_add_epi32(hi, _mm_set1_epi32(0x7fff));
1321 // input += rounding_bias;
1322 lo = _mm_add_epi32(lo, _mm256_extractf128_si256(input, 0));
1323 hi = _mm_add_epi32(hi, _mm256_extractf128_si256(input, 1));
1324 // input = input >> 16;
1325 lo = _mm_srli_epi32(lo, 16);
1326 hi = _mm_srli_epi32(hi, 16);
1327 // Replace NaN lanes with the quiet-NaN pattern before packing back to bf16.
1328 __m256 mask = _mm256_cmp_ps(a, a, _CMP_ORD_Q);
1329 __m128i nan = _mm_set1_epi32(0x7fc0);
1330 lo = _mm_blendv_epi8(nan, lo, _mm_castps_si128(_mm256_castps256_ps128(mask)));
1331 hi = _mm_blendv_epi8(nan, hi, _mm_castps_si128(_mm256_extractf128_ps(mask, 1)));
1332 // output = numext::bit_cast<uint16_t>(input);
1333 return _mm_packus_epi32(lo, hi);
1334#endif
1335}
1336
1337template<> EIGEN_STRONG_INLINE Packet8bf pset1<Packet8bf>(const bfloat16& from) {
1338 return _mm_set1_epi16(numext::bit_cast<numext::uint16_t>(from));
1339}
1340
1341template<> EIGEN_STRONG_INLINE bfloat16 pfirst<Packet8bf>(const Packet8bf& from) {
1342 return numext::bit_cast<bfloat16>(static_cast<numext::uint16_t>(_mm_extract_epi16(from, 0)));
1343}
1344
1345template<> EIGEN_STRONG_INLINE Packet8bf pload<Packet8bf>(const bfloat16* from) {
1346 return _mm_load_si128(reinterpret_cast<const __m128i*>(from));
1347}
1348
1349template<> EIGEN_STRONG_INLINE Packet8bf ploadu<Packet8bf>(const bfloat16* from) {
1350 return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
1351}
1352
1353template<> EIGEN_STRONG_INLINE void pstore<bfloat16>(bfloat16* to, const Packet8bf& from) {
1354 _mm_store_si128(reinterpret_cast<__m128i*>(to), from);
1355}
1356
1357template<> EIGEN_STRONG_INLINE void pstoreu<bfloat16>(bfloat16* to, const Packet8bf& from) {
1358 _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from);
1359}
1360
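// Duplicate each of the first four values: the packet reads {a,a,b,b,c,c,d,d}
// from lane 0 upward (_mm_set_epi16 takes its arguments most-significant
// first). ploadquad below duplicates the first two values four times each.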
1361template<> EIGEN_STRONG_INLINE Packet8bf
1362ploaddup<Packet8bf>(const bfloat16* from) {
1363 const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
1364 const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
1365 const numext::uint16_t c = numext::bit_cast<numext::uint16_t>(from[2]);
1366 const numext::uint16_t d = numext::bit_cast<numext::uint16_t>(from[3]);
1367 return _mm_set_epi16(d, d, c, c, b, b, a, a);
1368}
1369
1370template<> EIGEN_STRONG_INLINE Packet8bf
1371ploadquad<Packet8bf>(const bfloat16* from) {
1372 const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
1373 const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
1374 return _mm_set_epi16(b, b, b, b, a, a, a, a);
1375}
1376
1377template<> EIGEN_STRONG_INLINE Packet8bf ptrue(const Packet8bf& a) {
1378 return _mm_cmpeq_epi32(a, a);
1379}
1380
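// bfloat16 keeps float's sign-magnitude layout, so abs only needs the sign bit
// (bit 15) cleared; pnegate further below flips the same bit instead.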
1381template <>
1382EIGEN_STRONG_INLINE Packet8bf pabs(const Packet8bf& a) {
1383 const __m128i sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
1384 return _mm_andnot_si128(sign_mask, a);
1385}
1386
1387template <>
1388EIGEN_STRONG_INLINE Packet8bf pmin<Packet8bf>(const Packet8bf& a,
1389 const Packet8bf& b) {
1390 return F32ToBf16(pmin<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
1391}
1392
1393template <>
1394EIGEN_STRONG_INLINE Packet8bf pmax<Packet8bf>(const Packet8bf& a,
1395 const Packet8bf& b) {
1396 return F32ToBf16(pmax<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
1397}
1398
1399template <>
1400EIGEN_STRONG_INLINE Packet8bf plset<Packet8bf>(const bfloat16& a) {
1401 return F32ToBf16(plset<Packet8f>(static_cast<float>(a)));
1402}
1403
1404template<> EIGEN_STRONG_INLINE Packet8bf por(const Packet8bf& a,const Packet8bf& b) {
1405 return _mm_or_si128(a,b);
1406}
1407template<> EIGEN_STRONG_INLINE Packet8bf pxor(const Packet8bf& a,const Packet8bf& b) {
1408 return _mm_xor_si128(a,b);
1409}
1410template<> EIGEN_STRONG_INLINE Packet8bf pand(const Packet8bf& a,const Packet8bf& b) {
1411 return _mm_and_si128(a,b);
1412}
1413template<> EIGEN_STRONG_INLINE Packet8bf pandnot(const Packet8bf& a,const Packet8bf& b) {
1414 return _mm_andnot_si128(b,a);
1415}
1416
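// _mm_blendv_epi8 selects per byte based on the high bit of each mask byte,
// so every mask lane is expected to be all-ones or all-zeros, as produced by
// the pcmp_* implementations below.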
1417template<> EIGEN_STRONG_INLINE Packet8bf pselect(const Packet8bf& mask, const Packet8bf& a, const Packet8bf& b) {
1418 return _mm_blendv_epi8(b, a, mask);
1419}
1420
1421template<> EIGEN_STRONG_INLINE Packet8bf pround<Packet8bf>(const Packet8bf& a)
1422{
1423 return F32ToBf16(pround<Packet8f>(Bf16ToF32(a)));
1424}
1425
1426template<> EIGEN_STRONG_INLINE Packet8bf print<Packet8bf>(const Packet8bf& a) {
1427 return F32ToBf16(print<Packet8f>(Bf16ToF32(a)));
1428}
1429
1430template<> EIGEN_STRONG_INLINE Packet8bf pceil<Packet8bf>(const Packet8bf& a) {
1431 return F32ToBf16(pceil<Packet8f>(Bf16ToF32(a)));
1432}
1433
1434template<> EIGEN_STRONG_INLINE Packet8bf pfloor<Packet8bf>(const Packet8bf& a) {
1435 return F32ToBf16(pfloor<Packet8f>(Bf16ToF32(a)));
1436}
1437
1438template<> EIGEN_STRONG_INLINE Packet8bf pcmp_eq(const Packet8bf& a,const Packet8bf& b) {
1439 return Pack16To8(pcmp_eq(Bf16ToF32(a), Bf16ToF32(b)));
1440}
1441
1442template<> EIGEN_STRONG_INLINE Packet8bf pcmp_le(const Packet8bf& a,const Packet8bf& b) {
1443 return Pack16To8(pcmp_le(Bf16ToF32(a), Bf16ToF32(b)));
1444}
1445
1446template<> EIGEN_STRONG_INLINE Packet8bf pcmp_lt(const Packet8bf& a,const Packet8bf& b) {
1447 return Pack16To8(pcmp_lt(Bf16ToF32(a), Bf16ToF32(b)));
1448}
1449
1450template<> EIGEN_STRONG_INLINE Packet8bf pcmp_lt_or_nan(const Packet8bf& a,const Packet8bf& b) {
1451 return Pack16To8(pcmp_lt_or_nan(Bf16ToF32(a), Bf16ToF32(b)));
1452}
1453
1454template<> EIGEN_STRONG_INLINE Packet8bf pconj(const Packet8bf& a) { return a; }
1455
1456template<> EIGEN_STRONG_INLINE Packet8bf pnegate(const Packet8bf& a) {
1457 Packet8bf sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
1458 return _mm_xor_si128(a, sign_mask);
1459}
1460
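// Arithmetic on bfloat16 packets widens to float, applies the Packet8f
// kernels, and narrows back with F32ToBf16, so the computation itself happens
// at float precision and only the result is rounded to bfloat16.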
1461template<> EIGEN_STRONG_INLINE Packet8bf padd<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
1462 return F32ToBf16(padd<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
1463}
1464
1465template<> EIGEN_STRONG_INLINE Packet8bf psub<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
1466 return F32ToBf16(psub<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
1467}
1468
1469template<> EIGEN_STRONG_INLINE Packet8bf pmul<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
1470 return F32ToBf16(pmul<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
1471}
1472
1473template<> EIGEN_STRONG_INLINE Packet8bf pdiv<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
1474 return F32ToBf16(pdiv<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
1475}
1476
1478template<> EIGEN_STRONG_INLINE Packet8bf pgather<bfloat16, Packet8bf>(const bfloat16* from, Index stride)
1479{
1480 const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(from[0*stride]);
1481 const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(from[1*stride]);
1482 const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(from[2*stride]);
1483 const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(from[3*stride]);
1484 const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(from[4*stride]);
1485 const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(from[5*stride]);
1486 const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(from[6*stride]);
1487 const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(from[7*stride]);
1488 return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
1489}
1490
1491template<> EIGEN_STRONG_INLINE void pscatter<bfloat16, Packet8bf>(bfloat16* to, const Packet8bf& from, Index stride)
1492{
1493 EIGEN_ALIGN32 bfloat16 aux[8];
1494 pstore(aux, from);
1495 to[stride*0] = aux[0];
1496 to[stride*1] = aux[1];
1497 to[stride*2] = aux[2];
1498 to[stride*3] = aux[3];
1499 to[stride*4] = aux[4];
1500 to[stride*5] = aux[5];
1501 to[stride*6] = aux[6];
1502 to[stride*7] = aux[7];
1503}
1504
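// Reductions follow the same pattern: widen to float, reduce with the
// Packet8f reduction, and narrow the final scalar back to bfloat16.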
1505template<> EIGEN_STRONG_INLINE bfloat16 predux<Packet8bf>(const Packet8bf& a) {
1506 return static_cast<bfloat16>(predux<Packet8f>(Bf16ToF32(a)));
1507}
1508
1509template<> EIGEN_STRONG_INLINE bfloat16 predux_max<Packet8bf>(const Packet8bf& a) {
1510 return static_cast<bfloat16>(predux_max<Packet8f>(Bf16ToF32(a)));
1511}
1512
1513template<> EIGEN_STRONG_INLINE bfloat16 predux_min<Packet8bf>(const Packet8bf& a) {
1514 return static_cast<bfloat16>(predux_min<Packet8f>(Bf16ToF32(a)));
1515}
1516
1517template<> EIGEN_STRONG_INLINE bfloat16 predux_mul<Packet8bf>(const Packet8bf& a) {
1518 return static_cast<bfloat16>(predux_mul<Packet8f>(Bf16ToF32(a)));
1519}
1520
1521template<> EIGEN_STRONG_INLINE Packet8bf preverse(const Packet8bf& a)
1522{
1523 __m128i m = _mm_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);
1524 return _mm_shuffle_epi8(a,m);
1525}
1526
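// In-register 8x8 transpose in three interleave stages (16-, 32-, then 64-bit
// unpacks), the same scheme as the Packet8h transpose above but writing the
// final unpacks straight back into the kernel.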
1527EIGEN_STRONG_INLINE void
1528ptranspose(PacketBlock<Packet8bf,8>& kernel) {
1529 __m128i a = kernel.packet[0];
1530 __m128i b = kernel.packet[1];
1531 __m128i c = kernel.packet[2];
1532 __m128i d = kernel.packet[3];
1533 __m128i e = kernel.packet[4];
1534 __m128i f = kernel.packet[5];
1535 __m128i g = kernel.packet[6];
1536 __m128i h = kernel.packet[7];
1537
1538 __m128i a03b03 = _mm_unpacklo_epi16(a, b);
1539 __m128i c03d03 = _mm_unpacklo_epi16(c, d);
1540 __m128i e03f03 = _mm_unpacklo_epi16(e, f);
1541 __m128i g03h03 = _mm_unpacklo_epi16(g, h);
1542 __m128i a47b47 = _mm_unpackhi_epi16(a, b);
1543 __m128i c47d47 = _mm_unpackhi_epi16(c, d);
1544 __m128i e47f47 = _mm_unpackhi_epi16(e, f);
1545 __m128i g47h47 = _mm_unpackhi_epi16(g, h);
1546
1547 __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
1548 __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
1549 __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
1550 __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
1551 __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
1552 __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
1553 __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
1554 __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);
1555
1556 kernel.packet[0] = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
1557 kernel.packet[1] = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
1558 kernel.packet[2] = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
1559 kernel.packet[3] = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
1560 kernel.packet[4] = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
1561 kernel.packet[5] = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
1562 kernel.packet[6] = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
1563 kernel.packet[7] = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);
1564}
1565
1566EIGEN_STRONG_INLINE void
1567ptranspose(PacketBlock<Packet8bf,4>& kernel) {
1568 __m128i a = kernel.packet[0];
1569 __m128i b = kernel.packet[1];
1570 __m128i c = kernel.packet[2];
1571 __m128i d = kernel.packet[3];
1572
1573 __m128i ab_03 = _mm_unpacklo_epi16(a, b);
1574 __m128i cd_03 = _mm_unpacklo_epi16(c, d);
1575 __m128i ab_47 = _mm_unpackhi_epi16(a, b);
1576 __m128i cd_47 = _mm_unpackhi_epi16(c, d);
1577
1578 kernel.packet[0] = _mm_unpacklo_epi32(ab_03, cd_03);
1579 kernel.packet[1] = _mm_unpackhi_epi32(ab_03, cd_03);
1580 kernel.packet[2] = _mm_unpacklo_epi32(ab_47, cd_47);
1581 kernel.packet[3] = _mm_unpackhi_epi32(ab_47, cd_47);
1582}
1583
1584} // end namespace internal
1585
1586} // end namespace Eigen
1587
1588#endif // EIGEN_PACKET_MATH_AVX_H