#ifndef EIGEN_PACKET_MATH_AVX_H
#define EIGEN_PACKET_MATH_AVX_H

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#if !defined(EIGEN_VECTORIZE_AVX512) && !defined(EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS)
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
#endif

#ifdef EIGEN_VECTORIZE_FMA
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif
#endif
namespace Eigen {

namespace internal {

typedef __m256  Packet8f;
typedef __m256i Packet8i;
typedef __m256d Packet4d;
typedef eigen_packet_wrapper<__m128i, 2> Packet8h;
typedef eigen_packet_wrapper<__m128i, 3> Packet8bf;

#define _EIGEN_DECLARE_CONST_Packet8f(NAME,X) \
  const Packet8f p8f_##NAME = pset1<Packet8f>(X)

#define _EIGEN_DECLARE_CONST_Packet4d(NAME,X) \
  const Packet4d p4d_##NAME = pset1<Packet4d>(X)

#define _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(NAME,X) \
  const Packet8f p8f_##NAME = _mm256_castsi256_ps(pset1<Packet8i>(X))

#define _EIGEN_DECLARE_CONST_Packet8i(NAME,X) \
  const Packet8i p8i_##NAME = pset1<Packet8i>(X)
#ifndef EIGEN_VECTORIZE_AVX512
template<> struct packet_traits<float> : default_packet_traits
{
  typedef Packet8f type;
  typedef Packet4f half;
  enum {
    // ...
    HasSin  = EIGEN_FAST_MATH,
    HasCos  = EIGEN_FAST_MATH,
    // ...
    HasTanh = EIGEN_FAST_MATH,
    HasErf  = EIGEN_FAST_MATH,
    // ...
  };
};

// The following two specializations (scalar types inferred from the Packet8h
// and Packet8bf typedefs above) gate the same transcendentals on EIGEN_FAST_MATH.
template<> struct packet_traits<Eigen::half> : default_packet_traits
{
  typedef Packet8h type;
  typedef Packet8h half;
  enum {
    // ...
    HasSin  = EIGEN_FAST_MATH,
    HasCos  = EIGEN_FAST_MATH,
    // ...
    HasTanh = EIGEN_FAST_MATH,
    HasErf  = EIGEN_FAST_MATH,
    // ...
  };
};

template<> struct packet_traits<bfloat16> : default_packet_traits
{
  typedef Packet8bf type;
  typedef Packet8bf half;
  enum {
    // ...
    HasSin  = EIGEN_FAST_MATH,
    HasCos  = EIGEN_FAST_MATH,
    // ...
    HasTanh = EIGEN_FAST_MATH,
    HasErf  = EIGEN_FAST_MATH,
    // ...
  };
};
#endif
template<> struct unpacket_traits<Packet8f> {
  typedef float     type;
  typedef Packet4f  half;
  typedef Packet8i  integer_packet;
  typedef uint8_t   mask_t;
  enum {size=8, alignment=Aligned32, vectorizable=true, masked_load_available=true, masked_store_available=true};
};
template<> struct unpacket_traits<Packet4d> {
  typedef double    type;
  typedef Packet2d  half;
  enum {size=4, alignment=Aligned32, vectorizable=true, masked_load_available=false, masked_store_available=false};
};
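// Helper for the Packet8h/Packet8bf comparisons further below: the float
// comparisons produce eight all-ones/all-zeros 32-bit lanes, which this packs
// (with signed saturation, so 0x00000000/0xffffffff map to 0x0000/0xffff)
// into eight 16-bit lanes.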
EIGEN_STRONG_INLINE __m128i Pack16To8(Packet8f rf) {
  return _mm_packs_epi32(_mm256_extractf128_si256(_mm256_castps_si256(rf), 0),
                         _mm256_extractf128_si256(_mm256_castps_si256(rf), 1));
}
template<> EIGEN_STRONG_INLINE Packet8f pset1<Packet8f>(const float&  from) { return _mm256_set1_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d pset1<Packet4d>(const double& from) { return _mm256_set1_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i pset1<Packet8i>(const int&    from) { return _mm256_set1_epi32(from); }

template<> EIGEN_STRONG_INLINE Packet8f pset1frombits<Packet8f>(unsigned int from) { return _mm256_castsi256_ps(pset1<Packet8i>(from)); }
template<> EIGEN_STRONG_INLINE Packet4d pset1frombits<Packet4d>(uint64_t from) { return _mm256_castsi256_pd(_mm256_set1_epi64x(from)); }

template<> EIGEN_STRONG_INLINE Packet8f pzero(const Packet8f& /*a*/) { return _mm256_setzero_ps(); }
template<> EIGEN_STRONG_INLINE Packet4d pzero(const Packet4d& /*a*/) { return _mm256_setzero_pd(); }
template<> EIGEN_STRONG_INLINE Packet8i pzero(const Packet8i& /*a*/) { return _mm256_setzero_si256(); }
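// peven_mask returns all-ones in the even-indexed lanes and zeros in the odd
// ones. Note that _mm256_set_epi32 lists its arguments from the highest lane
// down to lane 0, so the trailing -1 below lands in lane 0; for Packet4d each
// 64-bit lane is built from two 32-bit halves.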
template<> EIGEN_STRONG_INLINE Packet8f peven_mask(const Packet8f& /*a*/) { return _mm256_castsi256_ps(_mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1)); }
template<> EIGEN_STRONG_INLINE Packet8i peven_mask(const Packet8i& /*a*/) { return _mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1); }
template<> EIGEN_STRONG_INLINE Packet4d peven_mask(const Packet4d& /*a*/) { return _mm256_castsi256_pd(_mm256_set_epi32(0, 0, -1, -1, 0, 0, -1, -1)); }

template<> EIGEN_STRONG_INLINE Packet8f pload1<Packet8f>(const float*  from) { return _mm256_broadcast_ss(from); }
template<> EIGEN_STRONG_INLINE Packet4d pload1<Packet4d>(const double* from) { return _mm256_broadcast_sd(from); }

template<> EIGEN_STRONG_INLINE Packet8f plset<Packet8f>(const float& a) { return _mm256_add_ps(_mm256_set1_ps(a), _mm256_set_ps(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)); }
template<> EIGEN_STRONG_INLINE Packet4d plset<Packet4d>(const double& a) { return _mm256_add_pd(_mm256_set1_pd(a), _mm256_set_pd(3.0,2.0,1.0,0.0)); }
template<> EIGEN_STRONG_INLINE Packet8f padd<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_add_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d padd<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_add_pd(a,b); }
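// Without AVX2 there are no 256-bit integer ALU instructions, so the integer
// routines below split the packet into its two 128-bit halves, apply the SSE
// instruction to each half, and stitch the results back together.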
template<> EIGEN_STRONG_INLINE Packet8i padd<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_add_epi32(a,b);
#else
  __m128i lo = _mm_add_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_add_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template<> EIGEN_STRONG_INLINE Packet8f psub<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_sub_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d psub<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_sub_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8i psub<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_sub_epi32(a,b);
#else
  __m128i lo = _mm_sub_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_sub_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template<> EIGEN_STRONG_INLINE Packet8f pnegate(const Packet8f& a)
{
  // Flip the sign bit of each lane.
  const Packet8f mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x80000000));
  return _mm256_xor_ps(a, mask);
}
template<> EIGEN_STRONG_INLINE Packet4d pnegate(const Packet4d& a)
{
  const Packet4d mask = _mm256_castsi256_pd(_mm256_set1_epi64x(0x8000000000000000ULL));
  return _mm256_xor_pd(a, mask);
}

template<> EIGEN_STRONG_INLINE Packet8f pconj(const Packet8f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4d pconj(const Packet4d& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet8i pconj(const Packet8i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet8f pmul<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_mul_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pmul<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_mul_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8i pmul<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_mullo_epi32(a,b);
#else
  const __m128i lo = _mm_mullo_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  const __m128i hi = _mm_mullo_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template<> EIGEN_STRONG_INLINE Packet8f pdiv<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_div_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pdiv<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_div_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8i pdiv<Packet8i>(const Packet8i& /*a*/, const Packet8i& /*b*/)
{
  eigen_assert(false && "packet integer division is not supported by AVX");
  return pset1<Packet8i>(0);
}
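// On GCC < 8 (strict) and Clang, _mm256_fmadd_ps may be lowered to the
// vfmadd132/vfmadd213 forms plus extra register moves (and even spills on some
// Clang versions), so inline asm is used below to force the accumulating
// vfmadd231 form, the common pattern when summing products.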
#ifdef EIGEN_VECTORIZE_FMA
template<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
#if ( (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<80) || (EIGEN_COMP_CLANG) )
  Packet8f res = c;
  __asm__("vfmadd231ps %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  return _mm256_fmadd_ps(a,b,c);
#endif
}
template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
#if ( (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<80) || (EIGEN_COMP_CLANG) )
  Packet4d res = c;
  __asm__("vfmadd231pd %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  return _mm256_fmadd_pd(a,b,c);
#endif
}
#endif
template<> EIGEN_STRONG_INLINE Packet8f pcmp_le(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_LE_OQ); }
template<> EIGEN_STRONG_INLINE Packet8f pcmp_lt(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_LT_OQ); }
template<> EIGEN_STRONG_INLINE Packet8f pcmp_lt_or_nan(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a, b, _CMP_NGE_UQ); }
template<> EIGEN_STRONG_INLINE Packet8f pcmp_eq(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_EQ_OQ); }

template<> EIGEN_STRONG_INLINE Packet4d pcmp_le(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a,b,_CMP_LE_OQ); }
template<> EIGEN_STRONG_INLINE Packet4d pcmp_lt(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a,b,_CMP_LT_OQ); }
template<> EIGEN_STRONG_INLINE Packet4d pcmp_lt_or_nan(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a, b, _CMP_NGE_UQ); }
template<> EIGEN_STRONG_INLINE Packet4d pcmp_eq(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a,b,_CMP_EQ_OQ); }

template<> EIGEN_STRONG_INLINE Packet8i pcmp_eq(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_cmpeq_epi32(a,b);
#else
  __m128i lo = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}
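// GCC before 6.3 may flip the argument order in calls to _mm256_min_ps /
// _mm256_max_ps (see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867),
// hence the inline asm on those compilers. In the intrinsic path the
// arguments are deliberately swapped so that a NaN in `a` propagates,
// matching the behavior of std::min/std::max.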
template<> EIGEN_STRONG_INLINE Packet8f pmin<Packet8f>(const Packet8f& a, const Packet8f& b) {
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
  Packet8f res;
  asm("vminps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  return _mm256_min_ps(b,a);
#endif
}
template<> EIGEN_STRONG_INLINE Packet4d pmin<Packet4d>(const Packet4d& a, const Packet4d& b) {
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
  Packet4d res;
  asm("vminpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  return _mm256_min_pd(b,a);
#endif
}

template<> EIGEN_STRONG_INLINE Packet8f pmax<Packet8f>(const Packet8f& a, const Packet8f& b) {
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
  Packet8f res;
  asm("vmaxps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  return _mm256_max_ps(b,a);
#endif
}
template<> EIGEN_STRONG_INLINE Packet4d pmax<Packet4d>(const Packet4d& a, const Packet4d& b) {
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
  Packet4d res;
  asm("vmaxpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  return _mm256_max_pd(b,a);
#endif
}
template<>
EIGEN_STRONG_INLINE Packet8f pmin<PropagateNumbers, Packet8f>(const Packet8f& a, const Packet8f& b) {
  return pminmax_propagate_numbers(a, b, pmin<Packet8f>);
}
template<>
EIGEN_STRONG_INLINE Packet4d pmin<PropagateNumbers, Packet4d>(const Packet4d& a, const Packet4d& b) {
  return pminmax_propagate_numbers(a, b, pmin<Packet4d>);
}
template<>
EIGEN_STRONG_INLINE Packet8f pmax<PropagateNumbers, Packet8f>(const Packet8f& a, const Packet8f& b) {
  return pminmax_propagate_numbers(a, b, pmax<Packet8f>);
}
template<>
EIGEN_STRONG_INLINE Packet4d pmax<PropagateNumbers, Packet4d>(const Packet4d& a, const Packet4d& b) {
  return pminmax_propagate_numbers(a, b, pmax<Packet4d>);
}
template<>
EIGEN_STRONG_INLINE Packet8f pmin<PropagateNaN, Packet8f>(const Packet8f& a, const Packet8f& b) {
  return pminmax_propagate_nan(a, b, pmin<Packet8f>);
}
template<>
EIGEN_STRONG_INLINE Packet4d pmin<PropagateNaN, Packet4d>(const Packet4d& a, const Packet4d& b) {
  return pminmax_propagate_nan(a, b, pmin<Packet4d>);
}
template<>
EIGEN_STRONG_INLINE Packet8f pmax<PropagateNaN, Packet8f>(const Packet8f& a, const Packet8f& b) {
  return pminmax_propagate_nan(a, b, pmax<Packet8f>);
}
template<>
EIGEN_STRONG_INLINE Packet4d pmax<PropagateNaN, Packet4d>(const Packet4d& a, const Packet4d& b) {
  return pminmax_propagate_nan(a, b, pmax<Packet4d>);
}
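// For example, pmin<PropagateNumbers>(NaN, 2.0f) yields 2.0f (the NaN is
// dropped in favor of the numeric operand), whereas pmin<PropagateNaN>(NaN,
// 2.0f) yields NaN.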
template<> EIGEN_STRONG_INLINE Packet8f print<Packet8f>(const Packet8f& a) { return _mm256_round_ps(a, _MM_FROUND_CUR_DIRECTION); }
template<> EIGEN_STRONG_INLINE Packet4d print<Packet4d>(const Packet4d& a) { return _mm256_round_pd(a, _MM_FROUND_CUR_DIRECTION); }

template<> EIGEN_STRONG_INLINE Packet8f pceil<Packet8f>(const Packet8f& a) { return _mm256_ceil_ps(a); }
template<> EIGEN_STRONG_INLINE Packet4d pceil<Packet4d>(const Packet4d& a) { return _mm256_ceil_pd(a); }

template<> EIGEN_STRONG_INLINE Packet8f pfloor<Packet8f>(const Packet8f& a) { return _mm256_floor_ps(a); }
template<> EIGEN_STRONG_INLINE Packet4d pfloor<Packet4d>(const Packet4d& a) { return _mm256_floor_pd(a); }
template<> EIGEN_STRONG_INLINE Packet8i ptrue<Packet8i>(const Packet8i& a) {
#ifdef EIGEN_VECTORIZE_AVX2
  // vpcmpeqd has lower latency than the more general vcmpps
  return _mm256_cmpeq_epi32(a,a);
#else
  const __m256 b = _mm256_castsi256_ps(a);
  return _mm256_castps_si256(_mm256_cmp_ps(b,b,_CMP_TRUE_UQ));
#endif
}

template<> EIGEN_STRONG_INLINE Packet8f ptrue<Packet8f>(const Packet8f& a) {
#ifdef EIGEN_VECTORIZE_AVX2
  // vpcmpeqd has lower latency than the more general vcmpps
  const __m256i b = _mm256_castps_si256(a);
  return _mm256_castsi256_ps(_mm256_cmpeq_epi32(b,b));
#else
  return _mm256_cmp_ps(a,a,_CMP_TRUE_UQ);
#endif
}

template<> EIGEN_STRONG_INLINE Packet4d ptrue<Packet4d>(const Packet4d& a) {
#ifdef EIGEN_VECTORIZE_AVX2
  // vpcmpeqq has lower latency than the more general vcmppd
  const __m256i b = _mm256_castpd_si256(a);
  return _mm256_castsi256_pd(_mm256_cmpeq_epi64(b,b));
#else
  return _mm256_cmp_pd(a,a,_CMP_TRUE_UQ);
#endif
}
template<> EIGEN_STRONG_INLINE Packet8f pand<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_and_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pand<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_and_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8i pand<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_and_si256(a,b);
#else
  return _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
#endif
}

template<> EIGEN_STRONG_INLINE Packet8f por<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_or_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d por<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_or_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8i por<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_or_si256(a,b);
#else
  return _mm256_castps_si256(_mm256_or_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
#endif
}

template<> EIGEN_STRONG_INLINE Packet8f pxor<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_xor_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pxor<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_xor_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8i pxor<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_xor_si256(a,b);
#else
  return _mm256_castps_si256(_mm256_xor_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
#endif
}

// pandnot(a,b) computes a & ~b; the andnot intrinsics negate their *first*
// operand, hence the swapped arguments.
template<> EIGEN_STRONG_INLINE Packet8f pandnot<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_andnot_ps(b,a); }
template<> EIGEN_STRONG_INLINE Packet4d pandnot<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_andnot_pd(b,a); }
template<> EIGEN_STRONG_INLINE Packet8i pandnot<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_andnot_si256(b,a);
#else
  return _mm256_castps_si256(_mm256_andnot_ps(_mm256_castsi256_ps(b),_mm256_castsi256_ps(a)));
#endif
}
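// pround rounds half away from zero, a mode _mm256_round_ps does not provide
// directly: copy the sign of `a` onto the largest float strictly below 0.5,
// add it, and truncate toward zero. For a = 2.5, the sum 2.5 + 0.49999997f
// rounds (in float arithmetic) to 3.0f, and truncation then returns 3.0f;
// values below the halfway point never reach the next integer.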
template<> EIGEN_STRONG_INLINE Packet8f pround<Packet8f>(const Packet8f& a)
{
  const Packet8f mask = pset1frombits<Packet8f>(static_cast<numext::uint32_t>(0x80000000u));
  const Packet8f prev0dot5 = pset1frombits<Packet8f>(static_cast<numext::uint32_t>(0x3EFFFFFFu));
  return _mm256_round_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
}
template<> EIGEN_STRONG_INLINE Packet4d pround<Packet4d>(const Packet4d& a)
{
  const Packet4d mask = pset1frombits<Packet4d>(static_cast<numext::uint64_t>(0x8000000000000000ull));
  const Packet4d prev0dot5 = pset1frombits<Packet4d>(static_cast<numext::uint64_t>(0x3FDFFFFFFFFFFFFFull));
  return _mm256_round_pd(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
}
template<> EIGEN_STRONG_INLINE Packet8f pselect<Packet8f>(const Packet8f& mask, const Packet8f& a, const Packet8f& b)
{ return _mm256_blendv_ps(b,a,mask); }
template<> EIGEN_STRONG_INLINE Packet4d pselect<Packet4d>(const Packet4d& mask, const Packet4d& a, const Packet4d& b)
{ return _mm256_blendv_pd(b,a,mask); }
template<int N> EIGEN_STRONG_INLINE Packet8i parithmetic_shift_right(Packet8i a) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_srai_epi32(a, N);
#else
  __m128i lo = _mm_srai_epi32(_mm256_extractf128_si256(a, 0), N);
  __m128i hi = _mm_srai_epi32(_mm256_extractf128_si256(a, 1), N);
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template<int N> EIGEN_STRONG_INLINE Packet8i plogical_shift_right(Packet8i a) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_srli_epi32(a, N);
#else
  __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(a, 0), N);
  __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(a, 1), N);
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template<int N> EIGEN_STRONG_INLINE Packet8i plogical_shift_left(Packet8i a) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_slli_epi32(a, N);
#else
  __m128i lo = _mm_slli_epi32(_mm256_extractf128_si256(a, 0), N);
  __m128i hi = _mm_slli_epi32(_mm256_extractf128_si256(a, 1), N);
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}
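// The shift amount is a template parameter so that it is a compile-time
// immediate, as the srai/srli/slli intrinsics require. For example,
// parithmetic_shift_right<31>(x) broadcasts each lane's sign bit into a
// full-lane mask.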
template<> EIGEN_STRONG_INLINE Packet8f pload<Packet8f>(const float*  from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d pload<Packet4d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i pload<Packet8i>(const int*    from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from)); }

template<> EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float*  from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d ploadu<Packet4d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i ploadu<Packet8i>(const int*    from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from)); }
template<> EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float* from, uint8_t umask) {
  Packet8i mask = _mm256_set1_epi8(static_cast<char>(umask));
  const Packet8i bit_mask = _mm256_set_epi32(0xffffff7f, 0xffffffbf, 0xffffffdf, 0xffffffef, 0xfffffff7, 0xfffffffb, 0xfffffffd, 0xfffffffe);
  mask = por<Packet8i>(mask, bit_mask);
  mask = pcmp_eq<Packet8i>(mask, _mm256_set1_epi32(0xffffffff));
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_maskload_ps(from, mask);
}
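// How the mask expansion above works: umask is replicated into every byte, so
// 32-bit lane i holds four copies of it. OR-ing with ~(1 << i) (the bit_mask
// constant, listed from lane 7 down to lane 0) leaves lane i all-ones iff bit
// i of umask is set, and the compare against 0xffffffff turns that into a
// full per-lane load mask. E.g. umask = 0b00000101 loads only lanes 0 and 2.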
// Loads 4 floats and returns the packet {a0, a0, a1, a1, a2, a2, a3, a3}.
template<> EIGEN_STRONG_INLINE Packet8f ploaddup<Packet8f>(const float* from)
{
  // _mm256_insertf128_ps is very slow on Haswell, thus:
  Packet8f tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);
  // mimic an "inplace" permutation of the lower 128bits using a blend
  tmp = _mm256_blend_ps(tmp,_mm256_castps128_ps256(_mm_permute_ps( _mm256_castps256_ps128(tmp), _MM_SHUFFLE(1,0,1,0))), 15);
  // then we can perform a consistent permutation on the global register to get everything in shape:
  return _mm256_permute_ps(tmp, _MM_SHUFFLE(3,3,2,2));
}
// Loads 2 doubles and returns the packet {a0, a0, a1, a1}.
template<> EIGEN_STRONG_INLINE Packet4d ploaddup<Packet4d>(const double* from)
{
  Packet4d tmp = _mm256_broadcast_pd((const __m128d*)(const void*)from);
  return _mm256_permute_pd(tmp, 3<<2);
}

template<> EIGEN_STRONG_INLINE Packet8f ploadquad<Packet8f>(const float* from)
{
  Packet8f tmp = _mm256_castps128_ps256(_mm_broadcast_ss(from));
  return _mm256_insertf128_ps(tmp, _mm_broadcast_ss(from+1), 1);
}
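// ploadquad fills each 128-bit lane with one broadcast scalar, giving
// {a0,a0,a0,a0, a1,a1,a1,a1}, so no cross-lane shuffle is needed; together
// with ploaddup this covers the element-replication load patterns used
// elsewhere in Eigen.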
template<> EIGEN_STRONG_INLINE void pstore<float> (float*  to, const Packet8f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>   (int*    to, const Packet8i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }

template<> EIGEN_STRONG_INLINE void pstoreu<float> (float*  to, const Packet8f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>   (int*    to, const Packet8i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }
template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet8f& from, uint8_t umask) {
#ifdef EIGEN_VECTORIZE_AVX512
  __mmask16 mask = static_cast<__mmask16>(umask & 0x00FF);
  EIGEN_DEBUG_UNALIGNED_STORE _mm512_mask_storeu_ps(to, mask, _mm512_castps256_ps512(from));
#else
  Packet8i mask = _mm256_set1_epi8(static_cast<char>(umask));
  const Packet8i bit_mask = _mm256_set_epi32(0x7f7f7f7f, 0xbfbfbfbf, 0xdfdfdfdf, 0xefefefef, 0xf7f7f7f7, 0xfbfbfbfb, 0xfdfdfdfd, 0xfefefefe);
  mask = por<Packet8i>(mask, bit_mask);
  mask = pcmp_eq<Packet8i>(mask, _mm256_set1_epi32(0xffffffff));
#if EIGEN_COMP_MSVC  // guard reconstructed: the byte-wise maskmov path below works around an MSVC issue with _mm256_maskstore_ps
  const __m256i ifrom = _mm256_castps_si256(from);
  EIGEN_DEBUG_UNALIGNED_STORE _mm_maskmoveu_si128(_mm256_extractf128_si256(ifrom, 0), _mm256_extractf128_si256(mask, 0), reinterpret_cast<char*>(to));
  EIGEN_DEBUG_UNALIGNED_STORE _mm_maskmoveu_si128(_mm256_extractf128_si256(ifrom, 1), _mm256_extractf128_si256(mask, 1), reinterpret_cast<char*>(to + 4));
#else
  EIGEN_DEBUG_UNALIGNED_STORE _mm256_maskstore_ps(to, mask, from);
#endif
#endif
}
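// The masked store mirrors the masked load: the same byte-replication trick
// expands umask into a full per-lane mask. Only the AVX-512 path can use a
// real mask register; the others fall back to mask-move or maskstore
// instructions.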
template<> EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, Index stride)
{
  return _mm256_set_ps(from[7*stride], from[6*stride], from[5*stride], from[4*stride],
                       from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline Packet4d pgather<double, Packet4d>(const double* from, Index stride)
{
  return _mm256_set_pd(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}

template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, const Packet8f& from, Index stride)
{
  __m128 low = _mm256_extractf128_ps(from, 0);
  to[stride*0] = _mm_cvtss_f32(low);
  to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1));
  to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 2));
  to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3));

  __m128 high = _mm256_extractf128_ps(from, 1);
  to[stride*4] = _mm_cvtss_f32(high);
  to[stride*5] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1));
  to[stride*6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2));
  to[stride*7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet4d>(double* to, const Packet4d& from, Index stride)
{
  __m128d low = _mm256_extractf128_pd(from, 0);
  to[stride*0] = _mm_cvtsd_f64(low);
  to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1));
  __m128d high = _mm256_extractf128_pd(from, 1);
  to[stride*2] = _mm_cvtsd_f64(high);
  to[stride*3] = _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1));
}
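// Strided gather/scatter is synthesized from scalar accesses: plain AVX has
// no gather instruction (hardware gathers arrive with AVX2), and scatter only
// exists with AVX-512.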
template<> EIGEN_STRONG_INLINE void pstore1<Packet8f>(float* to, const float& a)
{
  Packet8f pa = pset1<Packet8f>(a);
  pstore(to, pa);
}
template<> EIGEN_STRONG_INLINE void pstore1<Packet4d>(double* to, const double& a)
{
  Packet4d pa = pset1<Packet4d>(a);
  pstore(to, pa);
}
template<> EIGEN_STRONG_INLINE void pstore1<Packet8i>(int* to, const int& a)
{
  Packet8i pa = pset1<Packet8i>(a);
  pstore(to, pa);
}
#ifndef EIGEN_VECTORIZE_AVX512
template<> EIGEN_STRONG_INLINE void prefetch<float> (const float*  addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<int>   (const int*    addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
#endif
template<> EIGEN_STRONG_INLINE float pfirst<Packet8f>(const Packet8f& a) {
  return _mm_cvtss_f32(_mm256_castps256_ps128(a));
}
template<> EIGEN_STRONG_INLINE double pfirst<Packet4d>(const Packet4d& a) {
  return _mm_cvtsd_f64(_mm256_castpd256_pd128(a));
}
template<> EIGEN_STRONG_INLINE int pfirst<Packet8i>(const Packet8i& a) {
  return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
}
template<> EIGEN_STRONG_INLINE Packet8f preverse(const Packet8f& a)
{
  __m256 tmp = _mm256_shuffle_ps(a,a,0x1b);
  return _mm256_permute2f128_ps(tmp, tmp, 1);
}
template<> EIGEN_STRONG_INLINE Packet4d preverse(const Packet4d& a)
{
  __m256d tmp = _mm256_shuffle_pd(a,a,5);
  return _mm256_permute2f128_pd(tmp, tmp, 1);
#if 0
  // Alternative (swap the 128-bit halves first, then swap within each half),
  // kept for reference/benchmarking:
  __m256d swap_halves = _mm256_permute2f128_pd(a,a,1);
  return _mm256_permute_pd(swap_halves,5);
#endif
}
template<> EIGEN_STRONG_INLINE Packet8f pabs(const Packet8f& a)
{
  const Packet8f mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
  return _mm256_and_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4d pabs(const Packet4d& a)
{
  const Packet4d mask = _mm256_castsi256_pd(_mm256_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
  return _mm256_and_pd(a,mask);
}
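// pfrexp for doubles needs the biased exponent as a Packet4d. There is no
// 64-bit integer-to-double conversion before AVX-512, so the helper below
// shifts the exponent field down to the low 32 bits of each 64-bit lane,
// gathers those words with a 32-bit swizzle, and converts them with
// _mm_cvtepi32_pd.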
template<> EIGEN_STRONG_INLINE Packet8f pfrexp<Packet8f>(const Packet8f& a, Packet8f& exponent) {
  return pfrexp_generic(a,exponent);
}

// Extract exponent without existence of Packet4l.
template<>
EIGEN_STRONG_INLINE
Packet4d pfrexp_generic_get_biased_exponent(const Packet4d& a) {
  const Packet4d cst_exp_mask = pset1frombits<Packet4d>(static_cast<uint64_t>(0x7ff0000000000000ull));
  __m256i a_expo = _mm256_castpd_si256(pand(a, cst_exp_mask));
#ifdef EIGEN_VECTORIZE_AVX2
  a_expo = _mm256_srli_epi64(a_expo, 52);
  __m128i lo = _mm256_extractf128_si256(a_expo, 0);
  __m128i hi = _mm256_extractf128_si256(a_expo, 1);
#else
  __m128i lo = _mm256_extractf128_si256(a_expo, 0);
  __m128i hi = _mm256_extractf128_si256(a_expo, 1);
  lo = _mm_srli_epi64(lo, 52);
  hi = _mm_srli_epi64(hi, 52);
#endif
  Packet2d exponent_lo = _mm_cvtepi32_pd(vec4i_swizzle1(lo, 0, 2, 1, 3));
  Packet2d exponent_hi = _mm_cvtepi32_pd(vec4i_swizzle1(hi, 0, 2, 1, 3));
  Packet4d exponent = _mm256_insertf128_pd(_mm256_setzero_pd(), exponent_lo, 0);
  exponent = _mm256_insertf128_pd(exponent, exponent_hi, 1);
  return exponent;
}

template<> EIGEN_STRONG_INLINE Packet4d pfrexp<Packet4d>(const Packet4d& a, Packet4d& exponent) {
  return pfrexp_generic(a, exponent);
}
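// pldexp multiplies a by 2^exponent. For doubles the exponent is first
// clamped to [-2099, 2099] (wide enough to carry any double from denormal to
// overflow), then 2^e is applied as 2^b * 2^b * 2^b * 2^(e-3b) with b = e/4,
// because a single IEEE double cannot represent 2^e over the whole clamped
// range.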
template<> EIGEN_STRONG_INLINE Packet8f pldexp<Packet8f>(const Packet8f& a, const Packet8f& exponent) {
  return pldexp_generic(a, exponent);
}

template<> EIGEN_STRONG_INLINE Packet4d pldexp<Packet4d>(const Packet4d& a, const Packet4d& exponent) {
  // Clamp exponent to [-2099, 2099]
  const Packet4d max_exponent = pset1<Packet4d>(2099.0);
  const Packet4i e = _mm256_cvtpd_epi32(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent));

  // Split 2^e into four factors and multiply.
  const Packet4i bias = pset1<Packet4i>(1023);
  Packet4i b = parithmetic_shift_right<2>(e);  // floor(e/4)

  // 2^b: place the biased exponent into the high 32 bits of each 64-bit lane.
  Packet4i hi = vec4i_swizzle1(padd(b, bias), 0, 2, 1, 3);
  Packet4i lo = _mm_slli_epi64(hi, 52);
  hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52);
  Packet4d c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1));
  Packet4d out = pmul(pmul(pmul(a, c), c), c);  // a * 2^(3b)

  // 2^(e - 3b)
  b = psub(psub(psub(e, b), b), b);
  hi = vec4i_swizzle1(padd(b, bias), 0, 2, 1, 3);
  lo = _mm_slli_epi64(hi, 52);
  hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52);
  c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1));
  out = pmul(out, c);  // a * 2^e
  return out;
}
template<> EIGEN_STRONG_INLINE float predux<Packet8f>(const Packet8f& a)
{
  return predux(Packet4f(_mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1))));
}
template<> EIGEN_STRONG_INLINE double predux<Packet4d>(const Packet4d& a)
{
  return predux(Packet2d(_mm_add_pd(_mm256_castpd256_pd128(a),_mm256_extractf128_pd(a,1))));
}

template<> EIGEN_STRONG_INLINE Packet4f predux_half_dowto4<Packet8f>(const Packet8f& a)
{
  return _mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1));
}

template<> EIGEN_STRONG_INLINE float predux_mul<Packet8f>(const Packet8f& a)
{
  Packet8f tmp;
  tmp = _mm256_mul_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE double predux_mul<Packet4d>(const Packet4d& a)
{
  Packet4d tmp;
  tmp = _mm256_mul_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_mul_pd(tmp, _mm256_shuffle_pd(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE float predux_min<Packet8f>(const Packet8f& a)
{
  Packet8f tmp = _mm256_min_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE double predux_min<Packet4d>(const Packet4d& a)
{
  Packet4d tmp = _mm256_min_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_min_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
}

template<> EIGEN_STRONG_INLINE float predux_max<Packet8f>(const Packet8f& a)
{
  Packet8f tmp = _mm256_max_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE double predux_max<Packet4d>(const Packet4d& a)
{
  Packet4d tmp = _mm256_max_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_max_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
}

template<> EIGEN_STRONG_INLINE bool predux_any(const Packet8f& x)
{
  return _mm256_movemask_ps(x)!=0;
}
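// The 8x8 float transpose below proceeds in three stages: unpacklo/unpackhi
// interleave pairs of rows, _mm256_shuffle_ps gathers 4-element columns
// within each 128-bit lane, and _mm256_permute2f128_ps finally exchanges the
// lanes, the only cross-lane step AVX provides for this.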
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet8f,8>& kernel) {
  __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T4 = _mm256_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
  __m256 T5 = _mm256_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
  __m256 T6 = _mm256_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
  __m256 T7 = _mm256_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
  __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
  __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
  __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
  __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));
  __m256 S4 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(1,0,1,0));
  __m256 S5 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(3,2,3,2));
  __m256 S6 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(1,0,1,0));
  __m256 S7 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(3,2,3,2));
  kernel.packet[0] = _mm256_permute2f128_ps(S0, S4, 0x20);
  kernel.packet[1] = _mm256_permute2f128_ps(S1, S5, 0x20);
  kernel.packet[2] = _mm256_permute2f128_ps(S2, S6, 0x20);
  kernel.packet[3] = _mm256_permute2f128_ps(S3, S7, 0x20);
  kernel.packet[4] = _mm256_permute2f128_ps(S0, S4, 0x31);
  kernel.packet[5] = _mm256_permute2f128_ps(S1, S5, 0x31);
  kernel.packet[6] = _mm256_permute2f128_ps(S2, S6, 0x31);
  kernel.packet[7] = _mm256_permute2f128_ps(S3, S7, 0x31);
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet8f,4>& kernel) {
  __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);

  __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
  __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
  __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
  __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));

  kernel.packet[0] = _mm256_permute2f128_ps(S0, S1, 0x20);
  kernel.packet[1] = _mm256_permute2f128_ps(S2, S3, 0x20);
  kernel.packet[2] = _mm256_permute2f128_ps(S0, S1, 0x31);
  kernel.packet[3] = _mm256_permute2f128_ps(S2, S3, 0x31);
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4d,4>& kernel) {
  __m256d T0 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 15);
  __m256d T1 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
  __m256d T2 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 15);
  __m256d T3 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);

  kernel.packet[1] = _mm256_permute2f128_pd(T0, T2, 32);
  kernel.packet[3] = _mm256_permute2f128_pd(T0, T2, 49);
  kernel.packet[0] = _mm256_permute2f128_pd(T1, T3, 32);
  kernel.packet[2] = _mm256_permute2f128_pd(T1, T3, 49);
}
template<> EIGEN_STRONG_INLINE Packet8f pblend(const Selector<8>& ifPacket, const Packet8f& thenPacket, const Packet8f& elsePacket) {
  const __m256 zero = _mm256_setzero_ps();
  const __m256 select = _mm256_set_ps(ifPacket.select[7], ifPacket.select[6], ifPacket.select[5], ifPacket.select[4], ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m256 false_mask = _mm256_cmp_ps(select, zero, _CMP_EQ_UQ);
  return _mm256_blendv_ps(thenPacket, elsePacket, false_mask);
}
template<> EIGEN_STRONG_INLINE Packet4d pblend(const Selector<4>& ifPacket, const Packet4d& thenPacket, const Packet4d& elsePacket) {
  const __m256d zero = _mm256_setzero_pd();
  const __m256d select = _mm256_set_pd(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m256d false_mask = _mm256_cmp_pd(select, zero, _CMP_EQ_UQ);
  return _mm256_blendv_pd(thenPacket, elsePacket, false_mask);
}
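// ---- Packet8h: eight half-precision floats stored in a __m128i ----
// AVX has no fp16 arithmetic, so every Packet8h operation widens to Packet8f
// via half2float, computes in single precision, and narrows back with
// float2half; only bitwise ops and shuffles act on the raw 16-bit lanes.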
template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet8h>(const Packet8h& from) {
  return numext::bit_cast<Eigen::half>(static_cast<numext::uint16_t>(_mm_extract_epi16(from, 0)));
}

template<> EIGEN_STRONG_INLINE Packet8h pload<Packet8h>(const Eigen::half* from) {
  return _mm_load_si128(reinterpret_cast<const __m128i*>(from));
}

template<> EIGEN_STRONG_INLINE Packet8h ploadu<Packet8h>(const Eigen::half* from) {
  return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
}

template<> EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet8h& from) {
  _mm_store_si128(reinterpret_cast<__m128i*>(to), from);
}

template<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet8h& from) {
  _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from);
}
template<> EIGEN_STRONG_INLINE Packet8h
ploaddup<Packet8h>(const Eigen::half* from) {
  const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
  const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
  const numext::uint16_t c = numext::bit_cast<numext::uint16_t>(from[2]);
  const numext::uint16_t d = numext::bit_cast<numext::uint16_t>(from[3]);
  return _mm_set_epi16(d, d, c, c, b, b, a, a);
}

template<> EIGEN_STRONG_INLINE Packet8h
ploadquad<Packet8h>(const Eigen::half* from) {
  const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
  const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
  return _mm_set_epi16(b, b, b, b, a, a, a, a);
}
template<> EIGEN_STRONG_INLINE Packet8h ptrue(const Packet8h& a) {
  return _mm_cmpeq_epi32(a, a);
}

template<>
EIGEN_STRONG_INLINE Packet8h pabs(const Packet8h& a) {
  const __m128i sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
  return _mm_andnot_si128(sign_mask, a);
}
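// With F16C the half<->float conversions are single instructions; otherwise
// they round-trip through an aligned scratch buffer using Eigen::half's
// scalar conversions.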
EIGEN_STRONG_INLINE Packet8f half2float(const Packet8h& a) {
#ifdef EIGEN_HAS_FP16_C
  return _mm256_cvtph_ps(a);
#else
  EIGEN_ALIGN32 Eigen::half aux[8];
  pstore(aux, a);
  const float f0(aux[0]);
  const float f1(aux[1]);
  const float f2(aux[2]);
  const float f3(aux[3]);
  const float f4(aux[4]);
  const float f5(aux[5]);
  const float f6(aux[6]);
  const float f7(aux[7]);
  return _mm256_set_ps(f7, f6, f5, f4, f3, f2, f1, f0);
#endif
}
EIGEN_STRONG_INLINE Packet8h float2half(const Packet8f& a) {
#ifdef EIGEN_HAS_FP16_C
  return _mm256_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT);
#else
  EIGEN_ALIGN32 float aux[8];
  pstore(aux, a);
  const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[0]));
  const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[1]));
  const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[2]));
  const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[3]));
  const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[4]));
  const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[5]));
  const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[6]));
  const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[7]));
  return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
#endif
}
template<>
EIGEN_STRONG_INLINE Packet8h pmin<Packet8h>(const Packet8h& a, const Packet8h& b) {
  return float2half(pmin<Packet8f>(half2float(a), half2float(b)));
}

template<>
EIGEN_STRONG_INLINE Packet8h pmax<Packet8h>(const Packet8h& a, const Packet8h& b) {
  return float2half(pmax<Packet8f>(half2float(a), half2float(b)));
}

template<>
EIGEN_STRONG_INLINE Packet8h plset<Packet8h>(const half& a) {
  return float2half(plset<Packet8f>(static_cast<float>(a)));
}

template<> EIGEN_STRONG_INLINE Packet8h por(const Packet8h& a, const Packet8h& b) {
  return _mm_or_si128(a,b);
}
template<> EIGEN_STRONG_INLINE Packet8h pxor(const Packet8h& a, const Packet8h& b) {
  return _mm_xor_si128(a,b);
}
template<> EIGEN_STRONG_INLINE Packet8h pand(const Packet8h& a, const Packet8h& b) {
  return _mm_and_si128(a,b);
}
template<> EIGEN_STRONG_INLINE Packet8h pandnot(const Packet8h& a, const Packet8h& b) {
  return _mm_andnot_si128(b,a);
}

template<> EIGEN_STRONG_INLINE Packet8h pselect(const Packet8h& mask, const Packet8h& a, const Packet8h& b) {
  return _mm_blendv_epi8(b, a, mask);
}
template<> EIGEN_STRONG_INLINE Packet8h pround<Packet8h>(const Packet8h& a) {
  return float2half(pround<Packet8f>(half2float(a)));
}

template<> EIGEN_STRONG_INLINE Packet8h print<Packet8h>(const Packet8h& a) {
  return float2half(print<Packet8f>(half2float(a)));
}

template<> EIGEN_STRONG_INLINE Packet8h pceil<Packet8h>(const Packet8h& a) {
  return float2half(pceil<Packet8f>(half2float(a)));
}

template<> EIGEN_STRONG_INLINE Packet8h pfloor<Packet8h>(const Packet8h& a) {
  return float2half(pfloor<Packet8f>(half2float(a)));
}

template<> EIGEN_STRONG_INLINE Packet8h pcmp_eq(const Packet8h& a, const Packet8h& b) {
  return Pack16To8(pcmp_eq(half2float(a), half2float(b)));
}

template<> EIGEN_STRONG_INLINE Packet8h pcmp_le(const Packet8h& a, const Packet8h& b) {
  return Pack16To8(pcmp_le(half2float(a), half2float(b)));
}

template<> EIGEN_STRONG_INLINE Packet8h pcmp_lt(const Packet8h& a, const Packet8h& b) {
  return Pack16To8(pcmp_lt(half2float(a), half2float(b)));
}

template<> EIGEN_STRONG_INLINE Packet8h pcmp_lt_or_nan(const Packet8h& a, const Packet8h& b) {
  return Pack16To8(pcmp_lt_or_nan(half2float(a), half2float(b)));
}

template<> EIGEN_STRONG_INLINE Packet8h pconj(const Packet8h& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet8h pnegate(const Packet8h& a) {
  Packet8h sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
  return _mm_xor_si128(a, sign_mask);
}

template<> EIGEN_STRONG_INLINE Packet8h padd<Packet8h>(const Packet8h& a, const Packet8h& b) {
  Packet8f af = half2float(a);
  Packet8f bf = half2float(b);
  Packet8f rf = padd(af, bf);
  return float2half(rf);
}

template<> EIGEN_STRONG_INLINE Packet8h psub<Packet8h>(const Packet8h& a, const Packet8h& b) {
  Packet8f af = half2float(a);
  Packet8f bf = half2float(b);
  Packet8f rf = psub(af, bf);
  return float2half(rf);
}

template<> EIGEN_STRONG_INLINE Packet8h pmul<Packet8h>(const Packet8h& a, const Packet8h& b) {
  Packet8f af = half2float(a);
  Packet8f bf = half2float(b);
  Packet8f rf = pmul(af, bf);
  return float2half(rf);
}

template<> EIGEN_STRONG_INLINE Packet8h pdiv<Packet8h>(const Packet8h& a, const Packet8h& b) {
  Packet8f af = half2float(a);
  Packet8f bf = half2float(b);
  Packet8f rf = pdiv(af, bf);
  return float2half(rf);
}
template<> EIGEN_STRONG_INLINE Packet8h pgather<Eigen::half, Packet8h>(const Eigen::half* from, Index stride)
{
  const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(from[0*stride]);
  const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(from[1*stride]);
  const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(from[2*stride]);
  const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(from[3*stride]);
  const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(from[4*stride]);
  const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(from[5*stride]);
  const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(from[6*stride]);
  const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(from[7*stride]);
  return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
}

template<> EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet8h>(Eigen::half* to, const Packet8h& from, Index stride)
{
  EIGEN_ALIGN32 Eigen::half aux[8];
  pstore(aux, from);
  to[stride*0] = aux[0];
  to[stride*1] = aux[1];
  to[stride*2] = aux[2];
  to[stride*3] = aux[3];
  to[stride*4] = aux[4];
  to[stride*5] = aux[5];
  to[stride*6] = aux[6];
  to[stride*7] = aux[7];
}
template<> EIGEN_STRONG_INLINE Eigen::half predux<Packet8h>(const Packet8h& a) {
  Packet8f af = half2float(a);
  float reduced = predux<Packet8f>(af);
  return Eigen::half(reduced);
}

template<> EIGEN_STRONG_INLINE Eigen::half predux_max<Packet8h>(const Packet8h& a) {
  Packet8f af = half2float(a);
  float reduced = predux_max<Packet8f>(af);
  return Eigen::half(reduced);
}

template<> EIGEN_STRONG_INLINE Eigen::half predux_min<Packet8h>(const Packet8h& a) {
  Packet8f af = half2float(a);
  float reduced = predux_min<Packet8f>(af);
  return Eigen::half(reduced);
}

template<> EIGEN_STRONG_INLINE Eigen::half predux_mul<Packet8h>(const Packet8h& a) {
  Packet8f af = half2float(a);
  float reduced = predux_mul<Packet8f>(af);
  return Eigen::half(reduced);
}

template<> EIGEN_STRONG_INLINE Packet8h preverse(const Packet8h& a)
{
  __m128i m = _mm_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);
  return _mm_shuffle_epi8(a,m);
}
EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<Packet8h,8>& kernel) {
  __m128i a = kernel.packet[0];
  __m128i b = kernel.packet[1];
  __m128i c = kernel.packet[2];
  __m128i d = kernel.packet[3];
  __m128i e = kernel.packet[4];
  __m128i f = kernel.packet[5];
  __m128i g = kernel.packet[6];
  __m128i h = kernel.packet[7];

  __m128i a03b03 = _mm_unpacklo_epi16(a, b);
  __m128i c03d03 = _mm_unpacklo_epi16(c, d);
  __m128i e03f03 = _mm_unpacklo_epi16(e, f);
  __m128i g03h03 = _mm_unpacklo_epi16(g, h);
  __m128i a47b47 = _mm_unpackhi_epi16(a, b);
  __m128i c47d47 = _mm_unpackhi_epi16(c, d);
  __m128i e47f47 = _mm_unpackhi_epi16(e, f);
  __m128i g47h47 = _mm_unpackhi_epi16(g, h);

  __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
  __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
  __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
  __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
  __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
  __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
  __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
  __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);

  __m128i a0b0c0d0e0f0g0h0 = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
  __m128i a1b1c1d1e1f1g1h1 = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
  __m128i a2b2c2d2e2f2g2h2 = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
  __m128i a3b3c3d3e3f3g3h3 = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
  __m128i a4b4c4d4e4f4g4h4 = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
  __m128i a5b5c5d5e5f5g5h5 = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
  __m128i a6b6c6d6e6f6g6h6 = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
  __m128i a7b7c7d7e7f7g7h7 = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);

  kernel.packet[0] = a0b0c0d0e0f0g0h0;
  kernel.packet[1] = a1b1c1d1e1f1g1h1;
  kernel.packet[2] = a2b2c2d2e2f2g2h2;
  kernel.packet[3] = a3b3c3d3e3f3g3h3;
  kernel.packet[4] = a4b4c4d4e4f4g4h4;
  kernel.packet[5] = a5b5c5d5e5f5g5h5;
  kernel.packet[6] = a6b6c6d6e6f6g6h6;
  kernel.packet[7] = a7b7c7d7e7f7g7h7;
}
EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<Packet8h,4>& kernel) {
  EIGEN_ALIGN32 Eigen::half in[4][8];
  pstore<Eigen::half>(in[0], kernel.packet[0]);
  pstore<Eigen::half>(in[1], kernel.packet[1]);
  pstore<Eigen::half>(in[2], kernel.packet[2]);
  pstore<Eigen::half>(in[3], kernel.packet[3]);

  EIGEN_ALIGN32 Eigen::half out[4][8];

  for (int i = 0; i < 4; ++i) {
    for (int j = 0; j < 4; ++j) {
      out[i][j] = in[j][2*i];
    }
    for (int j = 0; j < 4; ++j) {
      out[i][j+4] = in[j][2*i+1];
    }
  }

  kernel.packet[0] = pload<Packet8h>(out[0]);
  kernel.packet[1] = pload<Packet8h>(out[1]);
  kernel.packet[2] = pload<Packet8h>(out[2]);
  kernel.packet[3] = pload<Packet8h>(out[3]);
}
EIGEN_STRONG_INLINE Packet8f Bf16ToF32(const Packet8bf& a) {
#ifdef EIGEN_VECTORIZE_AVX2
  __m256i extend = _mm256_cvtepu16_epi32(a);
  return _mm256_castsi256_ps(_mm256_slli_epi32(extend, 16));
#else
  __m128i lo = _mm_cvtepu16_epi32(a);
  __m128i hi = _mm_cvtepu16_epi32(_mm_srli_si128(a, 8));
  __m128i lo_shift = _mm_slli_epi32(lo, 16);
  __m128i hi_shift = _mm_slli_epi32(hi, 16);
  return _mm256_castsi256_ps(_mm256_insertf128_si256(_mm256_castsi128_si256(lo_shift), hi_shift, 1));
#endif
}
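// A bfloat16 is the upper 16 bits of a float, so widening is just a shift.
// The narrowing below rounds to nearest even by adding 0x7fff plus the lowest
// kept bit before truncating: a tie with an odd kept bit carries over (rounds
// up), while a tie with an even kept bit does not. NaNs are canonicalized to
// 0x7fc0 first, since truncating a NaN payload could otherwise produce the
// infinity bit pattern.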
EIGEN_STRONG_INLINE Packet8bf F32ToBf16(const Packet8f& a) {
  __m256i input = _mm256_castps_si256(a);

#ifdef EIGEN_VECTORIZE_AVX2
  // uint32_t lsb = (input >> 16)
  __m256i t = _mm256_srli_epi32(input, 16);
  // uint32_t lsb = lsb & 1
  t = _mm256_and_si256(t, _mm256_set1_epi32(1));
  // uint32_t rounding_bias = 0x7fff + lsb
  t = _mm256_add_epi32(t, _mm256_set1_epi32(0x7fff));
  // input += rounding_bias
  t = _mm256_add_epi32(t, input);
  // input = input >> 16
  t = _mm256_srli_epi32(t, 16);
  // Check NaN before converting back to bf16
  __m256 mask = _mm256_cmp_ps(a, a, _CMP_ORD_Q);
  __m256i nan = _mm256_set1_epi32(0x7fc0);
  t = _mm256_blendv_epi8(nan, t, _mm256_castps_si256(mask));
  return _mm_packus_epi32(_mm256_extractf128_si256(t, 0),
                          _mm256_extractf128_si256(t, 1));
#else
  // uint32_t lsb = (input >> 16)
  __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(input, 0), 16);
  __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(input, 1), 16);
  // uint32_t lsb = lsb & 1
  lo = _mm_and_si128(lo, _mm_set1_epi32(1));
  hi = _mm_and_si128(hi, _mm_set1_epi32(1));
  // uint32_t rounding_bias = 0x7fff + lsb
  lo = _mm_add_epi32(lo, _mm_set1_epi32(0x7fff));
  hi = _mm_add_epi32(hi, _mm_set1_epi32(0x7fff));
  // input += rounding_bias
  lo = _mm_add_epi32(lo, _mm256_extractf128_si256(input, 0));
  hi = _mm_add_epi32(hi, _mm256_extractf128_si256(input, 1));
  // input = input >> 16
  lo = _mm_srli_epi32(lo, 16);
  hi = _mm_srli_epi32(hi, 16);
  // Check NaN before converting back to bf16
  __m256 mask = _mm256_cmp_ps(a, a, _CMP_ORD_Q);
  __m128i nan = _mm_set1_epi32(0x7fc0);
  lo = _mm_blendv_epi8(nan, lo, _mm_castps_si128(_mm256_castps256_ps128(mask)));
  hi = _mm_blendv_epi8(nan, hi, _mm_castps_si128(_mm256_extractf128_ps(mask, 1)));
  return _mm_packus_epi32(lo, hi);
#endif
}
template<> EIGEN_STRONG_INLINE Packet8bf pset1<Packet8bf>(const bfloat16& from) {
  return _mm_set1_epi16(numext::bit_cast<numext::uint16_t>(from));
}

template<> EIGEN_STRONG_INLINE bfloat16 pfirst<Packet8bf>(const Packet8bf& from) {
  return numext::bit_cast<bfloat16>(static_cast<numext::uint16_t>(_mm_extract_epi16(from, 0)));
}

template<> EIGEN_STRONG_INLINE Packet8bf pload<Packet8bf>(const bfloat16* from) {
  return _mm_load_si128(reinterpret_cast<const __m128i*>(from));
}

template<> EIGEN_STRONG_INLINE Packet8bf ploadu<Packet8bf>(const bfloat16* from) {
  return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
}

template<> EIGEN_STRONG_INLINE void pstore<bfloat16>(bfloat16* to, const Packet8bf& from) {
  _mm_store_si128(reinterpret_cast<__m128i*>(to), from);
}

template<> EIGEN_STRONG_INLINE void pstoreu<bfloat16>(bfloat16* to, const Packet8bf& from) {
  _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from);
}
template<> EIGEN_STRONG_INLINE Packet8bf
ploaddup<Packet8bf>(const bfloat16* from) {
  const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
  const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
  const numext::uint16_t c = numext::bit_cast<numext::uint16_t>(from[2]);
  const numext::uint16_t d = numext::bit_cast<numext::uint16_t>(from[3]);
  return _mm_set_epi16(d, d, c, c, b, b, a, a);
}

template<> EIGEN_STRONG_INLINE Packet8bf
ploadquad<Packet8bf>(const bfloat16* from) {
  const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
  const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
  return _mm_set_epi16(b, b, b, b, a, a, a, a);
}

template<> EIGEN_STRONG_INLINE Packet8bf ptrue(const Packet8bf& a) {
  return _mm_cmpeq_epi32(a, a);
}

template<>
EIGEN_STRONG_INLINE Packet8bf pabs(const Packet8bf& a) {
  const __m128i sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
  return _mm_andnot_si128(sign_mask, a);
}
template<>
EIGEN_STRONG_INLINE Packet8bf pmin<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
  return F32ToBf16(pmin<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
}

template<>
EIGEN_STRONG_INLINE Packet8bf pmax<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
  return F32ToBf16(pmax<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
}

template<>
EIGEN_STRONG_INLINE Packet8bf plset<Packet8bf>(const bfloat16& a) {
  return F32ToBf16(plset<Packet8f>(static_cast<float>(a)));
}

template<> EIGEN_STRONG_INLINE Packet8bf por(const Packet8bf& a, const Packet8bf& b) {
  return _mm_or_si128(a,b);
}
template<> EIGEN_STRONG_INLINE Packet8bf pxor(const Packet8bf& a, const Packet8bf& b) {
  return _mm_xor_si128(a,b);
}
template<> EIGEN_STRONG_INLINE Packet8bf pand(const Packet8bf& a, const Packet8bf& b) {
  return _mm_and_si128(a,b);
}
template<> EIGEN_STRONG_INLINE Packet8bf pandnot(const Packet8bf& a, const Packet8bf& b) {
  return _mm_andnot_si128(b,a);
}

template<> EIGEN_STRONG_INLINE Packet8bf pselect(const Packet8bf& mask, const Packet8bf& a, const Packet8bf& b) {
  return _mm_blendv_epi8(b, a, mask);
}
template<> EIGEN_STRONG_INLINE Packet8bf pround<Packet8bf>(const Packet8bf& a)
{
  return F32ToBf16(pround<Packet8f>(Bf16ToF32(a)));
}

template<> EIGEN_STRONG_INLINE Packet8bf print<Packet8bf>(const Packet8bf& a) {
  return F32ToBf16(print<Packet8f>(Bf16ToF32(a)));
}

template<> EIGEN_STRONG_INLINE Packet8bf pceil<Packet8bf>(const Packet8bf& a) {
  return F32ToBf16(pceil<Packet8f>(Bf16ToF32(a)));
}

template<> EIGEN_STRONG_INLINE Packet8bf pfloor<Packet8bf>(const Packet8bf& a) {
  return F32ToBf16(pfloor<Packet8f>(Bf16ToF32(a)));
}

template<> EIGEN_STRONG_INLINE Packet8bf pcmp_eq(const Packet8bf& a, const Packet8bf& b) {
  return Pack16To8(pcmp_eq(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet8bf pcmp_le(const Packet8bf& a, const Packet8bf& b) {
  return Pack16To8(pcmp_le(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet8bf pcmp_lt(const Packet8bf& a, const Packet8bf& b) {
  return Pack16To8(pcmp_lt(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet8bf pcmp_lt_or_nan(const Packet8bf& a, const Packet8bf& b) {
  return Pack16To8(pcmp_lt_or_nan(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet8bf pconj(const Packet8bf& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet8bf pnegate(const Packet8bf& a) {
  Packet8bf sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
  return _mm_xor_si128(a, sign_mask);
}

template<> EIGEN_STRONG_INLINE Packet8bf padd<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
  return F32ToBf16(padd<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet8bf psub<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
  return F32ToBf16(psub<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet8bf pmul<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
  return F32ToBf16(pmul<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet8bf pdiv<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
  return F32ToBf16(pdiv<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
}
template<> EIGEN_STRONG_INLINE Packet8bf pgather<bfloat16, Packet8bf>(const bfloat16* from, Index stride)
{
  const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(from[0*stride]);
  const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(from[1*stride]);
  const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(from[2*stride]);
  const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(from[3*stride]);
  const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(from[4*stride]);
  const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(from[5*stride]);
  const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(from[6*stride]);
  const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(from[7*stride]);
  return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
}

template<> EIGEN_STRONG_INLINE void pscatter<bfloat16, Packet8bf>(bfloat16* to, const Packet8bf& from, Index stride)
{
  EIGEN_ALIGN32 bfloat16 aux[8];
  pstore(aux, from);
  to[stride*0] = aux[0];
  to[stride*1] = aux[1];
  to[stride*2] = aux[2];
  to[stride*3] = aux[3];
  to[stride*4] = aux[4];
  to[stride*5] = aux[5];
  to[stride*6] = aux[6];
  to[stride*7] = aux[7];
}
template<> EIGEN_STRONG_INLINE bfloat16 predux<Packet8bf>(const Packet8bf& a) {
  return static_cast<bfloat16>(predux<Packet8f>(Bf16ToF32(a)));
}

template<> EIGEN_STRONG_INLINE bfloat16 predux_max<Packet8bf>(const Packet8bf& a) {
  return static_cast<bfloat16>(predux_max<Packet8f>(Bf16ToF32(a)));
}

template<> EIGEN_STRONG_INLINE bfloat16 predux_min<Packet8bf>(const Packet8bf& a) {
  return static_cast<bfloat16>(predux_min<Packet8f>(Bf16ToF32(a)));
}

template<> EIGEN_STRONG_INLINE bfloat16 predux_mul<Packet8bf>(const Packet8bf& a) {
  return static_cast<bfloat16>(predux_mul<Packet8f>(Bf16ToF32(a)));
}

template<> EIGEN_STRONG_INLINE Packet8bf preverse(const Packet8bf& a)
{
  __m128i m = _mm_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);
  return _mm_shuffle_epi8(a,m);
}
EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<Packet8bf,8>& kernel) {
  __m128i a = kernel.packet[0];
  __m128i b = kernel.packet[1];
  __m128i c = kernel.packet[2];
  __m128i d = kernel.packet[3];
  __m128i e = kernel.packet[4];
  __m128i f = kernel.packet[5];
  __m128i g = kernel.packet[6];
  __m128i h = kernel.packet[7];

  __m128i a03b03 = _mm_unpacklo_epi16(a, b);
  __m128i c03d03 = _mm_unpacklo_epi16(c, d);
  __m128i e03f03 = _mm_unpacklo_epi16(e, f);
  __m128i g03h03 = _mm_unpacklo_epi16(g, h);
  __m128i a47b47 = _mm_unpackhi_epi16(a, b);
  __m128i c47d47 = _mm_unpackhi_epi16(c, d);
  __m128i e47f47 = _mm_unpackhi_epi16(e, f);
  __m128i g47h47 = _mm_unpackhi_epi16(g, h);

  __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
  __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
  __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
  __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
  __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
  __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
  __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
  __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);

  kernel.packet[0] = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
  kernel.packet[1] = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
  kernel.packet[2] = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
  kernel.packet[3] = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
  kernel.packet[4] = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
  kernel.packet[5] = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
  kernel.packet[6] = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
  kernel.packet[7] = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);
}
EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<Packet8bf,4>& kernel) {
  __m128i a = kernel.packet[0];
  __m128i b = kernel.packet[1];
  __m128i c = kernel.packet[2];
  __m128i d = kernel.packet[3];

  __m128i ab_03 = _mm_unpacklo_epi16(a, b);
  __m128i cd_03 = _mm_unpacklo_epi16(c, d);
  __m128i ab_47 = _mm_unpackhi_epi16(a, b);
  __m128i cd_47 = _mm_unpackhi_epi16(c, d);

  kernel.packet[0] = _mm_unpacklo_epi32(ab_03, cd_03);
  kernel.packet[1] = _mm_unpackhi_epi32(ab_03, cd_03);
  kernel.packet[2] = _mm_unpacklo_epi32(ab_47, cd_47);
  kernel.packet[3] = _mm_unpackhi_epi32(ab_47, cd_47);
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PACKET_MATH_AVX_H