#ifndef EIGEN_PACKET_MATH_AVX512_H
#define EIGEN_PACKET_MATH_AVX512_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
#endif

#ifdef EIGEN_VECTORIZE_FMA
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif
#endif

// EIGEN_HAS_AVX512_MATH gates the vectorized math functions (log, exp, sqrt, ...) advertised below.
#if EIGEN_GNUC_AT_LEAST(5, 3) || EIGEN_COMP_CLANG || EIGEN_COMP_MSVC >= 1923 || EIGEN_COMP_ICC >= 1900
#define EIGEN_HAS_AVX512_MATH 1
#else
#define EIGEN_HAS_AVX512_MATH 0
#endif
typedef __m512  Packet16f;
typedef __m512i Packet16i;
typedef __m512d Packet8d;
typedef eigen_packet_wrapper<__m256i, 1> Packet16h;
typedef eigen_packet_wrapper<__m256i, 2> Packet16bf;
template <>
struct is_arithmetic<__m512> {
  enum { value = true };
};
template <>
struct is_arithmetic<__m512i> {
  enum { value = true };
};
template <>
struct is_arithmetic<__m512d> {
  enum { value = true };
};
template <>
struct packet_traits<half> : default_packet_traits {
  typedef Packet16h type;
  typedef Packet16h half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 16,

    HasLog    = EIGEN_HAS_AVX512_MATH,
    HasLog1p  = EIGEN_HAS_AVX512_MATH,
    HasExp    = EIGEN_HAS_AVX512_MATH,
    HasExpm1  = EIGEN_HAS_AVX512_MATH,
    HasSqrt   = EIGEN_HAS_AVX512_MATH,
    HasRsqrt  = EIGEN_HAS_AVX512_MATH,
    HasBessel = EIGEN_HAS_AVX512_MATH,
    HasNdtri  = EIGEN_HAS_AVX512_MATH,
    HasSin    = EIGEN_FAST_MATH,
    HasCos    = EIGEN_FAST_MATH,
    HasTanh   = EIGEN_FAST_MATH,
    HasErf    = EIGEN_FAST_MATH
  };
};
template <>
struct packet_traits<float> : default_packet_traits {
  typedef Packet16f type;
  typedef Packet8f half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 16,

    HasSin = EIGEN_FAST_MATH,
    HasCos = EIGEN_FAST_MATH,
#if EIGEN_HAS_AVX512_MATH
    HasSqrt  = EIGEN_FAST_MATH,
    HasRsqrt = EIGEN_FAST_MATH,
    HasTanh  = EIGEN_FAST_MATH,
    HasErf   = EIGEN_FAST_MATH,
#endif
  };
};
template <>
struct packet_traits<double> : default_packet_traits {
  typedef Packet8d type;
  typedef Packet4d half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 8,
#if EIGEN_HAS_AVX512_MATH
    HasSqrt  = EIGEN_FAST_MATH,
    HasRsqrt = EIGEN_FAST_MATH,
#endif
  };
};
template <>
struct unpacket_traits<Packet16f> {
  typedef float type;
  typedef Packet8f half;
  typedef Packet16i integer_packet;
  typedef uint16_t mask_t;
  enum { size = 16, alignment = Aligned64, vectorizable = true,
         masked_load_available = true, masked_store_available = true };
};
template <>
struct unpacket_traits<Packet8d> {
  typedef double type;
  typedef Packet4d half;
  enum { size = 8, alignment = Aligned64, vectorizable = true,
         masked_load_available = false, masked_store_available = false };
};
template <>
struct unpacket_traits<Packet16i> {
  typedef int type;
  typedef Packet8i half;
  enum { size = 16, alignment = Aligned64, vectorizable = false,
         masked_load_available = false, masked_store_available = false };
};

template <>
struct unpacket_traits<Packet16h> {
  typedef Eigen::half type;
  typedef Packet8h half;
  enum { size = 16, alignment = Aligned32, vectorizable = true,
         masked_load_available = false, masked_store_available = false };
};
template <>
EIGEN_STRONG_INLINE Packet16i pset1<Packet16i>(const int& from) {
  return _mm512_set1_epi32(from);
}

template <>
EIGEN_STRONG_INLINE Packet16f pset1frombits<Packet16f>(unsigned int from) {
  return _mm512_castsi512_ps(_mm512_set1_epi32(from));
}

template <>
EIGEN_STRONG_INLINE Packet8d pset1frombits<Packet8d>(const numext::uint64_t from) {
  return _mm512_castsi512_pd(_mm512_set1_epi64(from));
}
template<> EIGEN_STRONG_INLINE Packet16f pzero(const Packet16f& /*a*/) { return _mm512_setzero_ps(); }
template<> EIGEN_STRONG_INLINE Packet8d pzero(const Packet8d& /*a*/) { return _mm512_setzero_pd(); }
template<> EIGEN_STRONG_INLINE Packet16i pzero(const Packet16i& /*a*/) { return _mm512_setzero_si512(); }
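
// peven_mask returns a packet whose even-indexed elements have all bits set and whose
// odd-indexed elements are zero. Note that _mm512_set_epi32 lists its arguments from
// element 15 down to element 0, so the trailing -1 below is element 0.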
template<> EIGEN_STRONG_INLINE Packet16f peven_mask(const Packet16f& /*a*/) {
  return _mm512_castsi512_ps(_mm512_set_epi32(0, -1, 0, -1, 0, -1, 0, -1,
                                              0, -1, 0, -1, 0, -1, 0, -1));
}
template<> EIGEN_STRONG_INLINE Packet16i peven_mask(const Packet16i& /*a*/) {
  return _mm512_set_epi32(0, -1, 0, -1, 0, -1, 0, -1,
                          0, -1, 0, -1, 0, -1, 0, -1);
}
template<> EIGEN_STRONG_INLINE Packet8d peven_mask(const Packet8d& /*a*/) {
  return _mm512_castsi512_pd(_mm512_set_epi32(0, 0, -1, -1, 0, 0, -1, -1,
                                              0, 0, -1, -1, 0, 0, -1, -1));
}
template <>
EIGEN_STRONG_INLINE Packet16f pload1<Packet16f>(const float* from) {
  return _mm512_broadcastss_ps(_mm_load_ps1(from));
}
template <>
EIGEN_STRONG_INLINE Packet8d pload1<Packet8d>(const double* from) {
  return _mm512_set1_pd(*from);
}

template <>
EIGEN_STRONG_INLINE Packet16f plset<Packet16f>(const float& a) {
  return _mm512_add_ps(
      _mm512_set1_ps(a),
      _mm512_set_ps(15.0f, 14.0f, 13.0f, 12.0f, 11.0f, 10.0f, 9.0f, 8.0f, 7.0f, 6.0f, 5.0f,
                    4.0f, 3.0f, 2.0f, 1.0f, 0.0f));
}
template <>
EIGEN_STRONG_INLINE Packet8d plset<Packet8d>(const double& a) {
  return _mm512_add_pd(_mm512_set1_pd(a),
                       _mm512_set_pd(7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0));
}
template <>
EIGEN_STRONG_INLINE Packet16f padd<Packet16f>(const Packet16f& a, const Packet16f& b) {
  return _mm512_add_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8d padd<Packet8d>(const Packet8d& a, const Packet8d& b) {
  return _mm512_add_pd(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet16i padd<Packet16i>(const Packet16i& a, const Packet16i& b) {
  return _mm512_add_epi32(a, b);
}

template <>
EIGEN_STRONG_INLINE Packet16f psub<Packet16f>(const Packet16f& a, const Packet16f& b) {
  return _mm512_sub_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8d psub<Packet8d>(const Packet8d& a, const Packet8d& b) {
  return _mm512_sub_pd(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet16i psub<Packet16i>(const Packet16i& a, const Packet16i& b) {
  return _mm512_sub_epi32(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet16f pnegate(const Packet16f& a) {
  const __m512i mask = _mm512_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000,
                                        0x80000000, 0x80000000, 0x80000000, 0x80000000,
                                        0x80000000, 0x80000000, 0x80000000, 0x80000000,
                                        0x80000000, 0x80000000, 0x80000000, 0x80000000);
  return _mm512_castsi512_ps(_mm512_xor_epi32(_mm512_castps_si512(a), mask));
}
template <>
EIGEN_STRONG_INLINE Packet8d pnegate(const Packet8d& a) {
  const __m512i mask = _mm512_set_epi64(0x8000000000000000ULL, 0x8000000000000000ULL,
                                        0x8000000000000000ULL, 0x8000000000000000ULL,
                                        0x8000000000000000ULL, 0x8000000000000000ULL,
                                        0x8000000000000000ULL, 0x8000000000000000ULL);
  return _mm512_castsi512_pd(_mm512_xor_epi64(_mm512_castpd_si512(a), mask));
}

template <>
EIGEN_STRONG_INLINE Packet16f pconj(const Packet16f& a) {
  return a;
}
template <>
EIGEN_STRONG_INLINE Packet8d pconj(const Packet8d& a) {
  return a;
}
template <>
EIGEN_STRONG_INLINE Packet16i pconj(const Packet16i& a) {
  return a;
}
template <>
EIGEN_STRONG_INLINE Packet16f pmul<Packet16f>(const Packet16f& a, const Packet16f& b) {
  return _mm512_mul_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8d pmul<Packet8d>(const Packet8d& a, const Packet8d& b) {
  return _mm512_mul_pd(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet16i pmul<Packet16i>(const Packet16i& a, const Packet16i& b) {
  return _mm512_mullo_epi32(a, b);
}

template <>
EIGEN_STRONG_INLINE Packet16f pdiv<Packet16f>(const Packet16f& a, const Packet16f& b) {
  return _mm512_div_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8d pdiv<Packet8d>(const Packet8d& a, const Packet8d& b) {
  return _mm512_div_pd(a, b);
}
#ifdef EIGEN_VECTORIZE_FMA
template <>
EIGEN_STRONG_INLINE Packet16f pmadd(const Packet16f& a, const Packet16f& b, const Packet16f& c) {
  return _mm512_fmadd_ps(a, b, c);
}
template <>
EIGEN_STRONG_INLINE Packet8d pmadd(const Packet8d& a, const Packet8d& b, const Packet8d& c) {
  return _mm512_fmadd_pd(a, b, c);
}
#endif
template <>
EIGEN_DEVICE_FUNC inline Packet16f pselect(const Packet16f& mask, const Packet16f& a,
                                           const Packet16f& b) {
  __mmask16 mask16 = _mm512_cmp_epi32_mask(
      _mm512_castps_si512(mask), _mm512_setzero_epi32(), _MM_CMPINT_EQ);
  return _mm512_mask_blend_ps(mask16, a, b);
}

template <>
EIGEN_DEVICE_FUNC inline Packet8d pselect(const Packet8d& mask, const Packet8d& a,
                                          const Packet8d& b) {
  __mmask8 mask8 = _mm512_cmp_epi64_mask(_mm512_castpd_si512(mask),
                                         _mm512_setzero_epi32(), _MM_CMPINT_EQ);
  return _mm512_mask_blend_pd(mask8, a, b);
}
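
// The hardware min/max instructions return their second operand when either input is
// NaN, so the arguments are passed in reverse order below: pmin(a, b) / pmax(a, b) then
// yield `a` for NaN inputs, matching std::min/std::max, which return their first argument.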
template <>
EIGEN_STRONG_INLINE Packet16f pmin<Packet16f>(const Packet16f& a, const Packet16f& b) {
  return _mm512_min_ps(b, a);
}
template <>
EIGEN_STRONG_INLINE Packet8d pmin<Packet8d>(const Packet8d& a, const Packet8d& b) {
  return _mm512_min_pd(b, a);
}

template <>
EIGEN_STRONG_INLINE Packet16f pmax<Packet16f>(const Packet16f& a, const Packet16f& b) {
  return _mm512_max_ps(b, a);
}
template <>
EIGEN_STRONG_INLINE Packet8d pmax<Packet8d>(const Packet8d& a, const Packet8d& b) {
  return _mm512_max_pd(b, a);
}
template <>
EIGEN_STRONG_INLINE Packet16f pmin<PropagateNumbers, Packet16f>(const Packet16f& a, const Packet16f& b) {
  return pminmax_propagate_numbers(a, b, pmin<Packet16f>);
}
template <>
EIGEN_STRONG_INLINE Packet8d pmin<PropagateNumbers, Packet8d>(const Packet8d& a, const Packet8d& b) {
  return pminmax_propagate_numbers(a, b, pmin<Packet8d>);
}
template <>
EIGEN_STRONG_INLINE Packet16f pmax<PropagateNumbers, Packet16f>(const Packet16f& a, const Packet16f& b) {
  return pminmax_propagate_numbers(a, b, pmax<Packet16f>);
}
template <>
EIGEN_STRONG_INLINE Packet8d pmax<PropagateNumbers, Packet8d>(const Packet8d& a, const Packet8d& b) {
  return pminmax_propagate_numbers(a, b, pmax<Packet8d>);
}
template <>
EIGEN_STRONG_INLINE Packet16f pmin<PropagateNaN, Packet16f>(const Packet16f& a, const Packet16f& b) {
  return pminmax_propagate_nan(a, b, pmin<Packet16f>);
}
template <>
EIGEN_STRONG_INLINE Packet8d pmin<PropagateNaN, Packet8d>(const Packet8d& a, const Packet8d& b) {
  return pminmax_propagate_nan(a, b, pmin<Packet8d>);
}
template <>
EIGEN_STRONG_INLINE Packet16f pmax<PropagateNaN, Packet16f>(const Packet16f& a, const Packet16f& b) {
  return pminmax_propagate_nan(a, b, pmax<Packet16f>);
}
template <>
EIGEN_STRONG_INLINE Packet8d pmax<PropagateNaN, Packet8d>(const Packet8d& a, const Packet8d& b) {
  return pminmax_propagate_nan(a, b, pmax<Packet8d>);
}
#ifdef EIGEN_VECTORIZE_AVX512DQ
template<int I_> EIGEN_STRONG_INLINE Packet8f extract256(Packet16f x) { return _mm512_extractf32x8_ps(x, I_); }
template<int I_> EIGEN_STRONG_INLINE Packet2d extract128(Packet8d x) { return _mm512_extractf64x2_pd(x, I_); }
EIGEN_STRONG_INLINE Packet16f cat256(Packet8f a, Packet8f b) { return _mm512_insertf32x8(_mm512_castps256_ps512(a), b, 1); }
#else
template<int I_> EIGEN_STRONG_INLINE Packet8f extract256(Packet16f x) {
  return _mm256_castsi256_ps(_mm512_extracti64x4_epi64(_mm512_castps_si512(x), I_));
}
template<int I_> EIGEN_STRONG_INLINE Packet2d extract128(Packet8d x) {
  return _mm_castsi128_pd(_mm512_extracti32x4_epi32(_mm512_castpd_si512(x), I_));
}
EIGEN_STRONG_INLINE Packet16f cat256(Packet8f a, Packet8f b) {
  return _mm512_castsi512_ps(_mm512_inserti64x4(_mm512_castsi256_si512(_mm256_castps_si256(a)),
                                                _mm256_castps_si256(b), 1));
}
#endif
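
// Pack32To16 packs the sixteen 32-bit lanes of `rf` (typically an all-ones/all-zero
// comparison mask) into sixteen 16-bit lanes with signed saturation, preserving the
// element order. It is used below to build Packet16h-sized masks from float comparisons.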
EIGEN_STRONG_INLINE __m256i Pack32To16(Packet16f rf) {
  __m256i lo = _mm256_castps_si256(extract256<0>(rf));
  __m256i hi = _mm256_castps_si256(extract256<1>(rf));
  __m128i result_lo = _mm_packs_epi32(_mm256_extractf128_si256(lo, 0),
                                      _mm256_extractf128_si256(lo, 1));
  __m128i result_hi = _mm_packs_epi32(_mm256_extractf128_si256(hi, 0),
                                      _mm256_extractf128_si256(hi, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(result_lo), result_hi, 1);
}
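
// The AVX512 compare instructions produce a __mmask16/__mmask8 bit-mask;
// _mm512_mask_set1_epi32/_epi64 expand that mask back into a full-width packet whose
// selected lanes are all-ones and whose remaining lanes are zero, the mask format
// expected by the generic packet ops.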
template <>
EIGEN_STRONG_INLINE Packet16f pcmp_eq(const Packet16f& a, const Packet16f& b) {
  __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_EQ_OQ);
  return _mm512_castsi512_ps(
      _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu));
}
template<> EIGEN_STRONG_INLINE Packet16f pcmp_le(const Packet16f& a, const Packet16f& b) {
  __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_LE_OQ);
  return _mm512_castsi512_ps(
      _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu));
}
template<> EIGEN_STRONG_INLINE Packet16f pcmp_lt(const Packet16f& a, const Packet16f& b) {
  __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_LT_OQ);
  return _mm512_castsi512_ps(
      _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu));
}
template<> EIGEN_STRONG_INLINE Packet16f pcmp_lt_or_nan(const Packet16f& a, const Packet16f& b) {
  __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_NGE_UQ);
  return _mm512_castsi512_ps(
      _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu));
}

template<> EIGEN_STRONG_INLINE Packet16i pcmp_eq(const Packet16i& a, const Packet16i& b) {
  __mmask16 mask = _mm512_cmp_epi32_mask(a, b, _CMP_EQ_OQ);
  return _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu);
}
template <>
EIGEN_STRONG_INLINE Packet8d pcmp_eq(const Packet8d& a, const Packet8d& b) {
  __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_EQ_OQ);
  return _mm512_castsi512_pd(
      _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu));
}
template <>
EIGEN_STRONG_INLINE Packet8d pcmp_le(const Packet8d& a, const Packet8d& b) {
  __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_LE_OQ);
  return _mm512_castsi512_pd(
      _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu));
}
template <>
EIGEN_STRONG_INLINE Packet8d pcmp_lt(const Packet8d& a, const Packet8d& b) {
  __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_LT_OQ);
  return _mm512_castsi512_pd(
      _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu));
}
template <>
EIGEN_STRONG_INLINE Packet8d pcmp_lt_or_nan(const Packet8d& a, const Packet8d& b) {
  __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_NGE_UQ);
  return _mm512_castsi512_pd(
      _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu));
}
template<> EIGEN_STRONG_INLINE Packet16f print<Packet16f>(const Packet16f& a) { return _mm512_roundscale_ps(a, _MM_FROUND_CUR_DIRECTION); }
template<> EIGEN_STRONG_INLINE Packet8d print<Packet8d>(const Packet8d& a) { return _mm512_roundscale_pd(a, _MM_FROUND_CUR_DIRECTION); }

template<> EIGEN_STRONG_INLINE Packet16f pceil<Packet16f>(const Packet16f& a) { return _mm512_roundscale_ps(a, _MM_FROUND_TO_POS_INF); }
template<> EIGEN_STRONG_INLINE Packet8d pceil<Packet8d>(const Packet8d& a) { return _mm512_roundscale_pd(a, _MM_FROUND_TO_POS_INF); }

template<> EIGEN_STRONG_INLINE Packet16f pfloor<Packet16f>(const Packet16f& a) { return _mm512_roundscale_ps(a, _MM_FROUND_TO_NEG_INF); }
template<> EIGEN_STRONG_INLINE Packet8d pfloor<Packet8d>(const Packet8d& a) { return _mm512_roundscale_pd(a, _MM_FROUND_TO_NEG_INF); }
template <>
EIGEN_STRONG_INLINE Packet16i ptrue<Packet16i>(const Packet16i& /*a*/) {
  return _mm512_set1_epi32(0xffffffffu);
}

template <>
EIGEN_STRONG_INLINE Packet16f ptrue<Packet16f>(const Packet16f& a) {
  return _mm512_castsi512_ps(ptrue<Packet16i>(_mm512_castps_si512(a)));
}

template <>
EIGEN_STRONG_INLINE Packet8d ptrue<Packet8d>(const Packet8d& a) {
  return _mm512_castsi512_pd(ptrue<Packet16i>(_mm512_castpd_si512(a)));
}
template <>
EIGEN_STRONG_INLINE Packet16i pand<Packet16i>(const Packet16i& a, const Packet16i& b) {
  return _mm512_and_si512(a, b);
}

template <>
EIGEN_STRONG_INLINE Packet16f pand<Packet16f>(const Packet16f& a, const Packet16f& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
  return _mm512_and_ps(a, b);
#else
  return _mm512_castsi512_ps(pand(_mm512_castps_si512(a), _mm512_castps_si512(b)));
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8d pand<Packet8d>(const Packet8d& a, const Packet8d& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
  return _mm512_and_pd(a, b);
#else
  Packet8d res = _mm512_undefined_pd();
  Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
  Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
  res = _mm512_insertf64x4(res, _mm256_and_pd(lane0_a, lane0_b), 0);

  Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
  Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
  return _mm512_insertf64x4(res, _mm256_and_pd(lane1_a, lane1_b), 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet16i por<Packet16i>(const Packet16i& a, const Packet16i& b) {
  return _mm512_or_si512(a, b);
}

template <>
EIGEN_STRONG_INLINE Packet16f por<Packet16f>(const Packet16f& a, const Packet16f& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
  return _mm512_or_ps(a, b);
#else
  return _mm512_castsi512_ps(por(_mm512_castps_si512(a), _mm512_castps_si512(b)));
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8d por<Packet8d>(const Packet8d& a, const Packet8d& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
  return _mm512_or_pd(a, b);
#else
  return _mm512_castsi512_pd(por(_mm512_castpd_si512(a), _mm512_castpd_si512(b)));
#endif
}

template <>
EIGEN_STRONG_INLINE Packet16i pxor<Packet16i>(const Packet16i& a, const Packet16i& b) {
  return _mm512_xor_si512(a, b);
}

template <>
EIGEN_STRONG_INLINE Packet16f pxor<Packet16f>(const Packet16f& a, const Packet16f& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
  return _mm512_xor_ps(a, b);
#else
  return _mm512_castsi512_ps(pxor(_mm512_castps_si512(a), _mm512_castps_si512(b)));
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8d pxor<Packet8d>(const Packet8d& a, const Packet8d& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
  return _mm512_xor_pd(a, b);
#else
  return _mm512_castsi512_pd(pxor(_mm512_castpd_si512(a), _mm512_castpd_si512(b)));
#endif
}
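
// pandnot(a, b) computes a & ~b. The hardware andnot instructions negate their first
// operand, hence the swapped argument order in the calls below.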
template <>
EIGEN_STRONG_INLINE Packet16i pandnot<Packet16i>(const Packet16i& a, const Packet16i& b) {
  return _mm512_andnot_si512(b, a);
}

template <>
EIGEN_STRONG_INLINE Packet16f pandnot<Packet16f>(const Packet16f& a, const Packet16f& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
  return _mm512_andnot_ps(b, a);
#else
  return _mm512_castsi512_ps(pandnot(_mm512_castps_si512(a), _mm512_castps_si512(b)));
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8d pandnot<Packet8d>(const Packet8d& a, const Packet8d& b) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
  return _mm512_andnot_pd(b, a);
#else
  return _mm512_castsi512_pd(pandnot(_mm512_castpd_si512(a), _mm512_castpd_si512(b)));
#endif
}
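
// pround rounds half-way cases away from zero: prev0dot5 is the largest value strictly
// below 0.5; OR-ing it with the sign bit of `a`, adding the result to `a`, and then
// truncating toward zero yields round-half-away-from-zero.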
template<> EIGEN_STRONG_INLINE Packet16f pround<Packet16f>(const Packet16f& a)
{
  const Packet16f mask = pset1frombits<Packet16f>(static_cast<numext::uint32_t>(0x80000000u));
  const Packet16f prev0dot5 = pset1frombits<Packet16f>(static_cast<numext::uint32_t>(0x3EFFFFFFu));
  return _mm512_roundscale_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
}
template<> EIGEN_STRONG_INLINE Packet8d pround<Packet8d>(const Packet8d& a)
{
  const Packet8d mask = pset1frombits<Packet8d>(static_cast<numext::uint64_t>(0x8000000000000000ull));
  const Packet8d prev0dot5 = pset1frombits<Packet8d>(static_cast<numext::uint64_t>(0x3FDFFFFFFFFFFFFFull));
  return _mm512_roundscale_pd(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
}
template<int N> EIGEN_STRONG_INLINE Packet16i parithmetic_shift_right(Packet16i a) {
  return _mm512_srai_epi32(a, N);
}

template<int N> EIGEN_STRONG_INLINE Packet16i plogical_shift_right(Packet16i a) {
  return _mm512_srli_epi32(a, N);
}

template<int N> EIGEN_STRONG_INLINE Packet16i plogical_shift_left(Packet16i a) {
  return _mm512_slli_epi32(a, N);
}
template <>
EIGEN_STRONG_INLINE Packet16f pload<Packet16f>(const float* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_ps(from);
}
template <>
EIGEN_STRONG_INLINE Packet8d pload<Packet8d>(const double* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_pd(from);
}
template <>
EIGEN_STRONG_INLINE Packet16i pload<Packet16i>(const int* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_si512(reinterpret_cast<const __m512i*>(from));
}

template <>
EIGEN_STRONG_INLINE Packet16f ploadu<Packet16f>(const float* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_ps(from);
}
template <>
EIGEN_STRONG_INLINE Packet8d ploadu<Packet8d>(const double* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_pd(from);
}
template <>
EIGEN_STRONG_INLINE Packet16i ploadu<Packet16i>(const int* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(from));
}

template <>
EIGEN_STRONG_INLINE Packet16f ploadu<Packet16f>(const float* from, uint16_t umask) {
  __mmask16 mask = static_cast<__mmask16>(umask);
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_maskz_loadu_ps(mask, from);
}
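
// ploaddup loads size/2 scalars and duplicates each of them, returning the packet
// {a0, a0, a1, a1, a2, a2, ...}.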
template <>
EIGEN_STRONG_INLINE Packet16f ploaddup<Packet16f>(const float* from) {
  // An unaligned load is required here as there is no requirement
  // on the alignment of the input pointer 'from'.
  __m256i low_half = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
  __m512 even_elements = _mm512_castsi512_ps(_mm512_cvtepu32_epi64(low_half));
  __m512 pairs = _mm512_permute_ps(even_elements, _MM_SHUFFLE(2, 2, 0, 0));
  return pairs;
}

#ifdef EIGEN_VECTORIZE_AVX512DQ
template <>
EIGEN_STRONG_INLINE Packet8d ploaddup<Packet8d>(const double* from) {
  __m512d x = _mm512_setzero_pd();
  x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[0]), 0);
  x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[1]), 1);
  x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[2]), 2);
  x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[3]), 3);
  return x;
}
#else
template <>
EIGEN_STRONG_INLINE Packet8d ploaddup<Packet8d>(const double* from) {
  __m512d x = _mm512_setzero_pd();
  x = _mm512_mask_broadcastsd_pd(x, 0x3 << 0, _mm_load_sd(from + 0));
  x = _mm512_mask_broadcastsd_pd(x, 0x3 << 2, _mm_load_sd(from + 1));
  x = _mm512_mask_broadcastsd_pd(x, 0x3 << 4, _mm_load_sd(from + 2));
  x = _mm512_mask_broadcastsd_pd(x, 0x3 << 6, _mm_load_sd(from + 3));
  return x;
}
#endif
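
// ploadquad loads size/4 scalars and repeats each of them four times, returning the
// packet {a0, a0, a0, a0, a1, a1, a1, a1, ...}.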
template <>
EIGEN_STRONG_INLINE Packet16f ploadquad<Packet16f>(const float* from) {
  Packet16f tmp = _mm512_castps128_ps512(ploadu<Packet4f>(from));
  const Packet16i scatter_mask = _mm512_set_epi32(3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0);
  return _mm512_permutexvar_ps(scatter_mask, tmp);
}

template <>
EIGEN_STRONG_INLINE Packet8d ploadquad<Packet8d>(const double* from) {
  __m256d lane0 = _mm256_set1_pd(*from);
  __m256d lane1 = _mm256_set1_pd(*(from + 1));
  __m512d tmp = _mm512_undefined_pd();
  tmp = _mm512_insertf64x4(tmp, lane0, 0);
  return _mm512_insertf64x4(tmp, lane1, 1);
}
template <>
EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet16f& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm512_store_ps(to, from);
}
template <>
EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet8d& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm512_store_pd(to, from);
}
template <>
EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet16i& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm512_storeu_si512(reinterpret_cast<__m512i*>(to), from);
}

template <>
EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet16f& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_ps(to, from);
}
template <>
EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet8d& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_pd(to, from);
}
template <>
EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet16i& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_si512(reinterpret_cast<__m512i*>(to), from);
}
template <>
EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet16f& from, uint16_t umask) {
  __mmask16 mask = static_cast<__mmask16>(umask);
  EIGEN_DEBUG_UNALIGNED_STORE return _mm512_mask_storeu_ps(to, mask, from);
}
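
// pgather/pscatter build a vector of 32-bit indices {0, 1, ..., n-1} * stride and use
// the AVX512 gather/scatter instructions with a scale of 4 bytes (float) or 8 bytes (double).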
template <>
EIGEN_DEVICE_FUNC inline Packet16f pgather<float, Packet16f>(const float* from, Index stride) {
  Packet16i stride_vector = _mm512_set1_epi32(convert_index<int>(stride));
  Packet16i stride_multiplier =
      _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);

  return _mm512_i32gather_ps(indices, from, 4);
}
template <>
EIGEN_DEVICE_FUNC inline Packet8d pgather<double, Packet8d>(const double* from, Index stride) {
  Packet8i stride_vector = _mm256_set1_epi32(convert_index<int>(stride));
  Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
  Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);

  return _mm512_i32gather_pd(indices, from, 8);
}

template <>
EIGEN_DEVICE_FUNC inline void pscatter<float, Packet16f>(float* to, const Packet16f& from, Index stride) {
  Packet16i stride_vector = _mm512_set1_epi32(convert_index<int>(stride));
  Packet16i stride_multiplier =
      _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);
  _mm512_i32scatter_ps(to, indices, from, 4);
}
template <>
EIGEN_DEVICE_FUNC inline void pscatter<double, Packet8d>(double* to, const Packet8d& from, Index stride) {
  Packet8i stride_vector = _mm256_set1_epi32(convert_index<int>(stride));
  Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
  Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);
  _mm512_i32scatter_pd(to, indices, from, 8);
}
template <>
EIGEN_STRONG_INLINE void pstore1<Packet16f>(float* to, const float& a) {
  Packet16f pa = pset1<Packet16f>(a);
  pstore(to, pa);
}
template <>
EIGEN_STRONG_INLINE void pstore1<Packet8d>(double* to, const double& a) {
  Packet8d pa = pset1<Packet8d>(a);
  pstore(to, pa);
}
template <>
EIGEN_STRONG_INLINE void pstore1<Packet16i>(int* to, const int& a) {
  Packet16i pa = pset1<Packet16i>(a);
  pstore(to, pa);
}
template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
template <>
EIGEN_STRONG_INLINE float pfirst<Packet16f>(const Packet16f& a) {
  return _mm_cvtss_f32(_mm512_extractf32x4_ps(a, 0));
}
template <>
EIGEN_STRONG_INLINE double pfirst<Packet8d>(const Packet8d& a) {
  return _mm_cvtsd_f64(_mm256_extractf128_pd(_mm512_extractf64x4_pd(a, 0), 0));
}
template <>
EIGEN_STRONG_INLINE int pfirst<Packet16i>(const Packet16i& a) {
  return _mm_extract_epi32(_mm512_extracti32x4_epi32(a, 0), 0);
}
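
// preverse reverses the element order. For Packet8d the permutation indices are 64-bit
// values, so each index is written as a pair of 32-bit words (high word zero) in the
// _mm512_set_epi32 call below.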
template<> EIGEN_STRONG_INLINE Packet16f preverse(const Packet16f& a)
{
  return _mm512_permutexvar_ps(_mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), a);
}

template<> EIGEN_STRONG_INLINE Packet8d preverse(const Packet8d& a)
{
  return _mm512_permutexvar_pd(_mm512_set_epi32(0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7), a);
}
template<> EIGEN_STRONG_INLINE Packet16f pabs(const Packet16f& a)
{
  return _mm512_castsi512_ps(_mm512_and_si512(_mm512_castps_si512(a), _mm512_set1_epi32(0x7fffffff)));
}
template <>
EIGEN_STRONG_INLINE Packet8d pabs(const Packet8d& a) {
  return _mm512_castsi512_pd(_mm512_and_si512(_mm512_castpd_si512(a),
                                              _mm512_set1_epi64(0x7fffffffffffffff)));
}
template <>
EIGEN_STRONG_INLINE Packet16f pfrexp<Packet16f>(const Packet16f& a, Packet16f& exponent) {
  return pfrexp_generic(a, exponent);
}

// Extract the biased exponent as a double-precision packet (there is no Packet8l type):
// mask off the exponent field, shift it down by 52 bits, and convert to double.
template <>
EIGEN_STRONG_INLINE Packet8d pfrexp_generic_get_biased_exponent(const Packet8d& a) {
  const Packet8d cst_exp_mask = pset1frombits<Packet8d>(static_cast<uint64_t>(0x7ff0000000000000ull));
#ifdef EIGEN_VECTORIZE_AVX512DQ
  return _mm512_cvtepi64_pd(_mm512_srli_epi64(_mm512_castpd_si512(pand(a, cst_exp_mask)), 52));
#else
  return _mm512_cvtepi32_pd(_mm512_cvtepi64_epi32(_mm512_srli_epi64(_mm512_castpd_si512(pand(a, cst_exp_mask)), 52)));
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8d pfrexp<Packet8d>(const Packet8d& a, Packet8d& exponent) {
  return pfrexp_generic(a, exponent);
}
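
// pldexp(a, exponent) computes a * 2^exponent. The Packet8d implementation below clamps
// the exponent to [-2099, 2099], splits it as e = 3*b + r with b = e >> 2, multiplies by
// 2^b three times, and finally multiplies by 2^r, so that large exponents are applied
// without overflowing any single intermediate factor.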
template<> EIGEN_STRONG_INLINE Packet16f pldexp<Packet16f>(const Packet16f& a, const Packet16f& exponent) {
  return pldexp_generic(a, exponent);
}

template<> EIGEN_STRONG_INLINE Packet8d pldexp<Packet8d>(const Packet8d& a, const Packet8d& exponent) {
  // Clamp exponent to [-2099, 2099]
  const Packet8d max_exponent = pset1<Packet8d>(2099.0);
  const Packet8i e = _mm512_cvtpd_epi32(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent));

  // Split 2^e into four factors and multiply.
  const Packet8i bias = pset1<Packet8i>(1023);
  Packet8i b = parithmetic_shift_right<2>(e);  // floor(e/4)

  // 2^b
  const Packet8i permute_idx = _mm256_setr_epi32(0, 4, 1, 5, 2, 6, 3, 7);
  Packet8i hi = _mm256_permutevar8x32_epi32(padd(b, bias), permute_idx);
  Packet8i lo = _mm256_slli_epi64(hi, 52);
  hi = _mm256_slli_epi64(_mm256_srli_epi64(hi, 32), 52);
  Packet8d c = _mm512_castsi512_pd(_mm512_inserti64x4(_mm512_castsi256_si512(lo), hi, 1));
  Packet8d out = pmul(pmul(pmul(a, c), c), c);  // a * 2^(3b)

  // 2^(e - 3b)
  b = psub(psub(psub(e, b), b), b);  // e - 3b
  hi = _mm256_permutevar8x32_epi32(padd(b, bias), permute_idx);
  lo = _mm256_slli_epi64(hi, 52);
  hi = _mm256_slli_epi64(_mm256_srli_epi64(hi, 32), 52);
  c = _mm512_castsi512_pd(_mm512_inserti64x4(_mm512_castsi256_si512(lo), hi, 1));
  return pmul(out, c);  // a * 2^e
}
#ifdef EIGEN_VECTORIZE_AVX512DQ
#define EIGEN_EXTRACT_8f_FROM_16f(INPUT, OUTPUT)         \
  __m256 OUTPUT##_0 = _mm512_extractf32x8_ps(INPUT, 0);  \
  __m256 OUTPUT##_1 = _mm512_extractf32x8_ps(INPUT, 1)
#else
#define EIGEN_EXTRACT_8f_FROM_16f(INPUT, OUTPUT)                \
  __m256 OUTPUT##_0 = _mm256_insertf128_ps(                     \
      _mm256_castps128_ps256(_mm512_extractf32x4_ps(INPUT, 0)), \
      _mm512_extractf32x4_ps(INPUT, 1), 1);                     \
  __m256 OUTPUT##_1 = _mm256_insertf128_ps(                     \
      _mm256_castps128_ps256(_mm512_extractf32x4_ps(INPUT, 2)), \
      _mm512_extractf32x4_ps(INPUT, 3), 1);
#endif

#ifdef EIGEN_VECTORIZE_AVX512DQ
#define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB) \
  OUTPUT = _mm512_insertf32x8(_mm512_castps256_ps512(INPUTA), INPUTB, 1);
#else
#define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB)                    \
  OUTPUT = _mm512_undefined_ps();                                           \
  OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTA, 0), 0); \
  OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTA, 1), 1); \
  OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTB, 0), 2); \
  OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTB, 1), 3);
#endif
template <>
EIGEN_STRONG_INLINE float predux<Packet16f>(const Packet16f& a) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
  __m256 lane0 = _mm512_extractf32x8_ps(a, 0);
  __m256 lane1 = _mm512_extractf32x8_ps(a, 1);
  Packet8f x = _mm256_add_ps(lane0, lane1);
  return predux<Packet8f>(x);
#else
  __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
  __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
  __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
  __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
  __m128 sum = _mm_add_ps(_mm_add_ps(lane0, lane1), _mm_add_ps(lane2, lane3));
  sum = _mm_hadd_ps(sum, sum);
  sum = _mm_hadd_ps(sum, _mm_permute_ps(sum, 1));
  return _mm_cvtss_f32(sum);
#endif
}

template <>
EIGEN_STRONG_INLINE double predux<Packet8d>(const Packet8d& a) {
  __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
  __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
  __m256d sum = _mm256_add_pd(lane0, lane1);
  __m256d tmp0 = _mm256_hadd_pd(sum, _mm256_permute2f128_pd(sum, sum, 1));
  return _mm_cvtsd_f64(_mm256_castpd256_pd128(_mm256_hadd_pd(tmp0, tmp0)));
}
template <>
EIGEN_STRONG_INLINE Packet8f predux_half_dowto4<Packet16f>(const Packet16f& a) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
  __m256 lane0 = _mm512_extractf32x8_ps(a, 0);
  __m256 lane1 = _mm512_extractf32x8_ps(a, 1);
  return _mm256_add_ps(lane0, lane1);
#else
  __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
  __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
  __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
  __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
  __m128 sum0 = _mm_add_ps(lane0, lane2);
  __m128 sum1 = _mm_add_ps(lane1, lane3);
  return _mm256_insertf128_ps(_mm256_castps128_ps256(sum0), sum1, 1);
#endif
}

template <>
EIGEN_STRONG_INLINE Packet4d predux_half_dowto4<Packet8d>(const Packet8d& a) {
  __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
  __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
  return _mm256_add_pd(lane0, lane1);
}
template <>
EIGEN_STRONG_INLINE float predux_mul<Packet16f>(const Packet16f& a) {
//#ifdef EIGEN_VECTORIZE_AVX512DQ
#if 0
  Packet8f lane0 = _mm512_extractf32x8_ps(a, 0);
  Packet8f lane1 = _mm512_extractf32x8_ps(a, 1);
  Packet8f res = pmul(lane0, lane1);
  res = pmul(res, _mm256_permute2f128_ps(res, res, 1));
  res = pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
  return pfirst(pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
#else
  __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
  __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
  __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
  __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
  __m128 res = pmul(pmul(lane0, lane1), pmul(lane2, lane3));
  res = pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
  return pfirst(pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
#endif
}

template <>
EIGEN_STRONG_INLINE double predux_mul<Packet8d>(const Packet8d& a) {
  __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
  __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
  __m256d res = pmul(lane0, lane1);
  res = pmul(res, _mm256_permute2f128_pd(res, res, 1));
  return pfirst(pmul(res, _mm256_shuffle_pd(res, res, 1)));
}
template <>
EIGEN_STRONG_INLINE float predux_min<Packet16f>(const Packet16f& a) {
  __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
  __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
  __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
  __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
  __m128 res = _mm_min_ps(_mm_min_ps(lane0, lane1), _mm_min_ps(lane2, lane3));
  res = _mm_min_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
  return pfirst(_mm_min_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
}

template <>
EIGEN_STRONG_INLINE double predux_min<Packet8d>(const Packet8d& a) {
  __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
  __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
  __m256d res = _mm256_min_pd(lane0, lane1);
  res = _mm256_min_pd(res, _mm256_permute2f128_pd(res, res, 1));
  return pfirst(_mm256_min_pd(res, _mm256_shuffle_pd(res, res, 1)));
}

template <>
EIGEN_STRONG_INLINE float predux_max<Packet16f>(const Packet16f& a) {
  __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
  __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
  __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
  __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
  __m128 res = _mm_max_ps(_mm_max_ps(lane0, lane1), _mm_max_ps(lane2, lane3));
  res = _mm_max_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
  return pfirst(_mm_max_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
}

template <>
EIGEN_STRONG_INLINE double predux_max<Packet8d>(const Packet8d& a) {
  __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
  __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
  __m256d res = _mm256_max_pd(lane0, lane1);
  res = _mm256_max_pd(res, _mm256_permute2f128_pd(res, res, 1));
  return pfirst(_mm256_max_pd(res, _mm256_shuffle_pd(res, res, 1)));
}
template<> EIGEN_STRONG_INLINE bool predux_any(const Packet16f& x)
{
  Packet16i xi = _mm512_castps_si512(x);
  __mmask16 tmp = _mm512_test_epi32_mask(xi, xi);
  return !_mm512_kortestz(tmp, tmp);
}
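
// The 16x16 float transpose below proceeds in 128-bit pieces: unpacklo/unpackhi
// interleave pairs of rows, _mm512_shuffle_ps gathers 4x4 blocks, and the
// EIGEN_EXTRACT_8f_FROM_16f / PACK_OUTPUT helpers reassemble the transposed 512-bit
// rows from 256-bit halves.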
#define PACK_OUTPUT(OUTPUT, INPUT, INDEX, STRIDE) \
  EIGEN_INSERT_8f_INTO_16f(OUTPUT[INDEX], INPUT[INDEX], INPUT[INDEX + STRIDE]);

EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet16f, 16>& kernel) {
  __m512 T0 = _mm512_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
  __m512 T1 = _mm512_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
  __m512 T2 = _mm512_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
  __m512 T3 = _mm512_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
  __m512 T4 = _mm512_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
  __m512 T5 = _mm512_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
  __m512 T6 = _mm512_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
  __m512 T7 = _mm512_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
  __m512 T8 = _mm512_unpacklo_ps(kernel.packet[8], kernel.packet[9]);
  __m512 T9 = _mm512_unpackhi_ps(kernel.packet[8], kernel.packet[9]);
  __m512 T10 = _mm512_unpacklo_ps(kernel.packet[10], kernel.packet[11]);
  __m512 T11 = _mm512_unpackhi_ps(kernel.packet[10], kernel.packet[11]);
  __m512 T12 = _mm512_unpacklo_ps(kernel.packet[12], kernel.packet[13]);
  __m512 T13 = _mm512_unpackhi_ps(kernel.packet[12], kernel.packet[13]);
  __m512 T14 = _mm512_unpacklo_ps(kernel.packet[14], kernel.packet[15]);
  __m512 T15 = _mm512_unpackhi_ps(kernel.packet[14], kernel.packet[15]);
  __m512 S0 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
  __m512 S1 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
  __m512 S2 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
  __m512 S3 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
  __m512 S4 = _mm512_shuffle_ps(T4, T6, _MM_SHUFFLE(1, 0, 1, 0));
  __m512 S5 = _mm512_shuffle_ps(T4, T6, _MM_SHUFFLE(3, 2, 3, 2));
  __m512 S6 = _mm512_shuffle_ps(T5, T7, _MM_SHUFFLE(1, 0, 1, 0));
  __m512 S7 = _mm512_shuffle_ps(T5, T7, _MM_SHUFFLE(3, 2, 3, 2));
  __m512 S8 = _mm512_shuffle_ps(T8, T10, _MM_SHUFFLE(1, 0, 1, 0));
  __m512 S9 = _mm512_shuffle_ps(T8, T10, _MM_SHUFFLE(3, 2, 3, 2));
  __m512 S10 = _mm512_shuffle_ps(T9, T11, _MM_SHUFFLE(1, 0, 1, 0));
  __m512 S11 = _mm512_shuffle_ps(T9, T11, _MM_SHUFFLE(3, 2, 3, 2));
  __m512 S12 = _mm512_shuffle_ps(T12, T14, _MM_SHUFFLE(1, 0, 1, 0));
  __m512 S13 = _mm512_shuffle_ps(T12, T14, _MM_SHUFFLE(3, 2, 3, 2));
  __m512 S14 = _mm512_shuffle_ps(T13, T15, _MM_SHUFFLE(1, 0, 1, 0));
  __m512 S15 = _mm512_shuffle_ps(T13, T15, _MM_SHUFFLE(3, 2, 3, 2));

  EIGEN_EXTRACT_8f_FROM_16f(S0, S0);
  EIGEN_EXTRACT_8f_FROM_16f(S1, S1);
  EIGEN_EXTRACT_8f_FROM_16f(S2, S2);
  EIGEN_EXTRACT_8f_FROM_16f(S3, S3);
  EIGEN_EXTRACT_8f_FROM_16f(S4, S4);
  EIGEN_EXTRACT_8f_FROM_16f(S5, S5);
  EIGEN_EXTRACT_8f_FROM_16f(S6, S6);
  EIGEN_EXTRACT_8f_FROM_16f(S7, S7);
  EIGEN_EXTRACT_8f_FROM_16f(S8, S8);
  EIGEN_EXTRACT_8f_FROM_16f(S9, S9);
  EIGEN_EXTRACT_8f_FROM_16f(S10, S10);
  EIGEN_EXTRACT_8f_FROM_16f(S11, S11);
  EIGEN_EXTRACT_8f_FROM_16f(S12, S12);
  EIGEN_EXTRACT_8f_FROM_16f(S13, S13);
  EIGEN_EXTRACT_8f_FROM_16f(S14, S14);
  EIGEN_EXTRACT_8f_FROM_16f(S15, S15);

  PacketBlock<Packet8f, 32> tmp;

  tmp.packet[0] = _mm256_permute2f128_ps(S0_0, S4_0, 0x20);
  tmp.packet[1] = _mm256_permute2f128_ps(S1_0, S5_0, 0x20);
  tmp.packet[2] = _mm256_permute2f128_ps(S2_0, S6_0, 0x20);
  tmp.packet[3] = _mm256_permute2f128_ps(S3_0, S7_0, 0x20);
  tmp.packet[4] = _mm256_permute2f128_ps(S0_0, S4_0, 0x31);
  tmp.packet[5] = _mm256_permute2f128_ps(S1_0, S5_0, 0x31);
  tmp.packet[6] = _mm256_permute2f128_ps(S2_0, S6_0, 0x31);
  tmp.packet[7] = _mm256_permute2f128_ps(S3_0, S7_0, 0x31);

  tmp.packet[8] = _mm256_permute2f128_ps(S0_1, S4_1, 0x20);
  tmp.packet[9] = _mm256_permute2f128_ps(S1_1, S5_1, 0x20);
  tmp.packet[10] = _mm256_permute2f128_ps(S2_1, S6_1, 0x20);
  tmp.packet[11] = _mm256_permute2f128_ps(S3_1, S7_1, 0x20);
  tmp.packet[12] = _mm256_permute2f128_ps(S0_1, S4_1, 0x31);
  tmp.packet[13] = _mm256_permute2f128_ps(S1_1, S5_1, 0x31);
  tmp.packet[14] = _mm256_permute2f128_ps(S2_1, S6_1, 0x31);
  tmp.packet[15] = _mm256_permute2f128_ps(S3_1, S7_1, 0x31);

  tmp.packet[16] = _mm256_permute2f128_ps(S8_0, S12_0, 0x20);
  tmp.packet[17] = _mm256_permute2f128_ps(S9_0, S13_0, 0x20);
  tmp.packet[18] = _mm256_permute2f128_ps(S10_0, S14_0, 0x20);
  tmp.packet[19] = _mm256_permute2f128_ps(S11_0, S15_0, 0x20);
  tmp.packet[20] = _mm256_permute2f128_ps(S8_0, S12_0, 0x31);
  tmp.packet[21] = _mm256_permute2f128_ps(S9_0, S13_0, 0x31);
  tmp.packet[22] = _mm256_permute2f128_ps(S10_0, S14_0, 0x31);
  tmp.packet[23] = _mm256_permute2f128_ps(S11_0, S15_0, 0x31);

  tmp.packet[24] = _mm256_permute2f128_ps(S8_1, S12_1, 0x20);
  tmp.packet[25] = _mm256_permute2f128_ps(S9_1, S13_1, 0x20);
  tmp.packet[26] = _mm256_permute2f128_ps(S10_1, S14_1, 0x20);
  tmp.packet[27] = _mm256_permute2f128_ps(S11_1, S15_1, 0x20);
  tmp.packet[28] = _mm256_permute2f128_ps(S8_1, S12_1, 0x31);
  tmp.packet[29] = _mm256_permute2f128_ps(S9_1, S13_1, 0x31);
  tmp.packet[30] = _mm256_permute2f128_ps(S10_1, S14_1, 0x31);
  tmp.packet[31] = _mm256_permute2f128_ps(S11_1, S15_1, 0x31);

  PACK_OUTPUT(kernel.packet, tmp.packet, 0, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 1, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 2, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 3, 16);

  PACK_OUTPUT(kernel.packet, tmp.packet, 4, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 5, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 6, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 7, 16);

  PACK_OUTPUT(kernel.packet, tmp.packet, 8, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 9, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 10, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 11, 16);

  PACK_OUTPUT(kernel.packet, tmp.packet, 12, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 13, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 14, 16);
  PACK_OUTPUT(kernel.packet, tmp.packet, 15, 16);
}
#define PACK_OUTPUT_2(OUTPUT, INPUT, INDEX, STRIDE)    \
  EIGEN_INSERT_8f_INTO_16f(OUTPUT[INDEX], INPUT[2 * INDEX], \
                           INPUT[2 * INDEX + STRIDE]);

EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet16f, 4>& kernel) {
  __m512 T0 = _mm512_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
  __m512 T1 = _mm512_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
  __m512 T2 = _mm512_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
  __m512 T3 = _mm512_unpackhi_ps(kernel.packet[2], kernel.packet[3]);

  __m512 S0 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
  __m512 S1 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
  __m512 S2 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
  __m512 S3 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));

  EIGEN_EXTRACT_8f_FROM_16f(S0, S0);
  EIGEN_EXTRACT_8f_FROM_16f(S1, S1);
  EIGEN_EXTRACT_8f_FROM_16f(S2, S2);
  EIGEN_EXTRACT_8f_FROM_16f(S3, S3);

  PacketBlock<Packet8f, 8> tmp;

  tmp.packet[0] = _mm256_permute2f128_ps(S0_0, S1_0, 0x20);
  tmp.packet[1] = _mm256_permute2f128_ps(S2_0, S3_0, 0x20);
  tmp.packet[2] = _mm256_permute2f128_ps(S0_0, S1_0, 0x31);
  tmp.packet[3] = _mm256_permute2f128_ps(S2_0, S3_0, 0x31);

  tmp.packet[4] = _mm256_permute2f128_ps(S0_1, S1_1, 0x20);
  tmp.packet[5] = _mm256_permute2f128_ps(S2_1, S3_1, 0x20);
  tmp.packet[6] = _mm256_permute2f128_ps(S0_1, S1_1, 0x31);
  tmp.packet[7] = _mm256_permute2f128_ps(S2_1, S3_1, 0x31);

  PACK_OUTPUT_2(kernel.packet, tmp.packet, 0, 1);
  PACK_OUTPUT_2(kernel.packet, tmp.packet, 1, 1);
  PACK_OUTPUT_2(kernel.packet, tmp.packet, 2, 1);
  PACK_OUTPUT_2(kernel.packet, tmp.packet, 3, 1);
}
#define PACK_OUTPUT_SQ_D(OUTPUT, INPUT, INDEX, STRIDE)                \
  OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[INDEX], 0); \
  OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[INDEX + STRIDE], 1);

#define PACK_OUTPUT_D(OUTPUT, INPUT, INDEX, STRIDE)                         \
  OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[(2 * INDEX)], 0); \
  OUTPUT[INDEX] =                                                           \
      _mm512_insertf64x4(OUTPUT[INDEX], INPUT[(2 * INDEX) + STRIDE], 1);

EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8d, 4>& kernel) {
  __m512d T0 = _mm512_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
  __m512d T1 = _mm512_shuffle_pd(kernel.packet[0], kernel.packet[1], 0xff);
  __m512d T2 = _mm512_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);
  __m512d T3 = _mm512_shuffle_pd(kernel.packet[2], kernel.packet[3], 0xff);

  PacketBlock<Packet4d, 8> tmp;

  tmp.packet[0] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
                                         _mm512_extractf64x4_pd(T2, 0), 0x20);
  tmp.packet[1] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
                                         _mm512_extractf64x4_pd(T3, 0), 0x20);
  tmp.packet[2] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
                                         _mm512_extractf64x4_pd(T2, 0), 0x31);
  tmp.packet[3] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
                                         _mm512_extractf64x4_pd(T3, 0), 0x31);

  tmp.packet[4] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
                                         _mm512_extractf64x4_pd(T2, 1), 0x20);
  tmp.packet[5] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
                                         _mm512_extractf64x4_pd(T3, 1), 0x20);
  tmp.packet[6] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
                                         _mm512_extractf64x4_pd(T2, 1), 0x31);
  tmp.packet[7] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
                                         _mm512_extractf64x4_pd(T3, 1), 0x31);

  PACK_OUTPUT_D(kernel.packet, tmp.packet, 0, 1);
  PACK_OUTPUT_D(kernel.packet, tmp.packet, 1, 1);
  PACK_OUTPUT_D(kernel.packet, tmp.packet, 2, 1);
  PACK_OUTPUT_D(kernel.packet, tmp.packet, 3, 1);
}
EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8d, 8>& kernel) {
  __m512d T0 = _mm512_unpacklo_pd(kernel.packet[0], kernel.packet[1]);
  __m512d T1 = _mm512_unpackhi_pd(kernel.packet[0], kernel.packet[1]);
  __m512d T2 = _mm512_unpacklo_pd(kernel.packet[2], kernel.packet[3]);
  __m512d T3 = _mm512_unpackhi_pd(kernel.packet[2], kernel.packet[3]);
  __m512d T4 = _mm512_unpacklo_pd(kernel.packet[4], kernel.packet[5]);
  __m512d T5 = _mm512_unpackhi_pd(kernel.packet[4], kernel.packet[5]);
  __m512d T6 = _mm512_unpacklo_pd(kernel.packet[6], kernel.packet[7]);
  __m512d T7 = _mm512_unpackhi_pd(kernel.packet[6], kernel.packet[7]);

  PacketBlock<Packet4d, 16> tmp;

  tmp.packet[0] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
                                         _mm512_extractf64x4_pd(T2, 0), 0x20);
  tmp.packet[1] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
                                         _mm512_extractf64x4_pd(T3, 0), 0x20);
  tmp.packet[2] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
                                         _mm512_extractf64x4_pd(T2, 0), 0x31);
  tmp.packet[3] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
                                         _mm512_extractf64x4_pd(T3, 0), 0x31);

  tmp.packet[4] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
                                         _mm512_extractf64x4_pd(T2, 1), 0x20);
  tmp.packet[5] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
                                         _mm512_extractf64x4_pd(T3, 1), 0x20);
  tmp.packet[6] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
                                         _mm512_extractf64x4_pd(T2, 1), 0x31);
  tmp.packet[7] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
                                         _mm512_extractf64x4_pd(T3, 1), 0x31);

  tmp.packet[8] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 0),
                                         _mm512_extractf64x4_pd(T6, 0), 0x20);
  tmp.packet[9] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 0),
                                         _mm512_extractf64x4_pd(T7, 0), 0x20);
  tmp.packet[10] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 0),
                                          _mm512_extractf64x4_pd(T6, 0), 0x31);
  tmp.packet[11] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 0),
                                          _mm512_extractf64x4_pd(T7, 0), 0x31);

  tmp.packet[12] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 1),
                                          _mm512_extractf64x4_pd(T6, 1), 0x20);
  tmp.packet[13] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 1),
                                          _mm512_extractf64x4_pd(T7, 1), 0x20);
  tmp.packet[14] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 1),
                                          _mm512_extractf64x4_pd(T6, 1), 0x31);
  tmp.packet[15] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 1),
                                          _mm512_extractf64x4_pd(T7, 1), 0x31);

  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 0, 8);
  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 1, 8);
  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 2, 8);
  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 3, 8);

  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 4, 8);
  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 5, 8);
  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 6, 8);
  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 7, 8);
}
template <>
EIGEN_STRONG_INLINE Packet16f pblend(const Selector<16>& /*ifPacket*/,
                                     const Packet16f& /*thenPacket*/,
                                     const Packet16f& /*elsePacket*/) {
  assert(false && "To be implemented");
  return Packet16f();
}
template <>
EIGEN_STRONG_INLINE Packet8d pblend(const Selector<8>& ifPacket,
                                    const Packet8d& thenPacket,
                                    const Packet8d& elsePacket) {
  __mmask8 m = (ifPacket.select[0]   )
             | (ifPacket.select[1]<<1)
             | (ifPacket.select[2]<<2)
             | (ifPacket.select[3]<<3)
             | (ifPacket.select[4]<<4)
             | (ifPacket.select[5]<<5)
             | (ifPacket.select[6]<<6)
             | (ifPacket.select[7]<<7);
  return _mm512_mask_blend_pd(m, elsePacket, thenPacket);
}
template<> EIGEN_STRONG_INLINE Packet16h pset1<Packet16h>(const Eigen::half& from) {
  return _mm256_set1_epi16(from.x);
}

template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet16h>(const Packet16h& from) {
  return half_impl::raw_uint16_to_half(static_cast<unsigned short>(_mm256_extract_epi16(from, 0)));
}

template<> EIGEN_STRONG_INLINE Packet16h pload<Packet16h>(const Eigen::half* from) {
  return _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
}

template<> EIGEN_STRONG_INLINE Packet16h ploadu<Packet16h>(const Eigen::half* from) {
  return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
}

template<> EIGEN_STRONG_INLINE void pstore<half>(Eigen::half* to, const Packet16h& from) {
  _mm256_store_si256((__m256i*)(void*)to, from);
}

template<> EIGEN_STRONG_INLINE void pstoreu<half>(Eigen::half* to, const Packet16h& from) {
  _mm256_storeu_si256((__m256i*)(void*)to, from);
}
template<> EIGEN_STRONG_INLINE Packet16h ploaddup<Packet16h>(const Eigen::half* from) {
  unsigned short a = from[0].x;
  unsigned short b = from[1].x;
  unsigned short c = from[2].x;
  unsigned short d = from[3].x;
  unsigned short e = from[4].x;
  unsigned short f = from[5].x;
  unsigned short g = from[6].x;
  unsigned short h = from[7].x;
  return _mm256_set_epi16(h, h, g, g, f, f, e, e, d, d, c, c, b, b, a, a);
}

template<> EIGEN_STRONG_INLINE Packet16h ploadquad<Packet16h>(const Eigen::half* from) {
  unsigned short a = from[0].x;
  unsigned short b = from[1].x;
  unsigned short c = from[2].x;
  unsigned short d = from[3].x;
  return _mm256_set_epi16(d, d, d, d, c, c, c, c, b, b, b, b, a, a, a, a);
}
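
// Packet16h arithmetic is implemented by widening to a Packet16f with _mm512_cvtph_ps,
// operating in float, and narrowing back with _mm512_cvtps_ph using round-to-nearest.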
EIGEN_STRONG_INLINE Packet16f half2float(const Packet16h& a) {
  return _mm512_cvtph_ps(a);
}

EIGEN_STRONG_INLINE Packet16h float2half(const Packet16f& a) {
  return _mm512_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
template<> EIGEN_STRONG_INLINE Packet16h ptrue(const Packet16h& a) {
  return ptrue(Packet8i(a));
}

template <>
EIGEN_STRONG_INLINE Packet16h pabs(const Packet16h& a) {
  const __m256i sign_mask = _mm256_set1_epi16(static_cast<numext::uint16_t>(0x8000));
  return _mm256_andnot_si256(sign_mask, a);
}

template <>
EIGEN_STRONG_INLINE Packet16h pmin<Packet16h>(const Packet16h& a, const Packet16h& b) {
  return float2half(pmin<Packet16f>(half2float(a), half2float(b)));
}

template <>
EIGEN_STRONG_INLINE Packet16h pmax<Packet16h>(const Packet16h& a, const Packet16h& b) {
  return float2half(pmax<Packet16f>(half2float(a), half2float(b)));
}

template <>
EIGEN_STRONG_INLINE Packet16h plset<Packet16h>(const half& a) {
  return float2half(plset<Packet16f>(static_cast<float>(a)));
}
template<> EIGEN_STRONG_INLINE Packet16h por(const Packet16h& a, const Packet16h& b) {
  return por(Packet8i(a), Packet8i(b));
}
template<> EIGEN_STRONG_INLINE Packet16h pxor(const Packet16h& a, const Packet16h& b) {
  return pxor(Packet8i(a), Packet8i(b));
}
template<> EIGEN_STRONG_INLINE Packet16h pand(const Packet16h& a, const Packet16h& b) {
  return pand(Packet8i(a), Packet8i(b));
}
template<> EIGEN_STRONG_INLINE Packet16h pandnot(const Packet16h& a, const Packet16h& b) {
  return pandnot(Packet8i(a), Packet8i(b));
}

template<> EIGEN_STRONG_INLINE Packet16h pselect(const Packet16h& mask, const Packet16h& a, const Packet16h& b) {
  return _mm256_blendv_epi8(b, a, mask);
}
template<> EIGEN_STRONG_INLINE Packet16h pround<Packet16h>(const Packet16h& a) {
  return float2half(pround<Packet16f>(half2float(a)));
}

template<> EIGEN_STRONG_INLINE Packet16h print<Packet16h>(const Packet16h& a) {
  return float2half(print<Packet16f>(half2float(a)));
}

template<> EIGEN_STRONG_INLINE Packet16h pceil<Packet16h>(const Packet16h& a) {
  return float2half(pceil<Packet16f>(half2float(a)));
}

template<> EIGEN_STRONG_INLINE Packet16h pfloor<Packet16h>(const Packet16h& a) {
  return float2half(pfloor<Packet16f>(half2float(a)));
}
template<> EIGEN_STRONG_INLINE Packet16h pcmp_eq(const Packet16h& a, const Packet16h& b) {
  Packet16f af = half2float(a);
  Packet16f bf = half2float(b);
  return Pack32To16(pcmp_eq(af, bf));
}

template<> EIGEN_STRONG_INLINE Packet16h pcmp_le(const Packet16h& a, const Packet16h& b) {
  return Pack32To16(pcmp_le(half2float(a), half2float(b)));
}

template<> EIGEN_STRONG_INLINE Packet16h pcmp_lt(const Packet16h& a, const Packet16h& b) {
  return Pack32To16(pcmp_lt(half2float(a), half2float(b)));
}

template<> EIGEN_STRONG_INLINE Packet16h pcmp_lt_or_nan(const Packet16h& a, const Packet16h& b) {
  return Pack32To16(pcmp_lt_or_nan(half2float(a), half2float(b)));
}
template<> EIGEN_STRONG_INLINE Packet16h pconj(const Packet16h& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet16h pnegate(const Packet16h& a) {
  Packet16h sign_mask = _mm256_set1_epi16(static_cast<unsigned short>(0x8000));
  return _mm256_xor_si256(a, sign_mask);
}
template<> EIGEN_STRONG_INLINE Packet16h padd<Packet16h>(const Packet16h& a, const Packet16h& b) {
  Packet16f af = half2float(a);
  Packet16f bf = half2float(b);
  Packet16f rf = padd(af, bf);
  return float2half(rf);
}

template<> EIGEN_STRONG_INLINE Packet16h psub<Packet16h>(const Packet16h& a, const Packet16h& b) {
  Packet16f af = half2float(a);
  Packet16f bf = half2float(b);
  Packet16f rf = psub(af, bf);
  return float2half(rf);
}

template<> EIGEN_STRONG_INLINE Packet16h pmul<Packet16h>(const Packet16h& a, const Packet16h& b) {
  Packet16f af = half2float(a);
  Packet16f bf = half2float(b);
  Packet16f rf = pmul(af, bf);
  return float2half(rf);
}

template<> EIGEN_STRONG_INLINE Packet16h pdiv<Packet16h>(const Packet16h& a, const Packet16h& b) {
  Packet16f af = half2float(a);
  Packet16f bf = half2float(b);
  Packet16f rf = pdiv(af, bf);
  return float2half(rf);
}
template<> EIGEN_STRONG_INLINE half predux<Packet16h>(const Packet16h& from) {
  Packet16f from_float = half2float(from);
  return half(predux(from_float));
}

template <>
EIGEN_STRONG_INLINE Packet8h predux_half_dowto4<Packet16h>(const Packet16h& a) {
  Packet8h lane0 = _mm256_extractf128_si256(a, 0);
  Packet8h lane1 = _mm256_extractf128_si256(a, 1);
  return padd<Packet8h>(lane0, lane1);
}

template<> EIGEN_STRONG_INLINE Eigen::half predux_max<Packet16h>(const Packet16h& a) {
  Packet16f af = half2float(a);
  float reduced = predux_max<Packet16f>(af);
  return Eigen::half(reduced);
}

template<> EIGEN_STRONG_INLINE Eigen::half predux_min<Packet16h>(const Packet16h& a) {
  Packet16f af = half2float(a);
  float reduced = predux_min<Packet16f>(af);
  return Eigen::half(reduced);
}

template<> EIGEN_STRONG_INLINE half predux_mul<Packet16h>(const Packet16h& from) {
  Packet16f from_float = half2float(from);
  return half(predux_mul(from_float));
}
template<> EIGEN_STRONG_INLINE Packet16h preverse(const Packet16h& a)
{
  __m128i m = _mm_setr_epi8(14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1);
  return _mm256_insertf128_si256(
      _mm256_castsi128_si256(_mm_shuffle_epi8(_mm256_extractf128_si256(a, 1), m)),
      _mm_shuffle_epi8(_mm256_extractf128_si256(a, 0), m), 1);
}

template<> EIGEN_STRONG_INLINE Packet16h pgather<Eigen::half, Packet16h>(const Eigen::half* from, Index stride)
{
  return _mm256_set_epi16(
      from[15*stride].x, from[14*stride].x, from[13*stride].x, from[12*stride].x,
      from[11*stride].x, from[10*stride].x, from[9*stride].x, from[8*stride].x,
      from[7*stride].x, from[6*stride].x, from[5*stride].x, from[4*stride].x,
      from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);
}
template<> EIGEN_STRONG_INLINE void pscatter<half, Packet16h>(half* to, const Packet16h& from, Index stride)
{
  EIGEN_ALIGN64 half aux[16];
  pstore(aux, from);
  to[stride*0] = aux[0];
  to[stride*1] = aux[1];
  to[stride*2] = aux[2];
  to[stride*3] = aux[3];
  to[stride*4] = aux[4];
  to[stride*5] = aux[5];
  to[stride*6] = aux[6];
  to[stride*7] = aux[7];
  to[stride*8] = aux[8];
  to[stride*9] = aux[9];
  to[stride*10] = aux[10];
  to[stride*11] = aux[11];
  to[stride*12] = aux[12];
  to[stride*13] = aux[13];
  to[stride*14] = aux[14];
  to[stride*15] = aux[15];
}
EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16h,16>& kernel) {
  __m256i a = kernel.packet[0];
  __m256i b = kernel.packet[1];
  __m256i c = kernel.packet[2];
  __m256i d = kernel.packet[3];
  __m256i e = kernel.packet[4];
  __m256i f = kernel.packet[5];
  __m256i g = kernel.packet[6];
  __m256i h = kernel.packet[7];
  __m256i i = kernel.packet[8];
  __m256i j = kernel.packet[9];
  __m256i k = kernel.packet[10];
  __m256i l = kernel.packet[11];
  __m256i m = kernel.packet[12];
  __m256i n = kernel.packet[13];
  __m256i o = kernel.packet[14];
  __m256i p = kernel.packet[15];

  __m256i ab_07 = _mm256_unpacklo_epi16(a, b);
  __m256i cd_07 = _mm256_unpacklo_epi16(c, d);
  __m256i ef_07 = _mm256_unpacklo_epi16(e, f);
  __m256i gh_07 = _mm256_unpacklo_epi16(g, h);
  __m256i ij_07 = _mm256_unpacklo_epi16(i, j);
  __m256i kl_07 = _mm256_unpacklo_epi16(k, l);
  __m256i mn_07 = _mm256_unpacklo_epi16(m, n);
  __m256i op_07 = _mm256_unpacklo_epi16(o, p);

  __m256i ab_8f = _mm256_unpackhi_epi16(a, b);
  __m256i cd_8f = _mm256_unpackhi_epi16(c, d);
  __m256i ef_8f = _mm256_unpackhi_epi16(e, f);
  __m256i gh_8f = _mm256_unpackhi_epi16(g, h);
  __m256i ij_8f = _mm256_unpackhi_epi16(i, j);
  __m256i kl_8f = _mm256_unpackhi_epi16(k, l);
  __m256i mn_8f = _mm256_unpackhi_epi16(m, n);
  __m256i op_8f = _mm256_unpackhi_epi16(o, p);

  __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07);
  __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07);
  __m256i efgh_03 = _mm256_unpacklo_epi32(ef_07, gh_07);
  __m256i efgh_47 = _mm256_unpackhi_epi32(ef_07, gh_07);
  __m256i ijkl_03 = _mm256_unpacklo_epi32(ij_07, kl_07);
  __m256i ijkl_47 = _mm256_unpackhi_epi32(ij_07, kl_07);
  __m256i mnop_03 = _mm256_unpacklo_epi32(mn_07, op_07);
  __m256i mnop_47 = _mm256_unpackhi_epi32(mn_07, op_07);

  __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f);
  __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f);
  __m256i efgh_8b = _mm256_unpacklo_epi32(ef_8f, gh_8f);
  __m256i efgh_cf = _mm256_unpackhi_epi32(ef_8f, gh_8f);
  __m256i ijkl_8b = _mm256_unpacklo_epi32(ij_8f, kl_8f);
  __m256i ijkl_cf = _mm256_unpackhi_epi32(ij_8f, kl_8f);
  __m256i mnop_8b = _mm256_unpacklo_epi32(mn_8f, op_8f);
  __m256i mnop_cf = _mm256_unpackhi_epi32(mn_8f, op_8f);

  __m256i abcdefgh_01 = _mm256_unpacklo_epi64(abcd_03, efgh_03);
  __m256i abcdefgh_23 = _mm256_unpackhi_epi64(abcd_03, efgh_03);
  __m256i ijklmnop_01 = _mm256_unpacklo_epi64(ijkl_03, mnop_03);
  __m256i ijklmnop_23 = _mm256_unpackhi_epi64(ijkl_03, mnop_03);
  __m256i abcdefgh_45 = _mm256_unpacklo_epi64(abcd_47, efgh_47);
  __m256i abcdefgh_67 = _mm256_unpackhi_epi64(abcd_47, efgh_47);
  __m256i ijklmnop_45 = _mm256_unpacklo_epi64(ijkl_47, mnop_47);
  __m256i ijklmnop_67 = _mm256_unpackhi_epi64(ijkl_47, mnop_47);
  __m256i abcdefgh_89 = _mm256_unpacklo_epi64(abcd_8b, efgh_8b);
  __m256i abcdefgh_ab = _mm256_unpackhi_epi64(abcd_8b, efgh_8b);
  __m256i ijklmnop_89 = _mm256_unpacklo_epi64(ijkl_8b, mnop_8b);
  __m256i ijklmnop_ab = _mm256_unpackhi_epi64(ijkl_8b, mnop_8b);
  __m256i abcdefgh_cd = _mm256_unpacklo_epi64(abcd_cf, efgh_cf);
  __m256i abcdefgh_ef = _mm256_unpackhi_epi64(abcd_cf, efgh_cf);
  __m256i ijklmnop_cd = _mm256_unpacklo_epi64(ijkl_cf, mnop_cf);
  __m256i ijklmnop_ef = _mm256_unpackhi_epi64(ijkl_cf, mnop_cf);

  __m256i a_p_0 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x20);
  __m256i a_p_1 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x20);
  __m256i a_p_2 = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x20);
  __m256i a_p_3 = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x20);
  __m256i a_p_4 = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x20);
  __m256i a_p_5 = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x20);
  __m256i a_p_6 = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x20);
  __m256i a_p_7 = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x20);
  __m256i a_p_8 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x31);
  __m256i a_p_9 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x31);
  __m256i a_p_a = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x31);
  __m256i a_p_b = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x31);
  __m256i a_p_c = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x31);
  __m256i a_p_d = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x31);
  __m256i a_p_e = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x31);
  __m256i a_p_f = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x31);

  kernel.packet[0] = a_p_0;
  kernel.packet[1] = a_p_1;
  kernel.packet[2] = a_p_2;
  kernel.packet[3] = a_p_3;
  kernel.packet[4] = a_p_4;
  kernel.packet[5] = a_p_5;
  kernel.packet[6] = a_p_6;
  kernel.packet[7] = a_p_7;
  kernel.packet[8] = a_p_8;
  kernel.packet[9] = a_p_9;
  kernel.packet[10] = a_p_a;
  kernel.packet[11] = a_p_b;
  kernel.packet[12] = a_p_c;
  kernel.packet[13] = a_p_d;
  kernel.packet[14] = a_p_e;
  kernel.packet[15] = a_p_f;
}
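// The 8-row and 4-row Packet16h transposes below take a simpler route: spill
// the packets to an aligned scratch buffer, permute the scalars in memory,
// and reload the transposed rows.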
EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16h,8>& kernel) {
  EIGEN_ALIGN64 half in[8][16];
  pstore<half>(in[0], kernel.packet[0]);
  pstore<half>(in[1], kernel.packet[1]);
  pstore<half>(in[2], kernel.packet[2]);
  pstore<half>(in[3], kernel.packet[3]);
  pstore<half>(in[4], kernel.packet[4]);
  pstore<half>(in[5], kernel.packet[5]);
  pstore<half>(in[6], kernel.packet[6]);
  pstore<half>(in[7], kernel.packet[7]);

  EIGEN_ALIGN64 half out[8][16];

  for (int i = 0; i < 8; ++i) {
    for (int j = 0; j < 8; ++j) {
      out[i][j] = in[j][2*i];
    }
    for (int j = 0; j < 8; ++j) {
      out[i][j+8] = in[j][2*i+1];
    }
  }

  kernel.packet[0] = pload<Packet16h>(out[0]);
  kernel.packet[1] = pload<Packet16h>(out[1]);
  kernel.packet[2] = pload<Packet16h>(out[2]);
  kernel.packet[3] = pload<Packet16h>(out[3]);
  kernel.packet[4] = pload<Packet16h>(out[4]);
  kernel.packet[5] = pload<Packet16h>(out[5]);
  kernel.packet[6] = pload<Packet16h>(out[6]);
  kernel.packet[7] = pload<Packet16h>(out[7]);
}
EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16h,4>& kernel) {
  EIGEN_ALIGN64 half in[4][16];
  pstore<half>(in[0], kernel.packet[0]);
  pstore<half>(in[1], kernel.packet[1]);
  pstore<half>(in[2], kernel.packet[2]);
  pstore<half>(in[3], kernel.packet[3]);

  EIGEN_ALIGN64 half out[4][16];

  for (int i = 0; i < 4; ++i) {
    for (int j = 0; j < 4; ++j) {
      out[i][j] = in[j][4*i];
    }
    for (int j = 0; j < 4; ++j) {
      out[i][j+4] = in[j][4*i+1];
    }
    for (int j = 0; j < 4; ++j) {
      out[i][j+8] = in[j][4*i+2];
    }
    for (int j = 0; j < 4; ++j) {
      out[i][j+12] = in[j][4*i+3];
    }
  }

  kernel.packet[0] = pload<Packet16h>(out[0]);
  kernel.packet[1] = pload<Packet16h>(out[1]);
  kernel.packet[2] = pload<Packet16h>(out[2]);
  kernel.packet[3] = pload<Packet16h>(out[3]);
}
template<> struct packet_traits<bfloat16> : default_packet_traits
{
  typedef Packet16bf type;
  typedef Packet8bf half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 16,
    HasSin = EIGEN_FAST_MATH,
    HasCos = EIGEN_FAST_MATH,
#if EIGEN_HAS_AVX512_MATH
#ifdef EIGEN_VECTORIZE_AVX512DQ
    HasLog = 1,
#endif
    HasSqrt = EIGEN_FAST_MATH,
    HasRsqrt = EIGEN_FAST_MATH,
    HasTanh = EIGEN_FAST_MATH,
    HasErf = EIGEN_FAST_MATH,
#endif
    HasDiv = 1
  };
};
template<> struct unpacket_traits<Packet16bf>
{
  typedef bfloat16 type;
  typedef Packet8bf half;
  enum {size=16, alignment=Aligned32, vectorizable=true, masked_load_available=false, masked_store_available=false};
};
template<> EIGEN_STRONG_INLINE Packet16bf pload<Packet16bf>(const bfloat16* from) {
  return _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
}

template<> EIGEN_STRONG_INLINE Packet16bf ploadu<Packet16bf>(const bfloat16* from) {
  return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
}

template<> EIGEN_STRONG_INLINE void pstore<bfloat16>(bfloat16* to, const Packet16bf& from) {
  _mm256_store_si256(reinterpret_cast<__m256i*>(to), from);
}

template<> EIGEN_STRONG_INLINE void pstoreu<bfloat16>(bfloat16* to, const Packet16bf& from) {
  _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from);
}
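// ploaddup below repeats each of the first 8 bfloat16 values twice
// (a a b b ...), and ploadquad repeats each of the first 4 values four times
// (a a a a b b b b ...), matching the generic packet-op contracts.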
template<> EIGEN_STRONG_INLINE Packet16bf ploaddup<Packet16bf>(const bfloat16* from) {
  unsigned short a = from[0].value;
  unsigned short b = from[1].value;
  unsigned short c = from[2].value;
  unsigned short d = from[3].value;
  unsigned short e = from[4].value;
  unsigned short f = from[5].value;
  unsigned short g = from[6].value;
  unsigned short h = from[7].value;
  return _mm256_set_epi16(h, h, g, g, f, f, e, e, d, d, c, c, b, b, a, a);
}

template<> EIGEN_STRONG_INLINE Packet16bf ploadquad(const bfloat16* from) {
  unsigned short a = from[0].value;
  unsigned short b = from[1].value;
  unsigned short c = from[2].value;
  unsigned short d = from[3].value;
  return _mm256_set_epi16(d, d, d, d, c, c, c, c, b, b, b, b, a, a, a, a);
}
EIGEN_STRONG_INLINE Packet16f Bf16ToF32(const Packet16bf& a) {
  return _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepu16_epi32(a), 16));
}
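// The fallback path of F32ToBf16 below truncates a float to bfloat16 with
// round-to-nearest-even. A scalar sketch of the same rule (illustration only,
// not an Eigen API):
//   uint32_t bits = bit_cast<uint32_t>(f);
//   uint32_t lsb  = (bits >> 16) & 1;      // lowest mantissa bit that is kept
//   bits += 0x7fff + lsb;                  // rounding bias
//   uint16_t bf   = uint16_t(bits >> 16);  // keep the upper 16 bits
// NaN inputs are remapped to the quiet-NaN pattern 0x7fc0.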
EIGEN_STRONG_INLINE Packet16bf F32ToBf16(const Packet16f& a) {
  Packet16bf r;

#if defined(EIGEN_VECTORIZE_AVX512BF16) && EIGEN_GNUC_AT_LEAST(10, 1)
  r = (__m256i)(_mm512_cvtneps_pbh(a));
#else
  __m512i t;
  __m512i input = _mm512_castps_si512(a);
  __m512i nan = _mm512_set1_epi32(0x7fc0);

  // lsb = (input >> 16) & 1
  t = _mm512_and_si512(_mm512_srli_epi32(input, 16), _mm512_set1_epi32(1));
  // rounding_bias = 0x7fff + lsb
  t = _mm512_add_epi32(t, _mm512_set1_epi32(0x7fff));
  t = _mm512_add_epi32(t, input);
  t = _mm512_srli_epi32(t, 16);

  // Map NaN inputs to the quiet-NaN bit pattern before narrowing.
  __mmask16 mask = _mm512_cmp_ps_mask(a, a, _CMP_ORD_Q);
  t = _mm512_mask_blend_epi32(mask, nan, t);

  r = _mm512_cvtepi32_epi16(t);
#endif
  return r;
}
template<> EIGEN_STRONG_INLINE Packet16bf ptrue(const Packet16bf& a) {
  return ptrue<Packet8i>(a);
}

template<> EIGEN_STRONG_INLINE Packet16bf por(const Packet16bf& a, const Packet16bf& b) {
  return por<Packet8i>(a, b);
}

template<> EIGEN_STRONG_INLINE Packet16bf pxor(const Packet16bf& a, const Packet16bf& b) {
  return pxor<Packet8i>(a, b);
}

template<> EIGEN_STRONG_INLINE Packet16bf pand(const Packet16bf& a, const Packet16bf& b) {
  return pand<Packet8i>(a, b);
}

template<> EIGEN_STRONG_INLINE Packet16bf pandnot(const Packet16bf& a, const Packet16bf& b) {
  return pandnot<Packet8i>(a, b);
}
template<> EIGEN_STRONG_INLINE Packet16bf pselect(const Packet16bf& mask,
                                                  const Packet16bf& a,
                                                  const Packet16bf& b) {
  return _mm256_blendv_epi8(b, a, mask);
}
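// pselect above blends byte-wise, so the mask must hold all-ones or all-zeros
// 16-bit elements, e.g. the result of one of the pcmp_* operations below.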
template<> EIGEN_STRONG_INLINE Packet16bf pround<Packet16bf>(const Packet16bf& a)
{
  return F32ToBf16(pround<Packet16f>(Bf16ToF32(a)));
}

template<> EIGEN_STRONG_INLINE Packet16bf print<Packet16bf>(const Packet16bf& a) {
  return F32ToBf16(print<Packet16f>(Bf16ToF32(a)));
}

template<> EIGEN_STRONG_INLINE Packet16bf pceil<Packet16bf>(const Packet16bf& a) {
  return F32ToBf16(pceil<Packet16f>(Bf16ToF32(a)));
}

template<> EIGEN_STRONG_INLINE Packet16bf pfloor<Packet16bf>(const Packet16bf& a) {
  return F32ToBf16(pfloor<Packet16f>(Bf16ToF32(a)));
}
template<> EIGEN_STRONG_INLINE Packet16bf pcmp_eq(const Packet16bf& a, const Packet16bf& b) {
  return Pack32To16(pcmp_eq(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet16bf pcmp_le(const Packet16bf& a, const Packet16bf& b) {
  return Pack32To16(pcmp_le(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet16bf pcmp_lt(const Packet16bf& a, const Packet16bf& b) {
  return Pack32To16(pcmp_lt(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet16bf pcmp_lt_or_nan(const Packet16bf& a, const Packet16bf& b) {
  return Pack32To16(pcmp_lt_or_nan(Bf16ToF32(a), Bf16ToF32(b)));
}
template<> EIGEN_STRONG_INLINE Packet16bf pnegate(const Packet16bf& a) {
  Packet16bf sign_mask = _mm256_set1_epi16(static_cast<unsigned short>(0x8000));
  return _mm256_xor_si256(a, sign_mask);
}

template<> EIGEN_STRONG_INLINE Packet16bf pconj(const Packet16bf& a) {
  return a;
}
template<> EIGEN_STRONG_INLINE Packet16bf pabs(const Packet16bf& a) {
  const __m256i sign_mask = _mm256_set1_epi16(static_cast<numext::uint16_t>(0x8000));
  return _mm256_andnot_si256(sign_mask, a);
}
template<> EIGEN_STRONG_INLINE Packet16bf padd<Packet16bf>(const Packet16bf& a, const Packet16bf& b) {
  return F32ToBf16(padd<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet16bf psub<Packet16bf>(const Packet16bf& a, const Packet16bf& b) {
  return F32ToBf16(psub<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet16bf pmul<Packet16bf>(const Packet16bf& a, const Packet16bf& b) {
  return F32ToBf16(pmul<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet16bf pdiv<Packet16bf>(const Packet16bf& a, const Packet16bf& b) {
  return F32ToBf16(pdiv<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet16bf pmin<Packet16bf>(const Packet16bf& a, const Packet16bf& b) {
  return F32ToBf16(pmin<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet16bf pmax<Packet16bf>(const Packet16bf& a, const Packet16bf& b) {
  return F32ToBf16(pmax<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet16bf plset<Packet16bf>(const bfloat16& a) {
  return F32ToBf16(plset<Packet16f>(static_cast<float>(a)));
}
template<> EIGEN_STRONG_INLINE Packet8bf predux_half_dowto4<Packet16bf>(const Packet16bf& a) {
  Packet8bf lane0 = _mm256_extractf128_si256(a, 0);
  Packet8bf lane1 = _mm256_extractf128_si256(a, 1);
  return padd<Packet8bf>(lane0, lane1);
}
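// predux_half_dowto4 adds the two 128-bit halves directly in bfloat16, while
// the full reductions below widen to Packet16f and reduce in single precision.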
template<> EIGEN_STRONG_INLINE bfloat16 predux<Packet16bf>(const Packet16bf& p) {
  return static_cast<bfloat16>(predux<Packet16f>(Bf16ToF32(p)));
}

template<> EIGEN_STRONG_INLINE bfloat16 predux_mul<Packet16bf>(const Packet16bf& from) {
  return static_cast<bfloat16>(predux_mul<Packet16f>(Bf16ToF32(from)));
}

template<> EIGEN_STRONG_INLINE bfloat16 predux_min<Packet16bf>(const Packet16bf& from) {
  return static_cast<bfloat16>(predux_min<Packet16f>(Bf16ToF32(from)));
}

template<> EIGEN_STRONG_INLINE bfloat16 predux_max<Packet16bf>(const Packet16bf& from) {
  return static_cast<bfloat16>(predux_max<Packet16f>(Bf16ToF32(from)));
}
template<> EIGEN_STRONG_INLINE Packet16bf preverse(const Packet16bf& a) {
  __m256i m = _mm256_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1,
                               14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);

  Packet16bf res;
  // Swap the two 128-bit halves first, because the byte shuffle operates within 128-bit lanes.
  res = _mm256_permute2x128_si256(a, a, 1);
  // Reverse the 16-bit elements inside each lane.
  return _mm256_shuffle_epi8(res, m);
}
template<> EIGEN_STRONG_INLINE Packet16bf pgather<bfloat16, Packet16bf>(const bfloat16* from,
                                                                        Index stride) {
  return _mm256_set_epi16(
      from[15*stride].value, from[14*stride].value, from[13*stride].value, from[12*stride].value,
      from[11*stride].value, from[10*stride].value, from[9*stride].value, from[8*stride].value,
      from[7*stride].value, from[6*stride].value, from[5*stride].value, from[4*stride].value,
      from[3*stride].value, from[2*stride].value, from[1*stride].value, from[0*stride].value);
}
template<> EIGEN_STRONG_INLINE void pscatter<bfloat16, Packet16bf>(bfloat16* to,
                                                                   const Packet16bf& from,
                                                                   Index stride) {
  EIGEN_ALIGN64 bfloat16 aux[16];
  pstore<bfloat16>(aux, from);
  to[stride*0] = aux[0];
  to[stride*1] = aux[1];
  to[stride*2] = aux[2];
  to[stride*3] = aux[3];
  to[stride*4] = aux[4];
  to[stride*5] = aux[5];
  to[stride*6] = aux[6];
  to[stride*7] = aux[7];
  to[stride*8] = aux[8];
  to[stride*9] = aux[9];
  to[stride*10] = aux[10];
  to[stride*11] = aux[11];
  to[stride*12] = aux[12];
  to[stride*13] = aux[13];
  to[stride*14] = aux[14];
  to[stride*15] = aux[15];
}
EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16bf,16>& kernel) {
  __m256i a = kernel.packet[0];
  __m256i b = kernel.packet[1];
  __m256i c = kernel.packet[2];
  __m256i d = kernel.packet[3];
  __m256i e = kernel.packet[4];
  __m256i f = kernel.packet[5];
  __m256i g = kernel.packet[6];
  __m256i h = kernel.packet[7];
  __m256i i = kernel.packet[8];
  __m256i j = kernel.packet[9];
  __m256i k = kernel.packet[10];
  __m256i l = kernel.packet[11];
  __m256i m = kernel.packet[12];
  __m256i n = kernel.packet[13];
  __m256i o = kernel.packet[14];
  __m256i p = kernel.packet[15];

  __m256i ab_07 = _mm256_unpacklo_epi16(a, b);
  __m256i cd_07 = _mm256_unpacklo_epi16(c, d);
  __m256i ef_07 = _mm256_unpacklo_epi16(e, f);
  __m256i gh_07 = _mm256_unpacklo_epi16(g, h);
  __m256i ij_07 = _mm256_unpacklo_epi16(i, j);
  __m256i kl_07 = _mm256_unpacklo_epi16(k, l);
  __m256i mn_07 = _mm256_unpacklo_epi16(m, n);
  __m256i op_07 = _mm256_unpacklo_epi16(o, p);

  __m256i ab_8f = _mm256_unpackhi_epi16(a, b);
  __m256i cd_8f = _mm256_unpackhi_epi16(c, d);
  __m256i ef_8f = _mm256_unpackhi_epi16(e, f);
  __m256i gh_8f = _mm256_unpackhi_epi16(g, h);
  __m256i ij_8f = _mm256_unpackhi_epi16(i, j);
  __m256i kl_8f = _mm256_unpackhi_epi16(k, l);
  __m256i mn_8f = _mm256_unpackhi_epi16(m, n);
  __m256i op_8f = _mm256_unpackhi_epi16(o, p);

  __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07);
  __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07);
  __m256i efgh_03 = _mm256_unpacklo_epi32(ef_07, gh_07);
  __m256i efgh_47 = _mm256_unpackhi_epi32(ef_07, gh_07);
  __m256i ijkl_03 = _mm256_unpacklo_epi32(ij_07, kl_07);
  __m256i ijkl_47 = _mm256_unpackhi_epi32(ij_07, kl_07);
  __m256i mnop_03 = _mm256_unpacklo_epi32(mn_07, op_07);
  __m256i mnop_47 = _mm256_unpackhi_epi32(mn_07, op_07);

  __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f);
  __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f);
  __m256i efgh_8b = _mm256_unpacklo_epi32(ef_8f, gh_8f);
  __m256i efgh_cf = _mm256_unpackhi_epi32(ef_8f, gh_8f);
  __m256i ijkl_8b = _mm256_unpacklo_epi32(ij_8f, kl_8f);
  __m256i ijkl_cf = _mm256_unpackhi_epi32(ij_8f, kl_8f);
  __m256i mnop_8b = _mm256_unpacklo_epi32(mn_8f, op_8f);
  __m256i mnop_cf = _mm256_unpackhi_epi32(mn_8f, op_8f);

  __m256i abcdefgh_01 = _mm256_unpacklo_epi64(abcd_03, efgh_03);
  __m256i abcdefgh_23 = _mm256_unpackhi_epi64(abcd_03, efgh_03);
  __m256i ijklmnop_01 = _mm256_unpacklo_epi64(ijkl_03, mnop_03);
  __m256i ijklmnop_23 = _mm256_unpackhi_epi64(ijkl_03, mnop_03);
  __m256i abcdefgh_45 = _mm256_unpacklo_epi64(abcd_47, efgh_47);
  __m256i abcdefgh_67 = _mm256_unpackhi_epi64(abcd_47, efgh_47);
  __m256i ijklmnop_45 = _mm256_unpacklo_epi64(ijkl_47, mnop_47);
  __m256i ijklmnop_67 = _mm256_unpackhi_epi64(ijkl_47, mnop_47);
  __m256i abcdefgh_89 = _mm256_unpacklo_epi64(abcd_8b, efgh_8b);
  __m256i abcdefgh_ab = _mm256_unpackhi_epi64(abcd_8b, efgh_8b);
  __m256i ijklmnop_89 = _mm256_unpacklo_epi64(ijkl_8b, mnop_8b);
  __m256i ijklmnop_ab = _mm256_unpackhi_epi64(ijkl_8b, mnop_8b);
  __m256i abcdefgh_cd = _mm256_unpacklo_epi64(abcd_cf, efgh_cf);
  __m256i abcdefgh_ef = _mm256_unpackhi_epi64(abcd_cf, efgh_cf);
  __m256i ijklmnop_cd = _mm256_unpacklo_epi64(ijkl_cf, mnop_cf);
  __m256i ijklmnop_ef = _mm256_unpackhi_epi64(ijkl_cf, mnop_cf);

  kernel.packet[0] = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x20);
  kernel.packet[1] = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x20);
  kernel.packet[2] = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x20);
  kernel.packet[3] = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x20);
  kernel.packet[4] = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x20);
  kernel.packet[5] = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x20);
  kernel.packet[6] = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x20);
  kernel.packet[7] = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x20);
  kernel.packet[8] = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x31);
  kernel.packet[9] = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x31);
  kernel.packet[10] = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x31);
  kernel.packet[11] = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x31);
  kernel.packet[12] = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x31);
  kernel.packet[13] = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x31);
  kernel.packet[14] = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x31);
  kernel.packet[15] = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x31);
}
EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16bf,4>& kernel) {
  __m256i a = kernel.packet[0];
  __m256i b = kernel.packet[1];
  __m256i c = kernel.packet[2];
  __m256i d = kernel.packet[3];

  __m256i ab_07 = _mm256_unpacklo_epi16(a, b);
  __m256i cd_07 = _mm256_unpacklo_epi16(c, d);
  __m256i ab_8f = _mm256_unpackhi_epi16(a, b);
  __m256i cd_8f = _mm256_unpackhi_epi16(c, d);

  __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07);
  __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07);
  __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f);
  __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f);

  kernel.packet[0] = _mm256_permute2x128_si256(abcd_03, abcd_47, 0x20);
  kernel.packet[1] = _mm256_permute2x128_si256(abcd_8b, abcd_cf, 0x20);
  kernel.packet[2] = _mm256_permute2x128_si256(abcd_03, abcd_47, 0x31);
  kernel.packet[3] = _mm256_permute2x128_si256(abcd_8b, abcd_cf, 0x31);
}
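// A minimal usage sketch of the 4x16 bfloat16 transpose above (illustration
// only; `rows` is a hypothetical array of four bfloat16[16] rows):
//
//   PacketBlock<Packet16bf, 4> block;
//   for (int r = 0; r < 4; ++r) block.packet[r] = ploadu<Packet16bf>(rows[r]);
//   ptranspose(block);
//   // block.packet[r] now holds columns 4*r..4*r+3 of the original 4x16
//   // block, stored one 4-element column after another.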