

Avx512F Class

Definition

Important

This API is not CLS-compliant.

Provides access to X86 AVX512F hardware instructions via intrinsics.

public ref class Avx512F abstract : System::Runtime::Intrinsics::X86::Avx2

[System.CLSCompliant(false)]
public abstract class Avx512F : System.Runtime.Intrinsics.X86.Avx2

[<System.CLSCompliant(false)>]
type Avx512F = class
    inherit Avx2

Public MustInherit Class Avx512F
Inherits Avx2
Inheritance
Derived
Attributes

Properties

IsSupported

Gets a value that indicates whether the APIs in this class are supported.
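
Every member of this class throws PlatformNotSupportedException on hardware without AVX-512 Foundation support, so call sites should be guarded by IsSupported. A minimal C# sketch (the helper name AddGuarded is illustrative, not part of the API):

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

static Vector512<int> AddGuarded(Vector512<int> a, Vector512<int> b)
{
    if (Avx512F.IsSupported)
    {
        return Avx512F.Add(a, b); // hardware path: VPADDD on zmm registers
    }
    return a + b;                 // portable fallback via Vector512 operators
}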

Methods

Abs(Vector512<Int32>)

__m512i _mm512_abs_epi32 (__m512i a)

VPABSD zmm1 {k1}{z}, zmm2/m512/m32bcst

Abs(Vector512<Int64>)

__m512i _mm512_abs_epi64 (__m512i a)

VPABSQ zmm1 {k1}{z}, zmm2/m512/m64bcst

Add(Vector512<Double>, Vector512<Double>)

__m512d _mm512_add_pd (__m512d a, __m512d b)

VADDPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{er}

Add(Vector512<Double>, Vector512<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

Add(Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_add_epi32 (__m512i a, __m512i b)

VPADDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

Add(Vector512<Int64>, Vector512<Int64>)

__m512i _mm512_add_epi64 (__m512i a, __m512i b)

VPADDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

Add(Vector512<Single>, Vector512<Single>)

__m512 _mm512_add_ps (__m512 a, __m512 b)

VADDPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{er}

Add(Vector512<Single>, Vector512<Single>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

Add(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_add_epi32 (__m512i a, __m512i b)

VPADDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

Add(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_add_epi64 (__m512i a, __m512i b)

VPADDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
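
As an example of the element-wise arithmetic above, a short C# sketch that adds sixteen pairs of 32-bit integers at once (assumes Avx512F.IsSupported has already been checked):

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector512<int> left  = Vector512.Create(1);      // sixteen lanes of 1
Vector512<int> right = Vector512.Create(2);      // sixteen lanes of 2
Vector512<int> sum   = Avx512F.Add(left, right); // VPADDD: every lane is 3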

AddScalar(Vector128<Double>, Vector128<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

AddScalar(Vector128<Single>, Vector128<Single>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

AlignRight32(Vector512<Int32>, Vector512<Int32>, Byte)

__m512i _mm512_alignr_epi32 (__m512i a, __m512i b, const int count)

VALIGND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8

AlignRight32(Vector512<UInt32>, Vector512<UInt32>, Byte)

__m512i _mm512_alignr_epi32 (__m512i a, __m512i b, const int count)

VALIGND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8

AlignRight64(Vector512<Int64>, Vector512<Int64>, Byte)

__m512i _mm512_alignr_epi64 (__m512i a, __m512i b, const int count)

VALIGNQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8

AlignRight64(Vector512<UInt64>, Vector512<UInt64>, Byte)

__m512i _mm512_alignr_epi64 (__m512i a, __m512i b, const int count)

VALIGNQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
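
VALIGND/VALIGNQ treat the two sources as one concatenated buffer (the first operand supplies the upper half) and shift it right by whole elements. A sketch of AlignRight32, assuming AVX-512F support:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector512<int> upper = Vector512.Create(100); // becomes the high 16 elements
Vector512<int> lower = Vector512.Create(7);   // becomes the low 16 elements
// Shift the 32-element concatenation right by 4 ints: result lanes 0..11
// come from lower[4..15] and lanes 12..15 come from upper[0..3].
Vector512<int> r = Avx512F.AlignRight32(upper, lower, 4);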

And(Vector512<Byte>, Vector512<Byte>)

__m512i _mm512_and_si512 (__m512i a, __m512i b)

VPANDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

And(Vector512<Int16>, Vector512<Int16>)

__m512i _mm512_and_si512 (__m512i a, __m512i b)

VPANDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

And(Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_and_epi32 (__m512i a, __m512i b)

VPANDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

And(Vector512<Int64>, Vector512<Int64>)

__m512i _mm512_and_epi64 (__m512i a, __m512i b)

VPANDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

And(Vector512<SByte>, Vector512<SByte>)

__m512i _mm512_and_si512 (__m512i a, __m512i b)

VPANDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

And(Vector512<UInt16>, Vector512<UInt16>)

__m512i _mm512_and_si512 (__m512i a, __m512i b)

VPANDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

And(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_and_epi32 (__m512i a, __m512i b)

VPANDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

And(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_and_epi64 (__m512i a, __m512i b)

VPANDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

AndNot(Vector512<Byte>, Vector512<Byte>)

__m512i _mm512_andnot_si512 (__m512i a, __m512i b)

VPANDND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

AndNot(Vector512<Int16>, Vector512<Int16>)

__m512i _mm512_andnot_si512 (__m512i a, __m512i b)

VPANDND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

AndNot(Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_andnot_epi32 (__m512i a, __m512i b)

VPANDND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

AndNot(Vector512<Int64>, Vector512<Int64>)

__m512i _mm512_andnot_epi64 (__m512i a, __m512i b)

VPANDNQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

AndNot(Vector512<SByte>, Vector512<SByte>)

__m512i _mm512_andnot_si512 (__m512i a, __m512i b)

VPANDND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

AndNot(Vector512<UInt16>, Vector512<UInt16>)

__m512i _mm512_andnot_si512 (__m512i a, __m512i b)

VPANDND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

AndNot(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_andnot_epi32 (__m512i a, __m512i b)

VPANDND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

AndNot(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_andnot_epi64 (__m512i a, __m512i b)

VPANDNQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

BlendVariable(Vector512<Double>, Vector512<Double>, Vector512<Double>)

__m512d _mm512_blendv_pd (__m512d a, __m512d b, __m512d mask)

VBLENDMPD zmm1 {k1}, zmm2, zmm3/m512/m64bcst

BlendVariable(Vector512<Int32>, Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_blendv_epi32 (__m512i a, __m512i b, __m512i mask)

VPBLENDMD zmm1 {k1}, zmm2, zmm3/m512/m32bcst

BlendVariable(Vector512<Int64>, Vector512<Int64>, Vector512<Int64>)

__m512i _mm512_blendv_epi64 (__m512i a, __m512i b, __m512i mask)

VPBLENDMQ zmm1 {k1}, zmm2, zmm3/m512/m64bcst

BlendVariable(Vector512<Single>, Vector512<Single>, Vector512<Single>)

__m512 _mm512_blendv_ps (__m512 a, __m512 b, __m512 mask)

VBLENDMPS zmm1 {k1}, zmm2, zmm3/m512/m32bcst

BlendVariable(Vector512<UInt32>, Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_blendv_epu32 (__m512i a, __m512i b, __m512i mask)

VPBLENDMD zmm1 {k1}, zmm2, zmm3/m512/m32bcst

BlendVariable(Vector512<UInt64>, Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_blendv_epu64 (__m512i a, __m512i b, __m512i mask)

VPBLENDMQ zmm1 {k1}, zmm2, zmm3/m512/m64bcst
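
BlendVariable selects from the second source in lanes where the mask element's most significant bit is set. A sketch that computes a per-lane maximum by combining it with a comparison, assuming AVX-512F support:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector512<double> a = Vector512.Create(1.0);
Vector512<double> b = Vector512.Create(2.0);
Vector512<double> gt  = Avx512F.CompareGreaterThan(b, a); // all-ones where b > a
Vector512<double> max = Avx512F.BlendVariable(a, b, gt);  // takes b where the mask is set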

BroadcastScalarToVector512(Vector128<Double>)

__m512d _mm512_broadcastsd_pd (__m128d a)

VBROADCASTSD zmm1 {k1}{z}, xmm2/m64

BroadcastScalarToVector512(Vector128<Int32>)

__m512i _mm512_broadcastd_epi32 (__m128i a)

VPBROADCASTD zmm1 {k1}{z}, xmm2/m32

BroadcastScalarToVector512(Vector128<Int64>)

__m512i _mm512_broadcastq_epi64 (__m128i a)

VPBROADCASTQ zmm1 {k1}{z}, xmm2/m64

BroadcastScalarToVector512(Vector128<Single>)

__m512 _mm512_broadcastss_ps (__m128 a)

VBROADCASTSS zmm1 {k1}{z}, xmm2/m32

BroadcastScalarToVector512(Vector128<UInt32>)

__m512i _mm512_broadcastd_epi32 (__m128i a)

VPBROADCASTD zmm1 {k1}{z}, xmm2/m32

BroadcastScalarToVector512(Vector128<UInt64>)

__m512i _mm512_broadcastq_epi64 (__m128i a)

VPBROADCASTQ zmm1 {k1}{z}, xmm2/m64

BroadcastVector128ToVector512(Int32*)

__m512i _mm512_broadcast_i32x4 (__m128i const * mem_addr)

VBROADCASTI32x4 zmm1 {k1}{z}, m128

BroadcastVector128ToVector512(Single*)

__m512 _mm512_broadcast_f32x4 (__m128 const * mem_addr)

VBROADCASTF32x4 zmm1 {k1}{z}, m128

BroadcastVector128ToVector512(UInt32*)

__m512i _mm512_broadcast_i32x4 (__m128i const * mem_addr)

VBROADCASTI32x4 zmm1 {k1}{z}, m128

BroadcastVector256ToVector512(Double*)

__m512d _mm512_broadcast_f64x4 (__m256d const * mem_addr)

VBROADCASTF64x4 zmm1 {k1}{z}, m256

BroadcastVector256ToVector512(Int64*)

__m512i _mm512_broadcast_i64x4 (__m256i const * mem_addr)

VBROADCASTI64x4 zmm1 {k1}{z}, m256

BroadcastVector256ToVector512(UInt64*)

__m512i _mm512_broadcast_i64x4 (__m256i const * mem_addr)

VBROADCASTI64x4 zmm1 {k1}{z}, m256

Compare(Vector512<Double>, Vector512<Double>, FloatComparisonMode)

__m512d _mm512_cmp_pd (__m512d a, __m512d b, const int imm8)

VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8

Compare(Vector512<Single>, Vector512<Single>, FloatComparisonMode)

__m512 _mm512_cmp_ps (__m512 a, __m512 b, const int imm8)

VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8

CompareEqual(Vector512<Double>, Vector512<Double>)

__m512d _mm512_cmpeq_pd (__m512d a, __m512d b)

VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(0) The above native signature does not exist. We provide this additional overload for completeness.

CompareEqual(Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_cmpeq_epi32 (__m512i a, __m512i b)

VPCMPEQD k1 {k2}, zmm2, zmm3/m512/m32bcst

CompareEqual(Vector512<Int64>, Vector512<Int64>)

__m512i _mm512_cmpeq_epi64 (__m512i a, __m512i b)

VPCMPEQQ k1 {k2}, zmm2, zmm3/m512/m64bcst

CompareEqual(Vector512<Single>, Vector512<Single>)

__m512 _mm512_cmpeq_ps (__m512 a, __m512 b)

VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(0) The above native signature does not exist. We provide this additional overload for completeness.

CompareEqual(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_cmpeq_epu32 (__m512i a, __m512i b)

VPCMPEQD k1 {k2}, zmm2, zmm3/m512/m32bcst

CompareEqual(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_cmpeq_epu64 (__m512i a, __m512i b)

VPCMPEQQ k1 {k2}, zmm2, zmm3/m512/m64bcst

CompareGreaterThan(Vector512<Double>, Vector512<Double>)

__m512d _mm512_cmpgt_pd (__m512d a, __m512d b)

VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(14) The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThan(Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_cmpgt_epi32 (__m512i a, __m512i b)

VPCMPGTD k1 {k2}, zmm2, zmm3/m512/m32bcst

CompareGreaterThan(Vector512<Int64>, Vector512<Int64>)

__m512i _mm512_cmpgt_epi64 (__m512i a, __m512i b)

VPCMPGTQ k1 {k2}, zmm2, zmm3/m512/m64bcst

CompareGreaterThan(Vector512<Single>, Vector512<Single>)

__m512 _mm512_cmpgt_ps (__m512 a, __m512 b)

VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(14) The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThan(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_cmpgt_epu32 (__m512i a, __m512i b)

VPCMPUD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(6)

CompareGreaterThan(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_cmpgt_epu64 (__m512i a, __m512i b)

VPCMPUQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(6)

CompareGreaterThanOrEqual(Vector512<Double>, Vector512<Double>)

__m512d _mm512_cmpge_pd (__m512d a, __m512d b)

VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(13) The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThanOrEqual(Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_cmpge_epi32 (__m512i a, __m512i b)

VPCMPD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(5)

CompareGreaterThanOrEqual(Vector512<Int64>, Vector512<Int64>)

__m512i _mm512_cmpge_epi64 (__m512i a, __m512i b)

VPCMPQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(5)

CompareGreaterThanOrEqual(Vector512<Single>, Vector512<Single>)

__m512 _mm512_cmpge_ps (__m512 a, __m512 b)

VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(13) The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThanOrEqual(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_cmpge_epu32 (__m512i a, __m512i b)

VPCMPUD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(5)

CompareGreaterThanOrEqual(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_cmpge_epu64 (__m512i a, __m512i b)

VPCMPUQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(5)

CompareLessThan(Vector512<Double>, Vector512<Double>)

__m512d _mm512_cmplt_pd (__m512d a, __m512d b)

VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(1) The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThan(Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_cmplt_epi32 (__m512i a, __m512i b)

VPCMPD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(1)

CompareLessThan(Vector512<Int64>, Vector512<Int64>)

__m512i _mm512_cmplt_epi64 (__m512i a, __m512i b)

VPCMPQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(1)

CompareLessThan(Vector512<Single>, Vector512<Single>)

__m512 _mm512_cmplt_ps (__m512 a, __m512 b)

VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(1) The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThan(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_cmplt_epu32 (__m512i a, __m512i b)

VPCMPUD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(1)

CompareLessThan(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_cmplt_epu64 (__m512i a, __m512i b)

VPCMPUQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(1)

CompareLessThanOrEqual(Vector512<Double>, Vector512<Double>)

__m512d _mm512_cmple_pd (__m512d a, __m512d b)

VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(2) The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThanOrEqual(Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_cmple_epi32 (__m512i a, __m512i b)

VPCMPD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(2)

CompareLessThanOrEqual(Vector512<Int64>, Vector512<Int64>)

__m512i _mm512_cmple_epi64 (__m512i a, __m512i b)

VPCMPQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(2)

CompareLessThanOrEqual(Vector512<Single>, Vector512<Single>)

__m512 _mm512_cmple_ps (__m512 a, __m512 b)

VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(2) The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThanOrEqual(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_cmple_epu32 (__m512i a, __m512i b)

VPCMPUD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(2)

CompareLessThanOrEqual(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_cmple_epu64 (__m512i a, __m512i b)

VPCMPUQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(2)

CompareNotEqual(Vector512<Double>, Vector512<Double>)

__m512d _mm512_cmpneq_pd (__m512d a, __m512d b)

VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(4) The above native signature does not exist. We provide this additional overload for completeness.

CompareNotEqual(Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_cmpne_epi32 (__m512i a, __m512i b)

VPCMPD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(4)

CompareNotEqual(Vector512<Int64>, Vector512<Int64>)

__m512i _mm512_cmpne_epi64 (__m512i a, __m512i b)

VPCMPQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(4)

CompareNotEqual(Vector512<Single>, Vector512<Single>)

__m512 _mm512_cmpneq_ps (__m512 a, __m512 b)

VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(4) The above native signature does not exist. We provide this additional overload for completeness.

CompareNotEqual(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_cmpne_epu32 (__m512i a, __m512i b)

VPCMPUD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(4)

CompareNotEqual(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_cmpne_epu64 (__m512i a, __m512i b)

VPCMPUQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(4)
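
The vector-returning comparisons above yield all-ones (-1) in lanes where the predicate holds and zero elsewhere, which composes directly with the bitwise APIs. A short C# sketch, assuming AVX-512F support:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector512<int> values    = Vector512.Create(5);
Vector512<int> threshold = Vector512.Create(10);
Vector512<int> ltMask = Avx512F.CompareLessThan(values, threshold); // -1 in each passing lane
Vector512<int> kept   = Avx512F.And(values, ltMask);                // zeroes out failing lanes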

CompareNotGreaterThan(Vector512<Double>, Vector512<Double>)

__m512d _mm512_cmpngt_pd (__m512d a, __m512d b)

VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(10) The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThan(Vector512<Single>, Vector512<Single>)

__m512 _mm512_cmpngt_ps (__m512 a, __m512 b)

VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(10) The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThanOrEqual(Vector512<Double>, Vector512<Double>)

__m512d _mm512_cmpnge_pd (__m512d a, __m512d b)

VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(9) The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThanOrEqual(Vector512<Single>, Vector512<Single>)

__m512 _mm512_cmpnge_ps (__m512 a, __m512 b)

VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(9) The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThan(Vector512<Double>, Vector512<Double>)

__m512d _mm512_cmpnlt_pd (__m512d a, __m512d b)

VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(5) The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThan(Vector512<Single>, Vector512<Single>)

__m512 _mm512_cmpnlt_ps (__m512 a, __m512 b)

VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(5) The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThanOrEqual(Vector512<Double>, Vector512<Double>)

__m512d _mm512_cmpnle_pd (__m512d a, __m512d b)

VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(6) The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThanOrEqual(Vector512<Single>, Vector512<Single>)

__m512 _mm512_cmpnle_ps (__m512 a, __m512 b)

VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(6) The above native signature does not exist. We provide this additional overload for completeness.

CompareOrdered(Vector512<Double>, Vector512<Double>)

__m512d _mm512_cmpord_pd (__m512d a, __m512d b)

VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(7) The above native signature does not exist. We provide this additional overload for completeness.

CompareOrdered(Vector512<Single>, Vector512<Single>)

__m512 _mm512_cmpord_ps (__m512 a, __m512 b)

VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(7) The above native signature does not exist. We provide this additional overload for completeness.

CompareUnordered(Vector512<Double>, Vector512<Double>)

__m512d _mm512_cmpunord_pd (__m512d a, __m512d b)

VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(3) The above native signature does not exist. We provide this additional overload for completeness.

CompareUnordered(Vector512<Single>, Vector512<Single>)

__m512 _mm512_cmpunord_ps (__m512 a, __m512 b)

VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(3) The above native signature does not exist. We provide this additional overload for completeness.

ConvertScalarToVector128Double(Vector128<Double>, UInt32)

__m128d _mm_cvtu32_sd (__m128d a, unsigned int b)

VCVTUSI2SD xmm1, xmm2, r/m32

ConvertScalarToVector128Single(Vector128<Single>, Int32, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

ConvertScalarToVector128Single(Vector128<Single>, UInt32)

__m128 _mm_cvtu32_ss (__m128 a, unsigned int b)

VCVTUSI2SS xmm1, xmm2, r/m32

ConvertScalarToVector128Single(Vector128<Single>, UInt32, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

ConvertScalarToVector128Single(Vector128<Single>, Vector128<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

ConvertToInt32(Vector128<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

ConvertToInt32(Vector128<Single>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

ConvertToUInt32(Vector128<Double>)

unsigned int _mm_cvtsd_u32 (__m128d a)

VCVTSD2USI r32, xmm1/m64{er}

ConvertToUInt32(Vector128<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

ConvertToUInt32(Vector128<Single>)

unsigned int _mm_cvtss_u32 (__m128 a)

VCVTSS2USI r32, xmm1/m32{er}

ConvertToUInt32(Vector128<Single>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

ConvertToUInt32WithTruncation(Vector128<Double>)

unsigned int _mm_cvttsd_u32 (__m128d a)

VCVTTSD2USI r32, xmm1/m64{er}

ConvertToUInt32WithTruncation(Vector128<Single>)

unsigned int _mm_cvttss_u32 (__m128 a)

VCVTTSS2USI r32, xmm1/m32{er}
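
The scalar conversions operate on the lowest element of a Vector128. A sketch of the truncating form, assuming AVX-512F support:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector128<double> v = Vector128.CreateScalar(3.9);
uint truncated = Avx512F.ConvertToUInt32WithTruncation(v); // VCVTTSD2USI: yields 3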

ConvertToVector128Byte(Vector512<Int32>)

__m128i _mm512_cvtepi32_epi8 (__m512i a)

VPMOVDB xmm1/m128 {k1}{z}, zmm2

ConvertToVector128Byte(Vector512<Int64>)

__m128i _mm512_cvtepi64_epi8 (__m512i a)

VPMOVQB xmm1/m64 {k1}{z}, zmm2

ConvertToVector128Byte(Vector512<UInt32>)

__m128i _mm512_cvtepi32_epi8 (__m512i a)

VPMOVDB xmm1/m128 {k1}{z}, zmm2

ConvertToVector128Byte(Vector512<UInt64>)

__m128i _mm512_cvtepi64_epi8 (__m512i a)

VPMOVQB xmm1/m64 {k1}{z}, zmm2

ConvertToVector128ByteWithSaturation(Vector512<UInt32>)

__m128i _mm512_cvtusepi32_epi8 (__m512i a)

VPMOVUSDB xmm1/m128 {k1}{z}, zmm2

ConvertToVector128ByteWithSaturation(Vector512<UInt64>)

__m128i _mm512_cvtusepi64_epi8 (__m512i a)

VPMOVUSQB xmm1/m64 {k1}{z}, zmm2

ConvertToVector128Int16(Vector512<Int64>)

__m128i _mm512_cvtepi64_epi16 (__m512i a)

VPMOVQW xmm1/m128 {k1}{z}, zmm2

ConvertToVector128Int16(Vector512<UInt64>)

__m128i _mm512_cvtepi64_epi16 (__m512i a)

VPMOVQW xmm1/m128 {k1}{z}, zmm2

ConvertToVector128Int16WithSaturation(Vector512<Int64>)

__m128i _mm512_cvtsepi64_epi16 (__m512i a)

VPMOVSQW xmm1/m128 {k1}{z}, zmm2

ConvertToVector128SByte(Vector512<Int32>)

__m128i _mm512_cvtepi32_epi8 (__m512i a)

VPMOVDB xmm1/m128 {k1}{z}, zmm2

ConvertToVector128SByte(Vector512<Int64>)

__m128i _mm512_cvtepi64_epi8 (__m512i a)

VPMOVQB xmm1/m64 {k1}{z}, zmm2

ConvertToVector128SByte(Vector512<UInt32>)

__m128i _mm512_cvtepi32_epi8 (__m512i a)

VPMOVDB xmm1/m128 {k1}{z}, zmm2

ConvertToVector128SByte(Vector512<UInt64>)

__m128i _mm512_cvtepi64_epi8 (__m512i a)

VPMOVQB xmm1/m64 {k1}{z}, zmm2

ConvertToVector128SByteWithSaturation(Vector512<Int32>)

__m128i _mm512_cvtsepi32_epi8 (__m512i a)

VPMOVSDB xmm1/m128 {k1}{z}, zmm2

ConvertToVector128SByteWithSaturation(Vector512<Int64>)

__m128i _mm512_cvtsepi64_epi8 (__m512i a)

VPMOVSQB xmm1/m64 {k1}{z}, zmm2

ConvertToVector128UInt16(Vector512<Int64>)

__m128i _mm512_cvtepi64_epi16 (__m512i a)

VPMOVQW xmm1/m128 {k1}{z}, zmm2

ConvertToVector128UInt16(Vector512<UInt64>)

__m128i _mm512_cvtepi64_epi16 (__m512i a)

VPMOVQW xmm1/m128 {k1}{z}, zmm2

ConvertToVector128UInt16WithSaturation(Vector512<UInt64>)

__m128i _mm512_cvtusepi64_epi16 (__m512i a)

VPMOVUSQW xmm1/m128 {k1}{z}, zmm2

ConvertToVector256Int16(Vector512<Int32>)

__m256i _mm512_cvtepi32_epi16 (__m512i a)

VPMOVDW ymm1/m256 {k1}{z}, zmm2

ConvertToVector256Int16(Vector512<UInt32>)

__m256i _mm512_cvtepi32_epi16 (__m512i a)

VPMOVDW ymm1/m256 {k1}{z}, zmm2

ConvertToVector256Int16WithSaturation(Vector512<Int32>)

__m256i _mm512_cvtsepi32_epi16 (__m512i a)

VPMOVSDW ymm1/m256 {k1}{z}, zmm2

ConvertToVector256Int32(Vector512<Double>)

__m256i _mm512_cvtpd_epi32 (__m512d a)

VCVTPD2DQ ymm1 {k1}{z}, zmm2/m512/m64bcst{er}

ConvertToVector256Int32(Vector512<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

ConvertToVector256Int32(Vector512<Int64>)

__m256i _mm512_cvtepi64_epi32 (__m512i a)

VPMOVQD ymm1/m256 {k1}{z}, zmm2

ConvertToVector256Int32(Vector512<UInt64>)

__m256i _mm512_cvtepi64_epi32 (__m512i a)

VPMOVQD ymm1/m256 {k1}{z}, zmm2

ConvertToVector256Int32WithSaturation(Vector512<Int64>)

__m256i _mm512_cvtsepi64_epi32 (__m512i a)

VPMOVSQD ymm1/m256 {k1}{z}, zmm2

ConvertToVector256Int32WithTruncation(Vector512<Double>)

__m256i _mm512_cvttpd_epi32 (__m512d a)

VCVTTPD2DQ ymm1 {k1}{z}, zmm2/m512/m64bcst{sae}

ConvertToVector256Single(Vector512<Double>)

__m256 _mm512_cvtpd_ps (__m512d a)

VCVTPD2PS ymm1 {k1}{z}, zmm2/m512/m64bcst{er}

ConvertToVector256Single(Vector512<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

ConvertToVector256UInt16(Vector512<Int32>)

__m256i _mm512_cvtepi32_epi16 (__m512i a)

VPMOVDW ymm1/m256 {k1}{z}, zmm2

ConvertToVector256UInt16(Vector512<UInt32>)

__m256i _mm512_cvtepi32_epi16 (__m512i a)

VPMOVDW ymm1/m256 {k1}{z}, zmm2

ConvertToVector256UInt16WithSaturation(Vector512<UInt32>)

__m256i _mm512_cvtusepi32_epi16 (__m512i a)

VPMOVUSDW ymm1/m256 {k1}{z}, zmm2

ConvertToVector256UInt32(Vector512<Double>)

__m256i _mm512_cvtpd_epu32 (__m512d a)

VCVTPD2UDQ ymm1 {k1}{z}, zmm2/m512/m64bcst{er}

ConvertToVector256UInt32(Vector512<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

ConvertToVector256UInt32(Vector512<Int64>)

__m256i _mm512_cvtepi64_epi32 (__m512i a)

VPMOVQD ymm1/m256 {k1}{z}, zmm2

ConvertToVector256UInt32(Vector512<UInt64>)

__m256i _mm512_cvtepi64_epi32 (__m512i a)

VPMOVQD ymm1/m256 {k1}{z}, zmm2

ConvertToVector256UInt32WithSaturation(Vector512<UInt64>)

__m256i _mm512_cvtusepi64_epi32 (__m512i a)

VPMOVUSQD ymm1/m256 {k1}{z}, zmm2

ConvertToVector256UInt32WithTruncation(Vector512<Double>)

__m256i _mm512_cvttpd_epu32 (__m512d a)

VCVTTPD2UDQ ymm1 {k1}{z}, zmm2/m512/m64bcst{sae}

ConvertToVector512Double(Vector256<Int32>)

__m512d _mm512_cvtepi32_pd (__m256i a)

VCVTDQ2PD zmm1 {k1}{z}, ymm2/m256/m32bcst

ConvertToVector512Double(Vector256<Single>)

__m512d _mm512_cvtps_pd (__m256 a)

VCVTPS2PD zmm1 {k1}{z}, ymm2/m256/m32bcst{sae}

ConvertToVector512Double(Vector256<UInt32>)

__m512d _mm512_cvtepu32_pd (__m256i a)

VCVTUDQ2PD zmm1 {k1}{z}, ymm2/m256/m32bcst

ConvertToVector512Int32(Vector128<Byte>)

__m512i _mm512_cvtepu8_epi32 (__m128i a)

VPMOVZXBD zmm1 {k1}{z}, xmm2/m128

ConvertToVector512Int32(Vector128<SByte>)

__m512i _mm512_cvtepi8_epi32 (__m128i a)

VPMOVSXBD zmm1 {k1}{z}, xmm2/m128

ConvertToVector512Int32(Vector256<Int16>)

__m512i _mm512_cvtepi16_epi32 (__m256i a)

VPMOVSXWD zmm1 {k1}{z}, ymm2/m256

ConvertToVector512Int32(Vector256<UInt16>)

__m512i _mm512_cvtepu16_epi32 (__m256i a)

VPMOVZXWD zmm1 {k1}{z}, ymm2/m256

ConvertToVector512Int32(Vector512<Single>)

__m512i _mm512_cvtps_epi32 (__m512 a)

VCVTPS2DQ zmm1 {k1}{z}, zmm2/m512/m32bcst{er}

ConvertToVector512Int32(Vector512<Single>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

ConvertToVector512Int32WithTruncation(Vector512<Single>)

__m512i _mm512_cvttps_epi32 (__m512 a)

VCVTTPS2DQ zmm1 {k1}{z}, zmm2/m512/m32bcst{sae}

ConvertToVector512Int64(Vector128<Byte>)

__m512i _mm512_cvtepu8_epi64 (__m128i a)

VPMOVZXBQ zmm1 {k1}{z}, xmm2/m64

ConvertToVector512Int64(Vector128<Int16>)

__m512i _mm512_cvtepi16_epi64 (__m128i a)

VPMOVSXWQ zmm1 {k1}{z}, xmm2/m128

ConvertToVector512Int64(Vector128<SByte>)

__m512i _mm512_cvtepi8_epi64 (__m128i a)

VPMOVSXBQ zmm1 {k1}{z}, xmm2/m64

ConvertToVector512Int64(Vector128<UInt16>)

__m512i _mm512_cvtepu16_epi64 (__m128i a)

VPMOVZXWQ zmm1 {k1}{z}, xmm2/m128

ConvertToVector512Int64(Vector256<Int32>)

__m512i _mm512_cvtepi32_epi64 (__m256i a)

VPMOVSXDQ zmm1 {k1}{z}, ymm2/m256

ConvertToVector512Int64(Vector256<UInt32>)

__m512i _mm512_cvtepu32_epi64 (__m256i a)

VPMOVZXDQ zmm1 {k1}{z}, ymm2/m256
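
The widening conversions above sign- or zero-extend every element in one instruction. A sketch that widens sixteen unsigned bytes to sixteen 32-bit integers (VPMOVZXBD), assuming AVX-512F support:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector128<byte> bytes = Vector128.Create((byte)200);             // sixteen bytes of 200
Vector512<int> widened = Avx512F.ConvertToVector512Int32(bytes); // each lane holds 200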

ConvertToVector512Single(Vector512<Int32>)

__m512 _mm512_cvtepi32_ps (__m512i a)

VCVTDQ2PS zmm1 {k1}{z}, zmm2/m512/m32bcst{er}

ConvertToVector512Single(Vector512<Int32>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

ConvertToVector512Single(Vector512<UInt32>)

__m512 _mm512_cvtepu32_ps (__m512i a)

VCVTUDQ2PS zmm1 {k1}{z}, zmm2/m512/m32bcst{er}

ConvertToVector512Single(Vector512<UInt32>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

ConvertToVector512UInt32(Vector128<Byte>)

__m512i _mm512_cvtepu8_epi32 (__m128i a)

VPMOVZXBD zmm1 {k1}{z}, xmm2/m128

ConvertToVector512UInt32(Vector128<SByte>)

__m512i _mm512_cvtepi8_epi32 (__m128i a)

VPMOVSXBD zmm1 {k1}{z}, xmm2/m128

ConvertToVector512UInt32(Vector256<Int16>)

__m512i _mm512_cvtepi16_epi32 (__m256i a)

VPMOVSXWD zmm1 {k1}{z}, ymm2/m256

ConvertToVector512UInt32(Vector256<UInt16>)

__m512i _mm512_cvtepu16_epi32 (__m256i a)

VPMOVZXWD zmm1 {k1}{z}, ymm2/m256

ConvertToVector512UInt32(Vector512<Single>)

__m512i _mm512_cvtps_epu32 (__m512 a)

VCVTPS2UDQ zmm1 {k1}{z}, zmm2/m512/m32bcst{er}

ConvertToVector512UInt32(Vector512<Single>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

ConvertToVector512UInt32WithTruncation(Vector512<Single>)

__m512i _mm512_cvttps_epu32 (__m512 a)

VCVTTPS2UDQ zmm1 {k1}{z}, zmm2/m512/m32bcst{sae}

ConvertToVector512UInt64(Vector128<Byte>)

__m512i _mm512_cvtepu8_epi64 (__m128i a)

VPMOVZXBQ zmm1 {k1}{z}, xmm2/m64

ConvertToVector512UInt64(Vector128<Int16>)

__m512i _mm512_cvtepi16_epi64 (__m128i a)

VPMOVSXWQ zmm1 {k1}{z}, xmm2/m128

ConvertToVector512UInt64(Vector128<SByte>)

__m512i _mm512_cvtepi8_epi64 (__m128i a)

VPMOVSXBQ zmm1 {k1}{z}, xmm2/m64

ConvertToVector512UInt64(Vector128<UInt16>)

__m512i _mm512_cvtepu16_epi64 (__m128i a)

VPMOVZXWQ zmm1 {k1}{z}, xmm2/m128

ConvertToVector512UInt64(Vector256<Int32>)

__m512i _mm512_cvtepi32_epi64 (__m256i a)

VPMOVSXDQ zmm1 {k1}{z}, ymm2/m256

ConvertToVector512UInt64(Vector256<UInt32>)

__m512i _mm512_cvtepu32_epi64 (__m256i a)

VPMOVZXDQ zmm1 {k1}{z}, ymm2/m256

Divide(Vector512<Double>, Vector512<Double>)

__m512d _mm512_div_pd (__m512d a, __m512d b)

VDIVPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{er}

Divide(Vector512<Double>, Vector512<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

Divide(Vector512<Single>, Vector512<Single>)

__m512 _mm512_div_ps (__m512 a, __m512 b)

VDIVPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{er}

Divide(Vector512<Single>, Vector512<Single>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

DivideScalar(Vector128<Double>, Vector128<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

DivideScalar(Vector128<Single>, Vector128<Single>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

DuplicateEvenIndexed(Vector512<Double>)

__m512d _mm512_movedup_pd (__m512d a)

VMOVDDUP zmm1 {k1}{z}, zmm2/m512

DuplicateEvenIndexed(Vector512<Single>)

__m512 _mm512_moveldup_ps (__m512 a)

VMOVSLDUP zmm1 {k1}{z}, zmm2/m512

DuplicateOddIndexed(Vector512<Single>)

__m512 _mm512_movehdup_ps (__m512 a)

VMOVSHDUP zmm1 {k1}{z}, zmm2/m512

Equals(Object)

Determines whether the specified object is equal to the current object.

(Inherited from Object)
ExtractVector128(Vector512<Byte>, Byte)

__m128i _mm512_extracti128_si512 (__m512i a, const int imm8)

VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8

ExtractVector128(Vector512<Double>, Byte)

__m128d _mm512_extractf128_pd (__m512d a, const int imm8)

VEXTRACTF32x4 xmm1/m128 {k1}{z}, zmm2, imm8

ExtractVector128(Vector512<Int16>, Byte)

__m128i _mm512_extracti128_si512 (__m512i a, const int imm8)

VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8

ExtractVector128(Vector512<Int32>, Byte)

__m128i _mm512_extracti32x4_epi32 (__m512i a, const int imm8)

VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8

ExtractVector128(Vector512<Int64>, Byte)

__m128i _mm512_extracti128_si512 (__m512i a, const int imm8)

VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8

ExtractVector128(Vector512<SByte>, Byte)

__m128i _mm512_extracti128_si512 (__m512i a, const int imm8)

VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8

ExtractVector128(Vector512<Single>, Byte)

__m128 _mm512_extractf32x4_ps (__m512 a, const int imm8)

VEXTRACTF32x4 xmm1/m128 {k1}{z}, zmm2, imm8

ExtractVector128(Vector512<UInt16>, Byte)

__m128i _mm512_extracti128_si512 (__m512i a, const int imm8)

VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8

ExtractVector128(Vector512<UInt32>, Byte)

__m128i _mm512_extracti32x4_epi32 (__m512i a, const int imm8)

VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8

ExtractVector128(Vector512<UInt64>, Byte)

__m128i _mm512_extracti128_si512 (__m512i a, const int imm8)

VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8

ExtractVector256(Vector512<Byte>, Byte)

__m256i _mm512_extracti256_si512 (__m512i a, const int imm8)

VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8

ExtractVector256(Vector512<Double>, Byte)

__m256d _mm512_extractf64x4_pd (__m512d a, const int imm8)

VEXTRACTF64x4 ymm1/m256 {k1}{z}, zmm2, imm8

ExtractVector256(Vector512<Int16>, Byte)

__m256i _mm512_extracti256_si512 (__m512i a, const int imm8)

VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8

ExtractVector256(Vector512<Int32>, Byte)

__m256i _mm512_extracti256_si512 (__m512i a, const int imm8)

VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8

ExtractVector256(Vector512<Int64>, Byte)

__m256i _mm512_extracti64x4_epi64 (__m512i a, const int imm8)

VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8

ExtractVector256(Vector512<SByte>, Byte)

__m256i _mm512_extracti256_si512 (__m512i a, const int imm8)

VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8

ExtractVector256(Vector512<Single>, Byte)

__m256 _mm512_extractf256_ps (__m512 a, const int imm8)

VEXTRACTF64x4 ymm1/m256 {k1}{z}, zmm2, imm8

ExtractVector256(Vector512<UInt16>, Byte)

__m256i _mm512_extracti256_si512 (__m512i a, const int imm8)

VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8

ExtractVector256(Vector512<UInt32>, Byte)

__m256i _mm512_extracti256_si512 (__m512i a, const int imm8)

VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8

ExtractVector256(Vector512<UInt64>, Byte)

__m256i _mm512_extracti64x4_epi64 (__m512i a, const int imm8)

VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8

Fixup(Vector512<Double>, Vector512<Double>, Vector512<Int64>, Byte)

__m512d _mm512_fixupimm_pd(__m512d a, __m512d b, __m512i tbl, int imm); VFIXUPIMMPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{sae}, imm8

Fixup(Vector512<Single>, Vector512<Single>, Vector512<Int32>, Byte)

__m512 _mm512_fixupimm_ps(__m512 a, __m512 b, __m512i tbl, int imm); VFIXUPIMMPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{sae}, imm8

FixupScalar(Vector128<Double>, Vector128<Double>, Vector128<Int64>, Byte)

__m128d _mm_fixupimm_sd(__m128d a, __m128d b, __m128i tbl, int imm); VFIXUPIMMSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8

FixupScalar(Vector128<Single>, Vector128<Single>, Vector128<Int32>, Byte)

__m128 _mm_fixupimm_ss(__m128 a, __m128 b, __m128i tbl, int imm); VFIXUPIMMSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8

FusedMultiplyAdd(Vector512<Double>, Vector512<Double>, Vector512<Double>)

__m512d _mm512_fmadd_pd (__m512d a, __m512d b, __m512d c)

VFMADDPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

FusedMultiplyAdd(Vector512<Double>, Vector512<Double>, Vector512<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

FusedMultiplyAdd(Vector512<Single>, Vector512<Single>, Vector512<Single>)

__m512 _mm512_fmadd_ps (__m512 a, __m512 b, __m512 c)

VFMADDPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

FusedMultiplyAdd(Vector512<Single>, Vector512<Single>, Vector512<Single>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

FusedMultiplyAddNegated(Vector512<Double>, Vector512<Double>, Vector512<Double>)

__m512d _mm512_fnmadd_pd (__m512d a, __m512d b, __m512d c)

VFNMADDPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

FusedMultiplyAddNegated(Vector512<Double>, Vector512<Double>, Vector512<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

FusedMultiplyAddNegated(Vector512<Single>, Vector512<Single>, Vector512<Single>)

__m512 _mm512_fnmadd_ps (__m512 a, __m512 b, __m512 c)

VFNMADDPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

FusedMultiplyAddNegated(Vector512<Single>, Vector512<Single>, Vector512<Single>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

FusedMultiplyAddNegatedScalar(Vector128<Double>, Vector128<Double>, Vector128<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

FusedMultiplyAddNegatedScalar(Vector128<Single>, Vector128<Single>, Vector128<Single>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

FusedMultiplyAddScalar(Vector128<Double>, Vector128<Double>, Vector128<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

FusedMultiplyAddScalar(Vector128<Single>, Vector128<Single>, Vector128<Single>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

FusedMultiplyAddSubtract(Vector512<Double>, Vector512<Double>, Vector512<Double>)

__m512d _mm512_fmaddsub_pd (__m512d a, __m512d b, __m512d c)

VFMADDSUBPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

FusedMultiplyAddSubtract(Vector512<Double>, Vector512<Double>, Vector512<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

FusedMultiplyAddSubtract(Vector512<Single>, Vector512<Single>, Vector512<Single>)

__m512 _mm512_fmaddsub_ps (__m512 a, __m512 b, __m512 c)

VFMADDSUBPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

FusedMultiplyAddSubtract(Vector512<Single>, Vector512<Single>, Vector512<Single>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

FusedMultiplySubtract(Vector512<Double>, Vector512<Double>, Vector512<Double>)

__m512d _mm512_fmsub_pd (__m512d a, __m512d b, __m512d c)

VFMSUBPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

FusedMultiplySubtract(Vector512<Double>, Vector512<Double>, Vector512<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

FusedMultiplySubtract(Vector512<Single>, Vector512<Single>, Vector512<Single>)

__m512 _mm512_fmsub_ps (__m512 a, __m512 b, __m512 c)

VFMSUBPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

FusedMultiplySubtract(Vector512<Single>, Vector512<Single>, Vector512<Single>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

FusedMultiplySubtractAdd(Vector512<Double>, Vector512<Double>, Vector512<Double>)

__m512d _mm512_fmsubadd_pd (__m512d a, __m512d b, __m512d c)

VFMSUBADDPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

FusedMultiplySubtractAdd(Vector512<Double>, Vector512<Double>, Vector512<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

FusedMultiplySubtractAdd(Vector512<Single>, Vector512<Single>, Vector512<Single>)

__m512 _mm512_fmsubadd_ps (__m512 a, __m512 b, __m512 c)

VFMSUBADDPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

FusedMultiplySubtractAdd(Vector512<Single>, Vector512<Single>, Vector512<Single>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

FusedMultiplySubtractNegated(Vector512<Double>, Vector512<Double>, Vector512<Double>)

__m512d _mm512_fnmsub_pd (__m512d a, __m512d b, __m512d c)

VFNMSUBPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

FusedMultiplySubtractNegated(Vector512<Double>, Vector512<Double>, Vector512<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

FusedMultiplySubtractNegated(Vector512<Single>, Vector512<Single>, Vector512<Single>)

__m512 _mm512_fnmsub_ps (__m512 a, __m512 b, __m512 c)

VFNMSUBPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

FusedMultiplySubtractNegated(Vector512<Single>, Vector512<Single>, Vector512<Single>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

FusedMultiplySubtractNegatedScalar(Vector128<Double>, Vector128<Double>, Vector128<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

FusedMultiplySubtractNegatedScalar(Vector128<Single>, Vector128<Single>, Vector128<Single>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

FusedMultiplySubtractScalar(Vector128<Double>, Vector128<Double>, Vector128<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

FusedMultiplySubtractScalar(Vector128<Single>, Vector128<Single>, Vector128<Single>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.
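
The fused forms above compute a multiply and an add or subtract with a single rounding step per lane. A sketch of FusedMultiplyAdd, assuming AVX-512F support:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector512<float> a = Vector512.Create(2.0f);
Vector512<float> b = Vector512.Create(3.0f);
Vector512<float> c = Vector512.Create(1.0f);
Vector512<float> r = Avx512F.FusedMultiplyAdd(a, b, c); // (a * b) + c: every lane is 7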

GetExponent(Vector512<Double>)

__m512d _mm512_getexp_pd (__m512d a)

VGETEXPPD zmm1 {k1}{z}, zmm2/m512/m64bcst{sae}

GetExponent(Vector512<Single>)

__m512 _mm512_getexp_ps (__m512 a)

VGETEXPPS zmm1 {k1}{z}, zmm2/m512/m32bcst{sae}

GetExponentScalar(Vector128<Double>)

__m128d _mm_getexp_sd (__m128d a)

VGETEXPSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}

GetExponentScalar(Vector128<Double>, Vector128<Double>)

__m128d _mm_getexp_sd (__m128d a, __m128d b)

VGETEXPSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae} The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.

GetExponentScalar(Vector128<Single>)

__m128 _mm_getexp_ss (__m128 a)

VGETEXPSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}

GetExponentScalar(Vector128<Single>, Vector128<Single>)

__m128 _mm_getexp_ss (__m128 a, __m128 b)

VGETEXPSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae} The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.

GetHashCode()

Serves as the default hash function.

(Inherited from Object)
GetMantissa(Vector512<Double>, Byte)

__m512d _mm512_getmant_pd (__m512d a)

VGETMANTPD zmm1 {k1}{z}, zmm2/m512/m64bcst{sae}

GetMantissa(Vector512<Single>, Byte)

__m512 _mm512_getmant_ps (__m512 a)

VGETMANTPS zmm1 {k1}{z}, zmm2/m512/m32bcst{sae}

GetMantissaScalar(Vector128<Double>, Byte)

__m128d _mm_getmant_sd (__m128d a)

VGETMANTSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}

GetMantissaScalar(Vector128<Double>, Vector128<Double>, Byte)

__m128d _mm_getmant_sd (__m128d a, __m128d b)

VGETMANTSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae} The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.

GetMantissaScalar(Vector128<Single>, Byte)

__m128 _mm_getmant_ss (__m128 a)

VGETMANTSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}

GetMantissaScalar(Vector128<Single>, Vector128<Single>, Byte)

__m128 _mm_getmant_ss (__m128 a, __m128 b)

VGETMANTSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae} The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.

GetType()

Gets the Type of the current instance.

(Inherited from Object)
InsertVector128(Vector512<Byte>, Vector128<Byte>, Byte)

__m512i _mm512_inserti128_si512 (__m512i a, __m128i b, const int imm8)

VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8

InsertVector128(Vector512<Double>, Vector128<Double>, Byte)

__m512d _mm512_insertf128_pd (__m512d a, __m128d b, int imm8)

VINSERTF32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8

InsertVector128(Vector512<Int16>, Vector128<Int16>, Byte)

__m512i _mm512_inserti128_si512 (__m512i a, __m128i b, const int imm8)

VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8

InsertVector128(Vector512<Int32>, Vector128<Int32>, Byte)

__m512i _mm512_inserti32x4_epi32 (__m512i a, __m128i b, const int imm8)

VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8

InsertVector128(Vector512<Int64>, Vector128<Int64>, Byte)

__m512i _mm512_inserti128_si512 (__m512i a, __m128i b, const int imm8)

VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8

InsertVector128(Vector512<SByte>, Vector128<SByte>, Byte)

__m512i _mm512_inserti128_si512 (__m512i a, __m128i b, const int imm8)

VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8

InsertVector128(Vector512<Single>, Vector128<Single>, Byte)

__m512 _mm512_insertf32x4_ps (__m512 a, __m128 b, int imm8)

VINSERTF32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8

InsertVector128(Vector512<UInt16>, Vector128<UInt16>, Byte)

__m512i _mm512_inserti128_si512 (__m512i a, __m128i b, const int imm8)

VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8

InsertVector128(Vector512<UInt32>, Vector128<UInt32>, Byte)

__m512i _mm512_inserti32x4_epi32 (__m512i a, __m128i b, const int imm8)

VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8

InsertVector128(Vector512<UInt64>, Vector128<UInt64>, Byte)

__m512i _mm512_inserti128_si512 (__m512i a, __m128i b, const int imm8)

VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8

InsertVector256(Vector512<Byte>, Vector256<Byte>, Byte)

__m512i _mm512_inserti256_si512 (__m512i a, __m256i b, const int imm8)

VINSERTI64x4 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8

InsertVector256(Vector512<Double>, Vector256<Double>, Byte)

__m512d _mm512_insertf64x4_pd (__m512d a, __m256d b, int imm8)

VINSERTF64x4 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8

InsertVector256(Vector512<Int16>, Vector256<Int16>, Byte)

__m512i _mm512_inserti256_si512 (__m512i a, __m256i b, const int imm8)

VINSERTI64x4 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8

InsertVector256(Vector512<Int32>, Vector256<Int32>, Byte)

__m512i _mm512_inserti256_si512 (__m512i a, __m256i b, const int imm8)

VINSERTI64x4 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8

InsertVector256(Vector512<Int64>, Vector256<Int64>, Byte)

__m512i _mm512_inserti64x4_epi64 (__m512i a, __m256i b, const int imm8)

VINSERTI64x4 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8

InsertVector256(Vector512<SByte>, Vector256<SByte>, Byte)

__m512i _mm512_inserti256_si512 (__m512i a, __m256i b, const int imm8)

VINSERTI64x4 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8

InsertVector256(Vector512<Single>, Vector256<Single>, Byte)

__m512 _mm512_insertf256_ps (__m512 a, __m256 b, int imm8)

VINSERTF64x4 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8

InsertVector256(Vector512<UInt16>, Vector256<UInt16>, Byte)

__m512i _mm512_inserti256_si512 (__m512i a, __m256i b, const int imm8)

VINSERTI64x4 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8

InsertVector256(Vector512<UInt32>, Vector256<UInt32>, Byte)

__m512i _mm512_inserti256_si512 (__m512i a, __m256i b, const int imm8)

VINSERTI64x4 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8

InsertVector256(Vector512<UInt64>, Vector256<UInt64>, Byte)

__m512i _mm512_inserti64x4_epi64 (__m512i a, __m256i b, const int imm8)

VINSERTI64x4 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8
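
ExtractVector256 and InsertVector256 address the two 256-bit halves by index (0 is the low half). A sketch that swaps the halves of a 512-bit vector, assuming AVX-512F support:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector512<long> v = Vector512.Create(0L, 1, 2, 3, 4, 5, 6, 7);
Vector256<long> low  = Avx512F.ExtractVector256(v, 0); // elements 0..3
Vector256<long> high = Avx512F.ExtractVector256(v, 1); // elements 4..7
Vector512<long> swapped =
    Avx512F.InsertVector256(Avx512F.InsertVector256(v, high, 0), low, 1);
// swapped now holds 4, 5, 6, 7, 0, 1, 2, 3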

LoadAlignedVector512(Byte*)

__m512i _mm512_load_si512 (__m512i const * mem_addr)

VMOVDQA32 zmm1 {k1}{z}, m512

LoadAlignedVector512(Double*)

__m512d _mm512_load_pd (double const * mem_addr)

VMOVAPD zmm1 {k1}{z}, m512

LoadAlignedVector512(Int16*)

__m512i _mm512_load_si512 (__m512i const * mem_addr)

VMOVDQA32 zmm1 {k1}{z}, m512

LoadAlignedVector512(Int32*)

__m512i _mm512_load_epi32 (__m512i const * mem_addr)

VMOVDQA32 zmm1 {k1}{z}, m512

LoadAlignedVector512(Int64*)

__m512i _mm512_load_epi64 (__m512i const * mem_addr)

VMOVDQA64 zmm1 {k1}{z}, m512

LoadAlignedVector512(SByte*)

__m512i _mm512_load_si512 (__m512i const * mem_addr)

VMOVDQA32 zmm1 {k1}{z}, m512

LoadAlignedVector512(Single*)

__m512 _mm512_load_ps (float const * mem_addr)

VMOVAPS zmm1 {k1}{z}, m512

LoadAlignedVector512(UInt16*)

__m512i _mm512_load_si512 (__m512i const * mem_addr)

VMOVDQA32 zmm1 {k1}{z}, m512

LoadAlignedVector512(UInt32*)

__m512i _mm512_load_epi32 (__m512i const * mem_addr)

VMOVDQA32 zmm1 {k1}{z}, m512

LoadAlignedVector512(UInt64*)

__m512i _mm512_load_epi64 (__m512i const * mem_addr)

VMOVDQA64 zmm1 {k1}{z}, m512

LoadAlignedVector512NonTemporal(Byte*)

__m512i _mm512_stream_load_si512 (__m512i const* mem_addr)

VMOVNTDQA zmm1, m512

LoadAlignedVector512NonTemporal(Int16*)

__m512i _mm512_stream_load_si512 (__m512i const* mem_addr)

VMOVNTDQA zmm1, m512

LoadAlignedVector512NonTemporal(Int32*)

__m512i _mm512_stream_load_si512 (__m512i const* mem_addr)

VMOVNTDQA zmm1, m512

LoadAlignedVector512NonTemporal(Int64*)

__m512i _mm512_stream_load_si512 (__m512i const* mem_addr)

VMOVNTDQA zmm1, m512

LoadAlignedVector512NonTemporal(SByte*)

__m512i _mm512_stream_load_si512 (__m512i const* mem_addr)

VMOVNTDQA zmm1, m512

LoadAlignedVector512NonTemporal(UInt16*)

__m512i _mm512_stream_load_si512 (__m512i const* mem_addr)

VMOVNTDQA zmm1, m512

LoadAlignedVector512NonTemporal(UInt32*)

__m512i _mm512_stream_load_si512 (__m512i const* mem_addr)

VMOVNTDQA zmm1, m512

LoadAlignedVector512NonTemporal(UInt64*)

__m512i _mm512_stream_load_si512 (__m512i const* mem_addr)

VMOVNTDQA zmm1, m512

LoadVector512(Byte*)

__m512i _mm512_loadu_si512 (__m512i const * mem_addr)

VMOVDQU32 zmm1 {k1}{z}, m512

LoadVector512(Double*)

__m512d _mm512_loadu_pd (double const * mem_addr)

VMOVUPD zmm1 {k1}{z}, m512

LoadVector512(Int16*)

__m512i _mm512_loadu_si512 (__m512i const * mem_addr)

VMOVDQU32 zmm1 {k1}{z}, m512

LoadVector512(Int32*)

__m512i _mm512_loadu_epi32 (__m512i const * mem_addr)

VMOVDQU32 zmm1 {k1}{z}, m512

LoadVector512(Int64*)

__m512i _mm512_loadu_epi64 (__m512i const * mem_addr)

VMOVDQU64 zmm1 {k1}{z}, m512

LoadVector512(SByte*)

__m512i _mm512_loadu_si512 (__m512i const * mem_addr)

VMOVDQU32 zmm1 {k1}{z}, m512

LoadVector512(Single*)

__m512 _mm512_loadu_ps (float const * mem_addr)

VMOVUPS zmm1 {k1}{z}, m512

LoadVector512(UInt16*)

__m512i _mm512_loadu_si512 (__m512i const * mem_addr)

VMOVDQU32 zmm1 {k1}{z}, m512

LoadVector512(UInt32*)

__m512i _mm512_loadu_epi32 (__m512i const * mem_addr)

VMOVDQU32 zmm1 {k1}{z}, m512

LoadVector512(UInt64*)

__m512i _mm512_loadu_epi64 (__m512i const * mem_addr)

VMOVDQU64 zmm1 {k1}{z}, m512
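
The load and store APIs take raw pointers and therefore need an unsafe context. A sketch that loads sixteen floats, doubles them, and stores the result (assumes AVX-512F support and a project that allows unsafe blocks):

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

float[] data = new float[32];
unsafe
{
    fixed (float* p = data)
    {
        Vector512<float> v = Avx512F.LoadVector512(p); // unaligned load of data[0..15]
        Avx512F.Store(p + 16, Avx512F.Add(v, v));      // write 2*v into data[16..31]
    }
}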

Max(Vector512<Double>, Vector512<Double>)

__m512d _mm512_max_pd (__m512d a, __m512d b)

VMAXPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{sae}

Max(Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_max_epi32 (__m512i a, __m512i b)

VPMAXSD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

Max(Vector512<Int64>, Vector512<Int64>)

__m512i _mm512_max_epi64 (__m512i a, __m512i b)

VPMAXSQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

Max(Vector512<Single>, Vector512<Single>)

__m512 _mm512_max_ps (__m512 a, __m512 b)

VMAXPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{sae}

Max(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_max_epu32 (__m512i a, __m512i b)

VPMAXUD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

Max(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_max_epu64 (__m512i a, __m512i b)

VPMAXUQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

MemberwiseClone()

Creates a shallow copy of the current Object.

(Inherited from Object)
Min(Vector512<Double>, Vector512<Double>)

__m512d _mm512_min_pd (__m512d a, __m512d b)

VMINPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{sae}

Min(Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_min_epi32 (__m512i a, __m512i b)

VPMINSD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

Min(Vector512<Int64>, Vector512<Int64>)

__m512i _mm512_min_epi64 (__m512i a, __m512i b)

VPMINSQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

Min(Vector512<Single>, Vector512<Single>)

__m512 _mm512_min_ps (__m512 a, __m512 b)

VMINPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{sae}

Min(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_min_epu32 (__m512i a, __m512i b)

VPMINUD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

Min(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_min_epu64 (__m512i a, __m512i b)

VPMINUQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
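
Max and Min compose naturally into a per-lane clamp. A short C# sketch, assuming AVX-512F support:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector512<float> x     = Vector512.Create(3.5f);
Vector512<float> lower = Vector512.Create(0.0f);
Vector512<float> upper = Vector512.Create(1.0f);
// Clamp every lane to [0, 1]; here each lane becomes 1.
Vector512<float> clamped = Avx512F.Min(Avx512F.Max(x, lower), upper);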

Multiply(Vector512<Double>, Vector512<Double>)

__m512d _mm512_mul_pd (__m512d a, __m512d b)

VMULPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{er}

Multiply(Vector512<Double>, Vector512<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

Multiply(Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_mul_epi32 (__m512i a, __m512i b)

VPMULDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

Multiply(Vector512<Single>, Vector512<Single>)

__m512 _mm512_mul_ps (__m512 a, __m512 b)

VMULPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{er}

Multiply(Vector512<Single>, Vector512<Single>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

Multiply(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_mul_epu32 (__m512i a, __m512i b)

VPMULUDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

MultiplyLow(Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_mullo_epi32 (__m512i a, __m512i b)

VPMULLD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

MultiplyLow(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_mullo_epi32 (__m512i a, __m512i b)

VPMULLD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
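
Note the difference between the two integer multiplies: Multiply widens the even-indexed 32-bit lanes into eight 64-bit products, while MultiplyLow keeps all sixteen lanes and truncates each product to its low 32 bits. A sketch, assuming AVX-512F support:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector512<int> a = Vector512.Create(100_000);
Vector512<int> b = Vector512.Create(100_000);
Vector512<long> wide = Avx512F.Multiply(a, b);    // VPMULDQ: 10_000_000_000 per 64-bit lane
Vector512<int>  low  = Avx512F.MultiplyLow(a, b); // VPMULLD: truncated to 1_410_065_408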

MultiplyScalar(Vector128<Double>, Vector128<Double>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

MultiplyScalar(Vector128<Single>, Vector128<Single>, FloatRoundingMode)

Provides access to X86 AVX512F hardware instructions via intrinsics.

Or(Vector512<Byte>, Vector512<Byte>)

__m512i _mm512_or_si512 (__m512i a, __m512i b)

VPORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

Or(Vector512<Int16>, Vector512<Int16>)

__m512i _mm512_or_si512 (__m512i a, __m512i b)

VPORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

Or(Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_or_epi32 (__m512i a, __m512i b)

VPORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

Or(Vector512<Int64>, Vector512<Int64>)

__m512i _mm512_or_epi64 (__m512i a, __m512i b)

VPORQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

Or(Vector512<SByte>, Vector512<SByte>)

__m512i _mm512_or_si512 (__m512i a, __m512i b)

VPORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

Or(Vector512<UInt16>, Vector512<UInt16>)

__m512i _mm512_or_si512 (__m512i a, __m512i b)

VPORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

Or(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_or_epi32 (__m512i a, __m512i b)

VPORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

Or(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_or_epi64 (__m512i a, __m512i b)

VPORQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

Permute2x64(Vector512<Double>, Byte)

__m512d _mm512_permute_pd (__m512d a, int imm8)

VPERMILPD zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8

Permute4x32(Vector512<Single>, Byte)

__m512 _mm512_permute_ps (__m512 a, int imm8)

VPERMILPS zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8

Permute4x64(Vector512<Double>, Byte)

__m512d _mm512_permute4x64_pd (__m512d a, const int imm8)

VPERMPD zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8

Permute4x64(Vector512<Int64>, Byte)

__m512i _mm512_permute4x64_epi64 (__m512i a, const int imm8)

VPERMQ zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8

Permute4x64(Vector512<UInt64>, Byte)

__m512i _mm512_permute4x64_epi64 (__m512i a, const int imm8)

VPERMQ zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8

PermuteVar16x32(Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_permutevar16x32_epi32 (__m512i a, __m512i b)

VPERMD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

PermuteVar16x32(Vector512<Single>, Vector512<Int32>)

__m512 _mm512_permutevar16x32_ps (__m512 a, __m512i b)

VPERMPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

PermuteVar16x32(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_permutevar16x32_epi32 (__m512i a, __m512i b)

VPERMD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
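
PermuteVar16x32 rearranges lanes using a per-element index vector, so arbitrary shuffles such as a full reversal take one instruction. A sketch, assuming AVX-512F support:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector512<int> v = Vector512.Create(0, 1, 2, 3, 4, 5, 6, 7,
                                    8, 9, 10, 11, 12, 13, 14, 15);
Vector512<int> idx = Vector512.Create(15, 14, 13, 12, 11, 10, 9, 8,
                                      7, 6, 5, 4, 3, 2, 1, 0);
Vector512<int> reversed = Avx512F.PermuteVar16x32(v, idx); // lane i takes v[idx[i]]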

PermuteVar16x32x2(Vector512<Int32>, Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_permutex2var_epi32 (__m512i a, __m512i idx, __m512i b)

VPERMI2D zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst VPERMT2D zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

PermuteVar16x32x2(Vector512<Single>, Vector512<Int32>, Vector512<Single>)

__m512 _mm512_permutex2var_ps (__m512 a, __m512i idx, __m512 b)

VPERMI2PS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst VPERMT2PS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

PermuteVar16x32x2(Vector512<UInt32>, Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_permutex2var_epi32 (__m512i a, __m512i idx, __m512i b)

VPERMI2D zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst VPERMT2D zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

PermuteVar2x64(Vector512<Double>, Vector512<Int64>)

__m512d _mm512_permutevar_pd (__m512d a, __m512i b)

VPERMILPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

PermuteVar4x32(Vector512<Single>, Vector512<Int32>)

__m512 _mm512_permutevar_ps (__m512 a, __m512i b)

VPERMILPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

PermuteVar8x64(Vector512<Double>, Vector512<Int64>)

__m512d _mm512_permutevar8x64_pd (__m512d a, __m512i b)

VPERMPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

PermuteVar8x64(Vector512<Int64>, Vector512<Int64>)

__m512i _mm512_permutevar8x64_epi64 (__m512i a, __m512i b)

VPERMQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

PermuteVar8x64(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_permutevar8x64_epi64 (__m512i a, __m512i b)

VPERMQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

PermuteVar8x64x2(Vector512<Double>, Vector512<Int64>, Vector512<Double>)

__m512d _mm512_permutex2var_pd (__m512d a, __m512i idx, __m512d b)

VPERMI2PD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst VPERMT2PD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

PermuteVar8x64x2(Vector512<Int64>, Vector512<Int64>, Vector512<Int64>)

__m512i _mm512_permutex2var_epi64 (__m512i a, __m512i idx, __m512i b)

VPERMI2Q zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst VPERMT2Q zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

PermuteVar8x64x2(Vector512<UInt64>, Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_permutex2var_epi64 (__m512i a, __m512i idx, __m512i b)

VPERMI2Q zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst VPERMT2Q zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

Reciprocal14(Vector512<Double>)

__m512d _mm512_rcp14_pd (__m512d a)

VRCP14PD zmm1 {k1}{z}, zmm2/m512/m64bcst

Reciprocal14(Vector512<Single>)

__m512 _mm512_rcp14_ps (__m512 a)

VRCP14PS zmm1 {k1}{z}, zmm2/m512/m32bcst

Reciprocal14Scalar(Vector128<Double>)

__m128d _mm_rcp14_sd (__m128d a)

VRCP14SD xmm1 {k1}{z}, xmm2, xmm3/m64

Reciprocal14Scalar(Vector128<Double>, Vector128<Double>)

__m128d _mm_rcp14_sd (__m128d a, __m128d b)

VRCP14SD xmm1 {k1}{z}, xmm2, xmm3/m64

The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.

Reciprocal14Scalar(Vector128<Single>)

__m128 _mm_rcp14_ss (__m128 a)

VRCP14SS xmm1 {k1}{z}, xmm2, xmm3/m32

Reciprocal14Scalar(Vector128<Single>, Vector128<Single>)

__m128 _mm_rcp14_ss (__m128 a, __m128 b)

VRCP14SS xmm1 {k1}{z}, xmm2, xmm3/m32

The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.

ReciprocalSqrt14(Vector512<Double>)

__m512d _mm512_rsqrt14_pd (__m512d a)

VRSQRT14PD zmm1 {k1}{z}, zmm2/m512/m64bcst

ReciprocalSqrt14(Vector512<Single>)

__m512 _mm512_rsqrt14_ps (__m512 a)

VRSQRT14PS zmm1 {k1}{z}, zmm2/m512/m32bcst

ReciprocalSqrt14Scalar(Vector128<Double>)

__m128d _mm_rsqrt14_sd (__m128d a)

VRSQRT14SD xmm1 {k1}{z}, xmm2, xmm3/m64

ReciprocalSqrt14Scalar(Vector128<Double>, Vector128<Double>)

__m128d _mm_rsqrt14_sd (__m128d a, __m128d b)

VRSQRT14SD xmm1 {k1}{z}, xmm2, xmm3/m64

The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.

ReciprocalSqrt14Scalar(Vector128<Single>)

__m128 _mm_rsqrt14_ss (__m128 a)

VRSQRT14SS xmm1 {k1}{z}, xmm2, xmm3/m32

ReciprocalSqrt14Scalar(Vector128<Single>, Vector128<Single>)

__m128 _mm_rsqrt14_ss (__m128 a, __m128 b)

VRSQRT14SS xmm1 {k1}{z}, xmm2, xmm3/m32

The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
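
The 14-bit reciprocal approximations have a maximum relative error of 2^-14. A common pattern, sketched here under the assumption that full precision is needed, is one Newton-Raphson refinement step r' = r * (2 - x * r):

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512F.IsSupported)
{
    Vector512<float> x = Vector512.Create(3.0f);
    Vector512<float> r = Avx512F.Reciprocal14(x);  // ~1/3, accurate to about 14 bits
    // One refinement step roughly doubles the number of accurate bits.
    Vector512<float> refined = Avx512F.Multiply(r,
        Avx512F.Subtract(Vector512.Create(2.0f), Avx512F.Multiply(x, r)));
}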

RotateLeft(Vector512<Int32>, Byte)

__m512i _mm512_rol_epi32 (__m512i a, int imm8)

VPROLD zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8

RotateLeft(Vector512<Int64>, Byte)

__m512i _mm512_rol_epi64 (__m512i a, int imm8)

VPROLQ zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8

RotateLeft(Vector512<UInt32>, Byte)

__m512i _mm512_rol_epi32 (__m512i a, int imm8)

VPROLD zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8

RotateLeft(Vector512<UInt64>, Byte)

__m512i _mm512_rol_epi64 (__m512i a, int imm8)

VPROLQ zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8

RotateLeftVariable(Vector512<Int32>, Vector512<UInt32>)

__m512i _mm512_rolv_epi32 (__m512i a, __m512i b)

VPROLDV zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

RotateLeftVariable(Vector512<Int64>, Vector512<UInt64>)

__m512i _mm512_rolv_epi64 (__m512i a, __m512i b)

VPROLQV zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

RotateLeftVariable(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_rolv_epi32 (__m512i a, __m512i b)

VPROLDV zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

RotateLeftVariable(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_rolv_epi64 (__m512i a, __m512i b)

VPROLQV zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

RotateRight(Vector512<Int32>, Byte)

__m512i _mm512_ror_epi32 (__m512i a, int imm8)

VPRORD zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8

RotateRight(Vector512<Int64>, Byte)

__m512i _mm512_ror_epi64 (__m512i a, int imm8)

VPRORQ zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8

RotateRight(Vector512<UInt32>, Byte)

__m512i _mm512_ror_epi32 (__m512i a, int imm8)

VPRORD zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8

RotateRight(Vector512<UInt64>, Byte)

__m512i _mm512_ror_epi64 (__m512i a, int imm8)

VPRORQ zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8

RotateRightVariable(Vector512<Int32>, Vector512<UInt32>)

__m512i _mm512_rorv_epi32 (__m512i a, __m512i b)

VPRORDV zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

RotateRightVariable(Vector512<Int64>, Vector512<UInt64>)

__m512i _mm512_rorv_epi64 (__m512i a, __m512i b)

VPRORQV zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

RotateRightVariable(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_rorv_epi32 (__m512i a, __m512i b)

VPRORDV zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

RotateRightVariable(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_rorv_epi64 (__m512i a, __m512i b)

VPRORQV zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
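
Before AVX512F, vector rotates required a shift/shift/or sequence. A minimal sketch of the immediate and per-element forms (values are illustrative):

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512F.IsSupported)
{
    Vector512<uint> v = Vector512.Create(0x80000001u);
    Vector512<uint> byOne  = Avx512F.RotateLeft(v, 1);  // every element becomes 0x00000003
    Vector512<uint> counts = Vector512.Create(1u, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
    Vector512<uint> varied = Avx512F.RotateLeftVariable(v, counts);  // element i rotated by counts[i]
}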

RoundScale(Vector512<Double>, Byte)

__m512d _mm512_roundscale_pd (__m512d a, int imm)

VRNDSCALEPD zmm1 {k1}{z}, zmm2/m512/m64bcst{sae}, imm8

RoundScale(Vector512<Single>, Byte)

__m512 _mm512_roundscale_ps (__m512 a, int imm)

VRNDSCALEPS zmm1 {k1}{z}, zmm2/m512/m32bcst{sae}, imm8

RoundScaleScalar(Vector128<Double>, Byte)

__m128d _mm_roundscale_sd (__m128d a, int imm)

VRNDSCALESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8

RoundScaleScalar(Vector128<Double>, Vector128<Double>, Byte)

__m128d _mm_roundscale_sd (__m128d a, __m128d b, int imm)

VRNDSCALESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.

RoundScaleScalar(Vector128<Single>, Byte)

__m128 _mm_roundscale_ss (__m128 a, int imm)

VRNDSCALESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8

RoundScaleScalar(Vector128<Single>, Vector128<Single>, Byte)

__m128 _mm_roundscale_ss (__m128 a, __m128 b, int imm)

VRNDSCALESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
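
In the imm8 control, bits [7:4] give the number of fraction bits M to preserve and the low bits select the rounding mode (0 = round to nearest even); the result is 2^-M * round(2^M * x). A sketch under that encoding (values are illustrative):

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512F.IsSupported)
{
    Vector512<double> v = Vector512.Create(2.675);
    Vector512<double> toInt  = Avx512F.RoundScale(v, 0x00);  // M = 0: 3.0, nearest integer
    Vector512<double> toHalf = Avx512F.RoundScale(v, 0x10);  // M = 1: 2.5, nearest multiple of 0.5
}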

Scale(Vector512<Double>, Vector512<Double>)

__m512d _mm512_scalef_pd (__m512d a, __m512d b)

VSCALEFPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{er}

Scale(Vector512<Double>, Vector512<Double>, FloatRoundingMode)

Provides access to the x86 AVX512F hardware instructions via intrinsics.

Scale(Vector512<Single>, Vector512<Single>)

__m512 _mm512_scalef_ps (__m512 a, __m512 b)

VSCALEFPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{er}

Scale(Vector512<Single>, Vector512<Single>, FloatRoundingMode)

Provides access to the x86 AVX512F hardware instructions via intrinsics.

ScaleScalar(Vector128<Double>, Vector128<Double>)

__m128d _mm_scalef_sd (__m128d a, __m128d b)

VSCALEFSD xmm1 {k1}{z}, xmm2, xmm3/m64{er}

ScaleScalar(Vector128<Double>, Vector128<Double>, FloatRoundingMode)

Provides access to the x86 AVX512F hardware instructions via intrinsics.

ScaleScalar(Vector128<Single>, Vector128<Single>)

__m128 _mm_scalef_ss (__m128 a, __m128 b)

VSCALEFSS xmm1 {k1}{z}, xmm2, xmm3/m32{er}

ScaleScalar(Vector128<Single>, Vector128<Single>, FloatRoundingMode)

Provides access to the x86 AVX512F hardware instructions via intrinsics.
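
Scale (VSCALEF) computes a * 2^floor(b) per element, an ldexp-style exponent adjustment. A minimal sketch (values are illustrative):

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512F.IsSupported)
{
    Vector512<double> a = Vector512.Create(3.0);
    Vector512<double> b = Vector512.Create(4.0);
    Vector512<double> r = Avx512F.Scale(a, b);  // 3.0 * 2^4 = 48.0 in every element
}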

ShiftLeftLogical(Vector512<Int32>, Byte)

__m512i _mm512_slli_epi32 (__m512i a, int imm8)

VPSLLD zmm1 {k1}{z}, zmm2, imm8

ShiftLeftLogical(Vector512<Int32>, Vector128<Int32>)

__m512i _mm512_sll_epi32 (__m512i a, __m128i count)

VPSLLD zmm1 {k1}{z}, zmm2, xmm3/m128

ShiftLeftLogical(Vector512<Int64>, Byte)

__m512i _mm512_slli_epi64 (__m512i a, int imm8)

VPSLLQ zmm1 {k1}{z}, zmm2, imm8

ShiftLeftLogical(Vector512<Int64>, Vector128<Int64>)

__m512i _mm512_sll_epi64 (__m512i a, __m128i count)

VPSLLQ zmm1 {k1}{z}, zmm2, xmm3/m128

ShiftLeftLogical(Vector512<UInt32>, Byte)

__m512i _mm512_slli_epi32 (__m512i a, int imm8)

VPSLLD zmm1 {k1}{z}, zmm2, imm8

ShiftLeftLogical(Vector512<UInt32>, Vector128<UInt32>)

__m512i _mm512_sll_epi32 (__m512i a, __m128i count)

VPSLLD zmm1 {k1}{z}, zmm2, xmm3/m128

ShiftLeftLogical(Vector512<UInt64>, Byte)

__m512i _mm512_slli_epi64 (__m512i a, int imm8)

VPSLLQ zmm1 {k1}{z}, zmm2, imm8

ShiftLeftLogical(Vector512<UInt64>, Vector128<UInt64>)

__m512i _mm512_sll_epi64 (__m512i a, __m128i count)

VPSLLQ zmm1 {k1}{z}, zmm2, xmm3/m128

ShiftLeftLogicalVariable(Vector512<Int32>, Vector512<UInt32>)

__m512i _mm512_sllv_epi32 (__m512i a, __m512i count)

VPSLLVD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

ShiftLeftLogicalVariable(Vector512<Int64>, Vector512<UInt64>)

__m512i _mm512_sllv_epi64 (__m512i a, __m512i count)

VPSLLVQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

ShiftLeftLogicalVariable(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_sllv_epi32 (__m512i a, __m512i count)

VPSLLVD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

ShiftLeftLogicalVariable(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_sllv_epi64 (__m512i a, __m512i count)

VPSLLVQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
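
The three count forms differ as follows: a Byte immediate applies one constant count to all elements, a Vector128 supplies one runtime count in its lowest element, and the Variable overloads shift each element by its own count. Sketched below with assumed values:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512F.IsSupported)
{
    Vector512<uint> v = Vector512.Create(1u);
    Vector512<uint> a = Avx512F.ShiftLeftLogical(v, 4);                             // all elements << 4
    Vector512<uint> b = Avx512F.ShiftLeftLogical(v, Vector128.Create(4u, 0, 0, 0)); // same count, from xmm
    Vector512<uint> c = Avx512F.ShiftLeftLogicalVariable(v,
        Vector512.Create(0u, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15));  // element i << i
}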

ShiftRightArithmetic(Vector512<Int32>, Byte)

__m512i _mm512_srai_epi32 (__m512i a, int imm8)

VPSRAD zmm1 {k1}{z}, zmm2, imm8

ShiftRightArithmetic(Vector512<Int32>, Vector128<Int32>)

__m512i _mm512_sra_epi32 (__m512i a, __m128i count)

VPSRAD zmm1 {k1}{z}, zmm2, xmm3/m128

ShiftRightArithmetic(Vector512<Int64>, Byte)

__m512i _mm512_srai_epi64 (__m512i a, int imm8)

VPSRAQ zmm1 {k1}{z}, zmm2, imm8

ShiftRightArithmetic(Vector512<Int64>, Vector128<Int64>)

__m512i _mm512_sra_epi64 (__m512i a, __m128i count)

VPSRAQ zmm1 {k1}{z}, zmm2, xmm3/m128

ShiftRightArithmeticVariable(Vector512<Int32>, Vector512<UInt32>)

__m512i _mm512_srav_epi32 (__m512i a, __m512i count)

VPSRAVD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

ShiftRightArithmeticVariable(Vector512<Int64>, Vector512<UInt64>)

__m512i _mm512_srav_epi64 (__m512i a, __m512i count)

VPSRAVQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

ShiftRightLogical(Vector512<Int32>, Byte)

__m512i _mm512_srli_epi32 (__m512i a, int imm8)

VPSRLD zmm1 {k1}{z}, zmm2, imm8

ShiftRightLogical(Vector512<Int32>, Vector128<Int32>)

__m512i _mm512_srl_epi32 (__m512i a, __m128i count)

VPSRLD zmm1 {k1}{z}, zmm2, xmm3/m128

ShiftRightLogical(Vector512<Int64>, Byte)

__m512i _mm512_srli_epi64 (__m512i a, int imm8)

VPSRLQ zmm1 {k1}{z}, zmm2, imm8

ShiftRightLogical(Vector512<Int64>, Vector128<Int64>)

__m512i _mm512_srl_epi64 (__m512i a, __m128i count)

VPSRLQ zmm1 {k1}{z}, zmm2, xmm3/m128

ShiftRightLogical(Vector512<UInt32>, Byte)

__m512i _mm512_srli_epi32 (__m512i a, int imm8)

VPSRLD zmm1 {k1}{z}, zmm2, imm8

ShiftRightLogical(Vector512<UInt32>, Vector128<UInt32>)

__m512i _mm512_srl_epi32 (__m512i a, __m128i count)

VPSRLD zmm1 {k1}{z}, zmm2, xmm3/m128

ShiftRightLogical(Vector512<UInt64>, Byte)

__m512i _mm512_srli_epi64 (__m512i a, int imm8)

VPSRLQ zmm1 {k1}{z}, zmm2, imm8

ShiftRightLogical(Vector512<UInt64>, Vector128<UInt64>)

__m512i _mm512_srl_epi64 (__m512i a, __m128i count)

VPSRLQ zmm1 {k1}{z}, zmm2, xmm3/m128

ShiftRightLogicalVariable(Vector512<Int32>, Vector512<UInt32>)

__m512i _mm512_srlv_epi32 (__m512i a, __m512i count)

VPSRLVD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

ShiftRightLogicalVariable(Vector512<Int64>, Vector512<UInt64>)

__m512i _mm512_srlv_epi64 (__m512i a, __m512i count)

VPSRLVQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

ShiftRightLogicalVariable(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_srlv_epi32 (__m512i a, __m512i count)

VPSRLVD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

ShiftRightLogicalVariable(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_srlv_epi64 (__m512i a, __m512i count)

VPSRLVQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
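
Logical right shifts fill vacated bits with zeros, while arithmetic right shifts replicate the sign bit; the difference only matters for negative signed elements. A short sketch:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512F.IsSupported)
{
    Vector512<int> v = Vector512.Create(-16);
    Vector512<int> arith = Avx512F.ShiftRightArithmetic(v, 2);  // -4 (sign bit replicated)
    Vector512<int> logic = Avx512F.ShiftRightLogical(v, 2);     // 0x3FFFFFFC (zero filled)
}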

Shuffle(Vector512<Double>, Vector512<Double>, Byte)

__m512d _mm512_shuffle_pd (__m512d a, __m512d b, const int imm8)

VSHUFPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8

Shuffle(Vector512<Int32>, Byte)

__m512i _mm512_shuffle_epi32 (__m512i a, const int imm8)

VPSHUFD zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8

Shuffle(Vector512<Single>, Vector512<Single>, Byte)

__m512 _mm512_shuffle_ps (__m512 a, __m512 b, const int imm8)

VSHUFPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8

Shuffle(Vector512<UInt32>, Byte)

__m512i _mm512_shuffle_epi32 (__m512i a, const int imm8)

VPSHUFD zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8

Shuffle4x128(Vector512<Double>, Vector512<Double>, Byte)

__m512d _mm512_shuffle_f64x2 (__m512d a, __m512d b, const int imm8)

VSHUFF64x2 zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8

Shuffle4x128(Vector512<Int32>, Vector512<Int32>, Byte)

__m512i _mm512_shuffle_i32x4 (__m512i a, __m512i b, const int imm8)

VSHUFI32x4 zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8

Shuffle4x128(Vector512<Int64>, Vector512<Int64>, Byte)

__m512i _mm512_shuffle_i64x2 (__m512i a, __m512i b, const int imm8)

VSHUFI64x2 zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8

Shuffle4x128(Vector512<Single>, Vector512<Single>, Byte)

__m512 _mm512_shuffle_f32x4 (__m512 a, __m512 b, const int imm8)

VSHUFF32x4 zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8

Shuffle4x128(Vector512<UInt32>, Vector512<UInt32>, Byte)

__m512i _mm512_shuffle_i32x4 (__m512i a, __m512i b, const int imm8)

VSHUFI32x4 zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8

Shuffle4x128(Vector512<UInt64>, Vector512<UInt64>, Byte)

__m512i _mm512_shuffle_i64x2 (__m512i a, __m512i b, const int imm8)

VSHUFI64x2 zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
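
Shuffle4x128 moves whole 128-bit lanes: each 2-bit field of the control byte picks a source lane, with the two low fields reading from the first operand and the two high fields from the second. A sketch with assumed values:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512F.IsSupported)
{
    Vector512<long> a = Vector512.Create(0L, 0, 1, 1, 2, 2, 3, 3);  // lanes 0..3 of 'a'
    Vector512<long> b = Vector512.Create(4L, 4, 5, 5, 6, 6, 7, 7);  // lanes 0..3 of 'b'
    Vector512<long> r = Avx512F.Shuffle4x128(a, b, 0b11_01_10_00);
    // Result lanes: a.lane0, a.lane2, b.lane1, b.lane3 -> <0,0, 2,2, 5,5, 7,7>
}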

Sqrt(Vector512<Double>)

__m512d _mm512_sqrt_pd (__m512d a)

VSQRTPD zmm1 {k1}{z}, zmm2/m512/m64bcst{er}

Sqrt(Vector512<Double>, FloatRoundingMode)

Provides access to the x86 AVX512F hardware instructions via intrinsics.

Sqrt(Vector512<Single>)

__m512 _mm512_sqrt_ps (__m512 a)

VSQRTPS zmm1 {k1}{z}, zmm2/m512/m32bcst{er}

Sqrt(Vector512<Single>, FloatRoundingMode)

Provides access to the x86 AVX512F hardware instructions via intrinsics.

SqrtScalar(Vector128<Double>, Vector128<Double>, FloatRoundingMode)

Provides access to the x86 AVX512F hardware instructions via intrinsics.

SqrtScalar(Vector128<Single>, Vector128<Single>, FloatRoundingMode)

Provides access to the x86 AVX512F hardware instructions via intrinsics.
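
The FloatRoundingMode overloads use EVEX embedded rounding ({er}), overriding MXCSR for that single instruction. A hedged sketch (the FloatRoundingMode member names below are assumed from the enum's documented values):

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512F.IsSupported)
{
    Vector512<double> v = Vector512.Create(2.0);
    Vector512<double> down = Avx512F.Sqrt(v, FloatRoundingMode.ToNegativeInfinity);
    Vector512<double> up   = Avx512F.Sqrt(v, FloatRoundingMode.ToPositiveInfinity);
    // 'down' and 'up' bracket the exact value of sqrt(2) from below and above.
}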

Store(Byte*, Vector512<Byte>)

void _mm512_storeu_si512 (__m512i * mem_addr, __m512i a)

VMOVDQU32 m512 {k1}{z}, zmm1

Store(Double*, Vector512<Double>)

void _mm512_storeu_pd (double * mem_addr, __m512d a)

VMOVUPD m512 {k1}{z}, zmm1

Store(Int16*, Vector512<Int16>)

void _mm512_storeu_si512 (__m512i * mem_addr, __m512i a)

VMOVDQU32 m512 {k1}{z}, zmm1

Store(Int32*, Vector512<Int32>)

void _mm512_storeu_epi32 (__m512i * mem_addr, __m512i a)

VMOVDQU32 m512 {k1}{z}, zmm1

Store(Int64*, Vector512<Int64>)

void _mm512_storeu_epi64 (__m512i * mem_addr, __m512i a)

VMOVDQU64 m512 {k1}{z}, zmm1

Store(SByte*, Vector512<SByte>)

void _mm512_storeu_si512 (__m512i * mem_addr, __m512i a)

VMOVDQU32 m512 {k1}{z}, zmm1

Store(Single*, Vector512<Single>)

void _mm512_storeu_ps (float * mem_addr, __m512 a)

VMOVUPS m512 {k1}{z}, zmm1

Store(UInt16*, Vector512<UInt16>)

void _mm512_storeu_si512 (__m512i * mem_addr, __m512i a)

VMOVDQU32 m512 {k1}{z}, zmm1

Store(UInt32*, Vector512<UInt32>)

void _mm512_storeu_epi32 (__m512i * mem_addr, __m512i a)

VMOVDQU32 m512 {k1}{z}, zmm1

Store(UInt64*, Vector512<UInt64>)

void _mm512_storeu_epi64 (__m512i * mem_addr, __m512i a)

VMOVDQU64 m512 {k1}{z}, zmm1

StoreAligned(Byte*, Vector512<Byte>)

void _mm512_store_si512 (__m512i * mem_addr, __m512i a)

VMOVDQA32 m512 {k1}{z}, zmm1

StoreAligned(Double*, Vector512<Double>)

void _mm512_store_pd (double * mem_addr, __m512d a)

VMOVAPD m512 {k1}{z}, zmm1

StoreAligned(Int16*, Vector512<Int16>)

void _mm512_store_si512 (__m512i * mem_addr, __m512i a)

VMOVDQA32 m512 {k1}{z}, zmm1

StoreAligned(Int32*, Vector512<Int32>)

void _mm512_store_epi32 (__m512i * mem_addr, __m512i a)

VMOVDQA32 m512 {k1}{z}, zmm1

StoreAligned(Int64*, Vector512<Int64>)

void _mm512_store_epi64 (__m512i * mem_addr, __m512i a)

VMOVDQA64 m512 {k1}{z}, zmm1

StoreAligned(SByte*, Vector512<SByte>)

void _mm512_store_si512 (__m512i * mem_addr, __m512i a)

VMOVDQA32 m512 {k1}{z}, zmm1

StoreAligned(Single*, Vector512<Single>)

void _mm512_store_ps (float * mem_addr, __m512 a)

VMOVAPS m512 {k1}{z}, zmm1

StoreAligned(UInt16*, Vector512<UInt16>)

void _mm512_store_si512 (__m512i * mem_addr, __m512i a)

VMOVDQA32 m512 {k1}{z}, zmm1

StoreAligned(UInt32*, Vector512<UInt32>)

void _mm512_store_epi32 (__m512i * mem_addr, __m512i a)

VMOVDQA32 m512 {k1}{z}, zmm1

StoreAligned(UInt64*, Vector512<UInt64>)

void _mm512_store_epi64 (__m512i * mem_addr, __m512i a)

VMOVDQA64 m512 {k1}{z}, zmm1

StoreAlignedNonTemporal(Byte*, Vector512<Byte>)

void _mm512_stream_si512 (__m512i * mem_addr, __m512i a)

VMOVNTDQ m512, zmm1

StoreAlignedNonTemporal(Double*, Vector512<Double>)

void _mm512_stream_pd (double * mem_addr, __m512d a)

VMOVNTPD m512, zmm1

StoreAlignedNonTemporal(Int16*, Vector512<Int16>)

void _mm512_stream_si512 (__m512i * mem_addr, __m512i a)

VMOVNTDQ m512, zmm1

StoreAlignedNonTemporal(Int32*, Vector512<Int32>)

void _mm512_stream_si512 (__m512i * mem_addr, __m512i a)

VMOVNTDQ m512, zmm1

StoreAlignedNonTemporal(Int64*, Vector512<Int64>)

void _mm512_stream_si512 (__m512i * mem_addr, __m512i a)

VMOVNTDQ m512, zmm1

StoreAlignedNonTemporal(SByte*, Vector512<SByte>)

void _mm512_stream_si512 (__m512i * mem_addr, __m512i a)

VMOVNTDQ m512, zmm1

StoreAlignedNonTemporal(Single*, Vector512<Single>)

void _mm512_stream_ps (float * mem_addr, __m512 a)

VMOVNTPS m512, zmm1

StoreAlignedNonTemporal(UInt16*, Vector512<UInt16>)

void _mm512_stream_si512 (__m512i * mem_addr, __m512i a)

VMOVNTDQ m512, zmm1

StoreAlignedNonTemporal(UInt32*, Vector512<UInt32>)

void _mm512_stream_si512 (__m512i * mem_addr, __m512i a)

VMOVNTDQ m512, zmm1

StoreAlignedNonTemporal(UInt64*, Vector512<UInt64>)

void _mm512_stream_si512 (__m512i * mem_addr, __m512i a)

VMOVNTDQ m512, zmm1
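
Store accepts any address; StoreAligned requires the pointer to be 64-byte aligned (VMOVDQA faults otherwise); StoreAlignedNonTemporal additionally bypasses the cache hierarchy for data that will not be read back soon. A minimal sketch (assumes an unsafe compilation context):

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512F.IsSupported)
{
    int[] buffer = new int[16];
    unsafe
    {
        fixed (int* p = buffer)
        {
            Avx512F.Store(p, Vector512.Create(42));  // alignment-tolerant store
        }
    }
}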

Subtract(Vector512<Double>, Vector512<Double>)

__m512d _mm512_sub_pd (__m512d a, __m512d b)

VSUBPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{er}

Subtract(Vector512<Double>, Vector512<Double>, FloatRoundingMode)

Provides access to the x86 AVX512F hardware instructions via intrinsics.

Subtract(Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_sub_epi32 (__m512i a, __m512i b)

VPSUBD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

Subtract(Vector512<Int64>, Vector512<Int64>)

__m512i _mm512_sub_epi64 (__m512i a, __m512i b)

VPSUBQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

Subtract(Vector512<Single>, Vector512<Single>)

__m512 _mm512_sub_ps (__m512 a, __m512 b)

VSUBPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{er}

Subtract(Vector512<Single>, Vector512<Single>, FloatRoundingMode)

Provides access to the x86 AVX512F hardware instructions via intrinsics.

Subtract(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_sub_epi32 (__m512i a, __m512i b)

VPSUBD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

Subtract(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_sub_epi64 (__m512i a, __m512i b)

VPSUBQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

SubtractScalar(Vector128<Double>, Vector128<Double>, FloatRoundingMode)

Provides access to the x86 AVX512F hardware instructions via intrinsics.

SubtractScalar(Vector128<Single>, Vector128<Single>, FloatRoundingMode)

Provides access to the x86 AVX512F hardware instructions via intrinsics.

TernaryLogic(Vector512<Byte>, Vector512<Byte>, Vector512<Byte>, Byte)

__m512i _mm512_ternarylogic_si512 (__m512i a, __m512i b, __m512i c, byte imm)

VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector512<Double>, Vector512<Double>, Vector512<Double>, Byte)

__m512d _mm512_ternarylogic_pd (__m512d a, __m512d b, __m512d c, int imm)

VPTERNLOGQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector512<Int16>, Vector512<Int16>, Vector512<Int16>, Byte)

__m512i _mm512_ternarylogic_si512 (__m512i a, __m512i b, __m512i c, short imm)

VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector512<Int32>, Vector512<Int32>, Vector512<Int32>, Byte)

__m512i _mm512_ternarylogic_epi32 (__m512i a, __m512i b, __m512i c, int imm)

VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8

TernaryLogic(Vector512<Int64>, Vector512<Int64>, Vector512<Int64>, Byte)

__m512i _mm512_ternarylogic_epi64 (__m512i a, __m512i b, __m512i c, int imm)

VPTERNLOGQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8

TernaryLogic(Vector512<SByte>, Vector512<SByte>, Vector512<SByte>, Byte)

__m512i _mm512_ternarylogic_si512 (__m512i a, __m512i b, __m512i c, int imm)

VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector512<Single>, Vector512<Single>, Vector512<Single>, Byte)

__m512 _mm512_ternarylogic_ps (__m512 a, __m512 b, __m512 c, int imm)

VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector512<UInt16>, Vector512<UInt16>, Vector512<UInt16>, Byte)

__m512i _mm512_ternarylogic_si512 (__m512i a, __m512i b, __m512i c, short imm)

VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector512<UInt32>, Vector512<UInt32>, Vector512<UInt32>, Byte)

__m512i _mm512_ternarylogic_epi32 (__m512i a, __m512i b, __m512i c, int imm)

VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8

TernaryLogic(Vector512<UInt64>, Vector512<UInt64>, Vector512<UInt64>, Byte)

__m512i _mm512_ternarylogic_epi64 (__m512i a, __m512i b, __m512i c, int imm)

VPTERNLOGQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
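
The TernaryLogic control byte is an 8-entry truth table indexed by the bit triple (a, b, c), so any three-input boolean function takes one instruction. For example, 0x96 is a ^ b ^ c and 0xCA is the bitwise select (a & b) | (~a & c). A sketch with assumed values:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512F.IsSupported)
{
    Vector512<uint> a = Vector512.Create(0xF0F0F0F0u);
    Vector512<uint> b = Vector512.Create(0xCCCCCCCCu);
    Vector512<uint> c = Vector512.Create(0xAAAAAAAAu);
    Vector512<uint> xor3   = Avx512F.TernaryLogic(a, b, c, 0x96);  // a ^ b ^ c
    Vector512<uint> select = Avx512F.TernaryLogic(a, b, c, 0xCA);  // bitwise a ? b : c
}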

ToString()

Returns a string that represents the current object.

(Inherited from Object)

UnpackHigh(Vector512<Double>, Vector512<Double>)

__m512d _mm512_unpackhi_pd (__m512d a, __m512d b)

VUNPCKHPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

UnpackHigh(Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_unpackhi_epi32 (__m512i a, __m512i b)

VPUNPCKHDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

UnpackHigh(Vector512<Int64>, Vector512<Int64>)

__m512i _mm512_unpackhi_epi64 (__m512i a, __m512i b)

VPUNPCKHQDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

UnpackHigh(Vector512<Single>, Vector512<Single>)

__m512 _mm512_unpackhi_ps (__m512 a, __m512 b)

VUNPCKHPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

UnpackHigh(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_unpackhi_epi32 (__m512i a, __m512i b)

VPUNPCKHDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

UnpackHigh(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_unpackhi_epi64 (__m512i a, __m512i b)

VPUNPCKHQDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

UnpackLow(Vector512<Double>, Vector512<Double>)

__m512d _mm512_unpacklo_pd (__m512d a, __m512d b)

VUNPCKLPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

UnpackLow(Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_unpacklo_epi32 (__m512i a, __m512i b)

VPUNPCKLDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

UnpackLow(Vector512<Int64>, Vector512<Int64>)

__m512i _mm512_unpacklo_epi64 (__m512i a, __m512i b)

VPUNPCKLQDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

UnpackLow(Vector512<Single>, Vector512<Single>)

__m512 _mm512_unpacklo_ps (__m512 a, __m512 b)

VUNPCKLPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

UnpackLow(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_unpacklo_epi32 (__m512i a, __m512i b)

VPUNPCKLDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

UnpackLow(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_unpacklo_epi64 (__m512i a, __m512i b)

VPUNPCKLQDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
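
UnpackLow and UnpackHigh interleave within each 128-bit lane rather than across the full 512-bit vector, mirroring the SSE/AVX behavior of the underlying instructions. A sketch with assumed values:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512F.IsSupported)
{
    Vector512<int> a = Vector512.Create(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
    Vector512<int> b = Vector512.Create(16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
    Vector512<int> lo = Avx512F.UnpackLow(a, b);
    // Per 128-bit lane: <0,16,1,17,  4,20,5,21,  8,24,9,25,  12,28,13,29>
}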

Xor(Vector512<Byte>, Vector512<Byte>)

__m512i _mm512_xor_si512 (__m512i a, __m512i b)

VPXORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

Xor(Vector512<Int16>, Vector512<Int16>)

__m512i _mm512_xor_si512 (__m512i a, __m512i b)

VPXORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

Xor(Vector512<Int32>, Vector512<Int32>)

__m512i _mm512_xor_epi32 (__m512i a, __m512i b)

VPXORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

Xor(Vector512<Int64>, Vector512<Int64>)

__m512i _mm512_xor_epi64 (__m512i a, __m512i b)

VPXORQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

Xor(Vector512<SByte>, Vector512<SByte>)

__m512i _mm512_xor_si512 (__m512i a, __m512i b)

VPXORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

Xor(Vector512<UInt16>, Vector512<UInt16>)

__m512i _mm512_xor_si512 (__m512i a, __m512i b)

VPXORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

Xor(Vector512<UInt32>, Vector512<UInt32>)

__m512i _mm512_xor_epi32 (__m512i a, __m512i b)

VPXORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst

Xor(Vector512<UInt64>, Vector512<UInt64>)

__m512i _mm512_xor_epi64 (__m512i a, __m512i b)

VPXORQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst

Applies to