Avx Class

Definition

Important

This API is not CLS-compliant.

This class provides access to Intel AVX hardware instructions via intrinsics.

public ref class Avx abstract : System::Runtime::Intrinsics::X86::Sse42
[System.CLSCompliant(false)]
public abstract class Avx : System.Runtime.Intrinsics.X86.Sse42
[<System.CLSCompliant(false)>]
type Avx = class
    inherit Sse42
Public MustInherit Class Avx
Inherits Sse42
Inheritance
Object → Sse → Sse2 → Sse3 → Ssse3 → Sse41 → Sse42 → Avx
Derived
Avx2
Attributes
CLSCompliantAttribute

Properties

IsSupported

Gets a value that indicates whether the APIs in this class are supported.
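
A typical guard pattern, shown as a minimal C# sketch (the helper class and method names are illustrative, not part of this API): query IsSupported once and fall back to scalar code when AVX is unavailable.

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

static class AvxAddExample
{
    // Adds two equal-length spans element-wise, using AVX when available.
    public static unsafe void Add(ReadOnlySpan<float> a, ReadOnlySpan<float> b, Span<float> result)
    {
        int i = 0;
        if (Avx.IsSupported)
        {
            fixed (float* pa = a, pb = b, pr = result)
            {
                // Process eight floats per iteration on the AVX path.
                for (; i <= a.Length - 8; i += 8)
                {
                    Avx.Store(pr + i, Avx.Add(Avx.LoadVector256(pa + i), Avx.LoadVector256(pb + i)));
                }
            }
        }
        for (; i < a.Length; i++)   // scalar tail, and the fallback when AVX is absent
            result[i] = a[i] + b[i];
    }
}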

Methods

Add(Vector256<Double>, Vector256<Double>)

__m256d _mm256_add_pd (__m256d a, __m256d b)

VADDPD ymm, ymm, ymm/m256

Add(Vector256<Single>, Vector256<Single>)

__m256 _mm256_add_ps (__m256 a, __m256 b)

VADDPS ymm, ymm, ymm/m256
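
A minimal sketch of the packed add, assuming an AVX-capable CPU; Vector256.Create avoids any pointer handling:

Vector256<double> left  = Vector256.Create(1.0, 2.0, 3.0, 4.0);
Vector256<double> right = Vector256.Create(10.0, 20.0, 30.0, 40.0);
Vector256<double> sum   = Avx.Add(left, right);   // <11, 22, 33, 44>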

AddSubtract(Vector256<Double>, Vector256<Double>)

__m256d _mm256_addsub_pd (__m256d a, __m256d b)

VADDSUBPD ymm, ymm, ymm/m256

AddSubtract(Vector256<Single>, Vector256<Single>)

__m256 _mm256_addsub_ps (__m256 a, __m256 b)

VADDSUBPS ymm, ymm, ymm/m256

And(Vector256<Double>, Vector256<Double>)

__m256d _mm256_and_pd (__m256d a, __m256d b)

VANDPD ymm, ymm, ymm/m256

And(Vector256<Single>, Vector256<Single>)

__m256 _mm256_and_ps (__m256 a, __m256 b)

VANDPS ymm, ymm, ymm/m256

AndNot(Vector256<Double>, Vector256<Double>)

__m256d _mm256_andnot_pd (__m256d a, __m256d b)

VANDNPD ymm, ymm, ymm/m256

AndNot(Vector256<Single>, Vector256<Single>)

__m256 _mm256_andnot_ps (__m256 a, __m256 b)

VANDNPS ymm, ymm, ymm/m256

Blend(Vector256<Double>, Vector256<Double>, Byte)

__m256d _mm256_blend_pd (__m256d a, __m256d b, const int imm8)

VBLENDPD ymm, ymm, ymm/m256, imm8

Blend(Vector256<Single>, Vector256<Single>, Byte)

__m256 _mm256_blend_ps (__m256 a, __m256 b, const int imm8)

VBLENDPS ymm, ymm, ymm/m256, imm8

BlendVariable(Vector256<Double>, Vector256<Double>, Vector256<Double>)

__m256d _mm256_blendv_pd (__m256d a, __m256d b, __m256d mask)

VBLENDVPD ymm, ymm, ymm/m256, ymm

BlendVariable(Vector256<Single>, Vector256<Single>, Vector256<Single>)

__m256 _mm256_blendv_ps (__m256 a, __m256 b, __m256 mask)

VBLENDVPS ymm, ymm, ymm/m256, ymm
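
A minimal sketch of variable blending, assuming AVX support: a comparison result supplies the per-element mask, and lanes whose sign bit is set are taken from the second operand.

Vector256<float> a    = Vector256.Create(1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f);
Vector256<float> b    = Vector256.Create(-1f, -2f, -3f, -4f, -5f, -6f, -7f, -8f);
Vector256<float> mask = Avx.CompareGreaterThan(a, Vector256.Create(4f)); // all-ones where a > 4
Vector256<float> mix  = Avx.BlendVariable(a, b, mask);                   // <1, 2, 3, 4, -5, -6, -7, -8>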

BroadcastScalarToVector128(Single*)

__m128 _mm_broadcast_ss (float const * mem_addr)

VBROADCASTSS xmm, m32

BroadcastScalarToVector256(Double*)

__m256d _mm256_broadcast_sd (double const * mem_addr)

VBROADCASTSD ymm, m64

BroadcastScalarToVector256(Single*)

__m256 _mm256_broadcast_ss (float const * mem_addr)

VBROADCASTSS ymm, m32
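
A minimal sketch of the scalar broadcast, assuming AVX support; taking the address of a local requires an unsafe context:

float scale = 2.5f;
unsafe
{
    Vector256<float> v = Avx.BroadcastScalarToVector256(&scale); // all eight lanes hold 2.5
}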

BroadcastVector128ToVector256(Double*)

__m256d _mm256_broadcast_pd (__m128d const * mem_addr)

VBROADCASTF128 ymm, m128

BroadcastVector128ToVector256(Single*)

__m256 _mm256_broadcast_ps (__m128 const * mem_addr)

VBROADCASTF128 ymm, m128

Ceiling(Vector256<Double>)

__m256d _mm256_ceil_pd (__m256d a)

VROUNDPD ymm, ymm/m256, imm8(10)

Ceiling(Vector256<Single>)

__m256 _mm256_ceil_ps (__m256 a)

VROUNDPS ymm, ymm/m256, imm8(10)

Compare(Vector128<Double>, Vector128<Double>, FloatComparisonMode)

__m128d _mm_cmp_pd (__m128d a, __m128d b, const int imm8)

VCMPPD xmm, xmm, xmm/m128, imm8

Compare(Vector128<Single>, Vector128<Single>, FloatComparisonMode)

__m128 _mm_cmp_ps (__m128 a, __m128 b, const int imm8)

VCMPPS xmm, xmm, xmm/m128, imm8

Compare(Vector256<Double>, Vector256<Double>, FloatComparisonMode)

__m256d _mm256_cmp_pd (__m256d a, __m256d b, const int imm8)

VCMPPD ymm, ymm, ymm/m256, imm8

Compare(Vector256<Single>, Vector256<Single>, FloatComparisonMode)

__m256 _mm256_cmp_ps (__m256 a, __m256 b, const int imm8)

VCMPPS ymm, ymm, ymm/m256, imm8
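
A minimal sketch of the general comparison, assuming AVX support; the predicate is selected with the FloatComparisonMode enumeration:

Vector256<double> x = Vector256.Create(1.0, 2.0, double.NaN, 4.0);
Vector256<double> y = Vector256.Create(1.0, 3.0, 3.0, 4.0);
// Ordered, non-signaling equality: a lane is all-ones only when both inputs
// are non-NaN and equal, so lanes 0 and 3 match and lane 2 (NaN) does not.
Vector256<double> eq = Avx.Compare(x, y, FloatComparisonMode.OrderedEqualNonSignaling);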

CompareEqual(Vector256<Double>, Vector256<Double>)

__m256d _mm256_cmpeq_pd (__m256d a, __m256d b)

CMPPD ymm, ymm/m256, imm8(0)

The above native signature does not exist. We provide this additional overload for completeness.

CompareEqual(Vector256<Single>, Vector256<Single>)

__m256 _mm256_cmpeq_ps (__m256 a, __m256 b)

CMPPS ymm, ymm/m256, imm8(0)

The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThan(Vector256<Double>, Vector256<Double>)

__m256d _mm256_cmpgt_pd (__m256d a, __m256d b)

CMPPD ymm, ymm/m256, imm8(14)

The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThan(Vector256<Single>, Vector256<Single>)

__m256 _mm256_cmpgt_ps (__m256 a, __m256 b)

CMPPS ymm, ymm/m256, imm8(14)

The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThanOrEqual(Vector256<Double>, Vector256<Double>)

__m256d _mm256_cmpge_pd (__m256d a, __m256d b)

CMPPD ymm, ymm/m256, imm8(13)

The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThanOrEqual(Vector256<Single>, Vector256<Single>)

__m256 _mm256_cmpge_ps (__m256 a, __m256 b)

CMPPS ymm, ymm/m256, imm8(13)

The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThan(Vector256<Double>, Vector256<Double>)

__m256d _mm256_cmplt_pd (__m256d a, __m256d b)

CMPPD ymm, ymm/m256, imm8(1)

The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThan(Vector256<Single>, Vector256<Single>)

__m256 _mm256_cmplt_ps (__m256 a, __m256 b)

CMPPS ymm, ymm/m256, imm8(1)

The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThanOrEqual(Vector256<Double>, Vector256<Double>)

__m256d _mm256_cmple_pd (__m256d a, __m256d b)

CMPPD ymm, ymm/m256, imm8(2)

The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThanOrEqual(Vector256<Single>, Vector256<Single>)

__m256 _mm256_cmple_ps (__m256 a, __m256 b)

CMPPS ymm, ymm/m256, imm8(2)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotEqual(Vector256<Double>, Vector256<Double>)

__m256d _mm256_cmpneq_pd (__m256d a, __m256d b)

CMPPD ymm, ymm/m256, imm8(4)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotEqual(Vector256<Single>, Vector256<Single>)

__m256 _mm256_cmpneq_ps (__m256 a, __m256 b)

CMPPS ymm, ymm/m256, imm8(4)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThan(Vector256<Double>, Vector256<Double>)

__m256d _mm256_cmpngt_pd (__m256d a, __m256d b)

CMPPD ymm, ymm/m256, imm8(10)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThan(Vector256<Single>, Vector256<Single>)

__m256 _mm256_cmpngt_ps (__m256 a, __m256 b)

CMPPS ymm, ymm/m256, imm8(10)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThanOrEqual(Vector256<Double>, Vector256<Double>)

__m256d _mm256_cmpnge_pd (__m256d a, __m256d b)

CMPPD ymm, ymm/m256, imm8(9)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThanOrEqual(Vector256<Single>, Vector256<Single>)

__m256 _mm256_cmpnge_ps (__m256 a, __m256 b)

CMPPS ymm, ymm/m256, imm8(9)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThan(Vector256<Double>, Vector256<Double>)

__m256d _mm256_cmpnlt_pd (__m256d a, __m256d b)

CMPPD ymm, ymm/m256, imm8(5)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThan(Vector256<Single>, Vector256<Single>)

__m256 _mm256_cmpnlt_ps (__m256 a, __m256 b)

CMPPS ymm, ymm/m256, imm8(5)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThanOrEqual(Vector256<Double>, Vector256<Double>)

__m256d _mm256_cmpnle_pd (__m256d a, __m256d b)

CMPPD ymm, ymm/m256, imm8(6)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThanOrEqual(Vector256<Single>, Vector256<Single>)

__m256 _mm256_cmpnle_ps (__m256 a, __m256 b)

CMPPS ymm, ymm/m256, imm8(6)

The above native signature does not exist. We provide this additional overload for completeness.

CompareOrdered(Vector256<Double>, Vector256<Double>)

__m256d _mm256_cmpord_pd (__m256d a, __m256d b)

CMPPD ymm, ymm/m256, imm8(7)

The above native signature does not exist. We provide this additional overload for completeness.

CompareOrdered(Vector256<Single>, Vector256<Single>)

__m256 _mm256_cmpord_ps (__m256 a, __m256 b)

CMPPS ymm, ymm/m256, imm8(7)

The above native signature does not exist. We provide this additional overload for completeness.

CompareScalar(Vector128<Double>, Vector128<Double>, FloatComparisonMode)

__m128d _mm_cmp_sd (__m128d a, __m128d b, const int imm8)

VCMPSD xmm, xmm, xmm/m64, imm8

CompareScalar(Vector128<Single>, Vector128<Single>, FloatComparisonMode)

__m128 _mm_cmp_ss (__m128 a, __m128 b, const int imm8)

VCMPSS xmm, xmm, xmm/m32, imm8

CompareUnordered(Vector256<Double>, Vector256<Double>)

__m256d _mm256_cmpunord_pd (__m256d a, __m256d b)

CMPPD ymm, ymm/m256, imm8(3)

The above native signature does not exist. We provide this additional overload for completeness.

CompareUnordered(Vector256<Single>, Vector256<Single>)

__m256 _mm256_cmpunord_ps (__m256 a, __m256 b)

CMPPS ymm, ymm/m256, imm8(3)

The above native signature does not exist. We provide this additional overload for completeness.

ConvertToVector128Int32(Vector256<Double>)

__m128i _mm256_cvtpd_epi32 (__m256d a)

VCVTPD2DQ xmm, ymm/m256

ConvertToVector128Int32WithTruncation(Vector256<Double>)

__m128i _mm256_cvttpd_epi32 (__m256d a)

VCVTTPD2DQ xmm, ymm/m256

ConvertToVector128Single(Vector256<Double>)

__m128 _mm256_cvtpd_ps (__m256d a)

VCVTPD2PS xmm, ymm/m256

ConvertToVector256Double(Vector128<Int32>)

__m256d _mm256_cvtepi32_pd (__m128i a)

VCVTDQ2PD ymm, xmm/m128

ConvertToVector256Double(Vector128<Single>)

__m256d _mm256_cvtps_pd (__m128 a)

VCVTPS2PD ymm, xmm/m128

ConvertToVector256Int32(Vector256<Single>)

__m256i _mm256_cvtps_epi32 (__m256 a)

VCVTPS2DQ ymm, ymm/m256

ConvertToVector256Int32WithTruncation(Vector256<Single>)

__m256i _mm256_cvttps_epi32 (__m256 a)

VCVTTPS2DQ ymm, ymm/m256

ConvertToVector256Single(Vector256<Int32>)

__m256 _mm256_cvtepi32_ps (__m256i a)

VCVTDQ2PS ymm, ymm/m256

Divide(Vector256<Double>, Vector256<Double>)

__m256d _mm256_div_pd (__m256d a, __m256d b)

VDIVPD ymm, ymm, ymm/m256

Divide(Vector256<Single>, Vector256<Single>)

__m256 _mm256_div_ps (__m256 a, __m256 b)

VDIVPS ymm, ymm, ymm/m256

DotProduct(Vector256<Single>, Vector256<Single>, Byte)

__m256 _mm256_dp_ps (__m256 a, __m256 b, const int imm8)

VDPPS ymm, ymm, ymm/m256, imm8
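
A minimal sketch of the dot product, assuming AVX support; the control byte selects which products are summed and which destination lanes receive the sum, independently in each 128-bit half:

Vector256<float> a  = Vector256.Create(1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f);
Vector256<float> b  = Vector256.Create(1f);               // broadcast 1.0 to all lanes
// 0xF1: multiply and sum all four lanes of each 128-bit half,
// writing the sum to lane 0 of that half (other lanes become 0).
Vector256<float> dp = Avx.DotProduct(a, b, 0xF1);          // lane 0 = 10, lane 4 = 26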

DuplicateEvenIndexed(Vector256<Double>)

__m256d _mm256_movedup_pd (__m256d a)

VMOVDDUP ymm, ymm/m256

DuplicateEvenIndexed(Vector256<Single>)

__m256 _mm256_moveldup_ps (__m256 a)

VMOVSLDUP ymm, ymm/m256

DuplicateOddIndexed(Vector256<Single>)

__m256 _mm256_movehdup_ps (__m256 a)

VMOVSHDUP ymm, ymm/m256

Equals(Object)

Determines whether the specified object is equal to the current object.

(Inherited from Object)

ExtractVector128(Vector256<Byte>, Byte)

__m128i _mm256_extractf128_si256 (__m256i a, const int imm8)

VEXTRACTF128 xmm/m128, ymm, imm8

ExtractVector128(Vector256<Double>, Byte)

__m128d _mm256_extractf128_pd (__m256d a, const int imm8)

VEXTRACTF128 xmm/m128, ymm, imm8

ExtractVector128(Vector256<Int16>, Byte)

__m128i _mm256_extractf128_si256 (__m256i a, const int imm8)

VEXTRACTF128 xmm/m128, ymm, imm8

ExtractVector128(Vector256<Int32>, Byte)

__m128i _mm256_extractf128_si256 (__m256i a, const int imm8)

VEXTRACTF128 xmm/m128, ymm, imm8

ExtractVector128(Vector256<Int64>, Byte)

__m128i _mm256_extractf128_si256 (__m256i a, const int imm8)

VEXTRACTF128 xmm/m128, ymm, imm8

ExtractVector128(Vector256<SByte>, Byte)

__m128i _mm256_extractf128_si256 (__m256i a, const int imm8)

VEXTRACTF128 xmm/m128, ymm, imm8

ExtractVector128(Vector256<Single>, Byte)

__m128 _mm256_extractf128_ps (__m256 a, const int imm8)

VEXTRACTF128 xmm/m128, ymm, imm8

ExtractVector128(Vector256<UInt16>, Byte)

__m128i _mm256_extractf128_si256 (__m256i a, const int imm8)

VEXTRACTF128 xmm/m128, ymm, imm8

ExtractVector128(Vector256<UInt32>, Byte)

__m128i _mm256_extractf128_si256 (__m256i a, const int imm8)

VEXTRACTF128 xmm/m128, ymm, imm8

ExtractVector128(Vector256<UInt64>, Byte)

__m128i _mm256_extractf128_si256 (__m256i a, const int imm8)

VEXTRACTF128 xmm/m128, ymm, imm8
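
A minimal sketch of extracting a 128-bit half, assuming AVX support:

Vector256<int> v = Vector256.Create(0, 1, 2, 3, 4, 5, 6, 7);
Vector128<int> low  = Avx.ExtractVector128(v, 0);   // <0, 1, 2, 3>
Vector128<int> high = Avx.ExtractVector128(v, 1);   // <4, 5, 6, 7>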

Floor(Vector256<Double>)

__m256d _mm256_floor_pd (__m256d a)

VROUNDPD ymm, ymm/m256, imm8(9)

Floor(Vector256<Single>)

__m256 _mm256_floor_ps (__m256 a)

VROUNDPS ymm, ymm/m256, imm8(9)

GetHashCode()

Serves as the default hash function.

(Inherited from Object)

GetType()

Gets the Type of the current instance.

(Inherited from Object)

HorizontalAdd(Vector256<Double>, Vector256<Double>)

__m256d _mm256_hadd_pd (__m256d a, __m256d b)

VHADDPD ymm, ymm, ymm/m256

HorizontalAdd(Vector256<Single>, Vector256<Single>)

__m256 _mm256_hadd_ps (__m256 a, __m256 b)

VHADDPS ymm, ymm, ymm/m256

HorizontalSubtract(Vector256<Double>, Vector256<Double>)

__m256d _mm256_hsub_pd (__m256d a, __m256d b)

VHSUBPD ymm, ymm, ymm/m256

HorizontalSubtract(Vector256<Single>, Vector256<Single>)

__m256 _mm256_hsub_ps (__m256 a, __m256 b)

VHSUBPS ymm, ymm, ymm/m256

InsertVector128(Vector256<Byte>, Vector128<Byte>, Byte)

__m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8)

VINSERTF128 ymm, ymm, xmm/m128, imm8

InsertVector128(Vector256<Double>, Vector128<Double>, Byte)

__m256d _mm256_insertf128_pd (__m256d a, __m128d b, int imm8)

VINSERTF128 ymm, ymm, xmm/m128, imm8

InsertVector128(Vector256<Int16>, Vector128<Int16>, Byte)

__m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8)

VINSERTF128 ymm, ymm, xmm/m128, imm8

InsertVector128(Vector256<Int32>, Vector128<Int32>, Byte)

__m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8)

VINSERTF128 ymm, ymm, xmm/m128, imm8

InsertVector128(Vector256<Int64>, Vector128<Int64>, Byte)

__m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8)

VINSERTF128 ymm, ymm, xmm/m128, imm8

InsertVector128(Vector256<SByte>, Vector128<SByte>, Byte)

__m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8)

VINSERTF128 ymm, ymm, xmm/m128, imm8

InsertVector128(Vector256<Single>, Vector128<Single>, Byte)

__m256 _mm256_insertf128_ps (__m256 a, __m128 b, int imm8)

VINSERTF128 ymm, ymm, xmm/m128, imm8

InsertVector128(Vector256<UInt16>, Vector128<UInt16>, Byte)

__m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8)

VINSERTF128 ymm, ymm, xmm/m128, imm8

InsertVector128(Vector256<UInt32>, Vector128<UInt32>, Byte)

__m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8)

VINSERTF128 ymm, ymm, xmm/m128, imm8

InsertVector128(Vector256<UInt64>, Vector128<UInt64>, Byte)

__m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8)

VINSERTF128 ymm, ymm, xmm/m128, imm8
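
A minimal sketch of inserting a 128-bit half, assuming AVX support:

Vector128<int> half = Vector128.Create(9, 9, 9, 9);
Vector256<int> v = Avx.InsertVector128(Vector256<int>.Zero, half, 1); // <0, 0, 0, 0, 9, 9, 9, 9>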

LoadAlignedVector256(Byte*)

__m256i _mm256_load_si256 (__m256i const * mem_addr)

VMOVDQA ymm, m256

LoadAlignedVector256(Double*)

__m256d _mm256_load_pd (double const * mem_addr)

VMOVAPD ymm, ymm/m256

LoadAlignedVector256(Int16*)

__m256i _mm256_load_si256 (__m256i const * mem_addr)

VMOVDQA ymm, m256

LoadAlignedVector256(Int32*)

__m256i _mm256_load_si256 (__m256i const * mem_addr)

VMOVDQA ymm, m256

LoadAlignedVector256(Int64*)

__m256i _mm256_load_si256 (__m256i const * mem_addr)

VMOVDQA ymm, m256

LoadAlignedVector256(SByte*)

__m256i _mm256_load_si256 (__m256i const * mem_addr)

VMOVDQA ymm, m256

LoadAlignedVector256(Single*)

__m256 _mm256_load_ps (float const * mem_addr)

VMOVAPS ymm, ymm/m256

LoadAlignedVector256(UInt16*)

__m256i _mm256_load_si256 (__m256i const * mem_addr)

VMOVDQA ymm, m256

LoadAlignedVector256(UInt32*)

__m256i _mm256_load_si256 (__m256i const * mem_addr)

VMOVDQA ymm, m256

LoadAlignedVector256(UInt64*)

__m256i _mm256_load_si256 (__m256i const * mem_addr)

VMOVDQA ymm, m256

LoadDquVector256(Byte*)

__m256i _mm256_lddqu_si256 (__m256i const * mem_addr)

VLDDQU ymm, m256

LoadDquVector256(Int16*)

__m256i _mm256_lddqu_si256 (__m256i const * mem_addr)

VLDDQU ymm, m256

LoadDquVector256(Int32*)

__m256i _mm256_lddqu_si256 (__m256i const * mem_addr)

VLDDQU ymm, m256

LoadDquVector256(Int64*)

__m256i _mm256_lddqu_si256 (__m256i const * mem_addr)

VLDDQU ymm, m256

LoadDquVector256(SByte*)

__m256i _mm256_lddqu_si256 (__m256i const * mem_addr)

VLDDQU ymm, m256

LoadDquVector256(UInt16*)

__m256i _mm256_lddqu_si256 (__m256i const * mem_addr)

VLDDQU ymm, m256

LoadDquVector256(UInt32*)

__m256i _mm256_lddqu_si256 (__m256i const * mem_addr)

VLDDQU ymm, m256

LoadDquVector256(UInt64*)

__m256i _mm256_lddqu_si256 (__m256i const * mem_addr)

VLDDQU ymm, m256

LoadVector256(Byte*)

__m256i _mm256_loadu_si256 (__m256i const * mem_addr)

VMOVDQU ymm, m256

LoadVector256(Double*)

__m256d _mm256_loadu_pd (double const * mem_addr)

VMOVUPD ymm, ymm/m256

LoadVector256(Int16*)

__m256i _mm256_loadu_si256 (__m256i const * mem_addr)

VMOVDQU ymm, m256

LoadVector256(Int32*)

__m256i _mm256_loadu_si256 (__m256i const * mem_addr)

VMOVDQU ymm, m256

LoadVector256(Int64*)

__m256i _mm256_loadu_si256 (__m256i const * mem_addr)

VMOVDQU ymm, m256

LoadVector256(SByte*)

__m256i _mm256_loadu_si256 (__m256i const * mem_addr)

VMOVDQU ymm, m256

LoadVector256(Single*)

__m256 _mm256_loadu_ps (float const * mem_addr)

VMOVUPS ymm, ymm/m256

LoadVector256(UInt16*)

__m256i _mm256_loadu_si256 (__m256i const * mem_addr)

VMOVDQU ymm, m256

LoadVector256(UInt32*)

__m256i _mm256_loadu_si256 (__m256i const * mem_addr)

VMOVDQU ymm, m256

LoadVector256(UInt64*)

__m256i _mm256_loadu_si256 (__m256i const * mem_addr)

VMOVDQU ymm, m256
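
A minimal sketch of an unaligned load from a managed array, assuming AVX support; fixed pins the array so its address is stable:

double[] data = { 1, 2, 3, 4, 5, 6, 7, 8 };
unsafe
{
    fixed (double* p = data)
    {
        Vector256<double> first  = Avx.LoadVector256(p);     // elements 0..3
        Vector256<double> second = Avx.LoadVector256(p + 4); // elements 4..7
    }
}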

MaskLoad(Double*, Vector128<Double>)

__m128d _mm_maskload_pd (double const * mem_addr, __m128i mask)

VMASKMOVPD xmm, xmm, m128

MaskLoad(Double*, Vector256<Double>)

__m256d _mm256_maskload_pd (double const * mem_addr, __m256i mask)

VMASKMOVPD ymm, ymm, m256

MaskLoad(Single*, Vector128<Single>)

__m128 _mm_maskload_ps (float const * mem_addr, __m128i mask)

VMASKMOVPS xmm, xmm, m128

MaskLoad(Single*, Vector256<Single>)

__m256 _mm256_maskload_ps (float const * mem_addr, __m256i mask)

VMASKMOVPS ymm, ymm, m256

MaskStore(Double*, Vector128<Double>, Vector128<Double>)

void _mm_maskstore_pd (double * mem_addr, __m128i mask, __m128d a)

VMASKMOVPD m128, xmm, xmm

MaskStore(Double*, Vector256<Double>, Vector256<Double>)

void _mm256_maskstore_pd (double * mem_addr, __m256i mask, __m256d a)

VMASKMOVPD m256, ymm, ymm

MaskStore(Single*, Vector128<Single>, Vector128<Single>)

void _mm_maskstore_ps (float * mem_addr, __m128i mask, __m128 a)

VMASKMOVPS m128, xmm, xmm

MaskStore(Single*, Vector256<Single>, Vector256<Single>)

void _mm256_maskstore_ps (float * mem_addr, __m256i mask, __m256 a)

VMASKMOVPS m256, ymm, ymm
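
A minimal sketch of masked loads and stores, assuming AVX support; only lanes whose mask element has its most significant bit set are read or written:

double[] src = { 10, 20, 30, 40 };
double[] dst = new double[4];
// Sign bit set in lanes 0 and 2 only, so only those lanes are touched.
Vector256<double> mask = Vector256.Create(-1L, 0L, -1L, 0L).AsDouble();
unsafe
{
    fixed (double* ps = src, pd = dst)
    {
        Vector256<double> loaded = Avx.MaskLoad(ps, mask);   // <10, 0, 30, 0>
        Avx.MaskStore(pd, mask, loaded);                     // dst = { 10, 0, 30, 0 }
    }
}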

Max(Vector256<Double>, Vector256<Double>)

__m256d _mm256_max_pd (__m256d a, __m256d b)

VMAXPD ymm, ymm, ymm/m256

Max(Vector256<Single>, Vector256<Single>)

__m256 _mm256_max_ps (__m256 a, __m256 b)

VMAXPS ymm, ymm, ymm/m256

MemberwiseClone()

Creates a shallow copy of the current Object.

(Inherited from Object)

Min(Vector256<Double>, Vector256<Double>)

__m256d _mm256_min_pd (__m256d a, __m256d b)

VMINPD ymm, ymm, ymm/m256

Min(Vector256<Single>, Vector256<Single>)

__m256 _mm256_min_ps (__m256 a, __m256 b)

VMINPS ymm, ymm, ymm/m256

MoveMask(Vector256<Double>)

int _mm256_movemask_pd (__m256d a)

VMOVMSKPD reg, ymm

MoveMask(Vector256<Single>)

int _mm256_movemask_ps (__m256 a)

VMOVMSKPS reg, ymm
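
A minimal sketch of MoveMask, assuming AVX support; the result packs one sign bit per element into the low bits of an integer:

Vector256<float> v = Vector256.Create(-1f, 2f, -3f, 4f, -5f, 6f, -7f, 8f);
int signBits = Avx.MoveMask(v);   // 0b0101_0101: bit i is the sign bit of lane i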

Multiply(Vector256<Double>, Vector256<Double>)

__m256d _mm256_mul_pd (__m256d a, __m256d b)

VMULPD ymm, ymm, ymm/m256

Multiply(Vector256<Single>, Vector256<Single>)

__m256 _mm256_mul_ps (__m256 a, __m256 b)

VMULPS ymm, ymm, ymm/m256

Or(Vector256<Double>, Vector256<Double>)

__m256d _mm256_or_pd (__m256d a, __m256d b)

VORPD ymm, ymm, ymm/m256

Or(Vector256<Single>, Vector256<Single>)

__m256 _mm256_or_ps (__m256 a, __m256 b)

VORPS ymm, ymm, ymm/m256

Permute(Vector128<Double>, Byte)

__m128d _mm_permute_pd (__m128d a, int imm8)

VPERMILPD xmm, xmm, imm8

Permute(Vector128<Single>, Byte)

__m128 _mm_permute_ps (__m128 a, int imm8)

VPERMILPS xmm, xmm, imm8

Permute(Vector256<Double>, Byte)

__m256d _mm256_permute_pd (__m256d a, int imm8)

VPERMILPD ymm, ymm, imm8

Permute(Vector256<Single>, Byte)

__m256 _mm256_permute_ps (__m256 a, int imm8)

VPERMILPS ymm, ymm, imm8

Permute2x128(Vector256<Byte>, Vector256<Byte>, Byte)

__m256i _mm256_permute2f128_si256 (__m256i a, __m256i b, int imm8)

VPERM2F128 ymm, ymm, ymm/m256, imm8

Permute2x128(Vector256<Double>, Vector256<Double>, Byte)

__m256d _mm256_permute2f128_pd (__m256d a, __m256d b, int imm8)

VPERM2F128 ymm, ymm, ymm/m256, imm8

Permute2x128(Vector256<Int16>, Vector256<Int16>, Byte)

__m256i _mm256_permute2f128_si256 (__m256i a, __m256i b, int imm8)

VPERM2F128 ymm, ymm, ymm/m256, imm8

Permute2x128(Vector256<Int32>, Vector256<Int32>, Byte)

__m256i _mm256_permute2f128_si256 (__m256i a, __m256i b, int imm8)

VPERM2F128 ymm, ymm, ymm/m256, imm8

Permute2x128(Vector256<Int64>, Vector256<Int64>, Byte)

__m256i _mm256_permute2f128_si256 (__m256i a, __m256i b, int imm8)

VPERM2F128 ymm, ymm, ymm/m256, imm8

Permute2x128(Vector256<SByte>, Vector256<SByte>, Byte)

__m256i _mm256_permute2f128_si256 (__m256i a, __m256i b, int imm8)

VPERM2F128 ymm, ymm, ymm/m256, imm8

Permute2x128(Vector256<Single>, Vector256<Single>, Byte)

__m256 _mm256_permute2f128_ps (__m256 a, __m256 b, int imm8)

VPERM2F128 ymm, ymm, ymm/m256, imm8

Permute2x128(Vector256<UInt16>, Vector256<UInt16>, Byte)

__m256i _mm256_permute2f128_si256 (__m256i a, __m256i b, int imm8)

VPERM2F128 ymm, ymm, ymm/m256, imm8

Permute2x128(Vector256<UInt32>, Vector256<UInt32>, Byte)

__m256i _mm256_permute2f128_si256 (__m256i a, __m256i b, int imm8)

VPERM2F128 ymm, ymm, ymm/m256, imm8

Permute2x128(Vector256<UInt64>, Vector256<UInt64>, Byte)

__m256i _mm256_permute2f128_si256 (__m256i a, __m256i b, int imm8)

VPERM2F128 ymm, ymm, ymm/m256, imm8
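
A minimal sketch of Permute2x128, assuming AVX support; each nibble of the control byte selects a 128-bit half from either source:

Vector256<double> a = Vector256.Create(1.0, 2.0, 3.0, 4.0);
Vector256<double> b = Vector256.Create(5.0, 6.0, 7.0, 8.0);
Vector256<double> lows  = Avx.Permute2x128(a, b, 0x20); // <1, 2, 5, 6>  (both low halves)
Vector256<double> highs = Avx.Permute2x128(a, b, 0x31); // <3, 4, 7, 8>  (both high halves)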

PermuteVar(Vector128<Double>, Vector128<Int64>)

__m128d _mm_permutevar_pd (__m128d a, __m128i b)

VPERMILPD xmm, xmm, xmm/m128

PermuteVar(Vector128<Single>, Vector128<Int32>)

__m128 _mm_permutevar_ps (__m128 a, __m128i b)

VPERMILPS xmm, xmm, xmm/m128

PermuteVar(Vector256<Double>, Vector256<Int64>)

__m256d _mm256_permutevar_pd (__m256d a, __m256i b)

VPERMILPD ymm, ymm, ymm/m256

PermuteVar(Vector256<Single>, Vector256<Int32>)

__m256 _mm256_permutevar_ps (__m256 a, __m256i b)

VPERMILPS ymm, ymm, ymm/m256

Reciprocal(Vector256<Single>)

__m256 _mm256_rcp_ps (__m256 a)

VRCPPS ymm, ymm/m256

ReciprocalSqrt(Vector256<Single>)

__m256 _mm256_rsqrt_ps (__m256 a)

VRSQRTPS ymm, ymm/m256

RoundCurrentDirection(Vector256<Double>)

__m256d _mm256_round_pd (__m256d a, _MM_FROUND_CUR_DIRECTION)

VROUNDPD ymm, ymm/m256, imm8(4)

RoundCurrentDirection(Vector256<Single>)

__m256 _mm256_round_ps (__m256 a, _MM_FROUND_CUR_DIRECTION)

VROUNDPS ymm, ymm/m256, imm8(4)

RoundToNearestInteger(Vector256<Double>)

__m256d _mm256_round_pd (__m256d a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)

VROUNDPD ymm, ymm/m256, imm8(8)

RoundToNearestInteger(Vector256<Single>)

__m256 _mm256_round_ps (__m256 a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)

VROUNDPS ymm, ymm/m256, imm8(8)

RoundToNegativeInfinity(Vector256<Double>)

__m256d _mm256_round_pd (__m256d a, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC)

VROUNDPD ymm, ymm/m256, imm8(9)

RoundToNegativeInfinity(Vector256<Single>)

__m256 _mm256_round_ps (__m256 a, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC)

VROUNDPS ymm, ymm/m256, imm8(9)

RoundToPositiveInfinity(Vector256<Double>)

__m256d _mm256_round_pd (__m256d a, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC)

VROUNDPD ymm, ymm/m256, imm8(10)

RoundToPositiveInfinity(Vector256<Single>)

__m256 _mm256_round_ps (__m256 a, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC)

VROUNDPS ymm, ymm/m256, imm8(10)

RoundToZero(Vector256<Double>)

__m256d _mm256_round_pd (__m256d a, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)

VROUNDPD ymm, ymm/m256, imm8(11)

RoundToZero(Vector256<Single>)

__m256 _mm256_round_ps (__m256 a, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)

VROUNDPS ymm, ymm/m256, imm8(11)
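
A minimal sketch of the rounding overloads, assuming AVX support; note that RoundToNearestInteger rounds ties to even:

Vector256<double> v = Vector256.Create(1.3, -1.3, 2.5, -2.5);
Vector256<double> nearest = Avx.RoundToNearestInteger(v);   // <1, -1, 2, -2>  (ties to even)
Vector256<double> down    = Avx.RoundToNegativeInfinity(v); // <1, -2, 2, -3>
Vector256<double> up      = Avx.RoundToPositiveInfinity(v); // <2, -1, 3, -2>
Vector256<double> trunc   = Avx.RoundToZero(v);             // <1, -1, 2, -2>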

Shuffle(Vector256<Double>, Vector256<Double>, Byte)

__m256d _mm256_shuffle_pd (__m256d a, __m256d b, const int imm8)

VSHUFPD ymm, ymm, ymm/m256, imm8

Shuffle(Vector256<Single>, Vector256<Single>, Byte)

__m256 _mm256_shuffle_ps (__m256 a, __m256 b, const int imm8)

VSHUFPS ymm, ymm, ymm/m256, imm8

Sqrt(Vector256<Double>)

__m256d _mm256_sqrt_pd (__m256d a)

VSQRTPD ymm, ymm/m256

Sqrt(Vector256<Single>)

__m256 _mm256_sqrt_ps (__m256 a)

VSQRTPS ymm, ymm/m256

Store(Byte*, Vector256<Byte>)

void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a)

VMOVDQU m256, ymm

Store(Double*, Vector256<Double>)

void _mm256_storeu_pd (double * mem_addr, __m256d a)

VMOVUPD m256, ymm

Store(Int16*, Vector256<Int16>)

void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a)

VMOVDQU m256, ymm

Store(Int32*, Vector256<Int32>)

void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a)

VMOVDQU m256, ymm

Store(Int64*, Vector256<Int64>)

void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a)

VMOVDQU m256, ymm

Store(SByte*, Vector256<SByte>)

void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a)

VMOVDQU m256, ymm

Store(Single*, Vector256<Single>)

void _mm256_storeu_ps (float * mem_addr, __m256 a)

VMOVUPS m256, ymm

Store(UInt16*, Vector256<UInt16>)

void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a)

VMOVDQU m256, ymm

Store(UInt32*, Vector256<UInt32>)

void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a)

VMOVDQU m256, ymm

Store(UInt64*, Vector256<UInt64>)

void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a)

VMOVDQU m256, ymm
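
A minimal sketch of an unaligned store back to a managed array, assuming AVX support:

double[] result = new double[4];
Vector256<double> v = Vector256.Create(1.5, 2.5, 3.5, 4.5);
unsafe
{
    fixed (double* p = result)
    {
        Avx.Store(p, v);   // unaligned store; StoreAligned would require a 32-byte-aligned address
    }
}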

StoreAligned(Byte*, Vector256<Byte>)

void _mm256_store_si256 (__m256i * mem_addr, __m256i a)

VMOVDQA m256, ymm

StoreAligned(Double*, Vector256<Double>)

void _mm256_store_pd (double * mem_addr, __m256d a)

VMOVAPD m256, ymm

StoreAligned(Int16*, Vector256<Int16>)

void _mm256_store_si256 (__m256i * mem_addr, __m256i a)

VMOVDQA m256, ymm

StoreAligned(Int32*, Vector256<Int32>)

void _mm256_store_si256 (__m256i * mem_addr, __m256i a)

VMOVDQA m256, ymm

StoreAligned(Int64*, Vector256<Int64>)

void _mm256_store_si256 (__m256i * mem_addr, __m256i a)

VMOVDQA m256, ymm

StoreAligned(SByte*, Vector256<SByte>)

void _mm256_store_si256 (__m256i * mem_addr, __m256i a)

VMOVDQA m256, ymm

StoreAligned(Single*, Vector256<Single>)

void _mm256_store_ps (float * mem_addr, __m256 a)

VMOVAPS m256, ymm

StoreAligned(UInt16*, Vector256<UInt16>)

void _mm256_store_si256 (__m256i * mem_addr, __m256i a)

VMOVDQA m256, ymm

StoreAligned(UInt32*, Vector256<UInt32>)

void _mm256_store_si256 (__m256i * mem_addr, __m256i a)

VMOVDQA m256, ymm

StoreAligned(UInt64*, Vector256<UInt64>)

void _mm256_store_si256 (__m256i * mem_addr, __m256i a)

VMOVDQA m256, ymm

StoreAlignedNonTemporal(Byte*, Vector256<Byte>)

void _mm256_stream_si256 (__m256i * mem_addr, __m256i a)

VMOVNTDQ m256, ymm

StoreAlignedNonTemporal(Double*, Vector256<Double>)

void _mm256_stream_pd (double * mem_addr, __m256d a)

VMOVNTPD m256, ymm

StoreAlignedNonTemporal(Int16*, Vector256<Int16>)

void _mm256_stream_si256 (__m256i * mem_addr, __m256i a)

VMOVNTDQ m256, ymm

StoreAlignedNonTemporal(Int32*, Vector256<Int32>)

void _mm256_stream_si256 (__m256i * mem_addr, __m256i a)

VMOVNTDQ m256, ymm

StoreAlignedNonTemporal(Int64*, Vector256<Int64>)

void _mm256_stream_si256 (__m256i * mem_addr, __m256i a)

VMOVNTDQ m256, ymm

StoreAlignedNonTemporal(SByte*, Vector256<SByte>)

void _mm256_stream_si256 (__m256i * mem_addr, __m256i a)

VMOVNTDQ m256, ymm

StoreAlignedNonTemporal(Single*, Vector256<Single>)

void _mm256_stream_ps (float * mem_addr, __m256 a)

VMOVNTPS m256, ymm

StoreAlignedNonTemporal(UInt16*, Vector256<UInt16>)

void _mm256_stream_si256 (__m256i * mem_addr, __m256i a)

VMOVNTDQ m256, ymm

StoreAlignedNonTemporal(UInt32*, Vector256<UInt32>)

void _mm256_stream_si256 (__m256i * mem_addr, __m256i a)

VMOVNTDQ m256, ymm

StoreAlignedNonTemporal(UInt64*, Vector256<UInt64>)

void _mm256_stream_si256 (__m256i * mem_addr, __m256i a)

VMOVNTDQ m256, ymm

Subtract(Vector256<Double>, Vector256<Double>)

__m256d _mm256_sub_pd (__m256d a, __m256d b)

VSUBPD ymm, ymm, ymm/m256

Subtract(Vector256<Single>, Vector256<Single>)

__m256 _mm256_sub_ps (__m256 a, __m256 b)

VSUBPS ymm, ymm, ymm/m256

TestC(Vector128<Double>, Vector128<Double>)

int _mm_testc_pd (__m128d a, __m128d b)

VTESTPD xmm, xmm/m128

TestC(Vector128<Single>, Vector128<Single>)

int _mm_testc_ps (__m128 a, __m128 b)

VTESTPS xmm, xmm/m128

TestC(Vector256<Byte>, Vector256<Byte>)

int _mm256_testc_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestC(Vector256<Double>, Vector256<Double>)

int _mm256_testc_pd (__m256d a, __m256d b)

VTESTPD ymm, ymm/m256

TestC(Vector256<Int16>, Vector256<Int16>)

int _mm256_testc_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestC(Vector256<Int32>, Vector256<Int32>)

int _mm256_testc_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestC(Vector256<Int64>, Vector256<Int64>)

int _mm256_testc_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestC(Vector256<SByte>, Vector256<SByte>)

int _mm256_testc_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestC(Vector256<Single>, Vector256<Single>)

int _mm256_testc_ps (__m256 a, __m256 b)

VTESTPS ymm, ymm/m256

TestC(Vector256<UInt16>, Vector256<UInt16>)

int _mm256_testc_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestC(Vector256<UInt32>, Vector256<UInt32>)

int _mm256_testc_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestC(Vector256<UInt64>, Vector256<UInt64>)

int _mm256_testc_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestNotZAndNotC(Vector128<Double>, Vector128<Double>)

int _mm_testnzc_pd (__m128d a, __m128d b)

VTESTPD xmm, xmm/m128

TestNotZAndNotC(Vector128<Single>, Vector128<Single>)

int _mm_testnzc_ps (__m128 a, __m128 b)

VTESTPS xmm, xmm/m128

TestNotZAndNotC(Vector256<Byte>, Vector256<Byte>)

int _mm256_testnzc_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestNotZAndNotC(Vector256<Double>, Vector256<Double>)

int _mm256_testnzc_pd (__m256d a, __m256d b)

VTESTPD ymm, ymm/m256

TestNotZAndNotC(Vector256<Int16>, Vector256<Int16>)

int _mm256_testnzc_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestNotZAndNotC(Vector256<Int32>, Vector256<Int32>)

int _mm256_testnzc_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestNotZAndNotC(Vector256<Int64>, Vector256<Int64>)

int _mm256_testnzc_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestNotZAndNotC(Vector256<SByte>, Vector256<SByte>)

int _mm256_testnzc_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestNotZAndNotC(Vector256<Single>, Vector256<Single>)

int _mm256_testnzc_ps (__m256 a, __m256 b)

VTESTPS ymm, ymm/m256

TestNotZAndNotC(Vector256<UInt16>, Vector256<UInt16>)

int _mm256_testnzc_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestNotZAndNotC(Vector256<UInt32>, Vector256<UInt32>)

int _mm256_testnzc_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestNotZAndNotC(Vector256<UInt64>, Vector256<UInt64>)

int _mm256_testnzc_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestZ(Vector128<Double>, Vector128<Double>)

int _mm_testz_pd (__m128d a, __m128d b)

VTESTPD xmm, xmm/m128

TestZ(Vector128<Single>, Vector128<Single>)

int _mm_testz_ps (__m128 a, __m128 b)

VTESTPS xmm, xmm/m128

TestZ(Vector256<Byte>, Vector256<Byte>)

int _mm256_testz_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestZ(Vector256<Double>, Vector256<Double>)

int _mm256_testz_pd (__m256d a, __m256d b)

VTESTPD ymm, ymm/m256

TestZ(Vector256<Int16>, Vector256<Int16>)

int _mm256_testz_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestZ(Vector256<Int32>, Vector256<Int32>)

int _mm256_testz_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestZ(Vector256<Int64>, Vector256<Int64>)

int _mm256_testz_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestZ(Vector256<SByte>, Vector256<SByte>)

int _mm256_testz_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestZ(Vector256<Single>, Vector256<Single>)

int _mm256_testz_ps (__m256 a, __m256 b)

VTESTPS ymm, ymm/m256

TestZ(Vector256<UInt16>, Vector256<UInt16>)

int _mm256_testz_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestZ(Vector256<UInt32>, Vector256<UInt32>)

int _mm256_testz_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256

TestZ(Vector256<UInt64>, Vector256<UInt64>)

int _mm256_testz_si256 (__m256i a, __m256i b)

VPTEST ymm, ymm/m256
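
A minimal sketch of the test intrinsics, assuming AVX support; note that the managed TestZ, TestC, and TestNotZAndNotC methods return bool even though the native intrinsics return int:

Vector256<int> zero = Vector256<int>.Zero;
Vector256<int> ones = Vector256.Create(-1);                    // all bits set
bool isZero  = Avx.TestZ(zero, ones);                          // true: zero AND ones == 0
bool subset  = Avx.TestC(ones, ones);                          // true: (NOT ones) AND ones == 0
bool neither = Avx.TestNotZAndNotC(Vector256.Create(1), ones); // true: ZF and CF both clear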

ToString()

Returns a string that represents the current object.

(Inherited from Object)

UnpackHigh(Vector256<Double>, Vector256<Double>)

__m256d _mm256_unpackhi_pd (__m256d a, __m256d b)

VUNPCKHPD ymm, ymm, ymm/m256

UnpackHigh(Vector256<Single>, Vector256<Single>)

__m256 _mm256_unpackhi_ps (__m256 a, __m256 b)

VUNPCKHPS ymm, ymm, ymm/m256

UnpackLow(Vector256<Double>, Vector256<Double>)

__m256d _mm256_unpacklo_pd (__m256d a, __m256d b)

VUNPCKLPD ymm, ymm, ymm/m256

UnpackLow(Vector256<Single>, Vector256<Single>)

__m256 _mm256_unpacklo_ps (__m256 a, __m256 b)

VUNPCKLPS ymm, ymm, ymm/m256

Xor(Vector256<Double>, Vector256<Double>)

__m256d _mm256_xor_pd (__m256d a, __m256d b)

VXORPD ymm, ymm, ymm/m256

Xor(Vector256<Single>, Vector256<Single>)

__m256 _mm256_xor_ps (__m256 a, __m256 b)

VXORPS ymm, ymm, ymm/m256

Applies to