#[repr(transparent)]
pub struct V3 {
pub sse: Sse,
pub sse2: Sse2,
pub fxsr: Fxsr,
pub sse3: Sse3,
pub ssse3: Ssse3,
pub sse4_1: Sse4_1,
pub sse4_2: Sse4_2,
pub popcnt: Popcnt,
pub avx: Avx,
pub avx2: Avx2,
pub bmi1: Bmi1,
pub bmi2: Bmi2,
pub fma: Fma,
pub lzcnt: Lzcnt,
}
AVX instruction set.

Notable additions over V2 include:
- Instructions operating on 256-bit SIMD vectors.
- Shift functions with a separate shift per lane, such as V3::shl_dyn_u32x4.
- Fused multiply-accumulate instructions, such as V3::mul_add_f32x4.
Fields

sse: Sse
sse2: Sse2
fxsr: Fxsr
sse3: Sse3
ssse3: Ssse3
sse4_1: Sse4_1
sse4_2: Sse4_2
popcnt: Popcnt
avx: Avx
avx2: Avx2
bmi1: Bmi1
bmi2: Bmi2
fma: Fma
lzcnt: Lzcnt
Implementations

impl V3

AVX instruction set.

Notable additions over V2 include:
- Instructions operating on 256-bit SIMD vectors.
- Shift functions with a separate shift per lane, such as V3::shl_dyn_u32x4.
- Fused multiply-accumulate instructions, such as V3::mul_add_f32x4.
pub unsafe fn new_unchecked() -> Self
Returns a SIMD token type without checking if the required CPU features for this type are available.

Safety
- The required CPU features must be available.
pub fn try_new() -> Option<Self>
Returns a SIMD token type if the required CPU features for this type are available, otherwise returns None.
pub fn is_available() -> bool
Returns true if the required CPU features for this type are available, otherwise returns false.
pub fn vectorize<F: NullaryFnOnce>(self, f: F) -> F::Output
Vectorizes the given function as if the CPU features for this type were applied to it.

Note
For the vectorization to work properly, the given function must be inlined. Consider marking it as #[inline(always)]. See the sketch below.
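A minimal usage sketch, assuming this is the pulp crate's V3 token (so `pulp::V3` and `pulp::f32x8` are the paths, the token is Copy, and closures get the crate's blanket NullaryFnOnce impl). It combines try_new, vectorize, and the splat/mul_add methods documented further down this page:

```rust
use pulp::{f32x8, V3}; // assumed crate paths

// Keep the kernel #[inline(always)] so it is inlined into the
// target-feature function generated by `vectorize`.
#[inline(always)]
fn kernel(simd: V3) -> f32x8 {
    let a = simd.splat_f32x8(1.5);
    let b = simd.splat_f32x8(2.0);
    let c = simd.splat_f32x8(0.5);
    // Fused multiply-add: a * b + c in every lane.
    simd.mul_add_f32x8(a, b, c)
}

fn main() {
    // `try_new` succeeds only if the whole V3 feature set (AVX, AVX2, FMA, ...)
    // is available on the current CPU.
    if let Some(simd) = V3::try_new() {
        let v = simd.vectorize(|| kernel(simd));
        let _ = v;
    } else {
        // Fall back to a scalar implementation here.
    }
}
```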
pub fn to_ref(self) -> &'static Self
Takes a proof of the existence of this SIMD token (self), and returns a persistent reference to it.
impl V3
pub fn splat_u8x32(self, value: u8) -> u8x32
Returns a SIMD vector with all lanes set to the given value.

pub fn splat_i8x32(self, value: i8) -> i8x32
Returns a SIMD vector with all lanes set to the given value.

pub fn splat_m8x32(self, value: m8) -> m8x32
Returns a SIMD vector with all lanes set to the given value.

pub fn splat_u16x16(self, value: u16) -> u16x16
Returns a SIMD vector with all lanes set to the given value.

pub fn splat_i16x16(self, value: i16) -> i16x16
Returns a SIMD vector with all lanes set to the given value.

pub fn splat_m16x16(self, value: m16) -> m16x16
Returns a SIMD vector with all lanes set to the given value.

pub fn splat_u32x8(self, value: u32) -> u32x8
Returns a SIMD vector with all lanes set to the given value.

pub fn splat_i32x8(self, value: i32) -> i32x8
Returns a SIMD vector with all lanes set to the given value.

pub fn splat_m32x8(self, value: m32) -> m32x8
Returns a SIMD vector with all lanes set to the given value.

pub fn splat_f32x8(self, value: f32) -> f32x8
Returns a SIMD vector with all lanes set to the given value.

pub fn splat_u64x4(self, value: u64) -> u64x4
Returns a SIMD vector with all lanes set to the given value.

pub fn splat_i64x4(self, value: i64) -> i64x4
Returns a SIMD vector with all lanes set to the given value.

pub fn splat_m64x4(self, value: m64) -> m64x4
Returns a SIMD vector with all lanes set to the given value.

pub fn splat_f64x4(self, value: f64) -> f64x4
Returns a SIMD vector with all lanes set to the given value.
pub fn and_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Returns the bitwise AND of a and b.

pub fn and_i16x16(self, a: i16x16, b: i16x16) -> i16x16
Returns the bitwise AND of a and b.

pub fn and_m16x16(self, a: m16x16, b: m16x16) -> m16x16
Returns the bitwise AND of a and b.

pub fn xor_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Returns the bitwise XOR of a and b.

pub fn xor_i16x16(self, a: i16x16, b: i16x16) -> i16x16
Returns the bitwise XOR of a and b.

pub fn xor_m16x16(self, a: m16x16, b: m16x16) -> m16x16
Returns the bitwise XOR of a and b.

pub fn not_u16x16(self, a: u16x16) -> u16x16
Returns the bitwise NOT of a.

pub fn not_i16x16(self, a: i16x16) -> i16x16
Returns the bitwise NOT of a.

pub fn not_m16x16(self, a: m16x16) -> m16x16
Returns the bitwise NOT of a.

pub fn andnot_u8x32(self, a: u8x32, b: u8x32) -> u8x32
Returns the bitwise AND of NOT a and b.

pub fn andnot_i8x32(self, a: i8x32, b: i8x32) -> i8x32
Returns the bitwise AND of NOT a and b.

pub fn andnot_m8x32(self, a: m8x32, b: m8x32) -> m8x32
Returns the bitwise AND of NOT a and b.

pub fn andnot_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Returns the bitwise AND of NOT a and b.

pub fn andnot_i16x16(self, a: i16x16, b: i16x16) -> i16x16
Returns the bitwise AND of NOT a and b.

pub fn andnot_m16x16(self, a: m16x16, b: m16x16) -> m16x16
Returns the bitwise AND of NOT a and b.

pub fn andnot_u32x8(self, a: u32x8, b: u32x8) -> u32x8
Returns the bitwise AND of NOT a and b.

pub fn andnot_i32x8(self, a: i32x8, b: i32x8) -> i32x8
Returns the bitwise AND of NOT a and b.

pub fn andnot_m32x8(self, a: m32x8, b: m32x8) -> m32x8
Returns the bitwise AND of NOT a and b.

pub fn andnot_f32x8(self, a: f32x8, b: f32x8) -> f32x8
Returns the bitwise AND of NOT a and b.

pub fn andnot_u64x4(self, a: u64x4, b: u64x4) -> u64x4
Returns the bitwise AND of NOT a and b.

pub fn andnot_i64x4(self, a: i64x4, b: i64x4) -> i64x4
Returns the bitwise AND of NOT a and b.

pub fn andnot_m64x4(self, a: m64x4, b: m64x4) -> m64x4
Returns the bitwise AND of NOT a and b.

pub fn andnot_f64x4(self, a: f64x4, b: f64x4) -> f64x4
Returns the bitwise AND of NOT a and b.
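A small sketch of the bitwise operations above, using only the splat constructors documented on this page (values are hypothetical; results are described in comments rather than asserted):

```rust
use pulp::V3; // assumed crate path

fn bitwise_demo(simd: V3) {
    let a = simd.splat_u16x16(0b1100);
    let b = simd.splat_u16x16(0b1010);

    // Every lane: 0b1100 & 0b1010 = 0b1000.
    let _and = simd.and_u16x16(a, b);
    // Every lane: 0b1100 ^ 0b1010 = 0b0110.
    let _xor = simd.xor_u16x16(a, b);
    // Every lane: !0b1100 = 0xFFF3.
    let _not = simd.not_u16x16(a);
    // Every lane: (!0b1100) & 0b1010 = 0b0010, i.e. the same result as
    // and_u16x16(not_u16x16(a), b) in a single operation.
    let _andnot = simd.andnot_u16x16(a, b);
}
```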
pub fn shl_const_u16x16<const AMOUNT: i32>(self, a: u16x16) -> u16x16
Shift the bits of each lane of a to the left by AMOUNT, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shl_const_i16x16<const AMOUNT: i32>(self, a: i16x16) -> i16x16
Shift the bits of each lane of a to the left by AMOUNT, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shl_const_u32x8<const AMOUNT: i32>(self, a: u32x8) -> u32x8
Shift the bits of each lane of a to the left by AMOUNT, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shl_const_i32x8<const AMOUNT: i32>(self, a: i32x8) -> i32x8
Shift the bits of each lane of a to the left by AMOUNT, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shl_const_u64x4<const AMOUNT: i32>(self, a: u64x4) -> u64x4
Shift the bits of each lane of a to the left by AMOUNT, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shl_const_i64x4<const AMOUNT: i32>(self, a: i64x4) -> i64x4
Shift the bits of each lane of a to the left by AMOUNT, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shr_const_u16x16<const AMOUNT: i32>(self, a: u16x16) -> u16x16
Shift the bits of each lane of a to the right by AMOUNT, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shr_const_i16x16<const AMOUNT: i32>(self, a: i16x16) -> i16x16
Shift the bits of each lane of a to the right by AMOUNT, while shifting in sign bits.
Shifting by a value greater than the bit width of the type sets the result to zero if the sign bit is not set, and to -1 if the sign bit is set.

pub fn shr_const_u32x8<const AMOUNT: i32>(self, a: u32x8) -> u32x8
Shift the bits of each lane of a to the right by AMOUNT, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shr_const_i32x8<const AMOUNT: i32>(self, a: i32x8) -> i32x8
Shift the bits of each lane of a to the right by AMOUNT, while shifting in sign bits.
Shifting by a value greater than the bit width of the type sets the result to zero if the sign bit is not set, and to -1 if the sign bit is set.

pub fn shr_const_u64x4<const AMOUNT: i32>(self, a: u64x4) -> u64x4
Shift the bits of each lane of a to the right by AMOUNT, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
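For the constant shifts, the amount is a const generic parameter. A brief sketch (hypothetical values, results noted in comments):

```rust
use pulp::V3; // assumed crate path

fn const_shift_demo(simd: V3) {
    let x = simd.splat_u32x8(1);
    // Every lane: 1 << 3 = 8.
    let _shl = simd.shl_const_u32x8::<3>(x);

    let y = simd.splat_i32x8(-16);
    // Arithmetic shift, so sign bits are shifted in: every lane becomes -16 >> 2 = -4.
    let _shr = simd.shr_const_i32x8::<2>(y);

    // Per the documentation above, an AMOUNT greater than the lane width
    // yields zero (or -1 for negative lanes of the signed arithmetic shifts).
}
```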
pub fn shl_u16x16(self, a: u16x16, amount: u64x2) -> u16x16
Shift the bits of each lane of a to the left by the first element in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shl_i16x16(self, a: i16x16, amount: u64x2) -> i16x16
Shift the bits of each lane of a to the left by the first element in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shl_u32x8(self, a: u32x8, amount: u64x2) -> u32x8
Shift the bits of each lane of a to the left by the first element in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shl_i32x8(self, a: i32x8, amount: u64x2) -> i32x8
Shift the bits of each lane of a to the left by the first element in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shl_u64x4(self, a: u64x4, amount: u64x2) -> u64x4
Shift the bits of each lane of a to the left by the first element in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shl_i64x4(self, a: i64x4, amount: u64x2) -> i64x4
Shift the bits of each lane of a to the left by the first element in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shr_u16x16(self, a: u16x16, amount: u64x2) -> u16x16
Shift the bits of each lane of a to the right by the first element in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shr_i16x16(self, a: i16x16, amount: u64x2) -> i16x16
Shift the bits of each lane of a to the right by the first element in amount, while shifting in sign bits.
Shifting by a value greater than the bit width of the type sets the result to zero if the sign bit is not set, and to -1 if the sign bit is set.

pub fn shr_u32x8(self, a: u32x8, amount: u64x2) -> u32x8
Shift the bits of each lane of a to the right by the first element in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shr_i32x8(self, a: i32x8, amount: u64x2) -> i32x8
Shift the bits of each lane of a to the right by the first element in amount, while shifting in sign bits.
Shifting by a value greater than the bit width of the type sets the result to zero if the sign bit is not set, and to -1 if the sign bit is set.

pub fn shr_u64x4(self, a: u64x4, amount: u64x2) -> u64x4
Shift the bits of each lane of a to the right by the first element in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
pub fn shl_dyn_u32x4(self, a: u32x4, amount: u32x4) -> u32x4
Shift the bits of each lane of a to the left by the element in the corresponding lane in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shl_dyn_i32x4(self, a: i32x4, amount: u32x4) -> i32x4
Shift the bits of each lane of a to the left by the element in the corresponding lane in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shl_dyn_u32x8(self, a: u32x8, amount: u32x8) -> u32x8
Shift the bits of each lane of a to the left by the element in the corresponding lane in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shl_dyn_i32x8(self, a: i32x8, amount: u32x8) -> i32x8
Shift the bits of each lane of a to the left by the element in the corresponding lane in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shl_dyn_u64x2(self, a: u64x2, amount: u64x2) -> u64x2
Shift the bits of each lane of a to the left by the element in the corresponding lane in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shl_dyn_i64x2(self, a: i64x2, amount: u64x2) -> i64x2
Shift the bits of each lane of a to the left by the element in the corresponding lane in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shl_dyn_u64x4(self, a: u64x4, amount: u64x4) -> u64x4
Shift the bits of each lane of a to the left by the element in the corresponding lane in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shl_dyn_i64x4(self, a: i64x4, amount: u64x4) -> i64x4
Shift the bits of each lane of a to the left by the element in the corresponding lane in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shr_dyn_u32x4(self, a: u32x4, amount: u32x4) -> u32x4
Shift the bits of each lane of a to the right by the element in the corresponding lane in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shr_dyn_u32x8(self, a: u32x8, amount: u32x8) -> u32x8
Shift the bits of each lane of a to the right by the element in the corresponding lane in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shr_dyn_i32x4(self, a: i32x4, amount: i32x4) -> i32x4
Shift the bits of each lane of a to the right by the element in the corresponding lane in amount, while shifting in sign bits.
Shifting by a value greater than the bit width of the type sets the result to zero if the sign bit is not set, and to -1 if the sign bit is set.

pub fn shr_dyn_i32x8(self, a: i32x8, amount: i32x8) -> i32x8
Shift the bits of each lane of a to the right by the element in the corresponding lane in amount, while shifting in sign bits.
Shifting by a value greater than the bit width of the type sets the result to zero if the sign bit is not set, and to -1 if the sign bit is set.

pub fn shr_dyn_u64x2(self, a: u64x2, amount: u64x2) -> u64x2
Shift the bits of each lane of a to the right by the element in the corresponding lane in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.

pub fn shr_dyn_u64x4(self, a: u64x4, amount: u64x4) -> u64x4
Shift the bits of each lane of a to the right by the element in the corresponding lane in amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
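The _dyn variants take a separate shift amount per lane. A sketch, assuming (as in the pulp crate) that u32x8 is a plain tuple struct whose lanes can be written out directly; if that assumption does not hold, build the per-lane amounts however the crate exposes them:

```rust
use pulp::{u32x8, V3}; // assumed crate paths

fn dyn_shift_demo(simd: V3) {
    let x = simd.splat_u32x8(1);
    // One shift amount per lane (assumes u32x8 is a tuple struct with public lanes).
    let amounts = u32x8(0, 1, 2, 3, 4, 5, 6, 7);
    // Lanes become 1, 2, 4, 8, 16, 32, 64, 128.
    let powers = simd.shl_dyn_u32x8(x, amounts);
    // Shifting right by the same per-lane amounts brings every lane back to 1.
    let _ones = simd.shr_dyn_u32x8(powers, amounts);
}
```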
pub fn sub_f32x8(self, a: f32x8, b: f32x8) -> f32x8
Subtracts the elements of each lane of a and b.

pub fn sub_f64x4(self, a: f64x4, b: f64x4) -> f64x4
Subtracts the elements of each lane of a and b.

pub fn subadd_f32x8(self, a: f32x8, b: f32x8) -> f32x8
Alternately subtracts and adds the elements of each lane of a and b.

pub fn subadd_f64x4(self, a: f64x4, b: f64x4) -> f64x4
Alternately subtracts and adds the elements of each lane of a and b.

pub fn mul_f32x8(self, a: f32x8, b: f32x8) -> f32x8
Multiplies the elements of each lane of a and b.

pub fn mul_f64x4(self, a: f64x4, b: f64x4) -> f64x4
Multiplies the elements of each lane of a and b.
pub fn mul_add_f32x4(self, a: f32x4, b: f32x4, c: f32x4) -> f32x4
Multiplies the elements in each lane of a and b, and adds the results to each lane of c.

pub fn mul_add_f32x8(self, a: f32x8, b: f32x8, c: f32x8) -> f32x8
Multiplies the elements in each lane of a and b, and adds the results to each lane of c.

pub fn mul_add_f64x2(self, a: f64x2, b: f64x2, c: f64x2) -> f64x2
Multiplies the elements in each lane of a and b, and adds the results to each lane of c.

pub fn mul_add_f64x4(self, a: f64x4, b: f64x4, c: f64x4) -> f64x4
Multiplies the elements in each lane of a and b, and adds the results to each lane of c.

pub fn mul_sub_f32x4(self, a: f32x4, b: f32x4, c: f32x4) -> f32x4
Multiplies the elements in each lane of a and b, and subtracts each lane of c from the results.

pub fn mul_sub_f32x8(self, a: f32x8, b: f32x8, c: f32x8) -> f32x8
Multiplies the elements in each lane of a and b, and subtracts each lane of c from the results.

pub fn mul_sub_f64x2(self, a: f64x2, b: f64x2, c: f64x2) -> f64x2
Multiplies the elements in each lane of a and b, and subtracts each lane of c from the results.

pub fn mul_sub_f64x4(self, a: f64x4, b: f64x4, c: f64x4) -> f64x4
Multiplies the elements in each lane of a and b, and subtracts each lane of c from the results.

pub fn negate_mul_add_f32x4(self, a: f32x4, b: f32x4, c: f32x4) -> f32x4
Multiplies the elements in each lane of a and b, negates the results, and adds them to each lane of c.

pub fn negate_mul_add_f32x8(self, a: f32x8, b: f32x8, c: f32x8) -> f32x8
Multiplies the elements in each lane of a and b, negates the results, and adds them to each lane of c.

pub fn negate_mul_add_f64x2(self, a: f64x2, b: f64x2, c: f64x2) -> f64x2
Multiplies the elements in each lane of a and b, negates the results, and adds them to each lane of c.

pub fn negate_mul_add_f64x4(self, a: f64x4, b: f64x4, c: f64x4) -> f64x4
Multiplies the elements in each lane of a and b, negates the results, and adds them to each lane of c.

pub fn negate_mul_sub_f32x4(self, a: f32x4, b: f32x4, c: f32x4) -> f32x4
Multiplies the elements in each lane of a and b, and subtracts each lane of c from the negation of the results.

pub fn negate_mul_sub_f32x8(self, a: f32x8, b: f32x8, c: f32x8) -> f32x8
Multiplies the elements in each lane of a and b, and subtracts each lane of c from the negation of the results.

pub fn negate_mul_sub_f64x2(self, a: f64x2, b: f64x2, c: f64x2) -> f64x2
Multiplies the elements in each lane of a and b, and subtracts each lane of c from the negation of the results.

pub fn negate_mul_sub_f64x4(self, a: f64x4, b: f64x4, c: f64x4) -> f64x4
Multiplies the elements in each lane of a and b, and subtracts each lane of c from the negation of the results.
pub fn mul_addsub_f32x4(self, a: f32x4, b: f32x4, c: f32x4) -> f32x4
Multiplies the elements in each lane of a and b, and alternately adds/subtracts c to/from the results.

pub fn mul_addsub_f32x8(self, a: f32x8, b: f32x8, c: f32x8) -> f32x8
Multiplies the elements in each lane of a and b, and alternately adds/subtracts c to/from the results.

pub fn mul_addsub_f64x2(self, a: f64x2, b: f64x2, c: f64x2) -> f64x2
Multiplies the elements in each lane of a and b, and alternately adds/subtracts c to/from the results.

pub fn mul_addsub_f64x4(self, a: f64x4, b: f64x4, c: f64x4) -> f64x4
Multiplies the elements in each lane of a and b, and alternately adds/subtracts c to/from the results.

pub fn mul_subadd_f32x4(self, a: f32x4, b: f32x4, c: f32x4) -> f32x4
Multiplies the elements in each lane of a and b, and alternately subtracts/adds c from/to the results.

pub fn mul_subadd_f32x8(self, a: f32x8, b: f32x8, c: f32x8) -> f32x8
Multiplies the elements in each lane of a and b, and alternately subtracts/adds c from/to the results.

pub fn mul_subadd_f64x2(self, a: f64x2, b: f64x2, c: f64x2) -> f64x2
Multiplies the elements in each lane of a and b, and alternately subtracts/adds c from/to the results.

pub fn mul_subadd_f64x4(self, a: f64x4, b: f64x4, c: f64x4) -> f64x4
Multiplies the elements in each lane of a and b, and alternately subtracts/adds c from/to the results.
pub fn div_f32x8(self, a: f32x8, b: f32x8) -> f32x8
Divides the elements of each lane of a and b.

pub fn div_f64x4(self, a: f64x4, b: f64x4) -> f64x4
Divides the elements of each lane of a and b.
pub fn wrapping_add_u8x32(self, a: u8x32, b: u8x32) -> u8x32
Adds the elements of each lane of a and b, with wrapping on overflow.

pub fn wrapping_add_i8x32(self, a: i8x32, b: i8x32) -> i8x32
Adds the elements of each lane of a and b, with wrapping on overflow.

pub fn wrapping_add_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Adds the elements of each lane of a and b, with wrapping on overflow.

pub fn wrapping_add_i16x16(self, a: i16x16, b: i16x16) -> i16x16
Adds the elements of each lane of a and b, with wrapping on overflow.

pub fn wrapping_add_u32x8(self, a: u32x8, b: u32x8) -> u32x8
Adds the elements of each lane of a and b, with wrapping on overflow.

pub fn wrapping_add_i32x8(self, a: i32x8, b: i32x8) -> i32x8
Adds the elements of each lane of a and b, with wrapping on overflow.

pub fn wrapping_add_u64x4(self, a: u64x4, b: u64x4) -> u64x4
Adds the elements of each lane of a and b, with wrapping on overflow.

pub fn wrapping_add_i64x4(self, a: i64x4, b: i64x4) -> i64x4
Adds the elements of each lane of a and b, with wrapping on overflow.

pub fn saturating_add_u8x32(self, a: u8x32, b: u8x32) -> u8x32
Adds the elements of each lane of a and b, with saturation.

pub fn saturating_add_i8x32(self, a: i8x32, b: i8x32) -> i8x32
Adds the elements of each lane of a and b, with saturation.

pub fn saturating_add_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Adds the elements of each lane of a and b, with saturation.

pub fn saturating_add_i16x16(self, a: i16x16, b: i16x16) -> i16x16
Adds the elements of each lane of a and b, with saturation.
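A sketch contrasting wrapping and saturating addition (hypothetical values, results noted in comments):

```rust
use pulp::V3; // assumed crate path

fn overflow_demo(simd: V3) {
    let a = simd.splat_u8x32(250);
    let b = simd.splat_u8x32(10);
    // Every lane wraps: (250 + 10) mod 256 = 4.
    let _wrapped = simd.wrapping_add_u8x32(a, b);
    // Every lane saturates at the type's maximum: 255.
    let _saturated = simd.saturating_add_u8x32(a, b);
}
```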
pub fn wrapping_sub_u8x32(self, a: u8x32, b: u8x32) -> u8x32
Subtracts the elements of each lane of a and b, with wrapping on overflow.

pub fn wrapping_sub_i8x32(self, a: i8x32, b: i8x32) -> i8x32
Subtracts the elements of each lane of a and b, with wrapping on overflow.

pub fn wrapping_sub_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Subtracts the elements of each lane of a and b, with wrapping on overflow.

pub fn wrapping_sub_i16x16(self, a: i16x16, b: i16x16) -> i16x16
Subtracts the elements of each lane of a and b, with wrapping on overflow.

pub fn wrapping_sub_u32x8(self, a: u32x8, b: u32x8) -> u32x8
Subtracts the elements of each lane of a and b, with wrapping on overflow.

pub fn wrapping_sub_i32x8(self, a: i32x8, b: i32x8) -> i32x8
Subtracts the elements of each lane of a and b, with wrapping on overflow.

pub fn wrapping_sub_u64x4(self, a: u64x4, b: u64x4) -> u64x4
Subtracts the elements of each lane of a and b, with wrapping on overflow.

pub fn wrapping_sub_i64x4(self, a: i64x4, b: i64x4) -> i64x4
Subtracts the elements of each lane of a and b, with wrapping on overflow.

pub fn saturating_sub_u8x32(self, a: u8x32, b: u8x32) -> u8x32
Subtracts the elements of each lane of a and b, with saturation.

pub fn saturating_sub_i8x32(self, a: i8x32, b: i8x32) -> i8x32
Subtracts the elements of each lane of a and b, with saturation.

pub fn saturating_sub_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Subtracts the elements of each lane of a and b, with saturation.

pub fn saturating_sub_i16x16(self, a: i16x16, b: i16x16) -> i16x16
Subtracts the elements of each lane of a and b, with saturation.
pub fn wrapping_mul_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Multiplies the elements of each lane of a and b, with wrapping on overflow.

pub fn wrapping_mul_i16x16(self, a: i16x16, b: i16x16) -> i16x16
Multiplies the elements of each lane of a and b, with wrapping on overflow.

pub fn wrapping_mul_u32x8(self, a: u32x8, b: u32x8) -> u32x8
Multiplies the elements of each lane of a and b, with wrapping on overflow.

pub fn wrapping_mul_i32x8(self, a: i32x8, b: i32x8) -> i32x8
Multiplies the elements of each lane of a and b, with wrapping on overflow.

pub fn widening_mul_u16x16(self, a: u16x16, b: u16x16) -> (u16x16, u16x16)
Multiplies the elements of each lane of a and b, and returns separately the low and high bits of the result.

pub fn widening_mul_i16x16(self, a: i16x16, b: i16x16) -> (i16x16, i16x16)
Multiplies the elements of each lane of a and b, and returns separately the low and high bits of the result.

pub fn widening_mul_u32x8(self, a: u32x8, b: u32x8) -> (u32x8, u32x8)
Multiplies the elements of each lane of a and b, and returns separately the low and high bits of the result.

pub fn widening_mul_i32x8(self, a: i32x8, b: i32x8) -> (i32x8, i32x8)
Multiplies the elements of each lane of a and b, and returns separately the low and high bits of the result.
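A sketch of the widening multiply, which returns the low and high halves of the full-width product separately (hypothetical values, results noted in comments):

```rust
use pulp::V3; // assumed crate path

fn widening_demo(simd: V3) {
    let a = simd.splat_u16x16(0xFFFF);
    let b = simd.splat_u16x16(0xFFFF);
    // Full product per lane: 0xFFFF * 0xFFFF = 0xFFFE_0001.
    // `lo` holds 0x0001 in every lane, `hi` holds 0xFFFE.
    let (_lo, _hi) = simd.widening_mul_u16x16(a, b);
}
```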
pub fn average_u8x32(self, a: u8x32, b: u8x32) -> u8x32
Averages the elements of each lane of a and b.

pub fn average_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Averages the elements of each lane of a and b.

pub fn min_u8x32(self, a: u8x32, b: u8x32) -> u8x32
Computes the elementwise minimum of each lane of a and b.

pub fn min_i8x32(self, a: i8x32, b: i8x32) -> i8x32
Computes the elementwise minimum of each lane of a and b.

pub fn min_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Computes the elementwise minimum of each lane of a and b.

pub fn min_i16x16(self, a: i16x16, b: i16x16) -> i16x16
Computes the elementwise minimum of each lane of a and b.

pub fn min_u32x8(self, a: u32x8, b: u32x8) -> u32x8
Computes the elementwise minimum of each lane of a and b.

pub fn min_i32x8(self, a: i32x8, b: i32x8) -> i32x8
Computes the elementwise minimum of each lane of a and b.

pub fn min_f32x8(self, a: f32x8, b: f32x8) -> f32x8
Computes the elementwise minimum of each lane of a and b.

pub fn min_f64x4(self, a: f64x4, b: f64x4) -> f64x4
Computes the elementwise minimum of each lane of a and b.

pub fn max_u8x32(self, a: u8x32, b: u8x32) -> u8x32
Computes the elementwise maximum of each lane of a and b.

pub fn max_i8x32(self, a: i8x32, b: i8x32) -> i8x32
Computes the elementwise maximum of each lane of a and b.

pub fn max_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Computes the elementwise maximum of each lane of a and b.

pub fn max_i16x16(self, a: i16x16, b: i16x16) -> i16x16
Computes the elementwise maximum of each lane of a and b.

pub fn max_u32x8(self, a: u32x8, b: u32x8) -> u32x8
Computes the elementwise maximum of each lane of a and b.

pub fn max_i32x8(self, a: i32x8, b: i32x8) -> i32x8
Computes the elementwise maximum of each lane of a and b.

pub fn max_f32x8(self, a: f32x8, b: f32x8) -> f32x8
Computes the elementwise maximum of each lane of a and b.

pub fn max_f64x4(self, a: f64x4, b: f64x4) -> f64x4
Computes the elementwise maximum of each lane of a and b.
pub fn abs_f32x8(self, a: f32x8) -> f32x8
Computes the absolute value of the elements of each lane of a.

pub fn abs_f64x4(self, a: f64x4) -> f64x4
Computes the absolute value of the elements of each lane of a.

pub fn unsigned_abs_i8x32(self, a: i8x32) -> u8x32
Computes the unsigned absolute value of the elements of each lane of a.

pub fn unsigned_abs_i16x16(self, a: i16x16) -> u16x16
Computes the unsigned absolute value of the elements of each lane of a.

pub fn unsigned_abs_i32x8(self, a: i32x8) -> u32x8
Computes the unsigned absolute value of the elements of each lane of a.

pub fn apply_sign_i8x32(self, sign: i8x32, a: i8x32) -> i8x32
Applies the sign of each element of sign to the corresponding lane in a.
- If sign is zero, the corresponding element is zeroed.
- If sign is positive, the corresponding element is returned as is.
- If sign is negative, the corresponding element is negated.

pub fn apply_sign_i16x16(self, sign: i16x16, a: i16x16) -> i16x16
Applies the sign of each element of sign to the corresponding lane in a.
- If sign is zero, the corresponding element is zeroed.
- If sign is positive, the corresponding element is returned as is.
- If sign is negative, the corresponding element is negated.

pub fn apply_sign_i32x8(self, sign: i32x8, a: i32x8) -> i32x8
Applies the sign of each element of sign to the corresponding lane in a.
- If sign is zero, the corresponding element is zeroed.
- If sign is positive, the corresponding element is returned as is.
- If sign is negative, the corresponding element is negated.
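A sketch of the sign-related helpers above (hypothetical values, results noted in comments):

```rust
use pulp::V3; // assumed crate path

fn sign_demo(simd: V3) {
    let x = simd.splat_i32x8(-7);
    // Every lane: |-7| = 7, returned as an unsigned vector.
    let _abs = simd.unsigned_abs_i32x8(x);

    let sign = simd.splat_i32x8(-1);
    let a = simd.splat_i32x8(7);
    // The sign is negative in every lane, so every lane of `a` is negated: -7.
    let _signed = simd.apply_sign_i32x8(sign, a);
}
```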
pub fn sqrt_f32x8(self, a: f32x8) -> f32x8
Computes the square roots of the elements of each lane of a.

pub fn sqrt_f64x4(self, a: f64x4) -> f64x4
Computes the square roots of the elements of each lane of a.

pub fn approx_reciprocal_f32x8(self, a: f32x8) -> f32x8
Computes the approximate reciprocal of the elements of each lane of a.

pub fn approx_reciprocal_sqrt_f32x8(self, a: f32x8) -> f32x8
Computes the approximate reciprocal of the square roots of the elements of each lane of a.

pub fn floor_f32x8(self, a: f32x8) -> f32x8
Rounds the elements of each lane of a to the nearest integer towards negative infinity.

pub fn floor_f64x4(self, a: f64x4) -> f64x4
Rounds the elements of each lane of a to the nearest integer towards negative infinity.

pub fn ceil_f32x8(self, a: f32x8) -> f32x8
Rounds the elements of each lane of a to the nearest integer towards positive infinity.

pub fn ceil_f64x4(self, a: f64x4) -> f64x4
Rounds the elements of each lane of a to the nearest integer towards positive infinity.

pub fn round_f32x8(self, a: f32x8) -> f32x8
Rounds the elements of each lane of a to the nearest integer. If two values are equally close, the even value is returned.

pub fn round_f64x4(self, a: f64x4) -> f64x4
Rounds the elements of each lane of a to the nearest integer. If two values are equally close, the even value is returned.

pub fn truncate_f32x8(self, a: f32x8) -> f32x8
Rounds the elements of each lane of a to the nearest integer towards zero.

pub fn truncate_f64x4(self, a: f64x4) -> f64x4
Rounds the elements of each lane of a to the nearest integer towards zero.
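A sketch of the rounding modes (hypothetical values, results noted in comments):

```rust
use pulp::V3; // assumed crate path

fn rounding_demo(simd: V3) {
    let x = simd.splat_f32x8(-1.5);
    // floor: towards negative infinity -> -2.0 in every lane.
    let _floor = simd.floor_f32x8(x);
    // ceil: towards positive infinity -> -1.0.
    let _ceil = simd.ceil_f32x8(x);
    // truncate: towards zero -> -1.0.
    let _trunc = simd.truncate_f32x8(x);
    // round: ties go to the even value, so -1.5 rounds to -2.0.
    let _round = simd.round_f32x8(x);
}
```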
pub fn horizontal_add_pack_i16x16(self, a: i16x16, b: i16x16) -> i16x16
See _mm256_hadd_epi16.

pub fn horizontal_add_pack_i32x8(self, a: i32x8, b: i32x8) -> i32x8
See _mm256_hadd_epi32.

pub fn horizontal_add_pack_f32x8(self, a: f32x8, b: f32x8) -> f32x8
See _mm256_hadd_ps.

pub fn horizontal_add_pack_f64x4(self, a: f64x4, b: f64x4) -> f64x4
See _mm256_hadd_pd.

pub fn horizontal_sub_pack_i16x16(self, a: i16x16, b: i16x16) -> i16x16
See _mm256_hsub_epi16.

pub fn horizontal_sub_pack_i32x8(self, a: i32x8, b: i32x8) -> i32x8
See _mm256_hsub_epi32.

pub fn horizontal_sub_pack_f32x8(self, a: f32x8, b: f32x8) -> f32x8
See _mm256_hsub_ps.

pub fn horizontal_sub_pack_f64x4(self, a: f64x4, b: f64x4) -> f64x4
See _mm256_hsub_pd.

pub fn horizontal_saturating_add_pack_i16x16(self, a: i16x16, b: i16x16) -> i16x16
See _mm256_hadds_epi16.

pub fn horizontal_saturating_sub_pack_i16x16(self, a: i16x16, b: i16x16) -> i16x16
See _mm256_hsubs_epi16.

pub fn multiply_wrapping_add_adjacent_i16x16(self, a: i16x16, b: i16x16) -> i32x8
See _mm256_madd_epi16.

pub fn multiply_saturating_add_adjacent_i8x32(self, a: i8x32, b: i8x32) -> i16x16
See _mm256_maddubs_epi16.

pub fn multisum_of_absolute_differences_u8x32<const OFFSETS: i32>(self, a: u8x32, b: u8x32) -> u16x16
See _mm256_mpsadbw_epu8.

pub fn pack_with_signed_saturation_i16x16(self, a: i16x16, b: i16x16) -> i8x32
See _mm256_packs_epi16.

pub fn pack_with_signed_saturation_i32x8(self, a: i32x8, b: i32x8) -> i16x16
See _mm256_packs_epi32.

pub fn pack_with_unsigned_saturation_i16x16(self, a: i16x16, b: i16x16) -> u8x32
See _mm256_packus_epi16.

pub fn pack_with_unsigned_saturation_i32x8(self, a: i32x8, b: i32x8) -> u16x16
See _mm256_packus_epi32.

pub fn sum_of_absolute_differences_u8x32(self, a: u8x32, b: u8x32) -> u64x4
See _mm256_sad_epu8.
pub fn convert_u8x32_to_i8x32(self, a: u8x32) -> i8x32
Converts a u8x32 to i8x32, elementwise.

pub fn convert_u8x16_to_u16x16(self, a: u8x16) -> u16x16
Converts a u8x16 to u16x16, elementwise.

pub fn convert_u8x16_to_i16x16(self, a: u8x16) -> i16x16
Converts a u8x16 to i16x16, elementwise.

pub fn convert_u8x16_to_u32x8(self, a: u8x16) -> u32x8
Converts a u8x16 to u32x8, elementwise, while truncating the extra elements.

pub fn convert_u8x16_to_i32x8(self, a: u8x16) -> i32x8
Converts a u8x16 to i32x8, elementwise, while truncating the extra elements.

pub fn convert_u8x16_to_u64x4(self, a: u8x16) -> u64x4
Converts a u8x16 to u64x4, elementwise, while truncating the extra elements.

pub fn convert_u8x16_to_i64x4(self, a: u8x16) -> i64x4
Converts a u8x16 to i64x4, elementwise, while truncating the extra elements.

pub fn convert_i8x32_to_u8x32(self, a: i8x32) -> u8x32
Converts a i8x32 to u8x32, elementwise.

pub fn convert_i8x16_to_u16x16(self, a: i8x16) -> u16x16
Converts a i8x16 to u16x16, elementwise.

pub fn convert_i8x16_to_i16x16(self, a: i8x16) -> i16x16
Converts a i8x16 to i16x16, elementwise.

pub fn convert_i8x16_to_u32x8(self, a: i8x16) -> u32x8
Converts a i8x16 to u32x8, elementwise, while truncating the extra elements.

pub fn convert_i8x16_to_i32x8(self, a: i8x16) -> i32x8
Converts a i8x16 to i32x8, elementwise, while truncating the extra elements.

pub fn convert_i8x16_to_u64x4(self, a: i8x16) -> u64x4
Converts a i8x16 to u64x4, elementwise, while truncating the extra elements.

pub fn convert_i8x16_to_i64x4(self, a: i8x16) -> i64x4
Converts a i8x16 to i64x4, elementwise, while truncating the extra elements.

pub fn convert_u16x16_to_i16x16(self, a: u16x16) -> i16x16
Converts a u16x16 to i16x16, elementwise.

pub fn convert_u16x8_to_u32x8(self, a: u16x8) -> u32x8
Converts a u16x8 to u32x8, elementwise.

pub fn convert_u16x8_to_i32x8(self, a: u16x8) -> i32x8
Converts a u16x8 to i32x8, elementwise.

pub fn convert_u16x8_to_u64x4(self, a: u16x8) -> u64x4
Converts a u16x8 to u64x4, elementwise, while truncating the extra elements.

pub fn convert_u16x8_to_i64x4(self, a: u16x8) -> i64x4
Converts a u16x8 to i64x4, elementwise, while truncating the extra elements.

pub fn convert_i16x16_to_u16x16(self, a: i16x16) -> u16x16
Converts a i16x16 to u16x16, elementwise.

pub fn convert_i16x8_to_u32x8(self, a: i16x8) -> u32x8
Converts a i16x8 to u32x8, elementwise.

pub fn convert_i16x8_to_i32x8(self, a: i16x8) -> i32x8
Converts a i16x8 to i32x8, elementwise.

pub fn convert_i16x8_to_u64x4(self, a: i16x8) -> u64x4
Converts a i16x8 to u64x4, elementwise, while truncating the extra elements.

pub fn convert_i16x8_to_i64x4(self, a: i16x8) -> i64x4
Converts a i16x8 to i64x4, elementwise, while truncating the extra elements.

pub fn convert_u32x8_to_i32x8(self, a: u32x8) -> i32x8
Converts a u32x8 to i32x8, elementwise.

pub fn convert_u32x4_to_u64x4(self, a: u32x4) -> u64x4
Converts a u32x4 to u64x4, elementwise.

pub fn convert_u32x4_to_i64x4(self, a: u32x4) -> i64x4
Converts a u32x4 to i64x4, elementwise.

pub fn convert_i32x8_to_u32x8(self, a: i32x8) -> u32x8
Converts a i32x8 to u32x8, elementwise.

pub fn convert_i32x8_to_f32x8(self, a: i32x8) -> f32x8
Converts a i32x8 to f32x8, elementwise.

pub fn convert_i32x4_to_u64x4(self, a: i32x4) -> u64x4
Converts a i32x4 to u64x4, elementwise.

pub fn convert_i32x4_to_i64x4(self, a: i32x4) -> i64x4
Converts a i32x4 to i64x4, elementwise.

pub fn convert_i32x4_to_f64x4(self, a: i32x4) -> f64x4
Converts a i32x4 to f64x4, elementwise.

pub fn convert_f32x8_to_i32x8(self, a: f32x8) -> i32x8
Converts a f32x8 to i32x8, elementwise.

pub fn convert_f32x4_to_f64x4(self, a: f32x4) -> f64x4
Converts a f32x4 to f64x4, elementwise.

pub fn convert_f64x4_to_i32x4(self, a: f64x4) -> i32x4
Converts a f64x4 to i32x4, elementwise.

pub fn convert_f64x4_to_f32x4(self, a: f64x4) -> f32x4
Converts a f64x4 to f32x4, elementwise.
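A sketch of the integer/float conversions (hypothetical values, results noted in comments):

```rust
use pulp::V3; // assumed crate path

fn convert_demo(simd: V3) {
    let ints = simd.splat_i32x8(3);
    // Every lane becomes 3.0f32.
    let floats = simd.convert_i32x8_to_f32x8(ints);
    // Back to integers: every lane is 3 again.
    let _round_trip = simd.convert_f32x8_to_i32x8(floats);
}
```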
sourcepub fn cmp_eq_u8x32(self, a: u8x32, b: u8x32) -> m8x32
pub fn cmp_eq_u8x32(self, a: u8x32, b: u8x32) -> m8x32
Compares the elements in each lane of a
and b
for equality.
sourcepub fn cmp_eq_i8x32(self, a: i8x32, b: i8x32) -> m8x32
pub fn cmp_eq_i8x32(self, a: i8x32, b: i8x32) -> m8x32
Compares the elements in each lane of a
and b
for equality.
sourcepub fn cmp_eq_u16x16(self, a: u16x16, b: u16x16) -> m16x16
pub fn cmp_eq_u16x16(self, a: u16x16, b: u16x16) -> m16x16
Compares the elements in each lane of a
and b
for equality.
sourcepub fn cmp_eq_i16x16(self, a: i16x16, b: i16x16) -> m16x16
pub fn cmp_eq_i16x16(self, a: i16x16, b: i16x16) -> m16x16
Compares the elements in each lane of a
and b
for equality.
sourcepub fn cmp_eq_u32x8(self, a: u32x8, b: u32x8) -> m32x8
pub fn cmp_eq_u32x8(self, a: u32x8, b: u32x8) -> m32x8
Compares the elements in each lane of a
and b
for equality.
sourcepub fn cmp_eq_i32x8(self, a: i32x8, b: i32x8) -> m32x8
pub fn cmp_eq_i32x8(self, a: i32x8, b: i32x8) -> m32x8
Compares the elements in each lane of a
and b
for equality.
sourcepub fn cmp_eq_u64x4(self, a: u64x4, b: u64x4) -> m64x4
pub fn cmp_eq_u64x4(self, a: u64x4, b: u64x4) -> m64x4
Compares the elements in each lane of a
and b
for equality.
sourcepub fn cmp_eq_i64x4(self, a: i64x4, b: i64x4) -> m64x4
pub fn cmp_eq_i64x4(self, a: i64x4, b: i64x4) -> m64x4
Compares the elements in each lane of a
and b
for equality.
sourcepub fn cmp_gt_u8x32(self, a: u8x32, b: u8x32) -> m8x32
pub fn cmp_gt_u8x32(self, a: u8x32, b: u8x32) -> m8x32
Compares the elements in each lane of a
and b
for greater-than.
sourcepub fn cmp_gt_i8x32(self, a: i8x32, b: i8x32) -> m8x32
pub fn cmp_gt_i8x32(self, a: i8x32, b: i8x32) -> m8x32
Compares the elements in each lane of a
and b
for greater-than.
sourcepub fn cmp_gt_u16x16(self, a: u16x16, b: u16x16) -> m16x16
pub fn cmp_gt_u16x16(self, a: u16x16, b: u16x16) -> m16x16
Compares the elements in each lane of a
and b
for greater-than.
sourcepub fn cmp_gt_i16x16(self, a: i16x16, b: i16x16) -> m16x16
pub fn cmp_gt_i16x16(self, a: i16x16, b: i16x16) -> m16x16
Compares the elements in each lane of a
and b
for greater-than.
sourcepub fn cmp_gt_u32x8(self, a: u32x8, b: u32x8) -> m32x8
pub fn cmp_gt_u32x8(self, a: u32x8, b: u32x8) -> m32x8
Compares the elements in each lane of a
and b
for greater-than.
sourcepub fn cmp_gt_i32x8(self, a: i32x8, b: i32x8) -> m32x8
pub fn cmp_gt_i32x8(self, a: i32x8, b: i32x8) -> m32x8
Compares the elements in each lane of a
and b
for greater-than.
sourcepub fn cmp_gt_u64x4(self, a: u64x4, b: u64x4) -> m64x4
pub fn cmp_gt_u64x4(self, a: u64x4, b: u64x4) -> m64x4
Compares the elements in each lane of a
and b
for greater-than.
sourcepub fn cmp_gt_i64x4(self, a: i64x4, b: i64x4) -> m64x4
pub fn cmp_gt_i64x4(self, a: i64x4, b: i64x4) -> m64x4
Compares the elements in each lane of a and b for greater-than.
pub fn cmp_ge_u8x32(self, a: u8x32, b: u8x32) -> m8x32
Compares the elements in each lane of a and b for greater-than-or-equal-to.
pub fn cmp_ge_i8x32(self, a: i8x32, b: i8x32) -> m8x32
Compares the elements in each lane of a and b for greater-than-or-equal-to.
pub fn cmp_ge_u16x16(self, a: u16x16, b: u16x16) -> m16x16
Compares the elements in each lane of a and b for greater-than-or-equal-to.
pub fn cmp_ge_i16x16(self, a: i16x16, b: i16x16) -> m16x16
Compares the elements in each lane of a and b for greater-than-or-equal-to.
pub fn cmp_ge_u32x8(self, a: u32x8, b: u32x8) -> m32x8
Compares the elements in each lane of a and b for greater-than-or-equal-to.
pub fn cmp_ge_i32x8(self, a: i32x8, b: i32x8) -> m32x8
Compares the elements in each lane of a and b for greater-than-or-equal-to.
pub fn cmp_ge_u64x4(self, a: u64x4, b: u64x4) -> m64x4
Compares the elements in each lane of a and b for greater-than-or-equal-to.
pub fn cmp_ge_i64x4(self, a: i64x4, b: i64x4) -> m64x4
Compares the elements in each lane of a and b for greater-than-or-equal-to.
pub fn cmp_lt_u8x32(self, a: u8x32, b: u8x32) -> m8x32
Compares the elements in each lane of a and b for less-than.
pub fn cmp_lt_i8x32(self, a: i8x32, b: i8x32) -> m8x32
Compares the elements in each lane of a and b for less-than.
pub fn cmp_lt_u16x16(self, a: u16x16, b: u16x16) -> m16x16
Compares the elements in each lane of a and b for less-than.
pub fn cmp_lt_i16x16(self, a: i16x16, b: i16x16) -> m16x16
Compares the elements in each lane of a and b for less-than.
pub fn cmp_lt_u32x8(self, a: u32x8, b: u32x8) -> m32x8
Compares the elements in each lane of a and b for less-than.
pub fn cmp_lt_i32x8(self, a: i32x8, b: i32x8) -> m32x8
Compares the elements in each lane of a and b for less-than.
pub fn cmp_lt_u64x4(self, a: u64x4, b: u64x4) -> m64x4
Compares the elements in each lane of a and b for less-than.
pub fn cmp_lt_i64x4(self, a: i64x4, b: i64x4) -> m64x4
Compares the elements in each lane of a and b for less-than.
pub fn cmp_le_u8x32(self, a: u8x32, b: u8x32) -> m8x32
Compares the elements in each lane of a and b for less-than-or-equal-to.
pub fn cmp_le_i8x32(self, a: i8x32, b: i8x32) -> m8x32
Compares the elements in each lane of a and b for less-than-or-equal-to.
pub fn cmp_le_u16x16(self, a: u16x16, b: u16x16) -> m16x16
Compares the elements in each lane of a and b for less-than-or-equal-to.
pub fn cmp_le_i16x16(self, a: i16x16, b: i16x16) -> m16x16
Compares the elements in each lane of a and b for less-than-or-equal-to.
pub fn cmp_le_u32x8(self, a: u32x8, b: u32x8) -> m32x8
Compares the elements in each lane of a and b for less-than-or-equal-to.
pub fn cmp_le_i32x8(self, a: i32x8, b: i32x8) -> m32x8
Compares the elements in each lane of a and b for less-than-or-equal-to.
pub fn cmp_le_u64x4(self, a: u64x4, b: u64x4) -> m64x4
Compares the elements in each lane of a and b for less-than-or-equal-to.
pub fn cmp_le_i64x4(self, a: i64x4, b: i64x4) -> m64x4
Compares the elements in each lane of a and b for less-than-or-equal-to.
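The integer comparisons above return mask vectors (m8x32, m16x16, m32x8, m64x4) rather than booleans; a mask is normally fed straight into one of the select functions documented further below. A minimal sketch of that pattern, assuming this is the pulp crate on an x86_64 target (the import paths are an assumption; adjust them to the actual crate layout):

use pulp::{u32x8, V3}; // assumed paths

// Per-lane minimum of two u32x8 vectors: compare, then pick lane-wise.
fn lane_min_u32(simd: V3, a: u32x8, b: u32x8) -> u32x8 {
    // mask lane i is set exactly where lane i of `a` is less than lane i of `b`
    let a_smaller = simd.cmp_lt_u32x8(a, b);
    // take `a` where the mask is set, `b` elsewhere
    simd.select_u32x8(a_smaller, a, b)
}

fn main() {
    if let Some(simd) = V3::try_new() {
        let a = simd.splat_u32x8(3);
        let b = simd.splat_u32x8(7);
        let min = lane_min_u32(simd, a, b); // every lane is 3
        let _ = min;
    }
}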
pub fn cmp_eq_f32x8(self, a: f32x8, b: f32x8) -> m32x8
Compares the elements in each lane of a and b for equality.
pub fn cmp_eq_f64x4(self, a: f64x4, b: f64x4) -> m64x4
Compares the elements in each lane of a and b for equality.
pub fn cmp_not_eq_f32x8(self, a: f32x8, b: f32x8) -> m32x8
Compares the elements in each lane of a and b for inequality.
pub fn cmp_not_eq_f64x4(self, a: f64x4, b: f64x4) -> m64x4
Compares the elements in each lane of a and b for inequality.
pub fn cmp_gt_f32x8(self, a: f32x8, b: f32x8) -> m32x8
Compares the elements in each lane of a and b for greater-than.
pub fn cmp_gt_f64x4(self, a: f64x4, b: f64x4) -> m64x4
Compares the elements in each lane of a and b for greater-than.
pub fn cmp_ge_f32x8(self, a: f32x8, b: f32x8) -> m32x8
Compares the elements in each lane of a and b for greater-than-or-equal-to.
pub fn cmp_ge_f64x4(self, a: f64x4, b: f64x4) -> m64x4
Compares the elements in each lane of a and b for greater-than-or-equal-to.
pub fn cmp_not_gt_f32x8(self, a: f32x8, b: f32x8) -> m32x8
Compares the elements in each lane of a and b for not-greater-than.
pub fn cmp_not_gt_f64x4(self, a: f64x4, b: f64x4) -> m64x4
Compares the elements in each lane of a and b for not-greater-than.
pub fn cmp_not_ge_f32x8(self, a: f32x8, b: f32x8) -> m32x8
Compares the elements in each lane of a and b for not-greater-than-or-equal.
pub fn cmp_not_ge_f64x4(self, a: f64x4, b: f64x4) -> m64x4
Compares the elements in each lane of a and b for not-greater-than-or-equal.
pub fn cmp_lt_f32x8(self, a: f32x8, b: f32x8) -> m32x8
Compares the elements in each lane of a and b for less-than.
pub fn cmp_lt_f64x4(self, a: f64x4, b: f64x4) -> m64x4
Compares the elements in each lane of a and b for less-than.
pub fn cmp_le_f32x8(self, a: f32x8, b: f32x8) -> m32x8
Compares the elements in each lane of a and b for less-than-or-equal-to.
pub fn cmp_le_f64x4(self, a: f64x4, b: f64x4) -> m64x4
Compares the elements in each lane of a and b for less-than-or-equal-to.
pub fn cmp_not_lt_f32x8(self, a: f32x8, b: f32x8) -> m32x8
Compares the elements in each lane of a and b for not-less-than.
pub fn cmp_not_lt_f64x4(self, a: f64x4, b: f64x4) -> m64x4
Compares the elements in each lane of a and b for not-less-than.
pub fn cmp_not_le_f32x8(self, a: f32x8, b: f32x8) -> m32x8
Compares the elements in each lane of a and b for not-less-than-or-equal.
pub fn cmp_not_le_f64x4(self, a: f64x4, b: f64x4) -> m64x4
Compares the elements in each lane of a and b for not-less-than-or-equal.
pub fn is_nan_f32x8(self, a: f32x8) -> m32x8
Checks if the elements in each lane of a are NaN.
pub fn is_nan_f64x4(self, a: f64x4) -> m64x4
Checks if the elements in each lane of a are NaN.
pub fn is_not_nan_f32x8(self, a: f32x8) -> m32x8
Checks if the elements in each lane of a are not NaN.
pub fn is_not_nan_f64x4(self, a: f64x4) -> m64x4
Checks if the elements in each lane of a are not NaN.
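The floating-point comparisons come in plain and negated forms (cmp_ge versus cmp_not_lt, and so on). Assuming they map to the usual ordered/unordered AVX compare predicates, the two forms differ exactly on NaN lanes: a plain comparison is false in any lane where either operand is NaN, while its negated counterpart is true there, and is_nan/is_not_nan test for NaN directly. A hedged sketch of the distinction:

// Illustrative only; the ordered/unordered split described above is an assumption.
fn nan_behaviour(simd: V3, a: f32x8, b: f32x8) -> (m32x8, m32x8, m32x8) {
    let ge = simd.cmp_ge_f32x8(a, b);         // false wherever a or b is NaN (assumed)
    let not_lt = simd.cmp_not_lt_f32x8(a, b); // true wherever a or b is NaN (assumed)
    let a_is_nan = simd.is_nan_f32x8(a);      // explicit NaN test on a's lanes
    (ge, not_lt, a_is_nan)
}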
pub fn select_const_u32x8<const MASK8: i32>( self, if_true: u32x8, if_false: u32x8 ) -> u32x8
Combines if_true and if_false, selecting elements from if_true if the corresponding bit in the mask is set, otherwise selecting elements from if_false.
pub fn select_const_i32x8<const MASK8: i32>( self, if_true: i32x8, if_false: i32x8 ) -> i32x8
Combines if_true and if_false, selecting elements from if_true if the corresponding bit in the mask is set, otherwise selecting elements from if_false.
pub fn select_const_f32x8<const MASK8: i32>( self, if_true: f32x8, if_false: f32x8 ) -> f32x8
Combines if_true and if_false, selecting elements from if_true if the corresponding bit in the mask is set, otherwise selecting elements from if_false.
pub fn select_const_u64x4<const MASK4: i32>( self, if_true: u64x4, if_false: u64x4 ) -> u64x4
Combines if_true and if_false, selecting elements from if_true if the corresponding bit in the mask is set, otherwise selecting elements from if_false.
pub fn select_const_i64x4<const MASK4: i32>( self, if_true: i64x4, if_false: i64x4 ) -> i64x4
Combines if_true and if_false, selecting elements from if_true if the corresponding bit in the mask is set, otherwise selecting elements from if_false.
pub fn select_const_f64x4<const MASK4: i32>( self, if_true: f64x4, if_false: f64x4 ) -> f64x4
Combines if_true and if_false, selecting elements from if_true if the corresponding bit in the mask is set, otherwise selecting elements from if_false.
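The select_const_* functions take the blend pattern as a const generic (MASK8 for eight-lane vectors, MASK4 for four-lane ones), so the lane choice is fixed at compile time. A sketch under the assumption that bit i of the mask controls lane i, which is how the immediate-operand blend instructions these presumably wrap behave:

// Hypothetical helper: take even-indexed lanes from `a`, odd-indexed lanes from `b`.
// The bit-to-lane mapping (bit i selects lane i from `if_true`) is an assumption.
fn take_even_from_a(simd: V3, a: f32x8, b: f32x8) -> f32x8 {
    simd.select_const_f32x8::<0b0101_0101>(a, b)
}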
pub fn select_u8x32(self, mask: m8x32, if_true: u8x32, if_false: u8x32) -> u8x32
Combines if_true and if_false, selecting elements from if_true if the corresponding lane in mask is set, otherwise selecting elements from if_false.
pub fn select_i8x32(self, mask: m8x32, if_true: i8x32, if_false: i8x32) -> i8x32
Combines if_true and if_false, selecting elements from if_true if the corresponding lane in mask is set, otherwise selecting elements from if_false.
pub fn select_u16x16( self, mask: m16x16, if_true: u16x16, if_false: u16x16 ) -> u16x16
Combines if_true and if_false, selecting elements from if_true if the corresponding lane in mask is set, otherwise selecting elements from if_false.
pub fn select_i16x16( self, mask: m16x16, if_true: i16x16, if_false: i16x16 ) -> i16x16
Combines if_true and if_false, selecting elements from if_true if the corresponding lane in mask is set, otherwise selecting elements from if_false.
pub fn select_u32x8(self, mask: m32x8, if_true: u32x8, if_false: u32x8) -> u32x8
Combines if_true and if_false, selecting elements from if_true if the corresponding lane in mask is set, otherwise selecting elements from if_false.
pub fn select_i32x8(self, mask: m32x8, if_true: i32x8, if_false: i32x8) -> i32x8
Combines if_true and if_false, selecting elements from if_true if the corresponding lane in mask is set, otherwise selecting elements from if_false.
pub fn select_f32x8(self, mask: m32x8, if_true: f32x8, if_false: f32x8) -> f32x8
Combines if_true and if_false, selecting elements from if_true if the corresponding lane in mask is set, otherwise selecting elements from if_false.
pub fn select_u64x4(self, mask: m64x4, if_true: u64x4, if_false: u64x4) -> u64x4
Combines if_true and if_false, selecting elements from if_true if the corresponding lane in mask is set, otherwise selecting elements from if_false.
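Unlike the const variants, these select functions take the mask as a runtime value, typically the result of a comparison or NaN check. For example, a sketch that replaces NaN lanes with 0.0, using the Simd trait's f32s_splat for the constant (Self::f32s is f32x8 for V3; the trait's import path is an assumption):

use pulp::Simd; // assumed path for the Simd trait implemented below

// Replace every NaN lane of `x` with 0.0.
fn zero_out_nans(simd: V3, x: f32x8) -> f32x8 {
    let nan_lanes = simd.is_nan_f32x8(x);
    simd.select_f32x8(nan_lanes, simd.f32s_splat(0.0), x)
}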
Trait Implementations§
impl Simd for V3
unsafe fn u32s_mask_load_ptr( self, mask: Self::m32s, ptr: *const u32, or: Self::u32s ) -> Self::u32s
§Safety
See the trait-level safety documentation.
unsafe fn c32s_mask_load_ptr( self, mask: Self::m32s, ptr: *const c32, or: Self::c32s ) -> Self::c32s
§Safety
See the trait-level safety documentation.
unsafe fn u64s_mask_load_ptr( self, mask: Self::m64s, ptr: *const u64, or: Self::u64s ) -> Self::u64s
§Safety
See the trait-level safety documentation.
unsafe fn c64s_mask_load_ptr( self, mask: Self::m64s, ptr: *const c64, or: Self::c64s ) -> Self::c64s
§Safety
See the trait-level safety documentation.
unsafe fn u32s_mask_store_ptr( self, mask: Self::m32s, ptr: *mut u32, values: Self::u32s )
§Safety
See the trait-level safety documentation.
unsafe fn c32s_mask_store_ptr( self, mask: Self::m32s, ptr: *mut c32, values: Self::c32s )
§Safety
See the trait-level safety documentation.
unsafe fn u64s_mask_store_ptr( self, mask: Self::m64s, ptr: *mut u64, values: Self::u64s )
§Safety
See the trait-level safety documentation.
unsafe fn c64s_mask_store_ptr( self, mask: Self::m64s, ptr: *mut c64, values: Self::c64s )
§Safety
See the trait-level safety documentation.
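These masked loads and stores touch memory only in lanes whose mask bit is set; for a load, masked-off lanes are filled from the or argument instead of being read. That is the usual contract of the underlying masked-move instructions and is assumed here; the trait-level safety documentation is authoritative. A hedged sketch:

use pulp::Simd; // assumed path

// Load up to 8 u32 lanes, reading memory only where `mask` is set and
// substituting 0 in the remaining lanes. The caller must uphold the
// trait-level safety rules (masked-on lanes must point at readable memory).
unsafe fn masked_load_or_zero(simd: V3, mask: m32x8, ptr: *const u32) -> u32x8 {
    simd.u32s_mask_load_ptr(mask, ptr, simd.splat_u32x8(0))
}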
type m32s = m32x8
type f32s = f32x8
type i32s = i32x8
type u32s = u32x8
type m64s = m64x4
type f64s = f64x4
type i64s = i64x4
type u64s = u64x4
fn m32s_not(self, a: Self::m32s) -> Self::m32s
fn m32s_and(self, a: Self::m32s, b: Self::m32s) -> Self::m32s
fn m32s_or(self, a: Self::m32s, b: Self::m32s) -> Self::m32s
fn m32s_xor(self, a: Self::m32s, b: Self::m32s) -> Self::m32s
fn m64s_not(self, a: Self::m64s) -> Self::m64s
fn m64s_and(self, a: Self::m64s, b: Self::m64s) -> Self::m64s
fn m64s_or(self, a: Self::m64s, b: Self::m64s) -> Self::m64s
fn m64s_xor(self, a: Self::m64s, b: Self::m64s) -> Self::m64s
fn u32s_not(self, a: Self::u32s) -> Self::u32s
fn u32s_and(self, a: Self::u32s, b: Self::u32s) -> Self::u32s
fn u32s_or(self, a: Self::u32s, b: Self::u32s) -> Self::u32s
fn u32s_xor(self, a: Self::u32s, b: Self::u32s) -> Self::u32s
fn u64s_not(self, a: Self::u64s) -> Self::u64s
fn u64s_and(self, a: Self::u64s, b: Self::u64s) -> Self::u64s
fn u64s_or(self, a: Self::u64s, b: Self::u64s) -> Self::u64s
fn u64s_xor(self, a: Self::u64s, b: Self::u64s) -> Self::u64s
fn f32s_splat(self, value: f32) -> Self::f32s
fn f32s_add(self, a: Self::f32s, b: Self::f32s) -> Self::f32s
fn f32s_sub(self, a: Self::f32s, b: Self::f32s) -> Self::f32s
fn f32s_mul(self, a: Self::f32s, b: Self::f32s) -> Self::f32s
fn f32s_div(self, a: Self::f32s, b: Self::f32s) -> Self::f32s
fn f32s_equal(self, a: Self::f32s, b: Self::f32s) -> Self::m32s
fn f32s_less_than(self, a: Self::f32s, b: Self::f32s) -> Self::m32s
fn f32s_less_than_or_equal(self, a: Self::f32s, b: Self::f32s) -> Self::m32s
fn f64s_splat(self, value: f64) -> Self::f64s
fn f64s_add(self, a: Self::f64s, b: Self::f64s) -> Self::f64s
fn f64s_sub(self, a: Self::f64s, b: Self::f64s) -> Self::f64s
fn f64s_mul(self, a: Self::f64s, b: Self::f64s) -> Self::f64s
fn f64s_div(self, a: Self::f64s, b: Self::f64s) -> Self::f64s
fn f64s_equal(self, a: Self::f64s, b: Self::f64s) -> Self::m64s
fn f64s_less_than(self, a: Self::f64s, b: Self::f64s) -> Self::m64s
fn f64s_less_than_or_equal(self, a: Self::f64s, b: Self::f64s) -> Self::m64s
fn m32s_select_u32s( self, mask: Self::m32s, if_true: Self::u32s, if_false: Self::u32s ) -> Self::u32s
fn m64s_select_u64s( self, mask: Self::m64s, if_true: Self::u64s, if_false: Self::u64s ) -> Self::u64s
fn f32s_min(self, a: Self::f32s, b: Self::f32s) -> Self::f32s
fn f32s_max(self, a: Self::f32s, b: Self::f32s) -> Self::f32s
fn f64s_min(self, a: Self::f64s, b: Self::f64s) -> Self::f64s
fn f64s_max(self, a: Self::f64s, b: Self::f64s) -> Self::f64s
fn u32s_splat(self, value: u32) -> Self::u32s
fn u64s_splat(self, value: u64) -> Self::u64s
fn u32s_add(self, a: Self::u32s, b: Self::u32s) -> Self::u32s
fn u32s_sub(self, a: Self::u32s, b: Self::u32s) -> Self::u32s
fn u64s_add(self, a: Self::u64s, b: Self::u64s) -> Self::u64s
fn u64s_sub(self, a: Self::u64s, b: Self::u64s) -> Self::u64s
fn f64s_mul_add_e( self, a: Self::f64s, b: Self::f64s, c: Self::f64s ) -> Self::f64s
fn f64_scalar_mul_add_e(self, a: f64, b: f64, c: f64) -> f64
fn f32s_mul_add_e( self, a: Self::f32s, b: Self::f32s, c: Self::f32s ) -> Self::f32s
fn f32_scalar_mul_add_e(self, a: f32, b: f32, c: f32) -> f32
fn vectorize<Op: WithSimd>(self, op: Op) -> Op::Output
fn f32s_reduce_sum(self, a: Self::f32s) -> f32
fn f32s_reduce_product(self, a: Self::f32s) -> f32
fn f32s_reduce_min(self, a: Self::f32s) -> f32
fn f32s_reduce_max(self, a: Self::f32s) -> f32
fn f64s_reduce_sum(self, a: Self::f64s) -> f64
fn f64s_reduce_product(self, a: Self::f64s) -> f64
fn f64s_reduce_min(self, a: Self::f64s) -> f64
fn f64s_reduce_max(self, a: Self::f64s) -> f64
type c32s = f32x8
type c64s = f64x4
fn c32s_splat(self, value: c32) -> Self::c32s
fn c32s_add(self, a: Self::c32s, b: Self::c32s) -> Self::c32s
fn c32s_sub(self, a: Self::c32s, b: Self::c32s) -> Self::c32s
fn c32s_mul(self, a: Self::c32s, b: Self::c32s) -> Self::c32s
fn c32_scalar_mul(self, a: c32, b: c32) -> c32
fn c32_scalar_mul_add(self, a: c32, b: c32, c: c32) -> c32
fn c32_scalar_conj_mul(self, a: c32, b: c32) -> c32
fn c32_scalar_conj_mul_add(self, a: c32, b: c32, c: c32) -> c32
fn c64_scalar_mul(self, a: c64, b: c64) -> c64
fn c64_scalar_mul_add(self, a: c64, b: c64, c: c64) -> c64
fn c64_scalar_conj_mul(self, a: c64, b: c64) -> c64
fn c64_scalar_conj_mul_add(self, a: c64, b: c64, c: c64) -> c64
fn f32s_mul_add(self, a: Self::f32s, b: Self::f32s, c: Self::f32s) -> Self::f32s
fn f64s_mul_add(self, a: Self::f64s, b: Self::f64s, c: Self::f64s) -> Self::f64s
fn c64s_splat(self, value: c64) -> Self::c64s
fn c64s_add(self, a: Self::c64s, b: Self::c64s) -> Self::c64s
fn c64s_sub(self, a: Self::c64s, b: Self::c64s) -> Self::c64s
fn c64s_mul(self, a: Self::c64s, b: Self::c64s) -> Self::c64s
fn c32s_abs2(self, a: Self::c32s) -> Self::c32s
fn c64s_abs2(self, a: Self::c64s) -> Self::c64s
fn u32s_partial_load(self, slice: &[u32]) -> Self::u32s
fn u32s_partial_store(self, slice: &mut [u32], values: Self::u32s)
fn u64s_partial_load(self, slice: &[u64]) -> Self::u64s
fn u64s_partial_store(self, slice: &mut [u64], values: Self::u64s)
fn c64s_partial_load(self, slice: &[c64]) -> Self::c64s
fn c64s_partial_store(self, slice: &mut [c64], values: Self::c64s)
fn u32s_partial_load_last(self, slice: &[u32]) -> Self::u32s
fn u32s_partial_store_last(self, slice: &mut [u32], values: Self::u32s)
fn u64s_partial_load_last(self, slice: &[u64]) -> Self::u64s
fn u64s_partial_store_last(self, slice: &mut [u64], values: Self::u64s)
fn c64s_partial_load_last(self, slice: &[c64]) -> Self::c64s
fn c64s_partial_store_last(self, slice: &mut [c64], values: Self::c64s)
fn c32s_conj(self, a: Self::c32s) -> Self::c32s
fn c32s_conj_mul(self, a: Self::c32s, b: Self::c32s) -> Self::c32s
fn c32s_mul_add(self, a: Self::c32s, b: Self::c32s, c: Self::c32s) -> Self::c32s
fn c32s_conj_mul_add( self, a: Self::c32s, b: Self::c32s, c: Self::c32s ) -> Self::c32s
fn c64s_conj(self, a: Self::c64s) -> Self::c64s
fn c64s_conj_mul(self, a: Self::c64s, b: Self::c64s) -> Self::c64s
fn c64s_mul_add(self, a: Self::c64s, b: Self::c64s, c: Self::c64s) -> Self::c64s
fn c64s_conj_mul_add( self, a: Self::c64s, b: Self::c64s, c: Self::c64s ) -> Self::c64s
fn c32s_neg(self, a: Self::c32s) -> Self::c32s
fn c32s_reduce_sum(self, a: Self::c32s) -> c32
fn c64s_neg(self, a: Self::c64s) -> Self::c64s
fn c64s_reduce_sum(self, a: Self::c64s) -> c64
fn u32s_wrapping_dyn_shl(self, a: Self::u32s, amount: Self::u32s) -> Self::u32s
fn u32s_wrapping_dyn_shr(self, a: Self::u32s, amount: Self::u32s) -> Self::u32s
fn u32s_widening_mul( self, a: Self::u32s, b: Self::u32s ) -> (Self::u32s, Self::u32s)
fn u32s_less_than(self, a: Self::u32s, b: Self::u32s) -> Self::m32s
fn u32s_greater_than(self, a: Self::u32s, b: Self::u32s) -> Self::m32s
fn u32s_less_than_or_equal(self, a: Self::u32s, b: Self::u32s) -> Self::m32s
fn u32s_greater_than_or_equal(self, a: Self::u32s, b: Self::u32s) -> Self::m32s
fn u64s_less_than(self, a: Self::u64s, b: Self::u64s) -> Self::m64s
fn u64s_greater_than(self, a: Self::u64s, b: Self::u64s) -> Self::m64s
fn u64s_less_than_or_equal(self, a: Self::u64s, b: Self::u64s) -> Self::m64s
fn u64s_greater_than_or_equal(self, a: Self::u64s, b: Self::u64s) -> Self::m64s
fn u32s_rotate_right(self, a: Self::u32s, amount: usize) -> Self::u32s
fn c32s_rotate_right(self, a: Self::c32s, amount: usize) -> Self::c32s
fn u64s_rotate_right(self, a: Self::u64s, amount: usize) -> Self::u64s
fn c64s_rotate_right(self, a: Self::c64s, amount: usize) -> Self::c64s
fn c32s_swap_re_im(self, a: Self::c32s) -> Self::c32s
fn c64s_swap_re_im(self, a: Self::c64s) -> Self::c64s
fn f32s_as_simd(slice: &[f32]) -> (&[Self::f32s], &[f32])
fn f32s_as_mut_simd(slice: &mut [f32]) -> (&mut [Self::f32s], &mut [f32])
fn f32s_as_uninit_mut_simd( slice: &mut [MaybeUninit<f32>] ) -> (&mut [MaybeUninit<Self::f32s>], &mut [MaybeUninit<f32>])
fn c32s_as_simd(slice: &[c32]) -> (&[Self::c32s], &[c32])
fn c32s_as_mut_simd(slice: &mut [c32]) -> (&mut [Self::c32s], &mut [c32])
fn c32s_as_uninit_mut_simd( slice: &mut [MaybeUninit<c32>] ) -> (&mut [MaybeUninit<Self::c32s>], &mut [MaybeUninit<c32>])
fn i32s_as_simd(slice: &[i32]) -> (&[Self::i32s], &[i32])
fn i32s_as_mut_simd(slice: &mut [i32]) -> (&mut [Self::i32s], &mut [i32])
fn i32s_as_uninit_mut_simd( slice: &mut [MaybeUninit<i32>] ) -> (&mut [MaybeUninit<Self::i32s>], &mut [MaybeUninit<i32>])
fn u32s_as_simd(slice: &[u32]) -> (&[Self::u32s], &[u32])
fn u32s_as_mut_simd(slice: &mut [u32]) -> (&mut [Self::u32s], &mut [u32])
fn u32s_as_uninit_mut_simd( slice: &mut [MaybeUninit<u32>] ) -> (&mut [MaybeUninit<Self::u32s>], &mut [MaybeUninit<u32>])
fn f64s_as_simd(slice: &[f64]) -> (&[Self::f64s], &[f64])
fn f64s_as_mut_simd(slice: &mut [f64]) -> (&mut [Self::f64s], &mut [f64])
fn f64s_as_uninit_mut_simd( slice: &mut [MaybeUninit<f64>] ) -> (&mut [MaybeUninit<Self::f64s>], &mut [MaybeUninit<f64>])
fn c64s_as_simd(slice: &[c64]) -> (&[Self::c64s], &[c64])
fn c64s_as_mut_simd(slice: &mut [c64]) -> (&mut [Self::c64s], &mut [c64])
fn c64s_as_uninit_mut_simd( slice: &mut [MaybeUninit<c64>] ) -> (&mut [MaybeUninit<Self::c64s>], &mut [MaybeUninit<c64>])
fn i64s_as_simd(slice: &[i64]) -> (&[Self::i64s], &[i64])
fn i64s_as_mut_simd(slice: &mut [i64]) -> (&mut [Self::i64s], &mut [i64])
fn i64s_as_uninit_mut_simd( slice: &mut [MaybeUninit<i64>] ) -> (&mut [MaybeUninit<Self::i64s>], &mut [MaybeUninit<i64>])
fn u64s_as_simd(slice: &[u64]) -> (&[Self::u64s], &[u64])
fn u64s_as_mut_simd(slice: &mut [u64]) -> (&mut [Self::u64s], &mut [u64])
fn u64s_as_uninit_mut_simd( slice: &mut [MaybeUninit<u64>] ) -> (&mut [MaybeUninit<Self::u64s>], &mut [MaybeUninit<u64>])
fn i32s_align_offset(self, ptr: *const i32, len: usize) -> Offset<Self::m32s>
fn f32s_align_offset(self, ptr: *const f32, len: usize) -> Offset<Self::m32s>
fn u32s_align_offset(self, ptr: *const u32, len: usize) -> Offset<Self::m32s>
fn c32s_align_offset(self, ptr: *const c32, len: usize) -> Offset<Self::m32s>
fn i64s_align_offset(self, ptr: *const i64, len: usize) -> Offset<Self::m64s>
fn f64s_align_offset(self, ptr: *const f64, len: usize) -> Offset<Self::m64s>
fn u64s_align_offset(self, ptr: *const u64, len: usize) -> Offset<Self::m64s>
fn c64s_align_offset(self, ptr: *const c64, len: usize) -> Offset<Self::m64s>
fn i32s_as_aligned_simd( self, slice: &[i32], offset: Offset<Self::m32s> ) -> (Prefix<'_, i32, Self, Self::m32s>, &[Self::i32s], Suffix<'_, i32, Self, Self::m32s>)
fn f32s_as_aligned_simd( self, slice: &[f32], offset: Offset<Self::m32s> ) -> (Prefix<'_, f32, Self, Self::m32s>, &[Self::f32s], Suffix<'_, f32, Self, Self::m32s>)
fn u32s_as_aligned_simd( self, slice: &[u32], offset: Offset<Self::m32s> ) -> (Prefix<'_, u32, Self, Self::m32s>, &[Self::u32s], Suffix<'_, u32, Self, Self::m32s>)
fn c32s_as_aligned_simd( self, slice: &[c32], offset: Offset<Self::m32s> ) -> (Prefix<'_, c32, Self, Self::m32s>, &[Self::c32s], Suffix<'_, c32, Self, Self::m32s>)
fn i64s_as_aligned_simd( self, slice: &[i64], offset: Offset<Self::m64s> ) -> (Prefix<'_, i64, Self, Self::m64s>, &[Self::i64s], Suffix<'_, i64, Self, Self::m64s>)
fn f64s_as_aligned_simd( self, slice: &[f64], offset: Offset<Self::m64s> ) -> (Prefix<'_, f64, Self, Self::m64s>, &[Self::f64s], Suffix<'_, f64, Self, Self::m64s>)
fn u64s_as_aligned_simd( self, slice: &[u64], offset: Offset<Self::m64s> ) -> (Prefix<'_, u64, Self, Self::m64s>, &[Self::u64s], Suffix<'_, u64, Self, Self::m64s>)
fn c64s_as_aligned_simd( self, slice: &[c64], offset: Offset<Self::m64s> ) -> (Prefix<'_, c64, Self, Self::m64s>, &[Self::c64s], Suffix<'_, c64, Self, Self::m64s>)
fn i32s_as_aligned_mut_simd( self, slice: &mut [i32], offset: Offset<Self::m32s> ) -> (PrefixMut<'_, i32, Self, Self::m32s>, &mut [Self::i32s], SuffixMut<'_, i32, Self, Self::m32s>)
fn f32s_as_aligned_mut_simd( self, slice: &mut [f32], offset: Offset<Self::m32s> ) -> (PrefixMut<'_, f32, Self, Self::m32s>, &mut [Self::f32s], SuffixMut<'_, f32, Self, Self::m32s>)
fn u32s_as_aligned_mut_simd( self, slice: &mut [u32], offset: Offset<Self::m32s> ) -> (PrefixMut<'_, u32, Self, Self::m32s>, &mut [Self::u32s], SuffixMut<'_, u32, Self, Self::m32s>)
fn c32s_as_aligned_mut_simd( self, slice: &mut [c32], offset: Offset<Self::m32s> ) -> (PrefixMut<'_, c32, Self, Self::m32s>, &mut [Self::c32s], SuffixMut<'_, c32, Self, Self::m32s>)
fn i64s_as_aligned_mut_simd( self, slice: &mut [i64], offset: Offset<Self::m64s> ) -> (PrefixMut<'_, i64, Self, Self::m64s>, &mut [Self::i64s], SuffixMut<'_, i64, Self, Self::m64s>)
fn f64s_as_aligned_mut_simd( self, slice: &mut [f64], offset: Offset<Self::m64s> ) -> (PrefixMut<'_, f64, Self, Self::m64s>, &mut [Self::f64s], SuffixMut<'_, f64, Self, Self::m64s>)
fn u64s_as_aligned_mut_simd( self, slice: &mut [u64], offset: Offset<Self::m64s> ) -> (PrefixMut<'_, u64, Self, Self::m64s>, &mut [Self::u64s], SuffixMut<'_, u64, Self, Self::m64s>)
fn c64s_as_aligned_mut_simd( self, slice: &mut [c64], offset: Offset<Self::m64s> ) -> (PrefixMut<'_, c64, Self, Self::m64s>, &mut [Self::c64s], SuffixMut<'_, c64, Self, Self::m64s>)
fn i32s_as_aligned_uninit_mut_simd( self, slice: &mut [MaybeUninit<i32>], offset: Offset<Self::m32s> ) -> (PrefixMut<'_, MaybeUninit<i32>, Self, Self::m32s>, &mut [MaybeUninit<Self::i32s>], SuffixMut<'_, MaybeUninit<i32>, Self, Self::m32s>)
fn f32s_as_aligned_uninit_mut_simd( self, slice: &mut [MaybeUninit<f32>], offset: Offset<Self::m32s> ) -> (PrefixMut<'_, MaybeUninit<f32>, Self, Self::m32s>, &mut [MaybeUninit<Self::f32s>], SuffixMut<'_, MaybeUninit<f32>, Self, Self::m32s>)
fn u32s_as_aligned_uninit_mut_simd( self, slice: &mut [MaybeUninit<u32>], offset: Offset<Self::m32s> ) -> (PrefixMut<'_, MaybeUninit<u32>, Self, Self::m32s>, &mut [MaybeUninit<Self::u32s>], SuffixMut<'_, MaybeUninit<u32>, Self, Self::m32s>)
fn c32s_as_aligned_uninit_mut_simd( self, slice: &mut [MaybeUninit<c32>], offset: Offset<Self::m32s> ) -> (PrefixMut<'_, MaybeUninit<c32>, Self, Self::m32s>, &mut [MaybeUninit<Self::c32s>], SuffixMut<'_, MaybeUninit<c32>, Self, Self::m32s>)
fn i64s_as_aligned_uninit_mut_simd( self, slice: &mut [MaybeUninit<i64>], offset: Offset<Self::m64s> ) -> (PrefixMut<'_, MaybeUninit<i64>, Self, Self::m64s>, &mut [MaybeUninit<Self::i64s>], SuffixMut<'_, MaybeUninit<i64>, Self, Self::m64s>)
fn f64s_as_aligned_uninit_mut_simd( self, slice: &mut [MaybeUninit<f64>], offset: Offset<Self::m64s> ) -> (PrefixMut<'_, MaybeUninit<f64>, Self, Self::m64s>, &mut [MaybeUninit<Self::f64s>], SuffixMut<'_, MaybeUninit<f64>, Self, Self::m64s>)
fn u64s_as_aligned_uninit_mut_simd( self, slice: &mut [MaybeUninit<u64>], offset: Offset<Self::m64s> ) -> (PrefixMut<'_, MaybeUninit<u64>, Self, Self::m64s>, &mut [MaybeUninit<Self::u64s>], SuffixMut<'_, MaybeUninit<u64>, Self, Self::m64s>)
fn c64s_as_aligned_uninit_mut_simd( self, slice: &mut [MaybeUninit<c64>], offset: Offset<Self::m64s> ) -> (PrefixMut<'_, MaybeUninit<c64>, Self, Self::m64s>, &mut [MaybeUninit<Self::c64s>], SuffixMut<'_, MaybeUninit<c64>, Self, Self::m64s>)
unsafe fn i32s_mask_load_ptr( self, mask: Self::m32s, ptr: *const i32, or: Self::i32s ) -> Self::i32s
unsafe fn f32s_mask_load_ptr( self, mask: Self::m32s, ptr: *const f32, or: Self::f32s ) -> Self::f32s
unsafe fn i32s_mask_store_ptr( self, mask: Self::m32s, ptr: *mut i32, values: Self::i32s )
unsafe fn f32s_mask_store_ptr( self, mask: Self::m32s, ptr: *mut f32, values: Self::f32s )
unsafe fn i64s_mask_load_ptr( self, mask: Self::m64s, ptr: *const i64, or: Self::i64s ) -> Self::i64s
unsafe fn f64s_mask_load_ptr( self, mask: Self::m64s, ptr: *const f64, or: Self::f64s ) -> Self::f64s
unsafe fn i64s_mask_store_ptr( self, mask: Self::m64s, ptr: *mut i64, values: Self::i64s )
unsafe fn f64s_mask_store_ptr( self, mask: Self::m64s, ptr: *mut f64, values: Self::f64s )
fn i32s_partial_load(self, slice: &[i32]) -> Self::i32s
fn i32s_partial_store(self, slice: &mut [i32], values: Self::i32s)
fn i64s_partial_load(self, slice: &[i64]) -> Self::i64s
fn i64s_partial_store(self, slice: &mut [i64], values: Self::i64s)
fn f32s_partial_load(self, slice: &[f32]) -> Self::f32s
fn f32s_partial_store(self, slice: &mut [f32], values: Self::f32s)
fn f64s_partial_load(self, slice: &[f64]) -> Self::f64s
fn f64s_partial_store(self, slice: &mut [f64], values: Self::f64s)
fn c32s_partial_load(self, slice: &[c32]) -> Self::c32s
fn c32s_partial_store(self, slice: &mut [c32], values: Self::c32s)
fn i32s_partial_load_last(self, slice: &[i32]) -> Self::i32s
fn i32s_partial_store_last(self, slice: &mut [i32], values: Self::i32s)
fn i64s_partial_load_last(self, slice: &[i64]) -> Self::i64s
fn i64s_partial_store_last(self, slice: &mut [i64], values: Self::i64s)
fn f32s_partial_load_last(self, slice: &[f32]) -> Self::f32s
fn f32s_partial_store_last(self, slice: &mut [f32], values: Self::f32s)
fn f64s_partial_load_last(self, slice: &[f64]) -> Self::f64s
fn f64s_partial_store_last(self, slice: &mut [f64], values: Self::f64s)
fn c32s_partial_load_last(self, slice: &[c32]) -> Self::c32s
fn c32s_partial_store_last(self, slice: &mut [c32], values: Self::c32s)
fn i32s_not(self, a: Self::i32s) -> Self::i32s
fn i32s_and(self, a: Self::i32s, b: Self::i32s) -> Self::i32s
fn i32s_or(self, a: Self::i32s, b: Self::i32s) -> Self::i32s
fn i32s_xor(self, a: Self::i32s, b: Self::i32s) -> Self::i32s
fn i64s_not(self, a: Self::i64s) -> Self::i64s
fn i64s_and(self, a: Self::i64s, b: Self::i64s) -> Self::i64s
fn i64s_or(self, a: Self::i64s, b: Self::i64s) -> Self::i64s
fn i64s_xor(self, a: Self::i64s, b: Self::i64s) -> Self::i64s
fn f32s_not(self, a: Self::f32s) -> Self::f32s
fn f32s_and(self, a: Self::f32s, b: Self::f32s) -> Self::f32s
fn f32s_or(self, a: Self::f32s, b: Self::f32s) -> Self::f32s
fn f32s_xor(self, a: Self::f32s, b: Self::f32s) -> Self::f32s
fn f64s_not(self, a: Self::f64s) -> Self::f64s
fn f64s_and(self, a: Self::f64s, b: Self::f64s) -> Self::f64s
fn f64s_or(self, a: Self::f64s, b: Self::f64s) -> Self::f64s
fn f64s_xor(self, a: Self::f64s, b: Self::f64s) -> Self::f64s
fn m32s_select_i32s( self, mask: Self::m32s, if_true: Self::i32s, if_false: Self::i32s ) -> Self::i32s
fn m32s_select_f32s( self, mask: Self::m32s, if_true: Self::f32s, if_false: Self::f32s ) -> Self::f32s
fn m64s_select_i64s( self, mask: Self::m64s, if_true: Self::i64s, if_false: Self::i64s ) -> Self::i64s
fn m64s_select_f64s( self, mask: Self::m64s, if_true: Self::f64s, if_false: Self::f64s ) -> Self::f64s
fn i32s_splat(self, value: i32) -> Self::i32s
fn i32s_add(self, a: Self::i32s, b: Self::i32s) -> Self::i32s
fn i32s_sub(self, a: Self::i32s, b: Self::i32s) -> Self::i32s
fn i64s_splat(self, value: i64) -> Self::i64s
fn i64s_add(self, a: Self::i64s, b: Self::i64s) -> Self::i64s
fn i64s_sub(self, a: Self::i64s, b: Self::i64s) -> Self::i64s
fn f32s_abs(self, a: Self::f32s) -> Self::f32s
fn f32s_neg(self, a: Self::f32s) -> Self::f32s
fn f32_scalar_mul_add(self, a: f32, b: f32, c: f32) -> f32
fn f32s_greater_than(self, a: Self::f32s, b: Self::f32s) -> Self::m32s
fn f32s_greater_than_or_equal(self, a: Self::f32s, b: Self::f32s) -> Self::m32s
fn c32_scalar_mul_e(self, a: c32, b: c32) -> c32
fn c32_scalar_conj_mul_e(self, a: c32, b: c32) -> c32
fn c32s_mul_add_e( self, a: Self::c32s, b: Self::c32s, c: Self::c32s ) -> Self::c32s
a * b + c
fn c32_scalar_mul_add_e(self, a: c32, b: c32, c: c32) -> c32
fn c32s_conj_mul_add_e( self, a: Self::c32s, b: Self::c32s, c: Self::c32s ) -> Self::c32s
conj(a) * b + c
fn c32_scalar_conj_mul_add_e(self, a: c32, b: c32, c: c32) -> c32
fn f64s_abs(self, a: Self::f64s) -> Self::f64s
fn f64s_neg(self, a: Self::f64s) -> Self::f64s
fn f64_scalar_mul_add(self, a: f64, b: f64, c: f64) -> f64
fn f64s_greater_than(self, a: Self::f64s, b: Self::f64s) -> Self::m64s
fn f64s_greater_than_or_equal(self, a: Self::f64s, b: Self::f64s) -> Self::m64s
fn c64_scalar_mul_e(self, a: c64, b: c64) -> c64
fn c64_scalar_conj_mul_e(self, a: c64, b: c64) -> c64
fn c64s_mul_add_e( self, a: Self::c64s, b: Self::c64s, c: Self::c64s ) -> Self::c64s
a * b + c
fn c64_scalar_mul_add_e(self, a: c64, b: c64, c: c64) -> c64
fn c64s_conj_mul_add_e( self, a: Self::c64s, b: Self::c64s, c: Self::c64s ) -> Self::c64s
conj(a) * b + c
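Because V3 implements Simd, code can be written once against the trait and dispatched through vectorize, instead of being tied to V3's inherent methods. The sketch below sums an f32 slice that way. The exact shape of WithSimd (an Output associated type and a with_simd method), the Copy bound on the vector associated types, the zero-filling behaviour of f32s_partial_load, and the import paths are all assumptions rather than guarantees of this page:

use pulp::{Simd, WithSimd}; // assumed paths
use pulp::x86::V3;          // assumed path

struct Sum<'a>(&'a [f32]);

impl WithSimd for Sum<'_> {
    type Output = f32;

    #[inline(always)]
    fn with_simd<S: Simd>(self, simd: S) -> f32 {
        // Split the slice into whole SIMD chunks plus a scalar tail.
        let (chunks, tail) = S::f32s_as_simd(self.0);
        let mut acc = simd.f32s_splat(0.0);
        for &chunk in chunks {
            acc = simd.f32s_add(acc, chunk);
        }
        // Fold in the tail; lanes past the end are assumed to load as zero.
        acc = simd.f32s_add(acc, simd.f32s_partial_load(tail));
        simd.f32s_reduce_sum(acc)
    }
}

fn sum(values: &[f32]) -> f32 {
    match V3::try_new() {
        // Call the trait's vectorize explicitly to distinguish it from the
        // inherent V3::vectorize, which takes a different kind of closure.
        Some(simd) => Simd::vectorize(simd, Sum(values)),
        None => values.iter().sum(),
    }
}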