@@ -12,21 +12,21 @@ where
 macro_rules! impl_ref_ops {
     // binary op
     {
-        impl core::ops::$trait:ident<$rhs:ty> for $type:ty {
+        impl<const $lanes:ident: usize> core::ops::$trait:ident<$rhs:ty> for $type:ty {
             type Output = $output:ty;
 
             $(#[$attrs:meta])*
             fn $fn:ident($self_tok:ident, $rhs_arg:ident: $rhs_arg_ty:ty) -> Self::Output $body:tt
         }
     } => {
-        impl core::ops::$trait<$rhs> for $type {
+        impl<const $lanes: usize> core::ops::$trait<$rhs> for $type {
             type Output = $output;
 
             $(#[$attrs])*
             fn $fn($self_tok, $rhs_arg: $rhs_arg_ty) -> Self::Output $body
         }
 
-        impl core::ops::$trait<&'_ $rhs> for $type {
+        impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for $type {
             type Output = <$type as core::ops::$trait<$rhs>>::Output;
 
             $(#[$attrs])*
@@ -35,7 +35,7 @@ macro_rules! impl_ref_ops {
             }
         }
 
-        impl core::ops::$trait<$rhs> for &'_ $type {
+        impl<const $lanes: usize> core::ops::$trait<$rhs> for &'_ $type {
             type Output = <$type as core::ops::$trait<$rhs>>::Output;
 
             $(#[$attrs])*
@@ -44,7 +44,7 @@ macro_rules! impl_ref_ops {
             }
         }
 
-        impl core::ops::$trait<&'_ $rhs> for &'_ $type {
+        impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for &'_ $type {
             type Output = <$type as core::ops::$trait<$rhs>>::Output;
 
             $(#[$attrs])*
@@ -56,17 +56,17 @@ macro_rules! impl_ref_ops {
 
     // binary assignment op
     {
-        impl core::ops::$trait:ident<$rhs:ty> for $type:ty {
+        impl<const $lanes:ident: usize> core::ops::$trait:ident<$rhs:ty> for $type:ty {
             $(#[$attrs:meta])*
             fn $fn:ident(&mut $self_tok:ident, $rhs_arg:ident: $rhs_arg_ty:ty) $body:tt
         }
     } => {
-        impl core::ops::$trait<$rhs> for $type {
+        impl<const $lanes: usize> core::ops::$trait<$rhs> for $type {
             $(#[$attrs])*
             fn $fn(&mut $self_tok, $rhs_arg: $rhs_arg_ty) $body
         }
 
-        impl core::ops::$trait<&'_ $rhs> for $type {
+        impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for $type {
             $(#[$attrs])*
             fn $fn(&mut $self_tok, $rhs_arg: &$rhs_arg_ty) {
                 core::ops::$trait::$fn($self_tok, *$rhs_arg)
@@ -76,17 +76,17 @@ macro_rules! impl_ref_ops {
 
     // unary op
     {
-        impl core::ops::$trait:ident for $type:ty {
+        impl<const $lanes:ident: usize> core::ops::$trait:ident for $type:ty {
             type Output = $output:ty;
             fn $fn:ident($self_tok:ident) -> Self::Output $body:tt
         }
     } => {
-        impl core::ops::$trait for $type {
+        impl<const $lanes: usize> core::ops::$trait for $type {
             type Output = $output;
             fn $fn($self_tok) -> Self::Output $body
         }
 
-        impl core::ops::$trait for &'_ $type {
+        impl<const $lanes: usize> core::ops::$trait for &'_ $type {
             type Output = <$type as core::ops::$trait>::Output;
             fn $fn($self_tok) -> Self::Output {
                 core::ops::$trait::$fn(*$self_tok)
@@ -95,152 +95,76 @@ macro_rules! impl_op {
     }
 }
-
-/// Implements op traits for masks
-macro_rules! impl_mask_element_ops {
-    { $($mask:ty),* } => {
-        $(
-            impl_ref_ops! {
-                impl core::ops::BitAnd<$mask> for $mask {
-                    type Output = Self;
-                    fn bitand(self, rhs: Self) -> Self::Output {
-                        Self(self.0 & rhs.0)
-                    }
-                }
-            }
-
-            impl_ref_ops! {
-                impl core::ops::BitAndAssign<$mask> for $mask {
-                    fn bitand_assign(&mut self, rhs: Self) {
-                        *self = *self & rhs;
-                    }
-                }
-            }
-
-            impl_ref_ops! {
-                impl core::ops::BitOr<$mask> for $mask {
-                    type Output = Self;
-                    fn bitor(self, rhs: Self) -> Self::Output {
-                        Self(self.0 | rhs.0)
-                    }
-                }
-            }
-
-            impl_ref_ops! {
-                impl core::ops::BitOrAssign<$mask> for $mask {
-                    fn bitor_assign(&mut self, rhs: Self) {
-                        *self = *self | rhs;
-                    }
-                }
-            }
-
-            impl_ref_ops! {
-                impl core::ops::BitXor<$mask> for $mask {
-                    type Output = Self;
-                    fn bitxor(self, rhs: Self) -> Self::Output {
-                        Self(self.0 ^ rhs.0)
-                    }
-                }
-            }
-
-            impl_ref_ops! {
-                impl core::ops::BitXorAssign<$mask> for $mask {
-                    fn bitxor_assign(&mut self, rhs: Self) {
-                        *self = *self ^ rhs;
-                    }
-                }
-            }
-
-            impl_ref_ops! {
-                impl core::ops::Not for $mask {
-                    type Output = Self;
-                    fn not(self) -> Self::Output {
-                        Self(!self.0)
-                    }
-                }
-            }
-        )*
-    }
-}
-
-impl_mask_element_ops! {
-    crate::masks::wide::m8,
-    crate::masks::wide::m16,
-    crate::masks::wide::m32,
-    crate::masks::wide::m64,
-    crate::masks::wide::m128,
-    crate::masks::wide::msize
-}
 
 /// Automatically implements operators over vectors and scalars for a particular vector.
 macro_rules! impl_op {
-    { impl Add for $type:ty, $scalar:ty } => {
+    { impl Add for $type:ident, $scalar:ty } => {
         impl_op! { @binary $type, $scalar, Add::add, AddAssign::add_assign, simd_add }
     };
-    { impl Sub for $type:ty, $scalar:ty } => {
+    { impl Sub for $type:ident, $scalar:ty } => {
         impl_op! { @binary $type, $scalar, Sub::sub, SubAssign::sub_assign, simd_sub }
     };
-    { impl Mul for $type:ty, $scalar:ty } => {
+    { impl Mul for $type:ident, $scalar:ty } => {
         impl_op! { @binary $type, $scalar, Mul::mul, MulAssign::mul_assign, simd_mul }
     };
-    { impl Div for $type:ty, $scalar:ty } => {
+    { impl Div for $type:ident, $scalar:ty } => {
         impl_op! { @binary $type, $scalar, Div::div, DivAssign::div_assign, simd_div }
     };
-    { impl Rem for $type:ty, $scalar:ty } => {
+    { impl Rem for $type:ident, $scalar:ty } => {
         impl_op! { @binary $type, $scalar, Rem::rem, RemAssign::rem_assign, simd_rem }
     };
-    { impl Shl for $type:ty, $scalar:ty } => {
+    { impl Shl for $type:ident, $scalar:ty } => {
         impl_op! { @binary $type, $scalar, Shl::shl, ShlAssign::shl_assign, simd_shl }
     };
-    { impl Shr for $type:ty, $scalar:ty } => {
+    { impl Shr for $type:ident, $scalar:ty } => {
         impl_op! { @binary $type, $scalar, Shr::shr, ShrAssign::shr_assign, simd_shr }
     };
-    { impl BitAnd for $type:ty, $scalar:ty } => {
+    { impl BitAnd for $type:ident, $scalar:ty } => {
         impl_op! { @binary $type, $scalar, BitAnd::bitand, BitAndAssign::bitand_assign, simd_and }
     };
-    { impl BitOr for $type:ty, $scalar:ty } => {
+    { impl BitOr for $type:ident, $scalar:ty } => {
         impl_op! { @binary $type, $scalar, BitOr::bitor, BitOrAssign::bitor_assign, simd_or }
     };
-    { impl BitXor for $type:ty, $scalar:ty } => {
+    { impl BitXor for $type:ident, $scalar:ty } => {
         impl_op! { @binary $type, $scalar, BitXor::bitxor, BitXorAssign::bitxor_assign, simd_xor }
     };
 
-    { impl Not for $type:ty, $scalar:ty } => {
+    { impl Not for $type:ident, $scalar:ty } => {
         impl_ref_ops! {
-            impl core::ops::Not for $type {
+            impl<const LANES: usize> core::ops::Not for crate::$type<LANES> {
                 type Output = Self;
                 fn not(self) -> Self::Output {
-                    self ^ <$type>::splat(!<$scalar>::default())
+                    self ^ Self::splat(!<$scalar>::default())
                 }
            }
        }
     };
 
-    { impl Neg for $type:ty, $scalar:ty } => {
+    { impl Neg for $type:ident, $scalar:ty } => {
         impl_ref_ops! {
-            impl core::ops::Neg for $type {
+            impl<const LANES: usize> core::ops::Neg for crate::$type<LANES> {
                 type Output = Self;
                 fn neg(self) -> Self::Output {
-                    <$type>::splat(0) - self
+                    Self::splat(0) - self
                 }
             }
         }
     };
 
-    { impl Neg for $type:ty, $scalar:ty, @float } => {
+    { impl Neg for $type:ident, $scalar:ty, @float } => {
         impl_ref_ops! {
-            impl core::ops::Neg for $type {
+            impl<const LANES: usize> core::ops::Neg for crate::$type<LANES> {
                 type Output = Self;
                 fn neg(self) -> Self::Output {
                     // FIXME: Replace this with fneg intrinsic once available.
                     // https://github.com/rust-lang/stdsimd/issues/32
-                    Self::from_bits(<$type>::splat(-0.0).to_bits() ^ self.to_bits())
+                    Self::from_bits(Self::splat(-0.0).to_bits() ^ self.to_bits())
                 }
             }
         }
     };
 
-    { impl Index for $type:ty, $scalar:ty } => {
-        impl<I> core::ops::Index<I> for $type
+    { impl Index for $type:ident, $scalar:ty } => {
+        impl<I, const LANES: usize> core::ops::Index<I> for crate::$type<LANES>
         where
             I: core::slice::SliceIndex<[$scalar]>,
         {
@@ -251,7 +175,7 @@ macro_rules! impl_op {
             }
         }
 
-        impl<I> core::ops::IndexMut<I> for $type
+        impl<I, const LANES: usize> core::ops::IndexMut<I> for crate::$type<LANES>
         where
             I: core::slice::SliceIndex<[$scalar]>,
         {
@@ -263,13 +187,13 @@ macro_rules! impl_op {
     };
 
     // generic binary op with assignment when output is `Self`
-    { @binary $type:ty, $scalar:ty, $trait:ident :: $trait_fn:ident, $assign_trait:ident :: $assign_trait_fn:ident, $intrinsic:ident } => {
+    { @binary $type:ident, $scalar:ty, $trait:ident :: $trait_fn:ident, $assign_trait:ident :: $assign_trait_fn:ident, $intrinsic:ident } => {
         impl_ref_ops! {
-            impl core::ops::$trait<$type> for $type {
-                type Output = $type;
+            impl<const LANES: usize> core::ops::$trait<Self> for crate::$type<LANES> {
+                type Output = Self;
 
                 #[inline]
-                fn $trait_fn(self, rhs: $type) -> Self::Output {
+                fn $trait_fn(self, rhs: Self) -> Self::Output {
                     unsafe {
                         crate::intrinsics::$intrinsic(self, rhs)
                     }
@@ -278,31 +202,31 @@ macro_rules! impl_op {
         }
 
         impl_ref_ops! {
-            impl core::ops::$trait<$scalar> for $type {
-                type Output = $type;
+            impl<const LANES: usize> core::ops::$trait<$scalar> for crate::$type<LANES> {
+                type Output = Self;
 
                 #[inline]
                 fn $trait_fn(self, rhs: $scalar) -> Self::Output {
-                    core::ops::$trait::$trait_fn(self, <$type>::splat(rhs))
+                    core::ops::$trait::$trait_fn(self, Self::splat(rhs))
                 }
             }
         }
 
         impl_ref_ops! {
-            impl core::ops::$trait<$type> for $scalar {
-                type Output = $type;
+            impl<const LANES: usize> core::ops::$trait<crate::$type<LANES>> for $scalar {
+                type Output = crate::$type<LANES>;
 
                 #[inline]
-                fn $trait_fn(self, rhs: $type) -> Self::Output {
-                    core::ops::$trait::$trait_fn(<$type>::splat(self), rhs)
+                fn $trait_fn(self, rhs: crate::$type<LANES>) -> Self::Output {
+                    core::ops::$trait::$trait_fn(crate::$type::splat(self), rhs)
                 }
             }
         }
 
         impl_ref_ops! {
-            impl core::ops::$assign_trait<$type> for $type {
+            impl<const LANES: usize> core::ops::$assign_trait<Self> for crate::$type<LANES> {
                 #[inline]
-                fn $assign_trait_fn(&mut self, rhs: $type) {
+                fn $assign_trait_fn(&mut self, rhs: Self) {
                     unsafe {
                         *self = crate::intrinsics::$intrinsic(*self, rhs);
                     }
@@ -311,10 +235,10 @@ macro_rules! impl_op {
         }
 
         impl_ref_ops! {
-            impl core::ops::$assign_trait<$scalar> for $type {
+            impl<const LANES: usize> core::ops::$assign_trait<$scalar> for crate::$type<LANES> {
                 #[inline]
                 fn $assign_trait_fn(&mut self, rhs: $scalar) {
-                    core::ops::$assign_trait::$assign_trait_fn(self, <$type>::splat(rhs));
+                    core::ops::$assign_trait::$assign_trait_fn(self, Self::splat(rhs));
                 }
             }
         }
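
Concretely, one `@binary` invocation fans a single intrinsic out into vector-vector, vector-scalar, and scalar-vector impls plus their assignment forms. The same shape, hand-written for an illustrative one-lane stand-in (`ToyVec` is not a crate type; on a real vector, `splat` fills every lane with the scalar):

#[derive(Copy, Clone, Debug, PartialEq)]
struct ToyVec(u32);

// vector + vector: the one impl that does the arithmetic
impl core::ops::Add<ToyVec> for ToyVec {
    type Output = ToyVec;
    fn add(self, rhs: ToyVec) -> ToyVec {
        ToyVec(self.0.wrapping_add(rhs.0))
    }
}

// vector + scalar: "splat" the scalar, then reuse the vector impl
impl core::ops::Add<u32> for ToyVec {
    type Output = ToyVec;
    fn add(self, rhs: u32) -> ToyVec {
        self + ToyVec(rhs)
    }
}

// scalar + vector: the same trick on the other side
impl core::ops::Add<ToyVec> for u32 {
    type Output = ToyVec;
    fn add(self, rhs: ToyVec) -> ToyVec {
        ToyVec(self) + rhs
    }
}

// the assignment form delegates to the binary form
impl core::ops::AddAssign<ToyVec> for ToyVec {
    fn add_assign(&mut self, rhs: ToyVec) {
        *self = *self + rhs;
    }
}

fn main() {
    let mut v = ToyVec(1);
    v += ToyVec(2);
    assert_eq!(v + 3, ToyVec(6));
    assert_eq!(4 + v, ToyVec(7));
}
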
@@ -323,7 +247,7 @@ macro_rules! impl_op {
 
 /// Implements floating-point operators for the provided types.
 macro_rules! impl_float_ops {
-    { $($scalar:ty => $($vector:ty),*;)* } => {
+    { $($scalar:ty => $($vector:ident),*;)* } => {
         $( // scalar
             $( // vector
                 impl_op! { impl Add for $vector, $scalar }
@@ -340,7 +264,7 @@ macro_rules! impl_float_ops {
 
 /// Implements mask operators for the provided types.
 macro_rules! impl_mask_ops {
-    { $($scalar:ty => $($vector:ty),*;)* } => {
+    { $($scalar:ty => $($vector:ident),*;)* } => {
         $( // scalar
             $( // vector
                 impl_op! { impl BitAnd for $vector, $scalar }
@@ -355,7 +279,7 @@ macro_rules! impl_mask_ops {
 
 /// Implements unsigned integer operators for the provided types.
 macro_rules! impl_unsigned_int_ops {
-    { $($scalar:ty => $($vector:ty),*;)* } => {
+    { $($scalar:ty => $($vector:ident),*;)* } => {
         $( // scalar
             $( // vector
                 impl_op! { impl Add for $vector, $scalar }
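
The `$( // scalar` and `$( // vector` markers above are nested macro_rules repetitions: every vector named on the right of a `=>` is paired with the scalar on its left. A minimal compile-ready sketch of the same matcher shape, with made-up names:

// Expands one statement per (scalar, vector) combination, mirroring the
// `$($scalar:ty => $($vector:ident),*;)*` matcher used by the macros above.
macro_rules! list_pairs {
    { $($scalar:ty => $($vector:ident),*;)* } => {
        $( // scalar
            $( // vector
                println!("{} => {}", stringify!($scalar), stringify!($vector));
            )*
        )*
    };
}

fn main() {
    // Prints "u8 => VecA", "u8 => VecB", then "f32 => VecC".
    list_pairs! {
        u8 => VecA, VecB;
        f32 => VecC;
    }
}
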
@@ -369,11 +293,11 @@ macro_rules! impl_unsigned_int_ops {
 
                 // Integers panic on divide by 0
                 impl_ref_ops! {
-                    impl core::ops::Div<$vector> for $vector {
+                    impl<const LANES: usize> core::ops::Div<Self> for crate::$vector<LANES> {
                         type Output = Self;
 
                         #[inline]
-                        fn div(self, rhs: $vector) -> Self::Output {
+                        fn div(self, rhs: Self) -> Self::Output {
                             // TODO there is probably a better way of doing this
                             if AsRef::<[$scalar]>::as_ref(&rhs)
                                 .iter()
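
The lane scan above exists because the hardware division the intrinsic lowers to is not defined for a zero divisor, while scalar Rust division is defined to panic; checking every lane of `rhs` up front restores the scalar behaviour. The shape of that guard, sketched standalone over plain arrays (all names here are illustrative, not crate APIs):

// Lane-wise divide that panics the way scalar `/` does, rather than
// handing a zero divisor to a division that does not define that case.
fn checked_lane_div<const LANES: usize>(lhs: [u32; LANES], rhs: [u32; LANES]) -> [u32; LANES] {
    // Mirrors the `.iter().any(...)` check in the diff above.
    if rhs.iter().any(|x| *x == 0) {
        panic!("attempt to divide by zero");
    }
    let mut out = [0; LANES];
    for i in 0..LANES {
        out[i] = lhs[i] / rhs[i];
    }
    out
}

fn main() {
    assert_eq!(checked_lane_div([8, 9], [2, 3]), [4, 3]);
}
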
@@ -387,8 +311,8 @@ macro_rules! impl_unsigned_int_ops {
                 }
 
                 impl_ref_ops! {
-                    impl core::ops::Div<$scalar> for $vector {
-                        type Output = $vector;
+                    impl<const LANES: usize> core::ops::Div<$scalar> for crate::$vector<LANES> {
+                        type Output = Self;
 
                         #[inline]
                         fn div(self, rhs: $scalar) -> Self::Output {
@@ -402,18 +326,18 @@ macro_rules! impl_unsigned_int_ops {
                 }
 
                 impl_ref_ops! {
-                    impl core::ops::Div<$vector> for $scalar {
-                        type Output = $vector;
+                    impl<const LANES: usize> core::ops::Div<crate::$vector<LANES>> for $scalar {
+                        type Output = crate::$vector<LANES>;
 
                         #[inline]
-                        fn div(self, rhs: $vector) -> Self::Output {
-                            <$vector>::splat(self) / rhs
+                        fn div(self, rhs: crate::$vector<LANES>) -> Self::Output {
+                            crate::$vector::splat(self) / rhs
                         }
                     }
                 }
 
                 impl_ref_ops! {
-                    impl core::ops::DivAssign<$vector> for $vector {
+                    impl<const LANES: usize> core::ops::DivAssign<Self> for crate::$vector<LANES> {
                         #[inline]
                         fn div_assign(&mut self, rhs: Self) {
                             *self = *self / rhs;
@@ -422,7 +346,7 @@ macro_rules! impl_unsigned_int_ops {
                 }
 
                 impl_ref_ops! {
-                    impl core::ops::DivAssign<$scalar> for $vector {
+                    impl<const LANES: usize> core::ops::DivAssign<$scalar> for crate::$vector<LANES> {
                         #[inline]
                         fn div_assign(&mut self, rhs: $scalar) {
                             *self = *self / rhs;
@@ -432,11 +356,11 @@ macro_rules! impl_unsigned_int_ops {
 
                 // remainder panics on zero divisor
                 impl_ref_ops! {
-                    impl core::ops::Rem<$vector> for $vector {
+                    impl<const LANES: usize> core::ops::Rem<Self> for crate::$vector<LANES> {
                         type Output = Self;
 
                         #[inline]
-                        fn rem(self, rhs: $vector) -> Self::Output {
+                        fn rem(self, rhs: Self) -> Self::Output {
                             // TODO there is probably a better way of doing this
                             if AsRef::<[$scalar]>::as_ref(&rhs)
                                 .iter()
@@ -450,8 +374,8 @@ macro_rules! impl_unsigned_int_ops {
                 }
 
                 impl_ref_ops! {
-                    impl core::ops::Rem<$scalar> for $vector {
-                        type Output = $vector;
+                    impl<const LANES: usize> core::ops::Rem<$scalar> for crate::$vector<LANES> {
+                        type Output = Self;
 
                         #[inline]
                         fn rem(self, rhs: $scalar) -> Self::Output {
@@ -465,18 +389,18 @@ macro_rules! impl_unsigned_int_ops {
                 }
 
                 impl_ref_ops! {
-                    impl core::ops::Rem<$vector> for $scalar {
-                        type Output = $vector;
+                    impl<const LANES: usize> core::ops::Rem<crate::$vector<LANES>> for $scalar {
+                        type Output = crate::$vector<LANES>;
 
                         #[inline]
-                        fn rem(self, rhs: $vector) -> Self::Output {
-                            <$vector>::splat(self) % rhs
+                        fn rem(self, rhs: crate::$vector<LANES>) -> Self::Output {
+                            crate::$vector::splat(self) % rhs
                         }
                     }
                 }
 
                 impl_ref_ops! {
-                    impl core::ops::RemAssign<$vector> for $vector {
+                    impl<const LANES: usize> core::ops::RemAssign<Self> for crate::$vector<LANES> {
                         #[inline]
                         fn rem_assign(&mut self, rhs: Self) {
                             *self = *self % rhs;
@@ -485,7 +409,7 @@ macro_rules! impl_unsigned_int_ops {
                 }
 
                 impl_ref_ops! {
-                    impl core::ops::RemAssign<$scalar> for $vector {
+                    impl<const LANES: usize> core::ops::RemAssign<$scalar> for crate::$vector<LANES> {
                         #[inline]
                         fn rem_assign(&mut self, rhs: $scalar) {
                             *self = *self % rhs;
@@ -495,11 +419,11 @@ macro_rules! impl_unsigned_int_ops {
 
                 // shifts panic on overflow
                 impl_ref_ops! {
-                    impl core::ops::Shl<$vector> for $vector {
+                    impl<const LANES: usize> core::ops::Shl<Self> for crate::$vector<LANES> {
                         type Output = Self;
 
                         #[inline]
-                        fn shl(self, rhs: $vector) -> Self::Output {
+                        fn shl(self, rhs: Self) -> Self::Output {
                             // TODO there is probably a better way of doing this
                             if AsRef::<[$scalar]>::as_ref(&rhs)
                                 .iter()
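
The `invalid_shift_rhs` predicate these shift impls feed into `.any(...)` rejects any lane that would shift by at least the type's bit width, a case the shift intrinsics do not define. A standalone sketch consistent with the `where` clause visible in the first hunk header; the body here is a plausible reconstruction for illustration, not a quote of the crate's code:

/// True if `rhs` would overflow as a shift amount for `T`: either
/// `rhs >= bits_in_type`, or the bit count itself doesn't fit in `T`.
fn invalid_shift_rhs<T>(rhs: T) -> bool
where
    T: PartialOrd + core::convert::TryFrom<usize>,
{
    let bits_in_type = T::try_from(8 * core::mem::size_of::<T>()).ok();
    bits_in_type.map(|bits| rhs >= bits).unwrap_or(true)
}

fn main() {
    assert!(!invalid_shift_rhs(31u32));
    assert!(invalid_shift_rhs(32u32)); // u32 has only 32 bits
}
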
@@ -514,8 +438,8 @@ macro_rules! impl_unsigned_int_ops {
                 }
 
                 impl_ref_ops! {
-                    impl core::ops::Shl<$scalar> for $vector {
-                        type Output = $vector;
+                    impl<const LANES: usize> core::ops::Shl<$scalar> for crate::$vector<LANES> {
+                        type Output = Self;
 
                         #[inline]
                         fn shl(self, rhs: $scalar) -> Self::Output {
@@ -530,7 +454,7 @@ macro_rules! impl_unsigned_int_ops {
                 }
 
                 impl_ref_ops! {
-                    impl core::ops::ShlAssign<$vector> for $vector {
+                    impl<const LANES: usize> core::ops::ShlAssign<Self> for crate::$vector<LANES> {
                         #[inline]
                         fn shl_assign(&mut self, rhs: Self) {
                             *self = *self << rhs;
@@ -539,7 +463,7 @@ macro_rules! impl_unsigned_int_ops {
                 }
 
                 impl_ref_ops! {
-                    impl core::ops::ShlAssign<$scalar> for $vector {
+                    impl<const LANES: usize> core::ops::ShlAssign<$scalar> for crate::$vector<LANES> {
                         #[inline]
                         fn shl_assign(&mut self, rhs: $scalar) {
                             *self = *self << rhs;
@@ -548,13 +472,13 @@ macro_rules! impl_unsigned_int_ops {
                 }
 
                 impl_ref_ops! {
-                    impl core::ops::Shr<$vector> for $vector {
+                    impl<const LANES: usize> core::ops::Shr<Self> for crate::$vector<LANES> {
                         type Output = Self;
 
                         #[inline]
-                        fn shr(self, rhs: $vector) -> Self::Output {
+                        fn shr(self, rhs: Self) -> Self::Output {
                             // TODO there is probably a better way of doing this
-                            if AsRef::<[$scalar]>::as_ref(&rhs)
+                            if rhs.as_slice()
                                 .iter()
                                 .copied()
                                 .any(invalid_shift_rhs)
@@ -567,8 +491,8 @@ macro_rules! impl_unsigned_int_ops {
                 }
 
                 impl_ref_ops! {
-                    impl core::ops::Shr<$scalar> for $vector {
-                        type Output = $vector;
+                    impl<const LANES: usize> core::ops::Shr<$scalar> for crate::$vector<LANES> {
+                        type Output = Self;
 
                         #[inline]
                         fn shr(self, rhs: $scalar) -> Self::Output {
@@ -583,7 +507,7 @@ macro_rules! impl_unsigned_int_ops {
                 }
 
                 impl_ref_ops! {
-                    impl core::ops::ShrAssign<$vector> for $vector {
+                    impl<const LANES: usize> core::ops::ShrAssign<Self> for crate::$vector<LANES> {
                         #[inline]
                         fn shr_assign(&mut self, rhs: Self) {
                             *self = *self >> rhs;
@@ -592,7 +516,7 @@ macro_rules! impl_unsigned_int_ops {
                 }
 
                 impl_ref_ops! {
-                    impl core::ops::ShrAssign<$scalar> for $vector {
+                    impl<const LANES: usize> core::ops::ShrAssign<$scalar> for crate::$vector<LANES> {
                         #[inline]
                         fn shr_assign(&mut self, rhs: $scalar) {
                             *self = *self >> rhs;
@@ -606,7 +530,7 @@ macro_rules! impl_unsigned_int_ops {
 
 /// Implements signed integer operators for the provided types.
 macro_rules! impl_signed_int_ops {
-    { $($scalar:ty => $($vector:ty),*;)* } => {
+    { $($scalar:ty => $($vector:ident),*;)* } => {
         impl_unsigned_int_ops! { $($scalar => $($vector),*;)* }
         $( // scalar
             $( // vector
@@ -617,33 +541,24 @@ macro_rules! impl_signed_int_ops {
 }
 
 impl_unsigned_int_ops! {
-    u8 => crate::u8x8, crate::u8x16, crate::u8x32, crate::u8x64;
-    u16 => crate::u16x4, crate::u16x8, crate::u16x16, crate::u16x32;
-    u32 => crate::u32x2, crate::u32x4, crate::u32x8, crate::u32x16;
-    u64 => crate::u64x2, crate::u64x4, crate::u64x8;
-    u128 => crate::u128x2, crate::u128x4;
-    usize => crate::usizex2, crate::usizex4, crate::usizex8;
+    u8 => SimdU8;
+    u16 => SimdU16;
+    u32 => SimdU32;
+    u64 => SimdU64;
+    u128 => SimdU128;
+    usize => SimdUsize;
 }
 
 impl_signed_int_ops! {
-    i8 => crate::i8x8, crate::i8x16, crate::i8x32, crate::i8x64;
-    i16 => crate::i16x4, crate::i16x8, crate::i16x16, crate::i16x32;
-    i32 => crate::i32x2, crate::i32x4, crate::i32x8, crate::i32x16;
-    i64 => crate::i64x2, crate::i64x4, crate::i64x8;
-    i128 => crate::i128x2, crate::i128x4;
-    isize => crate::isizex2, crate::isizex4, crate::isizex8;
+    i8 => SimdI8;
+    i16 => SimdI16;
+    i32 => SimdI32;
+    i64 => SimdI64;
+    i128 => SimdI128;
+    isize => SimdIsize;
 }
 
 impl_float_ops! {
-    f32 => crate::f32x2, crate::f32x4, crate::f32x8, crate::f32x16;
-    f64 => crate::f64x2, crate::f64x4, crate::f64x8;
-}
-
-impl_mask_ops! {
-    crate::masks::wide::m8 => crate::masks::wide::m8x8, crate::masks::wide::m8x16, crate::masks::wide::m8x32, crate::masks::wide::m8x64;
-    crate::masks::wide::m16 => crate::masks::wide::m16x4, crate::masks::wide::m16x8, crate::masks::wide::m16x16, crate::masks::wide::m16x32;
-    crate::masks::wide::m32 => crate::masks::wide::m32x2, crate::masks::wide::m32x4, crate::masks::wide::m32x8, crate::masks::wide::m32x16;
-    crate::masks::wide::m64 => crate::masks::wide::m64x2, crate::masks::wide::m64x4, crate::masks::wide::m64x8;
-    crate::masks::wide::m128 => crate::masks::wide::m128x2, crate::masks::wide::m128x4;
-    crate::masks::wide::msize => crate::masks::wide::msizex2, crate::masks::wide::msizex4, crate::masks::wide::msizex8;
+    f32 => SimdF32;
+    f64 => SimdF64;
 }
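
With the invocations now naming one generic type per element width (`$type:ident` rather than `$type:ty` is what lets the expansion write `crate::$type<LANES>`, since a captured `ty` fragment cannot have generics appended), a single impl covers every lane count that previously needed its own expansion. The payoff, shown with a self-contained stand-in (`ToySimdU32` is illustrative, not the crate's type):

// One type, generic over its lane count, standing in for the SimdU32
// this patch switches to.
#[derive(Copy, Clone, Debug, PartialEq)]
struct ToySimdU32<const LANES: usize>([u32; LANES]);

impl<const LANES: usize> ToySimdU32<LANES> {
    fn splat(value: u32) -> Self {
        Self([value; LANES])
    }
}

// One impl now covers every lane count, where the old code needed a
// separate expansion for each of u32x2, u32x4, u32x8, and u32x16.
impl<const LANES: usize> core::ops::Add for ToySimdU32<LANES> {
    type Output = Self;
    fn add(self, rhs: Self) -> Self {
        let mut out = self.0;
        for (o, r) in out.iter_mut().zip(rhs.0.iter()) {
            *o = o.wrapping_add(*r);
        }
        Self(out)
    }
}

fn main() {
    let a = ToySimdU32::<4>::splat(1);
    let b = ToySimdU32([1, 2, 3, 4]);
    assert_eq!(a + b, ToySimdU32([2, 3, 4, 5]));
}
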