Implement core::ops (#10)
* Add vector-vector arithmetic ops
* Add operators and integer conversions for masks
* Add unary traits
* Implement Index and IndexMut
* Implement by-ref ops for masks
* Document intrinsics
* Implement format traits for masks
* Add floating point ops tests
* Add integer tests
* Add mask tests
This commit is contained in:
39
crates/core_simd/src/intrinsics.rs
Normal file
39
crates/core_simd/src/intrinsics.rs
Normal file
@@ -0,0 +1,39 @@
|
||||
//! This module contains the LLVM intrinsics bindings that provide the functionality for this
//! crate.
//!
//! The LLVM assembly language is documented here: https://llvm.org/docs/LangRef.html

/// These intrinsics aren't linked directly from LLVM and are mostly undocumented, however they are
/// simply lowered to the matching LLVM instructions by the compiler. The associated instruction
/// is documented alongside each intrinsic.
extern "platform-intrinsic" {
    /// add/fadd
    pub(crate) fn simd_add<T>(x: T, y: T) -> T;

    /// sub/fsub
    pub(crate) fn simd_sub<T>(x: T, y: T) -> T;

    /// mul/fmul
    pub(crate) fn simd_mul<T>(x: T, y: T) -> T;

    /// udiv/sdiv/fdiv
    pub(crate) fn simd_div<T>(x: T, y: T) -> T;

    /// urem/srem/frem
    pub(crate) fn simd_rem<T>(x: T, y: T) -> T;

    /// shl
    pub(crate) fn simd_shl<T>(x: T, y: T) -> T;

    /// lshr/ashr
    pub(crate) fn simd_shr<T>(x: T, y: T) -> T;

    /// and
    pub(crate) fn simd_and<T>(x: T, y: T) -> T;

    /// or
    pub(crate) fn simd_or<T>(x: T, y: T) -> T;

    /// xor
    pub(crate) fn simd_xor<T>(x: T, y: T) -> T;
}
|
||||
@@ -1,5 +1,5 @@
|
||||
#![no_std]
|
||||
#![feature(repr_simd)]
|
||||
#![feature(repr_simd, platform_intrinsics)]
|
||||
#![warn(missing_docs)]
|
||||
//! Portable SIMD module.
|
||||
|
||||
@@ -7,6 +7,8 @@
|
||||
mod macros;
|
||||
|
||||
mod fmt;
|
||||
mod intrinsics;
|
||||
mod ops;
|
||||
|
||||
mod masks;
|
||||
pub use masks::*;
|
||||
|
||||
@@ -1,3 +1,13 @@
|
||||
/// The error type returned when converting an integer to a mask fails.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct TryFromMaskError(());

impl core::fmt::Display for TryFromMaskError {
    /// Emits the fixed diagnostic for a failed integer-to-mask conversion.
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        f.write_str("mask must have all bits set or unset")
    }
}
|
||||
|
||||
macro_rules! define_mask {
|
||||
{ $(#[$attr:meta])* struct $name:ident($type:ty); } => {
|
||||
$(#[$attr])*
|
||||
@@ -34,11 +44,52 @@ macro_rules! define_mask {
|
||||
}
|
||||
}
|
||||
|
||||
impl core::convert::TryFrom<$type> for $name {
|
||||
type Error = TryFromMaskError;
|
||||
fn try_from(value: $type) -> Result<Self, Self::Error> {
|
||||
if value == 0 || !value == 0 {
|
||||
Ok(Self(value))
|
||||
} else {
|
||||
Err(TryFromMaskError(()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl core::convert::From<$name> for $type {
|
||||
fn from(value: $name) -> Self {
|
||||
value.0
|
||||
}
|
||||
}
|
||||
|
||||
impl core::fmt::Debug for $name {
|
||||
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
|
||||
self.test().fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl core::fmt::Binary for $name {
|
||||
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
|
||||
<$type as core::fmt::Binary>::fmt(&self.0, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl core::fmt::Octal for $name {
|
||||
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
|
||||
<$type as core::fmt::Octal>::fmt(&self.0, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl core::fmt::LowerHex for $name {
|
||||
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
|
||||
<$type as core::fmt::LowerHex>::fmt(&self.0, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl core::fmt::UpperHex for $name {
|
||||
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
|
||||
<$type as core::fmt::UpperHex>::fmt(&self.0, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
628
crates/core_simd/src/ops.rs
Normal file
628
crates/core_simd/src/ops.rs
Normal file
@@ -0,0 +1,628 @@
|
||||
/// Checks if the right-hand side argument of a left- or right-shift would cause overflow.
///
/// A shift amount is invalid when it is negative or at least as large as the
/// bit width of the shifted type, matching the overflow rules for `<<`/`>>`.
fn invalid_shift_rhs<T>(rhs: T) -> bool
where
    T: Default + PartialOrd + core::convert::TryFrom<usize>,
    <T as core::convert::TryFrom<usize>>::Error: core::fmt::Debug,
{
    // The bit width always fits in `T`: even for `i8`/`u8` it is only 8,
    // so this conversion cannot fail.
    let bit_width = T::try_from(core::mem::size_of::<T>() * 8).unwrap();
    let is_negative = rhs < T::default();
    is_negative || rhs >= bit_width
}
|
||||
|
||||
/// Automatically implements operators over references in addition to the provided operator.
///
/// Each arm takes a single operator `impl` (binary, binary-assignment, or unary)
/// and emits it together with forwarding impls for the by-reference forms, so
/// `&T op U`, `T op &U`, and `&T op &U` all defer to the by-value implementation.
macro_rules! impl_ref_ops {
    // binary op
    {
        impl core::ops::$trait:ident<$rhs:ty> for $type:ty {
            type Output = $output:ty;

            $(#[$attrs:meta])*
            fn $fn:ident($self_tok:ident, $rhs_arg:ident: $rhs_arg_ty:ty) -> Self::Output $body:tt
        }
    } => {
        // The by-value impl, exactly as provided by the caller.
        impl core::ops::$trait<$rhs> for $type {
            type Output = $output;

            $(#[$attrs])*
            fn $fn($self_tok, $rhs_arg: $rhs_arg_ty) -> Self::Output $body
        }

        // `T op &U` — dereference the right-hand side and forward.
        impl core::ops::$trait<&'_ $rhs> for $type {
            type Output = <$type as core::ops::$trait<$rhs>>::Output;

            $(#[$attrs])*
            fn $fn($self_tok, $rhs_arg: &$rhs) -> Self::Output {
                core::ops::$trait::$fn($self_tok, *$rhs_arg)
            }
        }

        // `&T op U` — dereference the left-hand side and forward.
        impl core::ops::$trait<$rhs> for &'_ $type {
            type Output = <$type as core::ops::$trait<$rhs>>::Output;

            $(#[$attrs])*
            fn $fn($self_tok, $rhs_arg: $rhs) -> Self::Output {
                core::ops::$trait::$fn(*$self_tok, $rhs_arg)
            }
        }

        // `&T op &U` — dereference both sides and forward.
        impl core::ops::$trait<&'_ $rhs> for &'_ $type {
            type Output = <$type as core::ops::$trait<$rhs>>::Output;

            $(#[$attrs])*
            fn $fn($self_tok, $rhs_arg: &$rhs) -> Self::Output {
                core::ops::$trait::$fn(*$self_tok, *$rhs_arg)
            }
        }
    };

    // binary assignment op
    {
        impl core::ops::$trait:ident<$rhs:ty> for $type:ty {
            $(#[$attrs:meta])*
            fn $fn:ident(&mut $self_tok:ident, $rhs_arg:ident: $rhs_arg_ty:ty) $body:tt
        }
    } => {
        // The by-value impl, exactly as provided by the caller.
        impl core::ops::$trait<$rhs> for $type {
            $(#[$attrs])*
            fn $fn(&mut $self_tok, $rhs_arg: $rhs_arg_ty) $body
        }

        // `T op= &U` — dereference the right-hand side and forward.
        impl core::ops::$trait<&'_ $rhs> for $type {
            $(#[$attrs])*
            fn $fn(&mut $self_tok, $rhs_arg: &$rhs_arg_ty) {
                core::ops::$trait::$fn($self_tok, *$rhs_arg)
            }
        }
    };

    // unary op
    {
        impl core::ops::$trait:ident for $type:ty {
            type Output = $output:ty;
            fn $fn:ident($self_tok:ident) -> Self::Output $body:tt
        }
    } => {
        // The by-value impl, exactly as provided by the caller.
        impl core::ops::$trait for $type {
            type Output = $output;
            fn $fn($self_tok) -> Self::Output $body
        }

        // `op &T` — dereference the operand and forward.
        impl core::ops::$trait for &'_ $type {
            type Output = <$type as core::ops::$trait>::Output;
            fn $fn($self_tok) -> Self::Output {
                core::ops::$trait::$fn(*$self_tok)
            }
        }
    }
}
|
||||
|
||||
/// Implements op traits for masks
///
/// For each listed mask *scalar* type, emits the bitwise operators
/// (`BitAnd`/`BitOr`/`BitXor`, their assignment forms, and `Not`),
/// including the by-reference forms via `impl_ref_ops!`.
///
/// NOTE(review): a second, different macro named `impl_mask_ops!` is defined
/// later in this file for mask *vectors*. `macro_rules!` names resolve
/// textually, so this definition serves the invocation directly below, but the
/// duplicated name is easy to misread — consider renaming one of the two.
macro_rules! impl_mask_ops {
    { $($mask:ty),* } => {
        $(
            impl_ref_ops! {
                impl core::ops::BitAnd<$mask> for $mask {
                    type Output = Self;
                    fn bitand(self, rhs: Self) -> Self::Output {
                        Self(self.0 & rhs.0)
                    }
                }
            }

            impl_ref_ops! {
                impl core::ops::BitAndAssign<$mask> for $mask {
                    fn bitand_assign(&mut self, rhs: Self) {
                        *self = *self & rhs;
                    }
                }
            }

            impl_ref_ops! {
                impl core::ops::BitOr<$mask> for $mask {
                    type Output = Self;
                    fn bitor(self, rhs: Self) -> Self::Output {
                        Self(self.0 | rhs.0)
                    }
                }
            }

            impl_ref_ops! {
                impl core::ops::BitOrAssign<$mask> for $mask {
                    fn bitor_assign(&mut self, rhs: Self) {
                        *self = *self | rhs;
                    }
                }
            }

            impl_ref_ops! {
                impl core::ops::BitXor<$mask> for $mask {
                    type Output = Self;
                    fn bitxor(self, rhs: Self) -> Self::Output {
                        Self(self.0 ^ rhs.0)
                    }
                }
            }

            impl_ref_ops! {
                impl core::ops::BitXorAssign<$mask> for $mask {
                    fn bitxor_assign(&mut self, rhs: Self) {
                        *self = *self ^ rhs;
                    }
                }
            }

            impl_ref_ops! {
                impl core::ops::Not for $mask {
                    type Output = Self;
                    fn not(self) -> Self::Output {
                        Self(!self.0)
                    }
                }
            }
        )*
    }
}

// Bitwise operators for every mask scalar type.
impl_mask_ops! { crate::mask8, crate::mask16, crate::mask32, crate::mask64, crate::mask128, crate::masksize }
|
||||
|
||||
/// Automatically implements operators over vectors and scalars for a particular vector.
///
/// The public arms (`impl Add for …`, `impl Not for …`, …) forward to either
/// `impl_ref_ops!` directly or to the internal `@binary` arm, which wires the
/// vector/vector impl, the two mixed vector/scalar impls, and the matching
/// assignment impls to a single intrinsic from `crate::intrinsics`.
macro_rules! impl_op {
    { impl Add for $type:ty, $scalar:ty } => {
        impl_op! { @binary $type, $scalar, Add::add, AddAssign::add_assign, simd_add }
    };
    { impl Sub for $type:ty, $scalar:ty } => {
        impl_op! { @binary $type, $scalar, Sub::sub, SubAssign::sub_assign, simd_sub }
    };
    { impl Mul for $type:ty, $scalar:ty } => {
        impl_op! { @binary $type, $scalar, Mul::mul, MulAssign::mul_assign, simd_mul }
    };
    { impl Div for $type:ty, $scalar:ty } => {
        impl_op! { @binary $type, $scalar, Div::div, DivAssign::div_assign, simd_div }
    };
    { impl Rem for $type:ty, $scalar:ty } => {
        impl_op! { @binary $type, $scalar, Rem::rem, RemAssign::rem_assign, simd_rem }
    };
    { impl Shl for $type:ty, $scalar:ty } => {
        impl_op! { @binary $type, $scalar, Shl::shl, ShlAssign::shl_assign, simd_shl }
    };
    { impl Shr for $type:ty, $scalar:ty } => {
        impl_op! { @binary $type, $scalar, Shr::shr, ShrAssign::shr_assign, simd_shr }
    };
    { impl BitAnd for $type:ty, $scalar:ty } => {
        impl_op! { @binary $type, $scalar, BitAnd::bitand, BitAndAssign::bitand_assign, simd_and }
    };
    { impl BitOr for $type:ty, $scalar:ty } => {
        impl_op! { @binary $type, $scalar, BitOr::bitor, BitOrAssign::bitor_assign, simd_or }
    };
    { impl BitXor for $type:ty, $scalar:ty } => {
        impl_op! { @binary $type, $scalar, BitXor::bitxor, BitXorAssign::bitxor_assign, simd_xor }
    };

    // Bitwise NOT, expressed as XOR against an all-ones vector
    // (`!<$scalar>::default()` is `!0`, i.e. every bit set).
    { impl Not for $type:ty, $scalar:ty } => {
        impl_ref_ops! {
            impl core::ops::Not for $type {
                type Output = Self;
                fn not(self) -> Self::Output {
                    self ^ <$type>::splat(!<$scalar>::default())
                }
            }
        }
    };

    // Negation, expressed as `0 - self` (a splat of the negated default value).
    { impl Neg for $type:ty, $scalar:ty } => {
        impl_ref_ops! {
            impl core::ops::Neg for $type {
                type Output = Self;
                fn neg(self) -> Self::Output {
                    <$type>::splat(-<$scalar>::default()) - self
                }
            }
        }
    };

    // `Index`/`IndexMut` by delegating to the vector's `AsRef`/`AsMut` slice
    // views; any `SliceIndex` works (a single lane or a range of lanes).
    { impl Index for $type:ty, $scalar:ty } => {
        impl<I> core::ops::Index<I> for $type
        where
            I: core::slice::SliceIndex<[$scalar]>,
        {
            type Output = I::Output;
            fn index(&self, index: I) -> &Self::Output {
                let slice: &[_] = self.as_ref();
                &slice[index]
            }
        }

        impl<I> core::ops::IndexMut<I> for $type
        where
            I: core::slice::SliceIndex<[$scalar]>,
        {
            fn index_mut(&mut self, index: I) -> &mut Self::Output {
                let slice: &mut [_] = self.as_mut();
                &mut slice[index]
            }
        }
    };

    // generic binary op with assignment when output is `Self`
    { @binary $type:ty, $scalar:ty, $trait:ident :: $trait_fn:ident, $assign_trait:ident :: $assign_trait_fn:ident, $intrinsic:ident } => {
        // vector op vector — calls the intrinsic directly.
        impl_ref_ops! {
            impl core::ops::$trait<$type> for $type {
                type Output = $type;

                #[inline]
                fn $trait_fn(self, rhs: $type) -> Self::Output {
                    unsafe {
                        crate::intrinsics::$intrinsic(self, rhs)
                    }
                }
            }
        }

        // vector op scalar — splat the scalar, then defer to vector/vector.
        impl_ref_ops! {
            impl core::ops::$trait<$scalar> for $type {
                type Output = $type;

                #[inline]
                fn $trait_fn(self, rhs: $scalar) -> Self::Output {
                    core::ops::$trait::$trait_fn(self, <$type>::splat(rhs))
                }
            }
        }

        // scalar op vector — splat the scalar, then defer to vector/vector.
        impl_ref_ops! {
            impl core::ops::$trait<$type> for $scalar {
                type Output = $type;

                #[inline]
                fn $trait_fn(self, rhs: $type) -> Self::Output {
                    core::ops::$trait::$trait_fn(<$type>::splat(self), rhs)
                }
            }
        }

        // vector op= vector — calls the intrinsic and writes back.
        impl_ref_ops! {
            impl core::ops::$assign_trait<$type> for $type {
                #[inline]
                fn $assign_trait_fn(&mut self, rhs: $type) {
                    unsafe {
                        *self = crate::intrinsics::$intrinsic(*self, rhs);
                    }
                }
            }
        }

        // vector op= scalar — splat, then defer to vector/vector assignment.
        impl_ref_ops! {
            impl core::ops::$assign_trait<$scalar> for $type {
                #[inline]
                fn $assign_trait_fn(&mut self, rhs: $scalar) {
                    core::ops::$assign_trait::$assign_trait_fn(self, <$type>::splat(rhs));
                }
            }
        }
    };
}
|
||||
|
||||
/// Implements floating-point operators for the provided types.
///
/// Expands to arithmetic (`Add`/`Sub`/`Mul`/`Div`/`Rem`), `Neg`, and
/// `Index`/`IndexMut` impls for every listed vector via `impl_op!`. Unlike
/// the integer macro below, no zero-divisor checks are inserted.
macro_rules! impl_float_ops {
    { $($scalar:ty => $($vector:ty),*;)* } => {
        $( // scalar
            $( // vector
                impl_op! { impl Add for $vector, $scalar }
                impl_op! { impl Sub for $vector, $scalar }
                impl_op! { impl Mul for $vector, $scalar }
                impl_op! { impl Div for $vector, $scalar }
                impl_op! { impl Rem for $vector, $scalar }
                impl_op! { impl Neg for $vector, $scalar }
                impl_op! { impl Index for $vector, $scalar }
            )*
        )*
    };
}
|
||||
|
||||
/// Implements mask operators for the provided types.
///
/// For each listed mask *vector* type (keyed by its mask scalar element type),
/// expands to bitwise (`BitAnd`/`BitOr`/`BitXor`), `Not`, and
/// `Index`/`IndexMut` impls via `impl_op!`.
///
/// NOTE(review): this redefines the `impl_mask_ops!` name declared earlier in
/// this file for mask scalar types. `macro_rules!` resolution is textual, so
/// each invocation picks up the most recent definition and both uses work,
/// but the duplicate name is confusing — consider renaming one of the two.
macro_rules! impl_mask_ops {
    { $($scalar:ty => $($vector:ty),*;)* } => {
        $( // scalar
            $( // vector
                impl_op! { impl BitAnd for $vector, $scalar }
                impl_op! { impl BitOr for $vector, $scalar }
                impl_op! { impl BitXor for $vector, $scalar }
                impl_op! { impl Not for $vector, $scalar }
                impl_op! { impl Index for $vector, $scalar }
            )*
        )*
    };
}
|
||||
|
||||
/// Implements unsigned integer operators for the provided types.
///
/// Emits arithmetic, bitwise, `Not`, and `Index`/`IndexMut` impls via
/// `impl_op!`, plus hand-written `Div`/`Rem` and shift impls that panic on a
/// zero divisor or an out-of-range shift amount — mirroring the panics of the
/// scalar integer operators instead of calling the intrinsic unconditionally.
macro_rules! impl_unsigned_int_ops {
    { $($scalar:ty => $($vector:ty),*;)* } => {
        $( // scalar
            $( // vector
                impl_op! { impl Add for $vector, $scalar }
                impl_op! { impl Sub for $vector, $scalar }
                impl_op! { impl Mul for $vector, $scalar }
                impl_op! { impl BitAnd for $vector, $scalar }
                impl_op! { impl BitOr for $vector, $scalar }
                impl_op! { impl BitXor for $vector, $scalar }
                impl_op! { impl Not for $vector, $scalar }
                impl_op! { impl Index for $vector, $scalar }

                // Integers panic on divide by 0
                impl_ref_ops! {
                    impl core::ops::Div<$vector> for $vector {
                        type Output = Self;

                        #[inline]
                        fn div(self, rhs: $vector) -> Self::Output {
                            // TODO there is probably a better way of doing this
                            // Checks every lane of the divisor before calling the intrinsic.
                            if AsRef::<[$scalar]>::as_ref(&rhs)
                                .iter()
                                .any(|x| *x == 0)
                            {
                                panic!("attempt to divide by zero");
                            }
                            unsafe { crate::intrinsics::simd_div(self, rhs) }
                        }
                    }
                }

                impl_ref_ops! {
                    impl core::ops::Div<$scalar> for $vector {
                        type Output = $vector;

                        #[inline]
                        fn div(self, rhs: $scalar) -> Self::Output {
                            if rhs == 0 {
                                panic!("attempt to divide by zero");
                            }
                            let rhs = Self::splat(rhs);
                            unsafe { crate::intrinsics::simd_div(self, rhs) }
                        }
                    }
                }

                impl_ref_ops! {
                    impl core::ops::Div<$vector> for $scalar {
                        type Output = $vector;

                        #[inline]
                        fn div(self, rhs: $vector) -> Self::Output {
                            // Splat, then reuse the vector/vector impl (and its zero check).
                            <$vector>::splat(self) / rhs
                        }
                    }
                }

                impl_ref_ops! {
                    impl core::ops::DivAssign<$vector> for $vector {
                        #[inline]
                        fn div_assign(&mut self, rhs: Self) {
                            *self = *self / rhs;
                        }
                    }
                }

                impl_ref_ops! {
                    impl core::ops::DivAssign<$scalar> for $vector {
                        #[inline]
                        fn div_assign(&mut self, rhs: $scalar) {
                            *self = *self / rhs;
                        }
                    }
                }

                // remainder panics on zero divisor
                impl_ref_ops! {
                    impl core::ops::Rem<$vector> for $vector {
                        type Output = Self;

                        #[inline]
                        fn rem(self, rhs: $vector) -> Self::Output {
                            // TODO there is probably a better way of doing this
                            if AsRef::<[$scalar]>::as_ref(&rhs)
                                .iter()
                                .any(|x| *x == 0)
                            {
                                panic!("attempt to calculate the remainder with a divisor of zero");
                            }
                            unsafe { crate::intrinsics::simd_rem(self, rhs) }
                        }
                    }
                }

                impl_ref_ops! {
                    impl core::ops::Rem<$scalar> for $vector {
                        type Output = $vector;

                        #[inline]
                        fn rem(self, rhs: $scalar) -> Self::Output {
                            if rhs == 0 {
                                panic!("attempt to calculate the remainder with a divisor of zero");
                            }
                            let rhs = Self::splat(rhs);
                            unsafe { crate::intrinsics::simd_rem(self, rhs) }
                        }
                    }
                }

                impl_ref_ops! {
                    impl core::ops::Rem<$vector> for $scalar {
                        type Output = $vector;

                        #[inline]
                        fn rem(self, rhs: $vector) -> Self::Output {
                            // Splat, then reuse the vector/vector impl (and its zero check).
                            <$vector>::splat(self) % rhs
                        }
                    }
                }

                impl_ref_ops! {
                    impl core::ops::RemAssign<$vector> for $vector {
                        #[inline]
                        fn rem_assign(&mut self, rhs: Self) {
                            *self = *self % rhs;
                        }
                    }
                }

                impl_ref_ops! {
                    impl core::ops::RemAssign<$scalar> for $vector {
                        #[inline]
                        fn rem_assign(&mut self, rhs: $scalar) {
                            *self = *self % rhs;
                        }
                    }
                }

                // shifts panic on overflow
                impl_ref_ops! {
                    impl core::ops::Shl<$vector> for $vector {
                        type Output = Self;

                        #[inline]
                        fn shl(self, rhs: $vector) -> Self::Output {
                            // TODO there is probably a better way of doing this
                            if AsRef::<[$scalar]>::as_ref(&rhs)
                                .iter()
                                .copied()
                                .any(invalid_shift_rhs)
                            {
                                panic!("attempt to shift left with overflow");
                            }
                            unsafe { crate::intrinsics::simd_shl(self, rhs) }
                        }
                    }
                }

                impl_ref_ops! {
                    impl core::ops::Shl<$scalar> for $vector {
                        type Output = $vector;

                        #[inline]
                        fn shl(self, rhs: $scalar) -> Self::Output {
                            if invalid_shift_rhs(rhs) {
                                panic!("attempt to shift left with overflow");
                            }
                            let rhs = Self::splat(rhs);
                            unsafe { crate::intrinsics::simd_shl(self, rhs) }
                        }
                    }
                }

                impl_ref_ops! {
                    impl core::ops::ShlAssign<$vector> for $vector {
                        #[inline]
                        fn shl_assign(&mut self, rhs: Self) {
                            *self = *self << rhs;
                        }
                    }
                }

                impl_ref_ops! {
                    impl core::ops::ShlAssign<$scalar> for $vector {
                        #[inline]
                        fn shl_assign(&mut self, rhs: $scalar) {
                            *self = *self << rhs;
                        }
                    }
                }

                // NOTE(review): the scalar builtin `>>` panics with "attempt to
                // shift right with overflow"; the message below omits "right" —
                // confirm whether this divergence is intended.
                impl_ref_ops! {
                    impl core::ops::Shr<$vector> for $vector {
                        type Output = Self;

                        #[inline]
                        fn shr(self, rhs: $vector) -> Self::Output {
                            // TODO there is probably a better way of doing this
                            if AsRef::<[$scalar]>::as_ref(&rhs)
                                .iter()
                                .copied()
                                .any(invalid_shift_rhs)
                            {
                                panic!("attempt to shift with overflow");
                            }
                            unsafe { crate::intrinsics::simd_shr(self, rhs) }
                        }
                    }
                }

                impl_ref_ops! {
                    impl core::ops::Shr<$scalar> for $vector {
                        type Output = $vector;

                        #[inline]
                        fn shr(self, rhs: $scalar) -> Self::Output {
                            if invalid_shift_rhs(rhs) {
                                panic!("attempt to shift with overflow");
                            }
                            let rhs = Self::splat(rhs);
                            unsafe { crate::intrinsics::simd_shr(self, rhs) }
                        }
                    }
                }

                impl_ref_ops! {
                    impl core::ops::ShrAssign<$vector> for $vector {
                        #[inline]
                        fn shr_assign(&mut self, rhs: Self) {
                            *self = *self >> rhs;
                        }
                    }
                }

                impl_ref_ops! {
                    impl core::ops::ShrAssign<$scalar> for $vector {
                        #[inline]
                        fn shr_assign(&mut self, rhs: $scalar) {
                            *self = *self >> rhs;
                        }
                    }
                }
            )*
        )*
    };
}
|
||||
|
||||
/// Implements signed integer operators for the provided types.
///
/// Reuses `impl_unsigned_int_ops!` for the shared operator set, then adds a
/// `Neg` impl for each vector.
macro_rules! impl_signed_int_ops {
    { $($scalar:ty => $($vector:ty),*;)* } => {
        impl_unsigned_int_ops! { $($scalar => $($vector),*;)* }

        $( // scalar
            $( // vector
                impl_op! { impl Neg for $vector, $scalar }
            )*
        )*
    };
}
|
||||
|
||||
// Operator impls for the unsigned integer vectors.
impl_unsigned_int_ops! {
    u8 => crate::u8x8, crate::u8x16, crate::u8x32, crate::u8x64;
    u16 => crate::u16x4, crate::u16x8, crate::u16x16, crate::u16x32;
    u32 => crate::u32x2, crate::u32x4, crate::u32x8, crate::u32x16;
    u64 => crate::u64x2, crate::u64x4, crate::u64x8;
    u128 => crate::u128x2, crate::u128x4;
    usize => crate::usizex2, crate::usizex4, crate::usizex8;
}

// Operator impls for the signed integer vectors (the unsigned set plus `Neg`).
impl_signed_int_ops! {
    i8 => crate::i8x8, crate::i8x16, crate::i8x32, crate::i8x64;
    i16 => crate::i16x4, crate::i16x8, crate::i16x16, crate::i16x32;
    i32 => crate::i32x2, crate::i32x4, crate::i32x8, crate::i32x16;
    i64 => crate::i64x2, crate::i64x4, crate::i64x8;
    i128 => crate::i128x2, crate::i128x4;
    isize => crate::isizex2, crate::isizex4, crate::isizex8;
}

// Operator impls for the floating-point vectors.
impl_float_ops! {
    f32 => crate::f32x2, crate::f32x4, crate::f32x8, crate::f32x16;
    f64 => crate::f64x2, crate::f64x4, crate::f64x8;
}

// Operator impls for the mask vectors, keyed by each mask's scalar element
// type (this invocation resolves to the vector-form `impl_mask_ops!` defined
// most recently above it).
impl_mask_ops! {
    crate::mask8 => crate::mask8x8, crate::mask8x16, crate::mask8x32, crate::mask8x64;
    crate::mask16 => crate::mask16x4, crate::mask16x8, crate::mask16x16, crate::mask16x32;
    crate::mask32 => crate::mask32x2, crate::mask32x4, crate::mask32x8, crate::mask32x16;
    crate::mask64 => crate::mask64x2, crate::mask64x4, crate::mask64x8;
    crate::mask128 => crate::mask128x2, crate::mask128x4;
    crate::masksize => crate::masksizex2, crate::masksizex4, crate::masksizex8;
}
|
||||
Reference in New Issue
Block a user