use crate::Uint;
use core::ops::{
BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not, Shl, ShlAssign, Shr,
ShrAssign,
};
impl<const BITS: usize, const LIMBS: usize> Uint<BITS, LIMBS> {
    /// Returns whether the bit at `index` is set.
    ///
    /// Indices at or beyond `BITS` always return `false`.
    #[must_use]
    #[inline]
    pub const fn bit(&self, index: usize) -> bool {
        if index >= BITS {
            return false;
        }
        let (limbs, bits) = (index / 64, index % 64);
        self.limbs[limbs] & (1 << bits) != 0
    }

    /// Sets the bit at `index` to `value`.
    ///
    /// Indices at or beyond `BITS` are silently ignored.
    #[inline]
    pub fn set_bit(&mut self, index: usize, value: bool) {
        if index >= BITS {
            return;
        }
        let (limbs, bits) = (index / 64, index % 64);
        if value {
            self.limbs[limbs] |= 1 << bits;
        } else {
            self.limbs[limbs] &= !(1 << bits);
        }
    }

    /// Returns the byte at `index` in little-endian order.
    ///
    /// # Panics
    ///
    /// Panics if `index` is outside the limb storage.
    #[inline]
    #[must_use]
    #[track_caller]
    pub const fn byte(&self, index: usize) -> u8 {
        #[cfg(target_endian = "little")]
        {
            // On little-endian targets the limb array is already a
            // little-endian byte sequence.
            self.as_le_slice()[index]
        }
        #[cfg(target_endian = "big")]
        #[allow(clippy::cast_possible_truncation)]
        {
            // Limbs are stored least-significant first regardless of target
            // endianness, so extract the byte from its limb directly.
            (self.limbs[index / 8] >> ((index % 8) * 8)) as u8
        }
    }

    /// Reverses the order of the bits.
    #[inline]
    #[must_use]
    pub fn reverse_bits(mut self) -> Self {
        self.limbs.reverse();
        for limb in &mut self.limbs {
            *limb = limb.reverse_bits();
        }
        // Reversing all LIMBS * 64 bits leaves the value shifted up by the
        // unused high bits of the top limb; shift them back out.
        if BITS % 64 != 0 {
            self >>= 64 - BITS % 64;
        }
        self
    }

    /// Returns the number of leading (most significant) zero bits.
    ///
    /// Returns `BITS` for the zero value.
    #[inline]
    #[must_use]
    pub fn leading_zeros(&self) -> usize {
        self.as_limbs()
            .iter()
            .rev()
            .position(|&limb| limb != 0)
            .map_or(BITS, |n| {
                // `fixed` discounts the unused high bits of the top limb
                // (zero when BITS is a multiple of 64).
                let fixed = Self::MASK.leading_zeros() as usize;
                let skipped = n * 64;
                let top = self.as_limbs()[LIMBS - n - 1].leading_zeros() as usize;
                skipped + top - fixed
            })
    }

    /// Returns the number of leading (most significant) one bits.
    #[inline]
    #[must_use]
    pub fn leading_ones(&self) -> usize {
        (self.not()).leading_zeros()
    }

    /// Returns the number of trailing (least significant) zero bits.
    ///
    /// Returns `BITS` for the zero value.
    #[inline]
    #[must_use]
    pub fn trailing_zeros(&self) -> usize {
        self.as_limbs()
            .iter()
            .position(|&limb| limb != 0)
            .map_or(BITS, |n| {
                n * 64 + self.as_limbs()[n].trailing_zeros() as usize
            })
    }

    /// Returns the number of trailing (least significant) one bits.
    #[inline]
    #[must_use]
    pub fn trailing_ones(&self) -> usize {
        self.as_limbs()
            .iter()
            .position(|&limb| limb != u64::MAX)
            .map_or(BITS, |n| {
                n * 64 + self.as_limbs()[n].trailing_ones() as usize
            })
    }

    /// Returns the number of one bits.
    #[inline]
    #[must_use]
    pub fn count_ones(&self) -> usize {
        self.as_limbs()
            .iter()
            .map(|limb| limb.count_ones() as usize)
            .sum()
    }

    /// Returns the number of zero bits (out of the `BITS` representable bits).
    #[must_use]
    #[inline]
    pub fn count_zeros(&self) -> usize {
        BITS - self.count_ones()
    }

    /// Returns the minimal number of bits required to represent the value.
    #[must_use]
    #[inline]
    pub fn bit_len(&self) -> usize {
        BITS - self.leading_zeros()
    }

    /// Returns the minimal number of bytes required to represent the value.
    #[must_use]
    #[inline]
    pub fn byte_len(&self) -> usize {
        (self.bit_len() + 7) / 8
    }

    /// Returns the most significant 64 bits and an exponent such that the
    /// value approximately equals `bits * 2^exponent`.
    ///
    /// Values of at most one limb are returned exactly with exponent zero;
    /// larger values are normalized so `bits` has its top bit set.
    #[inline]
    #[must_use]
    pub fn most_significant_bits(&self) -> (u64, usize) {
        let first_set_limb = self
            .as_limbs()
            .iter()
            .rposition(|&limb| limb != 0)
            .unwrap_or(0);
        if first_set_limb == 0 {
            // Single-limb (or zero) value: exact.
            (self.as_limbs().first().copied().unwrap_or(0), 0)
        } else {
            // Combine the highest non-zero limb with bits borrowed from the
            // limb below so the returned 64 bits are fully populated.
            let hi = self.as_limbs()[first_set_limb];
            let lo = self.as_limbs()[first_set_limb - 1];
            let leading_zeros = hi.leading_zeros();
            let bits = if leading_zeros > 0 {
                (hi << leading_zeros) | (lo >> (64 - leading_zeros))
            } else {
                hi
            };
            let exponent = first_set_limb * 64 - leading_zeros as usize;
            (bits, exponent)
        }
    }

    /// Left shift by `rhs` bits, returning `None` if non-zero bits are
    /// shifted out.
    #[inline(always)]
    #[must_use]
    pub fn checked_shl(self, rhs: usize) -> Option<Self> {
        match self.overflowing_shl(rhs) {
            (value, false) => Some(value),
            _ => None,
        }
    }

    /// Left shift by `rhs` bits, saturating to `Self::MAX` on overflow.
    #[inline(always)]
    #[must_use]
    pub fn saturating_shl(self, rhs: usize) -> Self {
        match self.overflowing_shl(rhs) {
            (value, false) => value,
            _ => Self::MAX,
        }
    }

    /// Left shift by `rhs` bits, returning the truncated result and whether
    /// any non-zero bits were shifted out.
    #[inline]
    #[must_use]
    pub fn overflowing_shl(mut self, rhs: usize) -> (Self, bool) {
        let (limbs, bits) = (rhs / 64, rhs % 64);
        if limbs >= LIMBS {
            // The entire value is shifted out.
            return (Self::ZERO, self != Self::ZERO);
        }
        if bits == 0 {
            // Limb-aligned shift: move whole limbs up.
            let mut overflow = false;
            for i in (LIMBS - limbs)..LIMBS {
                overflow |= self.limbs[i] != 0;
            }
            if self.limbs[LIMBS - limbs - 1] > Self::MASK {
                overflow = true;
            }
            for i in (limbs..LIMBS).rev() {
                assume!(i >= limbs && i - limbs < LIMBS);
                self.limbs[i] = self.limbs[i - limbs];
            }
            self.limbs[..limbs].fill(0);
            self.limbs[LIMBS - 1] &= Self::MASK;
            return (self, overflow);
        }
        // Overflow check: whole limbs shifted out, bits shifted past the top
        // limb, and bits shifted past `Self::MASK` within the top limb.
        let mut overflow = false;
        for i in (LIMBS - limbs)..LIMBS {
            overflow |= self.limbs[i] != 0;
        }
        if self.limbs[LIMBS - limbs - 1] >> (64 - bits) != 0 {
            overflow = true;
        }
        if self.limbs[LIMBS - limbs - 1] << bits > Self::MASK {
            overflow = true;
        }
        // Shift, carrying bits across limb boundaries (bits is in 1..=63, so
        // `64 - bits` never over-shifts).
        for i in (limbs + 1..LIMBS).rev() {
            assume!(i - limbs < LIMBS && i - limbs - 1 < LIMBS);
            self.limbs[i] = self.limbs[i - limbs] << bits;
            self.limbs[i] |= self.limbs[i - limbs - 1] >> (64 - bits);
        }
        self.limbs[limbs] = self.limbs[0] << bits;
        self.limbs[..limbs].fill(0);
        self.limbs[LIMBS - 1] &= Self::MASK;
        (self, overflow)
    }

    /// Left shift by `rhs` bits, discarding any shifted-out bits.
    ///
    /// Note: unlike primitive integers, shifts of `BITS` or more yield zero
    /// rather than taking the shift amount modulo the bit size.
    #[inline(always)]
    #[must_use]
    pub fn wrapping_shl(self, rhs: usize) -> Self {
        self.overflowing_shl(rhs).0
    }

    /// Right shift by `rhs` bits, returning `None` if non-zero bits are
    /// shifted out.
    #[inline(always)]
    #[must_use]
    pub fn checked_shr(self, rhs: usize) -> Option<Self> {
        match self.overflowing_shr(rhs) {
            (value, false) => Some(value),
            _ => None,
        }
    }

    /// Right shift by `rhs` bits, returning the result and whether any
    /// non-zero bits were shifted out.
    #[inline]
    #[must_use]
    pub fn overflowing_shr(mut self, rhs: usize) -> (Self, bool) {
        let (limbs, bits) = (rhs / 64, rhs % 64);
        if limbs >= LIMBS {
            // The entire value is shifted out.
            return (Self::ZERO, self != Self::ZERO);
        }
        if bits == 0 {
            // Limb-aligned shift: move whole limbs down.
            let mut overflow = false;
            for i in 0..limbs {
                overflow |= self.limbs[i] != 0;
            }
            for i in 0..(LIMBS - limbs) {
                self.limbs[i] = self.limbs[i + limbs];
            }
            self.limbs[LIMBS - limbs..].fill(0);
            return (self, overflow);
        }
        // Overflow iff any of the low `rhs` bits is set: the `limbs` whole
        // limbs shifted out, plus the low `bits` bits of the next limb.
        // (BUGFIX: the previous code tested a single bit of the wrong limb,
        // so e.g. `3 >> 1` on a multi-limb Uint reported no overflow even
        // though a set bit was discarded.)
        let mut overflow = false;
        for i in 0..limbs {
            overflow |= self.limbs[i] != 0;
        }
        overflow |= self.limbs[limbs] << (64 - bits) != 0;
        // Shift, carrying bits across limb boundaries (bits is in 1..=63).
        for i in 0..(LIMBS - limbs - 1) {
            assume!(i + limbs < LIMBS && i + limbs + 1 < LIMBS);
            self.limbs[i] = self.limbs[i + limbs] >> bits;
            self.limbs[i] |= self.limbs[i + limbs + 1] << (64 - bits);
        }
        self.limbs[LIMBS - limbs - 1] = self.limbs[LIMBS - 1] >> bits;
        self.limbs[LIMBS - limbs..].fill(0);
        (self, overflow)
    }

    /// Right shift by `rhs` bits, discarding any shifted-out bits.
    ///
    /// Note: shifts of `BITS` or more yield zero.
    #[inline(always)]
    #[must_use]
    pub fn wrapping_shr(self, rhs: usize) -> Self {
        self.overflowing_shr(rhs).0
    }

    /// Arithmetic (sign-extending) right shift, treating bit `BITS - 1` as
    /// the sign bit.
    #[inline]
    #[must_use]
    pub fn arithmetic_shr(self, rhs: usize) -> Self {
        if BITS == 0 {
            return Self::ZERO;
        }
        let sign = self.bit(BITS - 1);
        let mut r = self >> rhs;
        if sign {
            // Fill the vacated high bits with ones. For `rhs >= BITS` the
            // saturating_sub makes this OR in all of `Self::MAX`.
            r |= Self::MAX << BITS.saturating_sub(rhs);
        }
        r
    }

    /// Rotates the bits left by `rhs` positions; bits shifted out on the
    /// high end re-enter on the low end.
    #[inline]
    #[must_use]
    #[allow(clippy::missing_const_for_fn)]
    pub fn rotate_left(self, rhs: usize) -> Self {
        if BITS == 0 {
            return Self::ZERO;
        }
        let rhs = rhs % BITS;
        // `self >> BITS` is zero under wrapping semantics, so `rhs == 0`
        // degenerates correctly to the identity.
        self << rhs | self >> (BITS - rhs)
    }

    /// Rotates the bits right by `rhs` positions; bits shifted out on the
    /// low end re-enter on the high end.
    #[inline(always)]
    #[must_use]
    pub fn rotate_right(self, rhs: usize) -> Self {
        if BITS == 0 {
            return Self::ZERO;
        }
        let rhs = rhs % BITS;
        self.rotate_left(BITS - rhs)
    }
}
impl<const BITS: usize, const LIMBS: usize> Not for Uint<BITS, LIMBS> {
    type Output = Self;

    /// Bitwise negation, truncated back to `BITS` bits.
    #[inline]
    fn not(mut self) -> Self::Output {
        if BITS == 0 {
            return Self::ZERO;
        }
        for i in 0..LIMBS {
            self.limbs[i] = !self.limbs[i];
        }
        // Clear the unused high bits of the top limb.
        self.limbs[LIMBS - 1] &= Self::MASK;
        self
    }
}
impl<const BITS: usize, const LIMBS: usize> Not for &Uint<BITS, LIMBS> {
    type Output = Uint<BITS, LIMBS>;

    /// Bitwise negation of a reference: copies the value, then negates it.
    #[inline]
    fn not(self) -> Self::Output {
        !*self
    }
}
// Generates impls of one binary bitwise operator trait and its assignment
// form for every owned/borrowed combination of `Uint` operands, delegating
// limb-wise to the corresponding `u64` operation.
macro_rules! impl_bit_op {
    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
        // Owned-rhs assignment forwards to the by-reference form below.
        impl<const BITS: usize, const LIMBS: usize> $trait_assign<Uint<BITS, LIMBS>>
            for Uint<BITS, LIMBS>
        {
            #[inline(always)]
            fn $fn_assign(&mut self, rhs: Uint<BITS, LIMBS>) {
                self.$fn_assign(&rhs);
            }
        }
        // Core implementation: apply the `u64` operation limb by limb.
        // (or/and/xor never set bits outside both operands, so no re-masking
        // of the top limb is needed.)
        impl<const BITS: usize, const LIMBS: usize> $trait_assign<&Uint<BITS, LIMBS>>
            for Uint<BITS, LIMBS>
        {
            #[inline]
            fn $fn_assign(&mut self, rhs: &Uint<BITS, LIMBS>) {
                for i in 0..LIMBS {
                    u64::$fn_assign(&mut self.limbs[i], rhs.limbs[i]);
                }
            }
        }
        // owned op owned
        impl<const BITS: usize, const LIMBS: usize> $trait<Uint<BITS, LIMBS>>
            for Uint<BITS, LIMBS>
        {
            type Output = Uint<BITS, LIMBS>;

            #[inline(always)]
            fn $fn(mut self, rhs: Uint<BITS, LIMBS>) -> Self::Output {
                self.$fn_assign(rhs);
                self
            }
        }
        // owned op borrowed
        impl<const BITS: usize, const LIMBS: usize> $trait<&Uint<BITS, LIMBS>>
            for Uint<BITS, LIMBS>
        {
            type Output = Uint<BITS, LIMBS>;

            #[inline(always)]
            fn $fn(mut self, rhs: &Uint<BITS, LIMBS>) -> Self::Output {
                self.$fn_assign(rhs);
                self
            }
        }
        // borrowed op owned: these operators are commutative, so reuse the
        // owned rhs as the accumulator.
        impl<const BITS: usize, const LIMBS: usize> $trait<Uint<BITS, LIMBS>>
            for &Uint<BITS, LIMBS>
        {
            type Output = Uint<BITS, LIMBS>;

            #[inline(always)]
            fn $fn(self, mut rhs: Uint<BITS, LIMBS>) -> Self::Output {
                rhs.$fn_assign(self);
                rhs
            }
        }
        // borrowed op borrowed: clone the lhs to obtain an owned accumulator.
        impl<const BITS: usize, const LIMBS: usize> $trait<&Uint<BITS, LIMBS>>
            for &Uint<BITS, LIMBS>
        {
            type Output = Uint<BITS, LIMBS>;

            #[inline(always)]
            fn $fn(self, rhs: &Uint<BITS, LIMBS>) -> Self::Output {
                self.clone().$fn(rhs)
            }
        }
    };
}
impl_bit_op!(BitOr, bitor, BitOrAssign, bitor_assign);
impl_bit_op!(BitAnd, bitand, BitAndAssign, bitand_assign);
impl_bit_op!(BitXor, bitxor, BitXorAssign, bitxor_assign);
impl<const BITS: usize, const LIMBS: usize> Shl<Self> for Uint<BITS, LIMBS> {
    type Output = Self;

    /// Left shift by a `Uint`-valued amount.
    ///
    /// Shift amounts of `BITS` or more yield zero.
    #[inline(always)]
    fn shl(self, rhs: Self) -> Self::Output {
        // Shortcut; also guarantees LIMBS >= 1 for the indexing below.
        if BITS == 0 {
            return self;
        }
        // If any limb above the first is set, the amount is at least
        // 2^64 > BITS, so everything is shifted out. (Previously only the
        // low limb was inspected, silently truncating huge shift amounts.)
        if rhs.as_limbs()[1..].iter().any(|&limb| limb != 0) {
            return Self::ZERO;
        }
        match usize::try_from(rhs.as_limbs()[0]) {
            Ok(shift) => self.wrapping_shl(shift),
            // Amount exceeds usize::MAX >= BITS (32-bit targets): all bits
            // are shifted out.
            Err(_) => Self::ZERO,
        }
    }
}
impl<const BITS: usize, const LIMBS: usize> Shl<&Self> for Uint<BITS, LIMBS> {
    type Output = Self;

    /// Left shift by a borrowed `Uint` amount; copies and delegates.
    #[inline(always)]
    fn shl(self, rhs: &Self) -> Self::Output {
        let amount = *rhs;
        self << amount
    }
}
impl<const BITS: usize, const LIMBS: usize> Shr<Self> for Uint<BITS, LIMBS> {
    type Output = Self;

    /// Right shift by a `Uint`-valued amount.
    ///
    /// Shift amounts of `BITS` or more yield zero.
    #[inline(always)]
    fn shr(self, rhs: Self) -> Self::Output {
        // Shortcut; also guarantees LIMBS >= 1 for the indexing below.
        if BITS == 0 {
            return self;
        }
        // If any limb above the first is set, the amount is at least
        // 2^64 > BITS, so everything is shifted out. (Previously only the
        // low limb was inspected, silently truncating huge shift amounts.)
        if rhs.as_limbs()[1..].iter().any(|&limb| limb != 0) {
            return Self::ZERO;
        }
        match usize::try_from(rhs.as_limbs()[0]) {
            Ok(shift) => self.wrapping_shr(shift),
            // Amount exceeds usize::MAX >= BITS (32-bit targets): all bits
            // are shifted out.
            Err(_) => Self::ZERO,
        }
    }
}
impl<const BITS: usize, const LIMBS: usize> Shr<&Self> for Uint<BITS, LIMBS> {
    type Output = Self;

    /// Right shift by a borrowed `Uint` amount; copies and delegates.
    #[inline(always)]
    fn shr(self, rhs: &Self) -> Self::Output {
        let amount = *rhs;
        self >> amount
    }
}
impl<const BITS: usize, const LIMBS: usize> ShlAssign<Self> for Uint<BITS, LIMBS> {
    /// In-place left shift by a `Uint` amount.
    #[inline(always)]
    fn shl_assign(&mut self, rhs: Self) {
        let shifted = *self << rhs;
        *self = shifted;
    }
}
impl<const BITS: usize, const LIMBS: usize> ShlAssign<&Self> for Uint<BITS, LIMBS> {
    /// In-place left shift by a borrowed `Uint` amount.
    #[inline(always)]
    fn shl_assign(&mut self, rhs: &Self) {
        let shifted = *self << rhs;
        *self = shifted;
    }
}
impl<const BITS: usize, const LIMBS: usize> ShrAssign<Self> for Uint<BITS, LIMBS> {
    /// In-place right shift by a `Uint` amount.
    #[inline(always)]
    fn shr_assign(&mut self, rhs: Self) {
        let shifted = *self >> rhs;
        *self = shifted;
    }
}
impl<const BITS: usize, const LIMBS: usize> ShrAssign<&Self> for Uint<BITS, LIMBS> {
    /// In-place right shift by a borrowed `Uint` amount.
    #[inline(always)]
    fn shr_assign(&mut self, rhs: &Self) {
        let shifted = *self >> rhs;
        *self = shifted;
    }
}
// Generates `Shl`/`Shr` and their assignment forms for primitive-integer
// shift amounts. All shifts are wrapping: amounts of `BITS` or more yield
// zero. Negative amounts of signed types wrap through the `as usize` cast
// into huge positive amounts, which therefore also yield zero.
macro_rules! impl_shift {
    (@main $u:ty) => {
        impl<const BITS: usize, const LIMBS: usize> Shl<$u> for Uint<BITS, LIMBS> {
            type Output = Self;

            #[inline(always)]
            #[allow(clippy::cast_possible_truncation)]
            fn shl(self, rhs: $u) -> Self::Output {
                self.wrapping_shl(rhs as usize)
            }
        }
        impl<const BITS: usize, const LIMBS: usize> Shr<$u> for Uint<BITS, LIMBS> {
            type Output = Self;

            #[inline(always)]
            #[allow(clippy::cast_possible_truncation)]
            fn shr(self, rhs: $u) -> Self::Output {
                self.wrapping_shr(rhs as usize)
            }
        }
    };
    // Borrowed shift amounts delegate to the by-value impls above.
    (@ref $u:ty) => {
        impl<const BITS: usize, const LIMBS: usize> Shl<&$u> for Uint<BITS, LIMBS> {
            type Output = Self;

            #[inline(always)]
            fn shl(self, rhs: &$u) -> Self::Output {
                <Self>::shl(self, *rhs)
            }
        }
        impl<const BITS: usize, const LIMBS: usize> Shr<&$u> for Uint<BITS, LIMBS> {
            type Output = Self;

            #[inline(always)]
            fn shr(self, rhs: &$u) -> Self::Output {
                <Self>::shr(self, *rhs)
            }
        }
    };
    // Assignment forms, instantiated for both `$u` and `&$u` below.
    (@assign $u:ty) => {
        impl<const BITS: usize, const LIMBS: usize> ShlAssign<$u> for Uint<BITS, LIMBS> {
            #[inline(always)]
            fn shl_assign(&mut self, rhs: $u) {
                *self = *self << rhs;
            }
        }
        impl<const BITS: usize, const LIMBS: usize> ShrAssign<$u> for Uint<BITS, LIMBS> {
            #[inline(always)]
            fn shr_assign(&mut self, rhs: $u) {
                *self = *self >> rhs;
            }
        }
    };
    ($u:ty) => {
        impl_shift!(@main $u);
        impl_shift!(@ref $u);
        impl_shift!(@assign $u);
        impl_shift!(@assign &$u);
    };
    ($u:ty, $($tail:ty),*) => {
        impl_shift!($u);
        impl_shift!($($tail),*);
    };
}
impl_shift!(usize, u8, u16, u32, isize, i8, i16, i32);
// 64-bit shift amounts only convert to `usize` losslessly on 64-bit targets.
#[cfg(target_pointer_width = "64")]
impl_shift!(u64, i64);
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{aliases::U128, const_for, nlimbs};
    use core::cmp::min;
    use proptest::proptest;

    /// `leading_zeros` agrees with shifting behavior and with `u128`.
    #[test]
    fn test_leading_zeros() {
        assert_eq!(Uint::<0, 0>::ZERO.leading_zeros(), 0);
        assert_eq!(Uint::<1, 1>::ZERO.leading_zeros(), 1);
        assert_eq!(Uint::<1, 1>::from(1).leading_zeros(), 0);
        const_for!(BITS in NON_ZERO {
            const LIMBS: usize = nlimbs(BITS);
            type U = Uint::<BITS, LIMBS>;
            assert_eq!(U::ZERO.leading_zeros(), BITS);
            assert_eq!(U::MAX.leading_zeros(), 0);
            assert_eq!(U::from(1).leading_zeros(), BITS - 1);
            proptest!(|(value: U)| {
                let zeros = value.leading_zeros();
                assert!(zeros <= BITS);
                assert!(zeros < BITS || value == U::ZERO);
                if zeros < BITS {
                    // Shifting left by the zero count must not overflow and
                    // must place the top bit at position BITS - 1.
                    let (left, overflow) = value.overflowing_shl(zeros);
                    assert!(!overflow);
                    assert!(left.leading_zeros() == 0 || value == U::ZERO);
                    assert!(left.bit(BITS - 1));
                    assert_eq!(value >> (BITS - zeros), Uint::ZERO);
                }
            });
        });
        // Cross-check against the primitive u128 implementation.
        proptest!(|(value: u128)| {
            let uint = U128::from(value);
            assert_eq!(uint.leading_zeros(), value.leading_zeros() as usize);
        });
    }

    /// Basic sanity checks for `leading_ones` on tiny sizes.
    #[test]
    fn test_leading_ones() {
        assert_eq!(Uint::<0, 0>::ZERO.leading_ones(), 0);
        assert_eq!(Uint::<1, 1>::ZERO.leading_ones(), 0);
        assert_eq!(Uint::<1, 1>::from(1).leading_ones(), 1);
    }

    /// `most_significant_bits` is exact for small values and normalized
    /// (top bit set, correct exponent) for multi-limb values.
    #[test]
    fn test_most_significant_bits() {
        const_for!(BITS in NON_ZERO {
            const LIMBS: usize = nlimbs(BITS);
            type U = Uint::<BITS, LIMBS>;
            proptest!(|(value: u64)| {
                let value = if U::LIMBS <= 1 { value & U::MASK } else { value };
                assert_eq!(U::from(value).most_significant_bits(), (value, 0));
            });
        });
        proptest!(|(mut limbs: [u64; 2])| {
            // Force a second limb so the normalized branch is exercised.
            if limbs[1] == 0 {
                limbs[1] = 1;
            }
            let (bits, exponent) = U128::from_limbs(limbs).most_significant_bits();
            assert!(bits >= 1_u64 << 63);
            assert_eq!(exponent, 64 - limbs[1].leading_zeros() as usize);
        });
    }

    /// Spot-checks for in-range `checked_shl`, including a limb-crossing shift.
    #[test]
    fn test_checked_shl() {
        assert_eq!(
            Uint::<65, 2>::from_limbs([0x0010_0000_0000_0000, 0]).checked_shl(1),
            Some(Uint::<65, 2>::from_limbs([0x0020_0000_0000_0000, 0]))
        );
        assert_eq!(
            Uint::<127, 2>::from_limbs([0x0010_0000_0000_0000, 0]).checked_shl(64),
            Some(Uint::<127, 2>::from_limbs([0, 0x0010_0000_0000_0000]))
        );
    }

    /// Single-limb sizes agree with the corresponding primitive operations.
    #[test]
    #[allow(clippy::cast_lossless, clippy::cast_possible_truncation)]
    fn test_small() {
        const_for!(BITS in [1, 2, 8, 16, 32, 63, 64] {
            type U = Uint::<BITS, 1>;
            proptest!(|(a: U, b: U)| {
                assert_eq!(a | b, U::from_limbs([a.limbs[0] | b.limbs[0]]));
                assert_eq!(a & b, U::from_limbs([a.limbs[0] & b.limbs[0]]));
                assert_eq!(a ^ b, U::from_limbs([a.limbs[0] ^ b.limbs[0]]));
            });
            proptest!(|(a: U, s in 0..BITS)| {
                assert_eq!(a << s, U::from_limbs([a.limbs[0] << s & U::MASK]));
                assert_eq!(a >> s, U::from_limbs([a.limbs[0] >> s]));
            });
        });
        proptest!(|(a: Uint::<32, 1>, s in 0_usize..=34)| {
            assert_eq!(a.reverse_bits(), Uint::from((a.limbs[0] as u32).reverse_bits() as u64));
            assert_eq!(a.rotate_left(s), Uint::from((a.limbs[0] as u32).rotate_left(s as u32) as u64));
            assert_eq!(a.rotate_right(s), Uint::from((a.limbs[0] as u32).rotate_right(s as u32) as u64));
            if s < 32 {
                let arr_shifted = (((a.limbs[0] as i32) >> s) as u32) as u64;
                assert_eq!(a.arithmetic_shr(s), Uint::from_limbs([arr_shifted]));
            }
        });
        proptest!(|(a: Uint::<64, 1>, s in 0_usize..=66)| {
            assert_eq!(a.reverse_bits(), Uint::from(a.limbs[0].reverse_bits()));
            assert_eq!(a.rotate_left(s), Uint::from(a.limbs[0].rotate_left(s as u32)));
            assert_eq!(a.rotate_right(s), Uint::from(a.limbs[0].rotate_right(s as u32)));
            if s < 64 {
                let arr_shifted = ((a.limbs[0] as i64) >> s) as u64;
                assert_eq!(a.arithmetic_shr(s), Uint::from_limbs([arr_shifted]));
            }
        });
    }

    /// Left shift then bit-reversal equals bit-reversal then right shift.
    #[test]
    fn test_shift_reverse() {
        const_for!(BITS in SIZES {
            const LIMBS: usize = nlimbs(BITS);
            type U = Uint::<BITS, LIMBS>;
            proptest!(|(value: U, shift in 0..=BITS + 2)| {
                let left = (value << shift).reverse_bits();
                let right = value.reverse_bits() >> shift;
                assert_eq!(left, right);
            });
        });
    }

    /// Rotating left then right by the same amount is the identity.
    #[test]
    fn test_rotate() {
        const_for!(BITS in SIZES {
            const LIMBS: usize = nlimbs(BITS);
            type U = Uint::<BITS, LIMBS>;
            proptest!(|(value: U, shift in 0..=BITS + 2)| {
                let rotated = value.rotate_left(shift).rotate_right(shift);
                assert_eq!(value, rotated);
            });
        });
    }

    /// Arithmetic shift extends the run of leading ones by the shift amount.
    #[test]
    fn test_arithmetic_shr() {
        const_for!(BITS in SIZES {
            const LIMBS: usize = nlimbs(BITS);
            type U = Uint::<BITS, LIMBS>;
            proptest!(|(value: U, shift in 0..=BITS + 2)| {
                let shifted = value.arithmetic_shr(shift);
                assert_eq!(shifted.leading_ones(), match value.leading_ones() {
                    0 => 0,
                    n => min(BITS, n + shift)
                });
            });
        });
    }

    /// `overflowing_shr` returns the truncated value plus an overflow flag
    /// that is set exactly when non-zero bits are shifted out.
    #[test]
    fn test_overflowing_shr() {
        assert_eq!(
            Uint::<64, 1>::from_limbs([40u64]).overflowing_shr(1),
            (Uint::<64, 1>::from(20), false)
        );
        assert_eq!(
            Uint::<64, 1>::from_limbs([41u64]).overflowing_shr(1),
            (Uint::<64, 1>::from(20), true)
        );
        assert_eq!(
            Uint::<65, 2>::from_limbs([0x0010_0000_0000_0000, 0]).overflowing_shr(1),
            (Uint::<65, 2>::from_limbs([0x0080_0000_0000_000, 0]), false)
        );
        assert_eq!(
            Uint::<256, 4>::MAX.overflowing_shr(65),
            (
                Uint::<256, 4>::from_str_radix(
                    "7fffffffffffffffffffffffffffffffffffffffffffffff",
                    16
                )
                .unwrap(),
                true
            )
        );
        // Exactly-aligned shifts where no set bits are lost.
        assert_eq!(
            Uint::<4096, 64>::from_str_radix("3ffffffffffffffffffffffffffffc00000000", 16)
                .unwrap()
                .overflowing_shr(34),
            (
                Uint::<4096, 64>::from_str_radix("fffffffffffffffffffffffffffff", 16).unwrap(),
                false
            )
        );
        assert_eq!(
            Uint::<4096, 64>::from_str_radix(
                "fffffffffffffffffffffffffffff0000000000000000000000000",
                16,
            )
            .unwrap()
            .overflowing_shr(100),
            (
                Uint::<4096, 64>::from_str_radix("fffffffffffffffffffffffffffff", 16).unwrap(),
                false
            )
        );
        assert_eq!(
            Uint::<4096, 64>::from_str_radix(
                "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0bdbfe",
                16,
            )
            .unwrap()
            .overflowing_shr(1),
            (
                Uint::<4096, 64>::from_str_radix(
                    "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffff85edff",
                    16
                )
                .unwrap(),
                false
            )
        );
        assert_eq!(
            Uint::<4096, 64>::from_str_radix(
                "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
                16,
            )
            .unwrap()
            .overflowing_shr(1000),
            (
                Uint::<4096, 64>::from_str_radix(
                    "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
                    16
                )
                .unwrap(),
                false
            )
        );
        assert_eq!(
            Uint::<4096, 64>::MAX
                .overflowing_shr(34),
            (
                Uint::<4096, 64>::from_str_radix(
                    "3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
                    16
                )
                .unwrap(),
                true
            )
        );
    }
}