1use crate::Uint;
2use core::ops::{
3 BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not, Shl, ShlAssign, Shr,
4 ShrAssign,
5};
6
7impl<const BITS: usize, const LIMBS: usize> Uint<BITS, LIMBS> {
8 #[must_use]
12 #[inline]
13 pub const fn bit(&self, index: usize) -> bool {
14 if index >= BITS {
15 return false;
16 }
17 let (limbs, bits) = (index / 64, index % 64);
18 self.limbs[limbs] & (1 << bits) != 0
19 }
20
21 #[inline]
23 pub fn set_bit(&mut self, index: usize, value: bool) {
24 if index >= BITS {
25 return;
26 }
27 let (limbs, bits) = (index / 64, index % 64);
28 if value {
29 self.limbs[limbs] |= 1 << bits;
30 } else {
31 self.limbs[limbs] &= !(1 << bits);
32 }
33 }
34
35 #[inline]
68 #[must_use]
69 #[track_caller]
70 pub const fn byte(&self, index: usize) -> u8 {
71 #[cfg(target_endian = "little")]
72 {
73 self.as_le_slice()[index]
74 }
75
76 #[cfg(target_endian = "big")]
77 #[allow(clippy::cast_possible_truncation)] {
79 (self.limbs[index / 8] >> ((index % 8) * 8)) as u8
80 }
81 }
82
83 #[inline]
87 #[must_use]
88 pub fn reverse_bits(mut self) -> Self {
89 self.limbs.reverse();
90 for limb in &mut self.limbs {
91 *limb = limb.reverse_bits();
92 }
93 if BITS % 64 != 0 {
94 self >>= 64 - BITS % 64;
95 }
96 self
97 }
98
99 #[inline]
102 #[must_use]
103 pub fn leading_zeros(&self) -> usize {
104 self.as_limbs()
105 .iter()
106 .rev()
107 .position(|&limb| limb != 0)
108 .map_or(BITS, |n| {
109 let fixed = Self::MASK.leading_zeros() as usize;
110 let skipped = n * 64;
111 let top = self.as_limbs()[LIMBS - n - 1].leading_zeros() as usize;
112 skipped + top - fixed
113 })
114 }
115
116 #[inline]
119 #[must_use]
120 pub fn leading_ones(&self) -> usize {
121 (self.not()).leading_zeros()
122 }
123
124 #[inline]
127 #[must_use]
128 pub fn trailing_zeros(&self) -> usize {
129 self.as_limbs()
130 .iter()
131 .position(|&limb| limb != 0)
132 .map_or(BITS, |n| {
133 n * 64 + self.as_limbs()[n].trailing_zeros() as usize
134 })
135 }
136
137 #[inline]
140 #[must_use]
141 pub fn trailing_ones(&self) -> usize {
142 self.as_limbs()
143 .iter()
144 .position(|&limb| limb != u64::MAX)
145 .map_or(BITS, |n| {
146 n * 64 + self.as_limbs()[n].trailing_ones() as usize
147 })
148 }
149
150 #[inline]
152 #[must_use]
153 pub fn count_ones(&self) -> usize {
154 self.as_limbs()
155 .iter()
156 .map(|limb| limb.count_ones() as usize)
157 .sum()
158 }
159
160 #[must_use]
162 #[inline]
163 pub fn count_zeros(&self) -> usize {
164 BITS - self.count_ones()
165 }
166
167 #[must_use]
169 #[inline]
170 pub fn bit_len(&self) -> usize {
171 BITS - self.leading_zeros()
172 }
173
174 #[must_use]
176 #[inline]
177 pub fn byte_len(&self) -> usize {
178 (self.bit_len() + 7) / 8
179 }
180
181 #[inline]
193 #[must_use]
194 pub fn most_significant_bits(&self) -> (u64, usize) {
195 let first_set_limb = self
196 .as_limbs()
197 .iter()
198 .rposition(|&limb| limb != 0)
199 .unwrap_or(0);
200 if first_set_limb == 0 {
201 (self.as_limbs().first().copied().unwrap_or(0), 0)
202 } else {
203 let hi = self.as_limbs()[first_set_limb];
204 let lo = self.as_limbs()[first_set_limb - 1];
205 let leading_zeros = hi.leading_zeros();
206 let bits = if leading_zeros > 0 {
207 (hi << leading_zeros) | (lo >> (64 - leading_zeros))
208 } else {
209 hi
210 };
211 let exponent = first_set_limb * 64 - leading_zeros as usize;
212 (bits, exponent)
213 }
214 }
215
216 #[inline(always)]
225 #[must_use]
226 pub fn checked_shl(self, rhs: usize) -> Option<Self> {
227 match self.overflowing_shl(rhs) {
228 (value, false) => Some(value),
229 _ => None,
230 }
231 }
232
233 #[inline(always)]
239 #[must_use]
240 pub fn saturating_shl(self, rhs: usize) -> Self {
241 match self.overflowing_shl(rhs) {
242 (value, false) => value,
243 _ => Self::MAX,
244 }
245 }
246
247 #[inline]
256 #[must_use]
257 pub fn overflowing_shl(self, rhs: usize) -> (Self, bool) {
258 let (limbs, bits) = (rhs / 64, rhs % 64);
259 if limbs >= LIMBS {
260 return (Self::ZERO, self != Self::ZERO);
261 }
262
263 let word_bits = 64;
264 let mut r = Self::ZERO;
265 let mut carry = 0;
266 for i in 0..Self::LIMBS - limbs {
267 let x = self.limbs[i];
268 r.limbs[i + limbs] = (x << bits) | carry;
269 carry = (x >> (word_bits - bits - 1)) >> 1;
270 }
271 r.limbs[LIMBS - 1] &= Self::MASK;
272 (r, carry != 0)
273 }
274
275 #[inline(always)]
282 #[must_use]
283 pub fn wrapping_shl(self, rhs: usize) -> Self {
284 self.overflowing_shl(rhs).0
285 }
286
287 #[inline(always)]
299 #[must_use]
300 pub fn checked_shr(self, rhs: usize) -> Option<Self> {
301 match self.overflowing_shr(rhs) {
302 (value, false) => Some(value),
303 _ => None,
304 }
305 }
306
307 #[inline]
320 #[must_use]
321 pub fn overflowing_shr(self, rhs: usize) -> (Self, bool) {
322 let (limbs, bits) = (rhs / 64, rhs % 64);
323 if limbs >= LIMBS {
324 return (Self::ZERO, self != Self::ZERO);
325 }
326
327 let word_bits = 64;
328 let mut r = Self::ZERO;
329 let mut carry = 0;
330 for i in 0..LIMBS - limbs {
331 let x = self.limbs[LIMBS - 1 - i];
332 r.limbs[LIMBS - 1 - i - limbs] = (x >> bits) | carry;
333 carry = (x << (word_bits - bits - 1)) << 1;
334 }
335 (r, carry != 0)
336 }
337
338 #[inline(always)]
348 #[must_use]
349 pub fn wrapping_shr(self, rhs: usize) -> Self {
350 self.overflowing_shr(rhs).0
351 }
352
353 #[inline]
355 #[must_use]
356 pub fn arithmetic_shr(self, rhs: usize) -> Self {
357 if BITS == 0 {
358 return Self::ZERO;
359 }
360 let sign = self.bit(BITS - 1);
361 let mut r = self >> rhs;
362 if sign {
363 r |= Self::MAX << BITS.saturating_sub(rhs);
364 }
365 r
366 }
367
368 #[inline]
371 #[must_use]
372 #[allow(clippy::missing_const_for_fn)] pub fn rotate_left(self, rhs: usize) -> Self {
374 if BITS == 0 {
375 return Self::ZERO;
376 }
377 let rhs = rhs % BITS;
378 self << rhs | self >> (BITS - rhs)
379 }
380
381 #[inline(always)]
384 #[must_use]
385 pub fn rotate_right(self, rhs: usize) -> Self {
386 if BITS == 0 {
387 return Self::ZERO;
388 }
389 let rhs = rhs % BITS;
390 self.rotate_left(BITS - rhs)
391 }
392}
393
394impl<const BITS: usize, const LIMBS: usize> Not for Uint<BITS, LIMBS> {
395 type Output = Self;
396
397 #[inline]
398 fn not(mut self) -> Self::Output {
399 if BITS == 0 {
400 return Self::ZERO;
401 }
402 for limb in &mut self.limbs {
403 *limb = u64::not(*limb);
404 }
405 self.limbs[LIMBS - 1] &= Self::MASK;
406 self
407 }
408}
409
410impl<const BITS: usize, const LIMBS: usize> Not for &Uint<BITS, LIMBS> {
411 type Output = Uint<BITS, LIMBS>;
412
413 #[inline]
414 fn not(self) -> Self::Output {
415 (*self).not()
416 }
417}
418
/// Implements one bitwise operator (`BitOr`, `BitAnd` or `BitXor`) and its
/// assignment form for every owned/borrowed combination of `Uint` operands.
/// All variants funnel into the by-reference assignment, which applies the
/// primitive `u64` operation limb by limb.
macro_rules! impl_bit_op {
    ($trait:ident, $fn:ident, $trait_assign:ident, $fn_assign:ident) => {
        // Owned-rhs assign: delegate to the by-reference assign below.
        impl<const BITS: usize, const LIMBS: usize> $trait_assign<Uint<BITS, LIMBS>>
            for Uint<BITS, LIMBS>
        {
            #[inline(always)]
            fn $fn_assign(&mut self, rhs: Uint<BITS, LIMBS>) {
                self.$fn_assign(&rhs);
            }
        }

        // By-reference assign: the one real implementation, limb-wise.
        impl<const BITS: usize, const LIMBS: usize> $trait_assign<&Uint<BITS, LIMBS>>
            for Uint<BITS, LIMBS>
        {
            #[inline]
            fn $fn_assign(&mut self, rhs: &Uint<BITS, LIMBS>) {
                for i in 0..LIMBS {
                    u64::$fn_assign(&mut self.limbs[i], rhs.limbs[i]);
                }
            }
        }

        // value op value
        impl<const BITS: usize, const LIMBS: usize> $trait<Uint<BITS, LIMBS>>
            for Uint<BITS, LIMBS>
        {
            type Output = Uint<BITS, LIMBS>;

            #[inline(always)]
            fn $fn(mut self, rhs: Uint<BITS, LIMBS>) -> Self::Output {
                self.$fn_assign(rhs);
                self
            }
        }

        // value op reference
        impl<const BITS: usize, const LIMBS: usize> $trait<&Uint<BITS, LIMBS>>
            for Uint<BITS, LIMBS>
        {
            type Output = Uint<BITS, LIMBS>;

            #[inline(always)]
            fn $fn(mut self, rhs: &Uint<BITS, LIMBS>) -> Self::Output {
                self.$fn_assign(rhs);
                self
            }
        }

        // reference op value: commutative, so mutate the owned rhs instead.
        impl<const BITS: usize, const LIMBS: usize> $trait<Uint<BITS, LIMBS>>
            for &Uint<BITS, LIMBS>
        {
            type Output = Uint<BITS, LIMBS>;

            #[inline(always)]
            fn $fn(self, mut rhs: Uint<BITS, LIMBS>) -> Self::Output {
                rhs.$fn_assign(self);
                rhs
            }
        }

        // reference op reference: clone one side, then reuse value op ref.
        impl<const BITS: usize, const LIMBS: usize> $trait<&Uint<BITS, LIMBS>>
            for &Uint<BITS, LIMBS>
        {
            type Output = Uint<BITS, LIMBS>;

            #[inline(always)]
            fn $fn(self, rhs: &Uint<BITS, LIMBS>) -> Self::Output {
                self.clone().$fn(rhs)
            }
        }
    };
}
489
// Generate OR, AND and XOR (plus assignment forms) for all operand
// combinations.
impl_bit_op!(BitOr, bitor, BitOrAssign, bitor_assign);
impl_bit_op!(BitAnd, bitand, BitAndAssign, bitand_assign);
impl_bit_op!(BitXor, bitxor, BitXorAssign, bitxor_assign);
493
494impl<const BITS: usize, const LIMBS: usize> Shl<Self> for Uint<BITS, LIMBS> {
495 type Output = Self;
496
497 #[inline(always)]
498 fn shl(self, rhs: Self) -> Self::Output {
499 if BITS == 0 {
501 return self;
502 }
503 #[allow(clippy::cast_possible_truncation)]
507 self.wrapping_shl(rhs.as_limbs()[0] as usize)
508 }
509}
510
511impl<const BITS: usize, const LIMBS: usize> Shl<&Self> for Uint<BITS, LIMBS> {
512 type Output = Self;
513
514 #[inline(always)]
515 fn shl(self, rhs: &Self) -> Self::Output {
516 self << *rhs
517 }
518}
519
520impl<const BITS: usize, const LIMBS: usize> Shr<Self> for Uint<BITS, LIMBS> {
521 type Output = Self;
522
523 #[inline(always)]
524 fn shr(self, rhs: Self) -> Self::Output {
525 if BITS == 0 {
527 return self;
528 }
529 #[allow(clippy::cast_possible_truncation)]
533 self.wrapping_shr(rhs.as_limbs()[0] as usize)
534 }
535}
536
537impl<const BITS: usize, const LIMBS: usize> Shr<&Self> for Uint<BITS, LIMBS> {
538 type Output = Self;
539
540 #[inline(always)]
541 fn shr(self, rhs: &Self) -> Self::Output {
542 self >> *rhs
543 }
544}
545
546impl<const BITS: usize, const LIMBS: usize> ShlAssign<Self> for Uint<BITS, LIMBS> {
547 #[inline(always)]
548 fn shl_assign(&mut self, rhs: Self) {
549 *self = *self << rhs;
550 }
551}
552
553impl<const BITS: usize, const LIMBS: usize> ShlAssign<&Self> for Uint<BITS, LIMBS> {
554 #[inline(always)]
555 fn shl_assign(&mut self, rhs: &Self) {
556 *self = *self << rhs;
557 }
558}
559
560impl<const BITS: usize, const LIMBS: usize> ShrAssign<Self> for Uint<BITS, LIMBS> {
561 #[inline(always)]
562 fn shr_assign(&mut self, rhs: Self) {
563 *self = *self >> rhs;
564 }
565}
566
567impl<const BITS: usize, const LIMBS: usize> ShrAssign<&Self> for Uint<BITS, LIMBS> {
568 #[inline(always)]
569 fn shr_assign(&mut self, rhs: &Self) {
570 *self = *self >> rhs;
571 }
572}
573
/// Implements `Shl`/`Shr` and their assignment forms for a primitive shift
/// amount type, both by value and by reference, delegating to the wrapping
/// shift methods (shift-out semantics, no panic on large amounts).
macro_rules! impl_shift {
    (@main $u:ty) => {
        // By-value shifts: cast the amount to `usize` and use wrapping shifts.
        impl<const BITS: usize, const LIMBS: usize> Shl<$u> for Uint<BITS, LIMBS> {
            type Output = Self;

            #[inline(always)]
            #[allow(clippy::cast_possible_truncation)]
            fn shl(self, rhs: $u) -> Self::Output {
                self.wrapping_shl(rhs as usize)
            }
        }

        impl<const BITS: usize, const LIMBS: usize> Shr<$u> for Uint<BITS, LIMBS> {
            type Output = Self;

            #[inline(always)]
            #[allow(clippy::cast_possible_truncation)]
            fn shr(self, rhs: $u) -> Self::Output {
                self.wrapping_shr(rhs as usize)
            }
        }
    };

    (@ref $u:ty) => {
        // By-reference shifts: dereference and delegate to the @main impls.
        impl<const BITS: usize, const LIMBS: usize> Shl<&$u> for Uint<BITS, LIMBS> {
            type Output = Self;

            #[inline(always)]
            fn shl(self, rhs: &$u) -> Self::Output {
                <Self>::shl(self, *rhs)
            }
        }

        impl<const BITS: usize, const LIMBS: usize> Shr<&$u> for Uint<BITS, LIMBS> {
            type Output = Self;

            #[inline(always)]
            fn shr(self, rhs: &$u) -> Self::Output {
                <Self>::shr(self, *rhs)
            }
        }
    };

    (@assign $u:ty) => {
        // Assignment forms reuse the by-value operators.
        impl<const BITS: usize, const LIMBS: usize> ShlAssign<$u> for Uint<BITS, LIMBS> {
            #[inline(always)]
            fn shl_assign(&mut self, rhs: $u) {
                *self = *self << rhs;
            }
        }

        impl<const BITS: usize, const LIMBS: usize> ShrAssign<$u> for Uint<BITS, LIMBS> {
            #[inline(always)]
            fn shr_assign(&mut self, rhs: $u) {
                *self = *self >> rhs;
            }
        }
    };

    // Entry point for one type: by-value, by-ref, and both assign forms.
    ($u:ty) => {
        impl_shift!(@main $u);
        impl_shift!(@ref $u);
        impl_shift!(@assign $u);
        impl_shift!(@assign &$u);
    };

    // Entry point for a comma-separated list of types (recursive).
    ($u:ty, $($tail:ty),*) => {
        impl_shift!($u);
        impl_shift!($($tail),*);
    };
}
645
// Shifts by all primitive integer types that always fit in `usize`.
impl_shift!(usize, u8, u16, u32, isize, i8, i16, i32);

// On 64-bit targets, `u64`/`i64` amounts also convert losslessly.
#[cfg(target_pointer_width = "64")]
impl_shift!(u64, i64);
651
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{aliases::U128, const_for, nlimbs};
    use core::cmp::min;
    use proptest::proptest;

    #[test]
    fn test_leading_zeros() {
        // Degenerate and single-bit sizes.
        assert_eq!(Uint::<0, 0>::ZERO.leading_zeros(), 0);
        assert_eq!(Uint::<1, 1>::ZERO.leading_zeros(), 1);
        assert_eq!(Uint::<1, 1>::from(1).leading_zeros(), 0);
        const_for!(BITS in NON_ZERO {
            const LIMBS: usize = nlimbs(BITS);
            type U = Uint::<BITS, LIMBS>;
            assert_eq!(U::ZERO.leading_zeros(), BITS);
            assert_eq!(U::MAX.leading_zeros(), 0);
            assert_eq!(U::from(1).leading_zeros(), BITS - 1);
            proptest!(|(value: U)| {
                let zeros = value.leading_zeros();
                assert!(zeros <= BITS);
                assert!(zeros < BITS || value == U::ZERO);
                if zeros < BITS {
                    // Shifting out exactly the leading zeros must not
                    // overflow and must leave the top bit set.
                    let (left, overflow) = value.overflowing_shl(zeros);
                    assert!(!overflow);
                    assert!(left.leading_zeros() == 0 || value == U::ZERO);
                    assert!(left.bit(BITS - 1));
                    assert_eq!(value >> (BITS - zeros), Uint::ZERO);
                }
            });
        });
        // Cross-check against the primitive `u128` implementation.
        proptest!(|(value: u128)| {
            let uint = U128::from(value);
            assert_eq!(uint.leading_zeros(), value.leading_zeros() as usize);
        });
    }

    #[test]
    fn test_leading_ones() {
        assert_eq!(Uint::<0, 0>::ZERO.leading_ones(), 0);
        assert_eq!(Uint::<1, 1>::ZERO.leading_ones(), 0);
        assert_eq!(Uint::<1, 1>::from(1).leading_ones(), 1);
    }

    #[test]
    fn test_most_significant_bits() {
        // Single-limb values are returned exactly with exponent 0.
        const_for!(BITS in NON_ZERO {
            const LIMBS: usize = nlimbs(BITS);
            type U = Uint::<BITS, LIMBS>;
            proptest!(|(value: u64)| {
                let value = if U::LIMBS <= 1 { value & U::MASK } else { value };
                assert_eq!(U::from(value).most_significant_bits(), (value, 0));
            });
        });
        // Two-limb values: result is normalized (top bit set) and the
        // exponent matches the position of the highest set bit.
        proptest!(|(mut limbs: [u64; 2])| {
            if limbs[1] == 0 {
                limbs[1] = 1;
            }
            let (bits, exponent) = U128::from_limbs(limbs).most_significant_bits();
            assert!(bits >= 1_u64 << 63);
            assert_eq!(exponent, 64 - limbs[1].leading_zeros() as usize);
        });
    }

    #[test]
    fn test_checked_shl() {
        // In-limb shift with no overflow.
        assert_eq!(
            Uint::<65, 2>::from_limbs([0x0010_0000_0000_0000, 0]).checked_shl(1),
            Some(Uint::<65, 2>::from_limbs([0x0020_0000_0000_0000, 0]))
        );
        // Whole-limb shift with no overflow.
        assert_eq!(
            Uint::<127, 2>::from_limbs([0x0010_0000_0000_0000, 0]).checked_shl(64),
            Some(Uint::<127, 2>::from_limbs([0, 0x0010_0000_0000_0000]))
        );
    }

    #[test]
    #[allow(
        clippy::cast_lossless,
        clippy::cast_possible_truncation,
        clippy::cast_possible_wrap
    )]
    fn test_small() {
        // Single-limb sizes: compare against plain u64 limb arithmetic.
        const_for!(BITS in [1, 2, 8, 16, 32, 63, 64] {
            type U = Uint::<BITS, 1>;
            proptest!(|(a: U, b: U)| {
                assert_eq!(a | b, U::from_limbs([a.limbs[0] | b.limbs[0]]));
                assert_eq!(a & b, U::from_limbs([a.limbs[0] & b.limbs[0]]));
                assert_eq!(a ^ b, U::from_limbs([a.limbs[0] ^ b.limbs[0]]));
            });
            proptest!(|(a: U, s in 0..BITS)| {
                assert_eq!(a << s, U::from_limbs([a.limbs[0] << s & U::MASK]));
                assert_eq!(a >> s, U::from_limbs([a.limbs[0] >> s]));
            });
        });
        // 32-bit: cross-check bit ops against the primitive u32 versions.
        proptest!(|(a: Uint::<32, 1>, s in 0_usize..=34)| {
            assert_eq!(a.reverse_bits(), Uint::from((a.limbs[0] as u32).reverse_bits() as u64));
            assert_eq!(a.rotate_left(s), Uint::from((a.limbs[0] as u32).rotate_left(s as u32) as u64));
            assert_eq!(a.rotate_right(s), Uint::from((a.limbs[0] as u32).rotate_right(s as u32) as u64));
            if s < 32 {
                let arr_shifted = (((a.limbs[0] as i32) >> s) as u32) as u64;
                assert_eq!(a.arithmetic_shr(s), Uint::from_limbs([arr_shifted]));
            }
        });
        // 64-bit: cross-check bit ops against the primitive u64 versions.
        proptest!(|(a: Uint::<64, 1>, s in 0_usize..=66)| {
            assert_eq!(a.reverse_bits(), Uint::from(a.limbs[0].reverse_bits()));
            assert_eq!(a.rotate_left(s), Uint::from(a.limbs[0].rotate_left(s as u32)));
            assert_eq!(a.rotate_right(s), Uint::from(a.limbs[0].rotate_right(s as u32)));
            if s < 64 {
                let arr_shifted = ((a.limbs[0] as i64) >> s) as u64;
                assert_eq!(a.arithmetic_shr(s), Uint::from_limbs([arr_shifted]));
            }
        });
    }

    #[test]
    fn test_shift_reverse() {
        // Shifting left then reversing equals reversing then shifting right.
        const_for!(BITS in SIZES {
            const LIMBS: usize = nlimbs(BITS);
            type U = Uint::<BITS, LIMBS>;
            proptest!(|(value: U, shift in 0..=BITS + 2)| {
                let left = (value << shift).reverse_bits();
                let right = value.reverse_bits() >> shift;
                assert_eq!(left, right);
            });
        });
    }

    #[test]
    fn test_rotate() {
        // Rotating left then right by the same amount is the identity.
        const_for!(BITS in SIZES {
            const LIMBS: usize = nlimbs(BITS);
            type U = Uint::<BITS, LIMBS>;
            proptest!(|(value: U, shift in 0..=BITS + 2)| {
                let rotated = value.rotate_left(shift).rotate_right(shift);
                assert_eq!(value, rotated);
            });
        });
    }

    #[test]
    fn test_arithmetic_shr() {
        // Sign extension: the run of leading ones grows by the shift amount,
        // capped at BITS, and stays zero for non-negative values.
        const_for!(BITS in SIZES {
            const LIMBS: usize = nlimbs(BITS);
            type U = Uint::<BITS, LIMBS>;
            proptest!(|(value: U, shift in 0..=BITS + 2)| {
                let shifted = value.arithmetic_shr(shift);
                assert_eq!(shifted.leading_ones(), match value.leading_ones() {
                    0 => 0,
                    n => min(BITS, n + shift)
                });
            });
        });
    }

    #[test]
    fn test_overflowing_shr() {
        // Exact shift: no bits lost.
        assert_eq!(
            Uint::<64, 1>::from_limbs([40u64]).overflowing_shr(1),
            (Uint::<64, 1>::from(20), false)
        );

        // Low bit shifted out: overflow reported.
        assert_eq!(
            Uint::<64, 1>::from_limbs([41u64]).overflowing_shr(1),
            (Uint::<64, 1>::from(20), true)
        );

        // Non-limb-aligned size, exact shift.
        assert_eq!(
            Uint::<65, 2>::from_limbs([0x0010_0000_0000_0000, 0]).overflowing_shr(1),
            (Uint::<65, 2>::from_limbs([0x0008_0000_0000_0000, 0]), false)
        );

        // Cross-limb shift of MAX: bits lost, overflow reported.
        assert_eq!(
            Uint::<256, 4>::MAX.overflowing_shr(65),
            (
                Uint::<256, 4>::from_str_radix(
                    "7fffffffffffffffffffffffffffffffffffffffffffffff",
                    16
                )
                .unwrap(),
                true
            )
        );
        // Exact in-limb shift on a wide value.
        assert_eq!(
            Uint::<4096, 64>::from_str_radix("3ffffffffffffffffffffffffffffc00000000", 16,)
                .unwrap()
                .overflowing_shr(34),
            (
                Uint::<4096, 64>::from_str_radix("fffffffffffffffffffffffffffff", 16).unwrap(),
                false
            )
        );
        // Exact cross-limb shift on a wide value.
        assert_eq!(
            Uint::<4096, 64>::from_str_radix(
                "fffffffffffffffffffffffffffff0000000000000000000000000",
                16,
            )
            .unwrap()
            .overflowing_shr(100),
            (
                Uint::<4096, 64>::from_str_radix("fffffffffffffffffffffffffffff", 16).unwrap(),
                false
            )
        );
        // Even low bit: shifting by one loses nothing.
        assert_eq!(
            Uint::<4096, 64>::from_str_radix(
                "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0bdbfe",
                16,
            )
            .unwrap()
            .overflowing_shr(1),
            (
                Uint::<4096, 64>::from_str_radix(
                    "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffff85edff",
                    16
                )
                .unwrap(),
                false
            )
        );
        // Very large exact shift across many limbs.
        assert_eq!(
            Uint::<4096, 64>::from_str_radix(
                "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
                16,
            )
            .unwrap()
            .overflowing_shr(1000),
            (
                Uint::<4096, 64>::from_str_radix(
                    "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
                    16
                )
                .unwrap(),
                false
            )
        );
        // Shift of MAX that drops set bits: overflow reported.
        assert_eq!(
            Uint::<4096, 64>::MAX
                .overflowing_shr(34),
            (
                Uint::<4096, 64>::from_str_radix(
                    "3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
                    16
                )
                .unwrap(),
                true
            )
        );
    }
}