// bitcode/pack_ints.rs

use crate::coder::Result;
use crate::consume::{consume_byte, consume_byte_arrays};
use crate::error::error;
use crate::fast::CowSlice;
use crate::pack::{invalid_packing, pack_bytes, unpack_bytes};
use crate::Error;
use alloc::vec::Vec;
use bytemuck::Pod;

/// Possible integer sizes in descending order.
/// TODO consider nonstandard sizes like 24.
#[repr(u8)]
#[derive(Copy, Clone, PartialEq, PartialOrd)]
enum Packing {
    _128 = 0,
    _64,
    _32,
    _16,
    _8,
}

impl Packing {
    fn new<T: SizedUInt>(max: T) -> Self {
        let max: u128 = max.try_into().unwrap_or_else(|_| unreachable!()); // From<usize> isn't implemented for u128.
        #[allow(clippy::match_overlapping_arm)] // Just make sure not to reorder them.
        match max {
            ..=0xFF => Self::_8,
            ..=0xFF_FF => Self::_16,
            ..=0xFF_FF_FF_FF => Self::_32,
            ..=0xFF_FF_FF_FF_FF_FF_FF_FF => Self::_64,
            _ => Self::_128,
        }
    }

    fn write<T: SizedUInt>(self, out: &mut Vec<u8>, offset_by_min: bool) {
        // Encoded such that 0 is no packing and higher numbers are smaller packings.
        // This also makes "no packing with offset_by_min = true" unrepresentable.
        out.push((self as u8 - Self::new(T::MAX) as u8) * 2 - offset_by_min as u8);
    }

    fn read<T: SizedUInt>(input: &mut &[u8]) -> Result<(Self, bool)> {
        let v = consume_byte(input)?;
        let p_u8 = crate::nightly::div_ceil_u8(v, 2) + Self::new(T::MAX) as u8;
        let offset_by_min = v & 1 != 0;
        let p = match p_u8 {
            0 => Self::_128,
            1 => Self::_64,
            2 => Self::_32,
            3 => Self::_16,
            4 => Self::_8,
            _ => return invalid_packing(),
        };
        debug_assert_eq!(p as u8, p_u8);
        Ok((p, offset_by_min))
    }
}
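
// Illustrative sketch of the header-byte scheme above (module and test names are
// ours, not part of the upstream API). For `T = u32`, `Packing::new(T::MAX)` is
// `_32`, so "no packing" encodes as (2 - 2) * 2 = 0 and the smallest packing with
// `offset_by_min` encodes as (4 - 2) * 2 - 1 = 3.
#[cfg(test)]
mod packing_header_example {
    use super::Packing;
    use alloc::vec::Vec;

    #[test]
    fn header_byte_round_trip() {
        let mut out = Vec::new();
        Packing::_32.write::<u32>(&mut out, false); // No packing.
        Packing::_8.write::<u32>(&mut out, true); // Smallest packing, offset by min.
        assert_eq!(out, [0, 3]);

        let mut input = out.as_slice();
        assert!(matches!(Packing::read::<u32>(&mut input), Ok((Packing::_32, false))));
        assert!(matches!(Packing::read::<u32>(&mut input), Ok((Packing::_8, true))));
    }
}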

fn usize_too_big() -> Error {
    error("encountered an isize/usize with more than 32 bits on a 32-bit platform")
}

pub trait Int: Copy + core::fmt::Debug + Default + Ord + Pod + Send + Sized + Sync {
    // Unaligned native endian. TODO could be aligned on big endian since we always have to copy.
    type Une: Pod + Default + Send + Sync;
    type Int: SizedInt;
    #[inline]
    fn from_unaligned(unaligned: Self::Une) -> Self {
        bytemuck::must_cast(unaligned)
    }
    #[inline]
    fn to_unaligned(self) -> Self::Une {
        bytemuck::must_cast(self)
    }
    fn with_input(ints: &mut [Self], f: impl FnOnce(&mut [Self::Int]));
    fn with_output<'a>(
        out: &mut CowSlice<'a, Self::Une>,
        length: usize,
        f: impl FnOnce(&mut CowSlice<'a, <Self::Int as Int>::Une>) -> Result<()>,
    ) -> Result<()>;
}
macro_rules! impl_usize_and_isize {
    ($($isize:ident => $i64:ident),+) => {
        $(
            impl Int for $isize {
                type Une = [u8; core::mem::size_of::<Self>()];
                type Int = $i64;
                fn with_input(ints: &mut [Self], f: impl FnOnce(&mut [Self::Int])) {
                    if cfg!(target_pointer_width = "64") {
                        f(bytemuck::cast_slice_mut(ints))
                    } else {
                        // 32 bit isize to i64 requires conversion. TODO reuse allocation.
                        let mut ints: Vec<$i64> = ints.iter().map(|&v| v as $i64).collect();
                        f(&mut ints);
                    }
                }
                fn with_output<'a>(out: &mut CowSlice<'a, Self::Une>, length: usize, f: impl FnOnce(&mut CowSlice<'a, <Self::Int as Int>::Une>) -> Result<()>) -> Result<()> {
                    if cfg!(target_pointer_width = "64") {
                        f(out.cast_mut())
                    } else {
                        // i64 to 32 bit isize requires a checked conversion. TODO reuse allocations.
                        let mut out_i64 = CowSlice::default();
                        f(&mut out_i64)?;
                        let out_i64 = unsafe { out_i64.as_slice(length) };
                        let out_isize: Result<Vec<Self::Une>> = out_i64.iter().map(|&v| $i64::from_unaligned(v).try_into().map(Self::to_unaligned).map_err(|_| usize_too_big())).collect();
                        *out.set_owned() = out_isize?;
                        Ok(())
                    }
                }
            }
        )+
    }
}
impl_usize_and_isize!(usize => u64, isize => i64);

/// An [`Int`] that has a fixed size independent of platform (not usize).
pub trait SizedInt: Int {
    type Unsigned: SizedUInt;
    const MIN: Self;
    const MAX: Self;
    fn to_unsigned(self) -> Self::Unsigned {
        bytemuck::must_cast(self)
    }
}

macro_rules! impl_int {
    ($($int:ident => $uint:ident),+) => {
        $(
            impl Int for $int {
                type Une = [u8; core::mem::size_of::<Self>()];
                type Int = Self;
                fn with_input(ints: &mut [Self], f: impl FnOnce(&mut [Self::Int])) {
                    f(ints)
                }
                fn with_output<'a>(out: &mut CowSlice<'a, Self::Une>, _: usize, f: impl FnOnce(&mut CowSlice<'a, <Self::Int as Int>::Une>) -> Result<()>) -> Result<()> {
                    f(out)
                }
            }
            impl SizedInt for $int {
                type Unsigned = $uint;
                const MIN: Self = Self::MIN;
                const MAX: Self = Self::MAX;
            }
        )+
    }
}
impl_int!(u8 => u8, u16 => u16, u32 => u32, u64 => u64, u128 => u128);
impl_int!(i8 => u8, i16 => u16, i32 => u32, i64 => u64, i128 => u128);
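
// Illustrative sketch (module and test names are ours): `to_unsigned` reinterprets
// signed ints as their unsigned bit patterns, so e.g. -1i16 becomes u16::MAX.
#[cfg(test)]
mod to_unsigned_example {
    use super::SizedInt;

    #[test]
    fn sign_bits_preserved() {
        assert_eq!((-1i16).to_unsigned(), u16::MAX);
        assert_eq!(i16::MIN.to_unsigned(), 0x8000u16);
    }
}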

/// A [`SizedInt`] that is unsigned.
pub trait SizedUInt: SizedInt + TryInto<u128> {
    fn read(input: &mut &[u8]) -> Result<Self>;
    fn write(v: Self, out: &mut Vec<u8>);
    fn wrapping_add(self, rhs: Self::Une) -> Self::Une;
    fn wrapping_sub(self, rhs: Self) -> Self;
    fn pack128(v: &[Self], out: &mut Vec<u8>);
    fn pack64(v: &[Self], out: &mut Vec<u8>);
    fn pack32(v: &[Self], out: &mut Vec<u8>);
    fn pack16(v: &[Self], out: &mut Vec<u8>);
    fn pack8(v: &mut [Self], out: &mut Vec<u8>);
    fn unpack128<'a>(v: &'a [[u8; 16]], out: &mut CowSlice<'a, Self::Une>) -> Result<()>;
    fn unpack64<'a>(v: &'a [[u8; 8]], out: &mut CowSlice<'a, Self::Une>) -> Result<()>;
    fn unpack32<'a>(v: &'a [[u8; 4]], out: &mut CowSlice<'a, Self::Une>) -> Result<()>;
    fn unpack16<'a>(v: &'a [[u8; 2]], out: &mut CowSlice<'a, Self::Une>) -> Result<()>;
    fn unpack8<'a>(
        input: &mut &'a [u8],
        length: usize,
        out: &mut CowSlice<'a, Self::Une>,
    ) -> Result<()>;
}

macro_rules! impl_simple {
    () => {
        fn read(input: &mut &[u8]) -> Result<Self> {
            Ok(Self::from_le_bytes(consume_byte_arrays(input, 1)?[0]))
        }
        fn write(v: Self, out: &mut Vec<u8>) {
            out.extend_from_slice(&v.to_le_bytes());
        }
        #[inline]
        fn wrapping_add(self, rhs: Self::Une) -> Self::Une {
            self.wrapping_add(Self::from_ne_bytes(rhs)).to_ne_bytes()
        }
        #[inline]
        fn wrapping_sub(self, rhs: Self) -> Self {
            self.wrapping_sub(rhs)
        }
    };
}
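
// Illustrative sketch of the generated `wrapping_add` (module and test names are
// ours): it adds a native-endian byte representation to `self` and wraps on
// overflow, which is what unpacking uses to re-apply a stored minimum.
#[cfg(test)]
mod wrapping_add_example {
    use super::SizedUInt;

    #[test]
    fn wraps_on_overflow() {
        let sum = SizedUInt::wrapping_add(u16::MAX, 2u16.to_ne_bytes());
        assert_eq!(u16::from_ne_bytes(sum), 1);
    }
}
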
macro_rules! impl_unreachable {
    ($t:ty, $pack:ident, $unpack:ident) => {
        fn $pack(_: &[Self], _: &mut Vec<u8>) {
            unreachable!(); // Packings that increase size won't be chosen.
        }
        fn $unpack<'a>(_: &'a [<$t as Int>::Une], _: &mut CowSlice<'a, Self::Une>) -> Result<()> {
            unreachable!(); // Packings that increase size are unrepresentable.
        }
    };
}
macro_rules! impl_self {
    ($pack:ident, $unpack:ident) => {
        fn $pack(v: &[Self], out: &mut Vec<u8>) {
            // If we're little endian we can copy directly because we encode in little endian.
            if cfg!(target_endian = "little") {
                out.extend_from_slice(bytemuck::must_cast_slice(&v));
            } else {
                out.extend(v.iter().flat_map(|&v| v.to_le_bytes()));
            }
        }
        fn $unpack<'a>(v: &'a [Self::Une], out: &mut CowSlice<'a, Self::Une>) -> Result<()> {
            // If we're little endian we can borrow the input since we encode in little endian.
            if cfg!(target_endian = "little") {
                out.set_borrowed(v);
            } else {
                out.set_owned()
                    .extend(v.iter().map(|&v| Self::from_le_bytes(v).to_ne_bytes()));
            }
            Ok(())
        }
    };
}
macro_rules! impl_smaller {
    ($t:ty, $pack:ident, $unpack:ident) => {
        fn $pack(v: &[Self], out: &mut Vec<u8>) {
            out.extend(v.iter().flat_map(|&v| (v as $t).to_le_bytes()))
        }
        fn $unpack<'a>(v: &'a [<$t as Int>::Une], out: &mut CowSlice<'a, Self::Une>) -> Result<()> {
            out.set_owned().extend(
                v.iter()
                    .map(|&v| (<$t>::from_le_bytes(v) as Self).to_ne_bytes()),
            );
            Ok(())
        }
    };
}

// Scratch space to bridge the gap between pack_ints and pack_bytes.
// In theory, we could avoid this intermediate step, but it would result in a lot of generated code.
#[cfg(feature = "std")]
fn with_scratch<T>(f: impl FnOnce(&mut Vec<u8>) -> T) -> T {
    thread_local! {
        static SCRATCH: core::cell::RefCell<Vec<u8>> = const { core::cell::RefCell::new(Vec::new()) };
    }
    SCRATCH.with(|s| {
        let s = &mut s.borrow_mut();
        s.clear();
        f(s)
    })
}
// Resort to allocation.
#[cfg(not(feature = "std"))]
fn with_scratch<T>(f: impl FnOnce(&mut Vec<u8>) -> T) -> T {
    f(&mut Vec::new())
}
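
// Illustrative sketch (module and test names are ours): every `with_scratch`
// call observes an empty buffer, whether it reuses the thread-local scratch or
// falls back to a fresh allocation.
#[cfg(test)]
mod with_scratch_example {
    use super::with_scratch;

    #[test]
    fn scratch_starts_empty() {
        with_scratch(|s| s.extend_from_slice(b"abc"));
        with_scratch(|s| assert!(s.is_empty()));
    }
}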

macro_rules! impl_u8 {
    () => {
        fn pack8(v: &mut [Self], out: &mut Vec<u8>) {
            with_scratch(|bytes| {
                bytes.extend(v.iter().map(|&v| v as u8));
                pack_bytes(bytes, out);
            })
        }
        fn unpack8(input: &mut &[u8], length: usize, out: &mut CowSlice<Self::Une>) -> Result<()> {
            with_scratch(|allocation| {
                // unpack_bytes might not result in a copy, but if it does we want to avoid an allocation.
                let mut bytes = CowSlice::with_allocation(core::mem::take(allocation));
                unpack_bytes(input, length, &mut bytes)?;
                // Safety: unpack_bytes ensures bytes has length of `length`.
                let slice = unsafe { bytes.as_slice(length) };
                out.set_owned()
                    .extend(slice.iter().map(|&v| (v as Self).to_ne_bytes()));
                *allocation = bytes.into_allocation();
                Ok(())
            })
        }
    };
}

impl SizedUInt for u128 {
    impl_simple!();
    impl_self!(pack128, unpack128);
    impl_smaller!(u64, pack64, unpack64);
    impl_smaller!(u32, pack32, unpack32);
    impl_smaller!(u16, pack16, unpack16);
    impl_u8!();
}
impl SizedUInt for u64 {
    impl_simple!();
    impl_unreachable!(u128, pack128, unpack128);
    impl_self!(pack64, unpack64);
    impl_smaller!(u32, pack32, unpack32);
    impl_smaller!(u16, pack16, unpack16);
    impl_u8!();
}
impl SizedUInt for u32 {
    impl_simple!();
    impl_unreachable!(u128, pack128, unpack128);
    impl_unreachable!(u64, pack64, unpack64);
    impl_self!(pack32, unpack32);
    impl_smaller!(u16, pack16, unpack16);
    impl_u8!();
}
impl SizedUInt for u16 {
    impl_simple!();
    impl_unreachable!(u128, pack128, unpack128);
    impl_unreachable!(u64, pack64, unpack64);
    impl_unreachable!(u32, pack32, unpack32);
    impl_self!(pack16, unpack16);
    impl_u8!();
}
impl SizedUInt for u8 {
    impl_simple!();
    impl_unreachable!(u128, pack128, unpack128);
    impl_unreachable!(u64, pack64, unpack64);
    impl_unreachable!(u32, pack32, unpack32);
    impl_unreachable!(u16, pack16, unpack16);
    // Doesn't use impl_u8!() because it would copy unnecessarily.
    fn pack8(v: &mut [Self], out: &mut Vec<u8>) {
        pack_bytes(v, out);
    }
    fn unpack8<'a>(
        input: &mut &'a [u8],
        length: usize,
        out: &mut CowSlice<'a, [u8; 1]>,
    ) -> Result<()> {
        unpack_bytes(input, length, out.cast_mut::<u8>())
    }
}

pub fn minmax<T: SizedInt>(v: &[T]) -> (T, T) {
    let mut min = T::MAX;
    let mut max = T::MIN;
    for &v in v.iter() {
        min = min.min(v);
        max = max.max(v);
    }
    (min, max)
}
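
// Illustrative sketch (module and test names are ours): `minmax` returns the
// extremes, and on an empty slice the identities come back "inverted" as
// (T::MAX, T::MIN).
#[cfg(test)]
mod minmax_example {
    use super::minmax;

    #[test]
    fn finds_extremes() {
        assert_eq!(minmax(&[3u32, 1, 7, 5]), (1, 7));
        assert_eq!(minmax::<u32>(&[]), (u32::MAX, u32::MIN));
    }
}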

fn skip_packing<T: SizedInt>(length: usize) -> bool {
    // Be careful using size_of::<T>() since usize can be 4 or 8 bytes.
    if core::mem::size_of::<T>() == 1 {
        return true; // u8s can't be packed by pack_ints (only pack_bytes).
    }
    if length == 0 {
        return true; // Can't pack 0 ints.
    }
    // Packing a single u16 is pointless (it takes at least 2 bytes either way).
    core::mem::size_of::<T>() == 2 && length == 1
}
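
// Illustrative sketch (module and test names are ours) of the cases skipped above.
#[cfg(test)]
mod skip_packing_example {
    use super::skip_packing;

    #[test]
    fn skips_degenerate_cases() {
        assert!(skip_packing::<u8>(100)); // u8s go through pack_bytes instead.
        assert!(skip_packing::<u16>(0)); // Nothing to pack.
        assert!(skip_packing::<u16>(1)); // Header + 1 byte can't beat 2 bytes.
        assert!(!skip_packing::<u16>(2));
        assert!(!skip_packing::<u32>(1)); // A single u32 can still shrink.
    }
}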

/// Like [`pack_bytes`] but for larger integers. Handles endian conversion.
pub fn pack_ints<T: Int>(ints: &mut [T], out: &mut Vec<u8>) {
    T::with_input(ints, |ints| pack_ints_sized(ints, out));
}

/// [`pack_ints`] but after isize has been converted to i64.
fn pack_ints_sized<T: SizedInt>(ints: &mut [T], out: &mut Vec<u8>) {
    // Handle i8 right away since pack_bytes needs to know that it's signed.
    // Without this special case [0i8, -1, 0, -1, 0, -1] couldn't be packed.
    // Larger signed ints are unaffected because they're made positive before pack_bytes::<u8> is called.
    if core::mem::size_of::<T>() == 1 && T::MIN < T::default() {
        let ints: &mut [i8] = bytemuck::must_cast_slice_mut(ints);
        pack_bytes(ints, out);
        return;
    }

    let (basic_packing, min_max) = if skip_packing::<T>(ints.len()) {
        (Packing::new(T::Unsigned::MAX), None)
    } else {
        // Take a small sample to avoid wastefully scanning the whole slice.
        let (sample, remaining) = ints.split_at(ints.len().min(16));
        let (min, max) = minmax(sample);

        // We only have to check packing(max - min) since it's always at least as good as packing(max).
        let none = Packing::new(T::Unsigned::MAX);
        if Packing::new(max.to_unsigned().wrapping_sub(min.to_unsigned())) == none {
            none.write::<T::Unsigned>(out, false);
            (none, None)
        } else {
            let (remaining_min, remaining_max) = minmax(remaining);
            let min = min.min(remaining_min);
            let max = max.max(remaining_max);

            // Signed ints pack as unsigned ints if all values are positive.
            let basic_packing = if min >= T::default() {
                Packing::new(max.to_unsigned())
            } else {
                none // Negative values can't be packed without offset_packing.
            };
            (basic_packing, Some((min, max)))
        }
    };
    let ints = bytemuck::must_cast_slice_mut(ints);
    let min_max = min_max.map(|(min, max)| (min.to_unsigned(), max.to_unsigned()));
    pack_ints_sized_unsigned::<T::Unsigned>(ints, out, basic_packing, min_max);
}

/// [`pack_ints_sized`] but after signed integers have been cast to unsigned.
fn pack_ints_sized_unsigned<T: SizedUInt>(
    ints: &mut [T],
    out: &mut Vec<u8>,
    basic_packing: Packing,
    min_max: Option<(T, T)>,
) {
    let p = if let Some((min, max)) = min_max {
        // If subtracting min from all ints results in a better packing do it, otherwise don't bother.
        let offset_packing = Packing::new(max.wrapping_sub(min));
        if offset_packing > basic_packing && ints.len() > 5 {
            for b in ints.iter_mut() {
                *b = b.wrapping_sub(min);
            }
            offset_packing.write::<T>(out, true);
            T::write(min, out);
            offset_packing
        } else {
            basic_packing.write::<T>(out, false);
            basic_packing
        }
    } else {
        basic_packing
    };

    match p {
        Packing::_128 => T::pack128(ints, out),
        Packing::_64 => T::pack64(ints, out),
        Packing::_32 => T::pack32(ints, out),
        Packing::_16 => T::pack16(ints, out),
        Packing::_8 => T::pack8(ints, out),
    }
}
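
// Illustrative sketch (module name and values are ours) of offset packing:
// [1000, .., 1007] fit in one byte each once the minimum is subtracted, so the
// header is the packing byte (4 - 3) * 2 - 1 = 1 followed by the little-endian
// minimum. The packed payload after the header is left to pack_bytes.
#[cfg(test)]
mod offset_packing_example {
    use alloc::vec::Vec;

    #[test]
    fn header_contains_min() {
        let mut ints: [u16; 8] = core::array::from_fn(|i| 1000 + i as u16);
        let mut out = Vec::new();
        super::pack_ints(&mut ints, &mut out);
        assert_eq!(out[0], 1); // Packing::_8 with offset_by_min, for T = u16.
        assert_eq!(out[1..3], 1000u16.to_le_bytes());
    }
}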

/// Opposite of [`pack_ints`]. Unpacks into `T::Une` aka unaligned native endian.
pub fn unpack_ints<'a, T: Int>(
    input: &mut &'a [u8],
    length: usize,
    out: &mut CowSlice<'a, T::Une>,
) -> Result<()> {
    T::with_output(out, length, |out| {
        unpack_ints_sized::<T::Int>(input, length, out)
    })
}
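
// Illustrative sketch (module and test names are ours) of driving `pack_ints`
// and `unpack_ints` by hand with a `CowSlice` output.
#[cfg(test)]
mod unpack_ints_example {
    use super::CowSlice;
    use alloc::vec::Vec;

    #[test]
    fn round_trip() {
        let mut ints = [5u32, 6, 7];
        let mut packed = Vec::new();
        super::pack_ints(&mut ints, &mut packed);

        let mut input = packed.as_slice();
        let mut out = CowSlice::default();
        super::unpack_ints::<u32>(&mut input, 3, &mut out).unwrap();
        assert!(input.is_empty());

        // Safety: unpack_ints filled `out` with exactly 3 elements.
        let unpacked = unsafe { out.as_slice(3) };
        assert_eq!(unpacked, &[5u32.to_ne_bytes(), 6u32.to_ne_bytes(), 7u32.to_ne_bytes()]);
    }
}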

/// [`unpack_ints`] but after isize has been converted to i64.
fn unpack_ints_sized<'a, T: SizedInt>(
    input: &mut &'a [u8],
    length: usize,
    out: &mut CowSlice<'a, T::Une>,
) -> Result<()> {
    unpack_ints_sized_unsigned::<T::Unsigned>(input, length, out.cast_mut())
}

/// [`unpack_ints_sized`] but after signed integers have been cast to unsigned.
fn unpack_ints_sized_unsigned<'a, T: SizedUInt>(
    input: &mut &'a [u8],
    length: usize,
    out: &mut CowSlice<'a, T::Une>,
) -> Result<()> {
    let (p, min) = if skip_packing::<T>(length) {
        (Packing::new(T::MAX), None)
    } else {
        let (p, offset_by_min) = Packing::read::<T>(input)?;
        (p, offset_by_min.then(|| T::read(input)).transpose()?)
    };

    match p {
        Packing::_128 => T::unpack128(consume_byte_arrays(input, length)?, out),
        Packing::_64 => T::unpack64(consume_byte_arrays(input, length)?, out),
        Packing::_32 => T::unpack32(consume_byte_arrays(input, length)?, out),
        Packing::_16 => T::unpack16(consume_byte_arrays(input, length)?, out),
        Packing::_8 => T::unpack8(input, length, out),
    }?;
    if let Some(min) = min {
        // Has to be owned to have a min: offset packings are smaller than Self, so unpacking copied.
        out.mut_owned(|out| {
            for v in out.iter_mut() {
                *v = min.wrapping_add(*v);
            }
        });
    }
    Ok(())
}

#[cfg(test)]
mod tests {
    use super::{usize_too_big, CowSlice, Int, Result};
    use crate::error::err;
    use alloc::borrow::ToOwned;
    use alloc::vec::Vec;
    use test::{black_box, Bencher};

    pub fn pack_ints<T: Int>(ints: &[T]) -> Vec<u8> {
        let mut out = vec![];
        super::pack_ints(&mut ints.to_vec(), &mut out);
        assert_eq!(ints, unpack_ints(&out, ints.len()).unwrap());
        out
    }
    pub fn unpack_ints<T: Int>(mut packed: &[u8], length: usize) -> Result<Vec<T>> {
        let mut out = CowSlice::default();
        super::unpack_ints::<T>(&mut packed, length, &mut out)?;
        assert!(packed.is_empty());
        let unpacked = unsafe { out.as_slice(length) };
        Ok(unpacked.iter().copied().map(T::from_unaligned).collect())
    }
    const COUNTING: [usize; 8] = [0usize, 1, 2, 3, 4, 5, 6, 7];

    #[test]
    fn test_usize_eq_u64() {
        let a = COUNTING;
        let b = a.map(|v| v as u64);
        assert_eq!(pack_ints(&a), pack_ints(&b));
        let a = COUNTING.map(|v| v + 1000);
        let b = a.map(|a| a as u64);
        assert_eq!(pack_ints(&a), pack_ints(&b));
    }

    #[test]
    fn test_usize_too_big() {
        for scale in [1, 1 << 8, 1 << 16, 1 << 32] {
            let a = COUNTING.map(|v| v as u64 * scale + u32::MAX as u64);
            let packed = pack_ints(&a);
            let b = unpack_ints::<usize>(&packed, a.len());
            if cfg!(target_pointer_width = "64") {
                let b = b.unwrap();
                assert_eq!(a, core::array::from_fn(|i| b[i] as u64));
            } else {
                assert_eq!(b.unwrap_err(), usize_too_big());
            }
        }
    }

    #[test]
    fn test_isize_too_big() {
        for scale in [1, 1 << 8, 1 << 16, 1 << 32] {
            let a = COUNTING.map(|v| v as i64 * scale + i32::MAX as i64);
            let packed = pack_ints(&a);
            let b = unpack_ints::<isize>(&packed, a.len());
            if cfg!(target_pointer_width = "64") {
                let b = b.unwrap();
                assert_eq!(a, core::array::from_fn(|i| b[i] as i64));
            } else {
                assert_eq!(b.unwrap_err(), usize_too_big());
            }
        }
    }

    #[test]
    fn test_i8_special_case() {
        assert_eq!(
            pack_ints(&[0i8, -1, 0, -1, 0, -1, 0]),
            [9, (-1i8) as u8, 0b1010101]
        );
    }

    #[test]
    fn test_isize_sign_extension() {
        assert_eq!(
            pack_ints(&[0isize, -1, 0, -1, 0, -1, 0]),
            [5, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 10, 0b1010101]
        );
    }

    #[test]
    fn unpack_ints_errors() {
        assert_eq!(
            super::unpack_ints::<u16>(&mut [1].as_slice(), 5, &mut Default::default()),
            err("EOF")
        );
        assert_eq!(
            super::unpack_ints::<u16>(&mut [255].as_slice(), 5, &mut Default::default()),
            super::invalid_packing()
        );
    }

    fn test_inner<T: Int>(ints: &[T]) -> Vec<u8> {
        let out = pack_ints(&mut ints.to_owned());
        let unpacked = unpack_ints::<T>(&out, ints.len()).unwrap();
        assert_eq!(unpacked, ints);
        #[cfg(feature = "std")]
        {
            let packing = out[0];
            let size = 100.0 * out.len() as f32 / core::mem::size_of_val(ints) as f32;
            println!("{packing} {size:>5.1}%");
        }
        out
    }

    #[rustfmt::skip]
    macro_rules! test {
        ($name:ident, $t:ty) => {
            #[test]
            fn $name() {
                type T = $t;
                for increment in [0, 1, u8::MAX as u128 + 1, u16::MAX as u128 + 1, u32::MAX as u128 + 1, u64::MAX as u128 + 1] {
                    #[allow(irrefutable_let_patterns)]
                    let Ok(increment) = T::try_from(increment) else {
                        continue;
                    };

                    for max in [
                        i128::MIN, i64::MIN as i128, i32::MIN as i128, i16::MIN as i128, i8::MIN as i128, -1,
                        0, i8::MAX as i128, i16::MAX as i128, i32::MAX as i128, i64::MAX as i128, i128::MAX
                    ] {
                        if max == T::MAX as i128 {
                            continue;
                        }
                        #[allow(irrefutable_let_patterns)]
                        let Ok(start) = T::try_from(max) else {
                            continue;
                        };
                        #[cfg(feature = "std")]
                        let s = format!("{start} {increment}");
                        if increment == 1 {
                            #[cfg(feature = "std")]
                            print!("{s:<19} mod 2 => ");
                            test_inner::<T>(&core::array::from_fn::<_, 100, _>(|i| {
                                start + (i as T % 2) * increment
                            }));
                        }
                        #[cfg(feature = "std")]
                        print!("{s:<25} => ");
                        test_inner::<T>(&core::array::from_fn::<_, 100, _>(|i| {
                            start + i as T * increment
                        }));
                    }
                }
            }
        };
    }
    test!(test_u008, u8);
    test!(test_u016, u16);
    test!(test_u032, u32);
    test!(test_u064, u64);
    test!(test_u128, u128);
    test!(test_usize, usize);
    test!(test_i008, i8);
    test!(test_i016, i16);
    test!(test_i032, i32);
    test!(test_i064, i64);
    test!(test_i128, i128);
    test!(test_isize, isize);

    fn bench_pack_ints<T: Int>(b: &mut Bencher, src: &[T]) {
        let mut ints = src.to_vec();
        let mut out = Vec::with_capacity(core::mem::size_of_val(src) + 10);
        let starting_cap = out.capacity();
        b.iter(|| {
            ints.copy_from_slice(&src);
            out.clear();
            super::pack_ints(black_box(&mut ints), black_box(&mut out));
        });
        assert_eq!(out.capacity(), starting_cap);
    }

    fn bench_unpack_ints<T: Int>(b: &mut Bencher, src: &[T]) {
        let packed = pack_ints(&mut src.to_vec());
        let mut out = CowSlice::with_allocation(Vec::<T::Une>::with_capacity(src.len()));
        b.iter(|| {
            let length = src.len();
            super::unpack_ints::<T>(
                black_box(&mut packed.as_slice()),
                length,
                black_box(&mut out),
            )
            .unwrap();
            debug_assert_eq!(
                unsafe { out.as_slice(length) }
                    .iter()
                    .copied()
                    .map(T::from_unaligned)
                    .collect::<Vec<_>>(),
                src
            );
        });
    }

    macro_rules! bench {
        ($name:ident, $t:ident) => {
            paste::paste! {
                #[bench]
                fn [<bench_pack_ $name _zero>](b: &mut Bencher) {
                    bench_pack_ints::<$t>(b, &[0; 1000]);
                }

                #[bench]
                fn [<bench_pack_ $name _max>](b: &mut Bencher) {
                    bench_pack_ints::<$t>(b, &[$t::MAX; 1000]);
                }

                #[bench]
                fn [<bench_pack_ $name _random>](b: &mut Bencher) {
                    bench_pack_ints::<$t>(b, &crate::random_data(1000));
                }

                #[bench]
                fn [<bench_pack_ $name _no_pack>](b: &mut Bencher) {
                    let src = vec![$t::MIN; 1000];
                    let mut ints = src.clone();
                    let mut out: Vec<u8> = Vec::with_capacity(core::mem::size_of_val(&ints) + 10);
                    b.iter(|| {
                        ints.copy_from_slice(&src);
                        let input = black_box(&mut ints);
                        out.clear();
                        let out = black_box(&mut out);
                        out.extend_from_slice(bytemuck::must_cast_slice(&input));
                    });
                }

                #[bench]
                fn [<bench_unpack_ $name _zero>](b: &mut Bencher) {
                    bench_unpack_ints::<$t>(b, &[0; 1000]);
                }

                #[bench]
                fn [<bench_unpack_ $name _max>](b: &mut Bencher) {
                    bench_unpack_ints::<$t>(b, &[$t::MAX; 1000]);
                }

                #[bench]
                fn [<bench_unpack_ $name _random>](b: &mut Bencher) {
                    bench_unpack_ints::<$t>(b, &crate::random_data(1000));
                }
            }
        };
    }
    bench!(u008, u8);
    bench!(u016, u16);
    bench!(u032, u32);
    bench!(u064, u64);
    bench!(u128, u128);
    bench!(usize, usize);
}