blake3/
lib.rs

1//! The official Rust implementation of the [BLAKE3] cryptographic hash
2//! function.
3//!
4//! # Examples
5//!
6//! ```
7//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
8//! // Hash an input all at once.
9//! let hash1 = blake3::hash(b"foobarbaz");
10//!
11//! // Hash an input incrementally.
12//! let mut hasher = blake3::Hasher::new();
13//! hasher.update(b"foo");
14//! hasher.update(b"bar");
15//! hasher.update(b"baz");
16//! let hash2 = hasher.finalize();
17//! assert_eq!(hash1, hash2);
18//!
19//! // Extended output. OutputReader also implements Read and Seek.
20//! # #[cfg(feature = "std")] {
21//! let mut output = [0; 1000];
22//! let mut output_reader = hasher.finalize_xof();
23//! output_reader.fill(&mut output);
24//! assert_eq!(hash1, output[..32]);
25//! # }
26//!
27//! // Print a hash as hex.
28//! println!("{}", hash1);
29//! # Ok(())
30//! # }
31//! ```
32//!
33//! # Cargo Features
34//!
35//! The `std` feature (the only feature enabled by default) is required for
36//! implementations of the [`Write`] and [`Seek`] traits, the
37//! [`update_reader`](Hasher::update_reader) helper method, and runtime CPU
38//! feature detection on x86. If this feature is disabled, the only way to use
39//! the x86 SIMD implementations is to enable the corresponding instruction sets
40//! globally, with e.g. `RUSTFLAGS="-C target-cpu=native"`. The resulting binary
41//! will not be portable to other machines.
42//!
43//! The `rayon` feature (disabled by default, but enabled for [docs.rs]) adds
44//! the [`update_rayon`](Hasher::update_rayon) and (in combination with `mmap`
45//! below) [`update_mmap_rayon`](Hasher::update_mmap_rayon) methods, for
46//! multithreaded hashing. However, even if this feature is enabled, all other
47//! APIs remain single-threaded.
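//!
//! For example, a minimal sketch of `update_rayon` (the input here is arbitrary
//! and far too short to benefit from threading, but the API is the same for
//! large inputs):
//!
//! ```
//! # #[cfg(feature = "rayon")] {
//! let mut hasher = blake3::Hasher::new();
//! hasher.update_rayon(b"some input");
//! assert_eq!(hasher.finalize(), blake3::hash(b"some input"));
//! # }
//! ```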
48//!
49//! The `mmap` feature (disabled by default, but enabled for [docs.rs]) adds the
50//! [`update_mmap`](Hasher::update_mmap) and (in combination with `rayon` above)
51//! [`update_mmap_rayon`](Hasher::update_mmap_rayon) helper methods for
52//! memory-mapped IO.
53//!
54//! The `zeroize` feature (disabled by default, but enabled for [docs.rs])
55//! implements
56//! [`Zeroize`](https://docs.rs/zeroize/latest/zeroize/trait.Zeroize.html) for
57//! this crate's types.
58//!
59//! The `serde` feature (disabled by default, but enabled for [docs.rs]) implements
60//! [`serde::Serialize`](https://docs.rs/serde/latest/serde/trait.Serialize.html) and
61//! [`serde::Deserialize`](https://docs.rs/serde/latest/serde/trait.Deserialize.html)
62//! for [`Hash`](struct@Hash).
63//!
64//! The NEON implementation is enabled by default for AArch64 but requires the
65//! `neon` feature for other ARM targets. Not all ARMv7 CPUs support NEON, and
66//! enabling this feature will produce a binary that's not portable to CPUs
67//! without NEON support.
68//!
69//! The `traits-preview` feature enables implementations of traits from the
70//! RustCrypto [`digest`] crate, and re-exports that crate as `traits::digest`.
71//! However, the traits aren't stable, and they're expected to change in
72//! incompatible ways before that crate reaches 1.0. For that reason, this crate
73//! makes no SemVer guarantees for this feature, and callers who use it should
74//! expect breaking changes between patch versions. (The "-preview" feature name
75//! follows the conventions of the RustCrypto [`signature`] crate.)
76//!
77//! [`Hasher::update_rayon`]: struct.Hasher.html#method.update_rayon
78//! [BLAKE3]: https://blake3.io
79//! [Rayon]: https://github.com/rayon-rs/rayon
80//! [docs.rs]: https://docs.rs/
81//! [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
82//! [`Seek`]: https://doc.rust-lang.org/std/io/trait.Seek.html
83//! [`digest`]: https://crates.io/crates/digest
84//! [`signature`]: https://crates.io/crates/signature
85
86#![cfg_attr(not(feature = "std"), no_std)]
87
88#[cfg(test)]
89mod test;
90
91// The guts module is for incremental use cases like the `bao` crate that need
92// to explicitly compute chunk and parent chaining values. It is semi-stable
93// and likely to keep working, but largely undocumented and not intended for
94// widespread use.
95#[doc(hidden)]
96pub mod guts;
97
98/// Undocumented and unstable, for benchmarks only.
99#[doc(hidden)]
100pub mod platform;
101
102// Platform-specific implementations of the compression function. These
103// BLAKE3-specific cfg flags are set in build.rs.
104#[cfg(blake3_avx2_rust)]
105#[path = "rust_avx2.rs"]
106mod avx2;
107#[cfg(blake3_avx2_ffi)]
108#[path = "ffi_avx2.rs"]
109mod avx2;
110#[cfg(blake3_avx512_ffi)]
111#[path = "ffi_avx512.rs"]
112mod avx512;
113#[cfg(blake3_neon)]
114#[path = "ffi_neon.rs"]
115mod neon;
116mod portable;
117#[cfg(blake3_sse2_rust)]
118#[path = "rust_sse2.rs"]
119mod sse2;
120#[cfg(blake3_sse2_ffi)]
121#[path = "ffi_sse2.rs"]
122mod sse2;
123#[cfg(blake3_sse41_rust)]
124#[path = "rust_sse41.rs"]
125mod sse41;
126#[cfg(blake3_sse41_ffi)]
127#[path = "ffi_sse41.rs"]
128mod sse41;
129
130#[cfg(feature = "traits-preview")]
131pub mod traits;
132
133mod io;
134mod join;
135
136use arrayref::{array_mut_ref, array_ref};
137use arrayvec::{ArrayString, ArrayVec};
138use core::cmp;
139use core::fmt;
140use platform::{Platform, MAX_SIMD_DEGREE, MAX_SIMD_DEGREE_OR_2};
141#[cfg(feature = "zeroize")]
142use zeroize::Zeroize;
143
144/// The number of bytes in a [`Hash`](struct.Hash.html), 32.
145pub const OUT_LEN: usize = 32;
146
147/// The number of bytes in a key, 32.
148pub const KEY_LEN: usize = 32;
149
150const MAX_DEPTH: usize = 54; // 2^54 * CHUNK_LEN = 2^64
151use guts::{BLOCK_LEN, CHUNK_LEN};
152
153// While iterating the compression function within a chunk, the CV is
154// represented as words, to avoid doing two extra endianness conversions for
155// each compression in the portable implementation. But the hash_many interface
156// needs to hash both input bytes and parent nodes, so it's better for its
157// output CVs to be represented as bytes.
158type CVWords = [u32; 8];
159type CVBytes = [u8; 32]; // little-endian
160
161const IV: &CVWords = &[
162    0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
163];
164
165const MSG_SCHEDULE: [[usize; 16]; 7] = [
166    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
167    [2, 6, 3, 10, 7, 0, 4, 13, 1, 11, 12, 5, 9, 14, 15, 8],
168    [3, 4, 10, 12, 13, 2, 7, 14, 6, 5, 9, 0, 11, 15, 8, 1],
169    [10, 7, 12, 9, 14, 3, 13, 15, 4, 0, 11, 2, 5, 8, 1, 6],
170    [12, 13, 9, 11, 15, 10, 14, 8, 7, 2, 5, 3, 0, 1, 6, 4],
171    [9, 14, 11, 5, 8, 12, 15, 1, 13, 3, 0, 10, 2, 6, 4, 7],
172    [11, 15, 5, 0, 1, 9, 8, 6, 14, 10, 2, 12, 3, 4, 7, 13],
173];
174
175// These are the internal flags that we use to domain separate root/non-root,
176// chunk/parent, and chunk beginning/middle/end. These are set in the low bits
177// of the block flags word in the compression function, so their values start
178// at 1 and go up from there.
179const CHUNK_START: u8 = 1 << 0;
180const CHUNK_END: u8 = 1 << 1;
181const PARENT: u8 = 1 << 2;
182const ROOT: u8 = 1 << 3;
183const KEYED_HASH: u8 = 1 << 4;
184const DERIVE_KEY_CONTEXT: u8 = 1 << 5;
185const DERIVE_KEY_MATERIAL: u8 = 1 << 6;
186
187#[inline]
188fn counter_low(counter: u64) -> u32 {
189    counter as u32
190}
191
192#[inline]
193fn counter_high(counter: u64) -> u32 {
194    (counter >> 32) as u32
195}
196
197/// An output of the default size, 32 bytes, which provides constant-time
198/// equality checking.
199///
200/// `Hash` implements [`From`] and [`Into`] for `[u8; 32]`, and it provides
201/// [`from_bytes`] and [`as_bytes`] for explicit conversions between itself and
202/// `[u8; 32]`. However, byte arrays and slices don't provide constant-time
203/// equality checking, which is often a security requirement in software that
204/// handles private data. `Hash` doesn't implement [`Deref`] or [`AsRef`], to
205/// avoid situations where a type conversion happens implicitly and the
206/// constant-time property is accidentally lost.
207///
208/// `Hash` provides the [`to_hex`] and [`from_hex`] methods for converting to
209/// and from hexadecimal. It also implements [`Display`] and [`FromStr`].
210///
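/// # Examples
///
/// A brief sketch of the conversions described above (the input bytes are
/// arbitrary):
///
/// ```
/// let hash = blake3::hash(b"some input");
///
/// // Explicit conversions to and from [u8; 32].
/// let bytes: [u8; 32] = hash.into();
/// assert_eq!(hash, blake3::Hash::from_bytes(bytes));
///
/// // Constant-time comparison works directly against byte arrays.
/// assert_eq!(hash, bytes);
///
/// // Hex via Display and FromStr.
/// let hex = hash.to_string();
/// assert_eq!(hash, hex.parse::<blake3::Hash>().unwrap());
/// ```
///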
211/// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
212/// [`Into`]: https://doc.rust-lang.org/std/convert/trait.Into.html
213/// [`as_bytes`]: #method.as_bytes
214/// [`from_bytes`]: #method.from_bytes
215/// [`Deref`]: https://doc.rust-lang.org/stable/std/ops/trait.Deref.html
216/// [`AsRef`]: https://doc.rust-lang.org/std/convert/trait.AsRef.html
217/// [`to_hex`]: #method.to_hex
218/// [`from_hex`]: #method.from_hex
219/// [`Display`]: https://doc.rust-lang.org/std/fmt/trait.Display.html
220/// [`FromStr`]: https://doc.rust-lang.org/std/str/trait.FromStr.html
221#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
222#[derive(Clone, Copy, Hash)]
223pub struct Hash([u8; OUT_LEN]);
224
225impl Hash {
226    /// The raw bytes of the `Hash`. Note that byte arrays don't provide
227    /// constant-time equality checking, so if you need to compare hashes,
228    /// prefer the `Hash` type.
229    #[inline]
230    pub const fn as_bytes(&self) -> &[u8; OUT_LEN] {
231        &self.0
232    }
233
234    /// Create a `Hash` from its raw bytes representation.
235    pub const fn from_bytes(bytes: [u8; OUT_LEN]) -> Self {
236        Self(bytes)
237    }
238
239    /// Create a `Hash` from its raw bytes representation as a slice.
240    ///
241    /// Returns an error if the slice is not exactly 32 bytes long.
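    ///
    /// For example, a brief sketch (the bytes here are arbitrary):
    ///
    /// ```
    /// let bytes = [0x42u8; 32];
    /// let hash = blake3::Hash::from_slice(&bytes).unwrap();
    /// assert_eq!(hash.as_bytes(), &bytes);
    /// assert!(blake3::Hash::from_slice(&bytes[..31]).is_err());
    /// ```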
242    pub fn from_slice(bytes: &[u8]) -> Result<Self, core::array::TryFromSliceError> {
243        Ok(Self::from_bytes(bytes.try_into()?))
244    }
245
246    /// Encode a `Hash` in lowercase hexadecimal.
247    ///
248    /// The returned [`ArrayString`] is a fixed size and doesn't allocate memory
249    /// on the heap. Note that [`ArrayString`] doesn't provide constant-time
250    /// equality checking, so if you need to compare hashes, prefer the `Hash`
251    /// type.
252    ///
253    /// [`ArrayString`]: https://docs.rs/arrayvec/0.5.1/arrayvec/struct.ArrayString.html
254    pub fn to_hex(&self) -> ArrayString<{ 2 * OUT_LEN }> {
255        let mut s = ArrayString::new();
256        let table = b"0123456789abcdef";
257        for &b in self.0.iter() {
258            s.push(table[(b >> 4) as usize] as char);
259            s.push(table[(b & 0xf) as usize] as char);
260        }
261        s
262    }
263
264    /// Decode a `Hash` from hexadecimal. Both uppercase and lowercase ASCII
265    /// bytes are supported.
266    ///
267    /// Any byte outside the ranges `'0'..='9'`, `'a'..='f'`, and `'A'..='F'`
268    /// results in an error. An input length other than 64 also results in an
269    /// error.
270    ///
271    /// Note that `Hash` also implements `FromStr`, so `Hash::from_hex("...")`
272    /// is equivalent to `"...".parse()`.
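    ///
    /// For example, a brief sketch (the input hash is arbitrary):
    ///
    /// ```
    /// let hash = blake3::hash(b"foo");
    /// let hex = hash.to_hex();
    /// assert_eq!(hash, blake3::Hash::from_hex(hex.as_str()).unwrap());
    /// assert_eq!(hash, blake3::Hash::from_hex(hex.to_uppercase()).unwrap());
    /// assert!(blake3::Hash::from_hex("not valid hex").is_err());
    /// ```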
273    pub fn from_hex(hex: impl AsRef<[u8]>) -> Result<Self, HexError> {
274        fn hex_val(byte: u8) -> Result<u8, HexError> {
275            match byte {
276                b'A'..=b'F' => Ok(byte - b'A' + 10),
277                b'a'..=b'f' => Ok(byte - b'a' + 10),
278                b'0'..=b'9' => Ok(byte - b'0'),
279                _ => Err(HexError(HexErrorInner::InvalidByte(byte))),
280            }
281        }
282        let hex_bytes: &[u8] = hex.as_ref();
283        if hex_bytes.len() != OUT_LEN * 2 {
284            return Err(HexError(HexErrorInner::InvalidLen(hex_bytes.len())));
285        }
286        let mut hash_bytes: [u8; OUT_LEN] = [0; OUT_LEN];
287        for i in 0..OUT_LEN {
288            hash_bytes[i] = 16 * hex_val(hex_bytes[2 * i])? + hex_val(hex_bytes[2 * i + 1])?;
289        }
290        Ok(Hash::from(hash_bytes))
291    }
292}
293
294impl From<[u8; OUT_LEN]> for Hash {
295    #[inline]
296    fn from(bytes: [u8; OUT_LEN]) -> Self {
297        Self::from_bytes(bytes)
298    }
299}
300
301impl From<Hash> for [u8; OUT_LEN] {
302    #[inline]
303    fn from(hash: Hash) -> Self {
304        hash.0
305    }
306}
307
308impl core::str::FromStr for Hash {
309    type Err = HexError;
310
311    fn from_str(s: &str) -> Result<Self, Self::Err> {
312        Hash::from_hex(s)
313    }
314}
315
316#[cfg(feature = "zeroize")]
317impl Zeroize for Hash {
318    fn zeroize(&mut self) {
319        // Destructuring to trigger compile error as a reminder to update this impl.
320        let Self(bytes) = self;
321        bytes.zeroize();
322    }
323}
324
325/// This implementation is constant-time.
326impl PartialEq for Hash {
327    #[inline]
328    fn eq(&self, other: &Hash) -> bool {
329        constant_time_eq::constant_time_eq_32(&self.0, &other.0)
330    }
331}
332
333/// This implementation is constant-time.
334impl PartialEq<[u8; OUT_LEN]> for Hash {
335    #[inline]
336    fn eq(&self, other: &[u8; OUT_LEN]) -> bool {
337        constant_time_eq::constant_time_eq_32(&self.0, other)
338    }
339}
340
341/// This implementation is constant-time if the target is 32 bytes long.
342impl PartialEq<[u8]> for Hash {
343    #[inline]
344    fn eq(&self, other: &[u8]) -> bool {
345        constant_time_eq::constant_time_eq(&self.0, other)
346    }
347}
348
349impl Eq for Hash {}
350
351impl fmt::Display for Hash {
352    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
353        // Formatting field as `&str` to reduce code size since the `Debug`
354        // dynamic dispatch table for `&str` is likely needed elsewhere already,
355        // but that for `ArrayString<[u8; 64]>` is not.
356        let hex = self.to_hex();
357        let hex: &str = hex.as_str();
358
359        f.write_str(hex)
360    }
361}
362
363impl fmt::Debug for Hash {
364    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
365        // Formatting field as `&str` to reduce code size since the `Debug`
366        // dynamic dispatch table for `&str` is likely needed elsewhere already,
367        // but that for `ArrayString<[u8; 64]>` is not.
368        let hex = self.to_hex();
369        let hex: &str = hex.as_str();
370
371        f.debug_tuple("Hash").field(&hex).finish()
372    }
373}
374
375/// The error type for [`Hash::from_hex`].
376///
377/// The `.to_string()` representation of this error currently distinguishes between bad length
378/// errors and bad character errors. This is to help with logging and debugging, but it isn't a
379/// stable API detail, and it may change at any time.
380#[derive(Clone, Debug)]
381pub struct HexError(HexErrorInner);
382
383#[derive(Clone, Debug)]
384enum HexErrorInner {
385    InvalidByte(u8),
386    InvalidLen(usize),
387}
388
389impl fmt::Display for HexError {
390    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
391        match self.0 {
392            HexErrorInner::InvalidByte(byte) => {
393                if byte < 128 {
394                    write!(f, "invalid hex character: {:?}", byte as char)
395                } else {
396                    write!(f, "invalid hex character: 0x{:x}", byte)
397                }
398            }
399            HexErrorInner::InvalidLen(len) => {
400                write!(f, "expected 64 hex bytes, received {}", len)
401            }
402        }
403    }
404}
405
406#[cfg(feature = "std")]
407impl std::error::Error for HexError {}
408
409// Each chunk or parent node can produce either a 32-byte chaining value or, by
410// setting the ROOT flag, any number of final output bytes. The Output struct
411// captures the state just prior to choosing between those two possibilities.
412#[derive(Clone)]
413struct Output {
414    input_chaining_value: CVWords,
415    block: [u8; 64],
416    block_len: u8,
417    counter: u64,
418    flags: u8,
419    platform: Platform,
420}
421
422impl Output {
423    fn chaining_value(&self) -> CVBytes {
424        let mut cv = self.input_chaining_value;
425        self.platform.compress_in_place(
426            &mut cv,
427            &self.block,
428            self.block_len,
429            self.counter,
430            self.flags,
431        );
432        platform::le_bytes_from_words_32(&cv)
433    }
434
435    fn root_hash(&self) -> Hash {
436        debug_assert_eq!(self.counter, 0);
437        let mut cv = self.input_chaining_value;
438        self.platform
439            .compress_in_place(&mut cv, &self.block, self.block_len, 0, self.flags | ROOT);
440        Hash(platform::le_bytes_from_words_32(&cv))
441    }
442
443    fn root_output_block(&self) -> [u8; 2 * OUT_LEN] {
444        self.platform.compress_xof(
445            &self.input_chaining_value,
446            &self.block,
447            self.block_len,
448            self.counter,
449            self.flags | ROOT,
450        )
451    }
452}
453
454#[cfg(feature = "zeroize")]
455impl Zeroize for Output {
456    fn zeroize(&mut self) {
457        // Destructuring to trigger compile error as a reminder to update this impl.
458        let Self {
459            input_chaining_value,
460            block,
461            block_len,
462            counter,
463            flags,
464            platform: _,
465        } = self;
466
467        input_chaining_value.zeroize();
468        block.zeroize();
469        block_len.zeroize();
470        counter.zeroize();
471        flags.zeroize();
472    }
473}
474
475#[derive(Clone)]
476struct ChunkState {
477    cv: CVWords,
478    chunk_counter: u64,
479    buf: [u8; BLOCK_LEN],
480    buf_len: u8,
481    blocks_compressed: u8,
482    flags: u8,
483    platform: Platform,
484}
485
486impl ChunkState {
487    fn new(key: &CVWords, chunk_counter: u64, flags: u8, platform: Platform) -> Self {
488        Self {
489            cv: *key,
490            chunk_counter,
491            buf: [0; BLOCK_LEN],
492            buf_len: 0,
493            blocks_compressed: 0,
494            flags,
495            platform,
496        }
497    }
498
499    fn len(&self) -> usize {
500        BLOCK_LEN * self.blocks_compressed as usize + self.buf_len as usize
501    }
502
503    fn fill_buf(&mut self, input: &mut &[u8]) {
504        let want = BLOCK_LEN - self.buf_len as usize;
505        let take = cmp::min(want, input.len());
506        self.buf[self.buf_len as usize..][..take].copy_from_slice(&input[..take]);
507        self.buf_len += take as u8;
508        *input = &input[take..];
509    }
510
511    fn start_flag(&self) -> u8 {
512        if self.blocks_compressed == 0 {
513            CHUNK_START
514        } else {
515            0
516        }
517    }
518
519    // Try to avoid buffering as much as possible, by compressing directly from
520    // the input slice when full blocks are available.
521    fn update(&mut self, mut input: &[u8]) -> &mut Self {
522        if self.buf_len > 0 {
523            self.fill_buf(&mut input);
524            if !input.is_empty() {
525                debug_assert_eq!(self.buf_len as usize, BLOCK_LEN);
526                let block_flags = self.flags | self.start_flag(); // borrowck
527                self.platform.compress_in_place(
528                    &mut self.cv,
529                    &self.buf,
530                    BLOCK_LEN as u8,
531                    self.chunk_counter,
532                    block_flags,
533                );
534                self.buf_len = 0;
535                self.buf = [0; BLOCK_LEN];
536                self.blocks_compressed += 1;
537            }
538        }
539
540        while input.len() > BLOCK_LEN {
541            debug_assert_eq!(self.buf_len, 0);
542            let block_flags = self.flags | self.start_flag(); // borrowck
543            self.platform.compress_in_place(
544                &mut self.cv,
545                array_ref!(input, 0, BLOCK_LEN),
546                BLOCK_LEN as u8,
547                self.chunk_counter,
548                block_flags,
549            );
550            self.blocks_compressed += 1;
551            input = &input[BLOCK_LEN..];
552        }
553
554        self.fill_buf(&mut input);
555        debug_assert!(input.is_empty());
556        debug_assert!(self.len() <= CHUNK_LEN);
557        self
558    }
559
560    fn output(&self) -> Output {
561        let block_flags = self.flags | self.start_flag() | CHUNK_END;
562        Output {
563            input_chaining_value: self.cv,
564            block: self.buf,
565            block_len: self.buf_len,
566            counter: self.chunk_counter,
567            flags: block_flags,
568            platform: self.platform,
569        }
570    }
571}
572
573// Don't derive(Debug), because the state may be secret.
574impl fmt::Debug for ChunkState {
575    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
576        f.debug_struct("ChunkState")
577            .field("len", &self.len())
578            .field("chunk_counter", &self.chunk_counter)
579            .field("flags", &self.flags)
580            .field("platform", &self.platform)
581            .finish()
582    }
583}
584
585#[cfg(feature = "zeroize")]
586impl Zeroize for ChunkState {
587    fn zeroize(&mut self) {
588        // Destructuring to trigger compile error as a reminder to update this impl.
589        let Self {
590            cv,
591            chunk_counter,
592            buf,
593            buf_len,
594            blocks_compressed,
595            flags,
596            platform: _,
597        } = self;
598
599        cv.zeroize();
600        chunk_counter.zeroize();
601        buf.zeroize();
602        buf_len.zeroize();
603        blocks_compressed.zeroize();
604        flags.zeroize();
605    }
606}
607
608// IMPLEMENTATION NOTE
609// ===================
610// The recursive function compress_subtree_wide(), implemented below, is the
611// basis of high-performance BLAKE3. We use it both for all-at-once hashing,
612// and for the incremental input with Hasher (though we have to be careful with
613// subtree boundaries in the incremental case). compress_subtree_wide() applies
614// several optimizations at the same time:
615// - Multithreading with Rayon.
616// - Parallel chunk hashing with SIMD.
617// - Parallel parent hashing with SIMD. Note that while SIMD chunk hashing
618//   maxes out at MAX_SIMD_DEGREE*CHUNK_LEN, parallel parent hashing continues
619//   to benefit from larger inputs, because more levels of the tree benefit can
620//   use full-width SIMD vectors for parent hashing. Without parallel parent
621//   hashing, we lose about 10% of overall throughput on AVX2 and AVX-512.
622
623/// Undocumented and unstable, for benchmarks only.
624#[doc(hidden)]
625#[derive(Clone, Copy)]
626pub enum IncrementCounter {
627    Yes,
628    No,
629}
630
631impl IncrementCounter {
632    #[inline]
633    fn yes(&self) -> bool {
634        match self {
635            IncrementCounter::Yes => true,
636            IncrementCounter::No => false,
637        }
638    }
639}
640
641// The largest power of two less than or equal to `n`, used for left_len()
642// immediately below, and also directly in Hasher::update().
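// For illustration: largest_power_of_two_leq(1) == 1, largest_power_of_two_leq(7)
// == 4, and largest_power_of_two_leq(8) == 8.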
643fn largest_power_of_two_leq(n: usize) -> usize {
644    ((n / 2) + 1).next_power_of_two()
645}
646
647// Given some input larger than one chunk, return the number of bytes that
648// should go in the left subtree. This is the largest power-of-2 number of
649// chunks that leaves at least 1 byte for the right subtree.
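// For illustration (with CHUNK_LEN = 1024): left_len(2049) == 2048, keeping two
// full chunks on the left and a single byte on the right, while left_len(4096)
// == 2048, splitting four full chunks evenly.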
650fn left_len(content_len: usize) -> usize {
651    debug_assert!(content_len > CHUNK_LEN);
652    // Subtract 1 to reserve at least one byte for the right side.
653    let full_chunks = (content_len - 1) / CHUNK_LEN;
654    largest_power_of_two_leq(full_chunks) * CHUNK_LEN
655}
656
657// Use SIMD parallelism to hash up to MAX_SIMD_DEGREE chunks at the same time
658// on a single thread. Write out the chunk chaining values and return the
659// number of chunks hashed. These chunks are never the root and never empty;
660// those cases use a different codepath.
661fn compress_chunks_parallel(
662    input: &[u8],
663    key: &CVWords,
664    chunk_counter: u64,
665    flags: u8,
666    platform: Platform,
667    out: &mut [u8],
668) -> usize {
669    debug_assert!(!input.is_empty(), "empty chunks below the root");
670    debug_assert!(input.len() <= MAX_SIMD_DEGREE * CHUNK_LEN);
671
672    let mut chunks_exact = input.chunks_exact(CHUNK_LEN);
673    let mut chunks_array = ArrayVec::<&[u8; CHUNK_LEN], MAX_SIMD_DEGREE>::new();
674    for chunk in &mut chunks_exact {
675        chunks_array.push(array_ref!(chunk, 0, CHUNK_LEN));
676    }
677    platform.hash_many(
678        &chunks_array,
679        key,
680        chunk_counter,
681        IncrementCounter::Yes,
682        flags,
683        CHUNK_START,
684        CHUNK_END,
685        out,
686    );
687
688    // Hash the remaining partial chunk, if there is one. Note that the empty
689    // chunk (meaning the empty message) is a different codepath.
690    let chunks_so_far = chunks_array.len();
691    if !chunks_exact.remainder().is_empty() {
692        let counter = chunk_counter + chunks_so_far as u64;
693        let mut chunk_state = ChunkState::new(key, counter, flags, platform);
694        chunk_state.update(chunks_exact.remainder());
695        *array_mut_ref!(out, chunks_so_far * OUT_LEN, OUT_LEN) =
696            chunk_state.output().chaining_value();
697        chunks_so_far + 1
698    } else {
699        chunks_so_far
700    }
701}
702
703// Use SIMD parallelism to hash up to MAX_SIMD_DEGREE parents at the same time
704// on a single thread. Write out the parent chaining values and return the
705// number of parents hashed. (If there's an odd input chaining value left over,
706// return it as an additional output.) These parents are never the root and
707// never empty; those cases use a different codepath.
708fn compress_parents_parallel(
709    child_chaining_values: &[u8],
710    key: &CVWords,
711    flags: u8,
712    platform: Platform,
713    out: &mut [u8],
714) -> usize {
715    debug_assert_eq!(child_chaining_values.len() % OUT_LEN, 0, "wacky hash bytes");
716    let num_children = child_chaining_values.len() / OUT_LEN;
717    debug_assert!(num_children >= 2, "not enough children");
718    debug_assert!(num_children <= 2 * MAX_SIMD_DEGREE_OR_2, "too many");
719
720    let mut parents_exact = child_chaining_values.chunks_exact(BLOCK_LEN);
721    // Use MAX_SIMD_DEGREE_OR_2 rather than MAX_SIMD_DEGREE here, because of
722    // the requirements of compress_subtree_wide().
723    let mut parents_array = ArrayVec::<&[u8; BLOCK_LEN], MAX_SIMD_DEGREE_OR_2>::new();
724    for parent in &mut parents_exact {
725        parents_array.push(array_ref!(parent, 0, BLOCK_LEN));
726    }
727    platform.hash_many(
728        &parents_array,
729        key,
730        0, // Parents always use counter 0.
731        IncrementCounter::No,
732        flags | PARENT,
733        0, // Parents have no start flags.
734        0, // Parents have no end flags.
735        out,
736    );
737
738    // If there's an odd child left over, it becomes an output.
739    let parents_so_far = parents_array.len();
740    if !parents_exact.remainder().is_empty() {
741        out[parents_so_far * OUT_LEN..][..OUT_LEN].copy_from_slice(parents_exact.remainder());
742        parents_so_far + 1
743    } else {
744        parents_so_far
745    }
746}
747
748// The wide helper function returns (writes out) an array of chaining values
749// and returns the length of that array. The number of chaining values returned
750// is the dynamically detected SIMD degree, at most MAX_SIMD_DEGREE. Or fewer,
751// if the input is shorter than that many chunks. The reason for maintaining a
752// wide array of chaining values going back up the tree is to allow the
753// implementation to hash as many parents in parallel as possible.
754//
755// As a special case when the SIMD degree is 1, this function will still return
756// at least 2 outputs. This guarantees that this function doesn't perform the
757// root compression. (If it did, it would use the wrong flags, and also we
758// wouldn't be able to implement extendable output.) Note that this function is
759// not used when the whole input is only 1 chunk long; that's a different
760// codepath.
761//
762// Why not just have the caller split the input on the first update(), instead
763// of implementing this special rule? Because we don't want to limit SIMD or
764// multithreading parallelism for that update().
765fn compress_subtree_wide<J: join::Join>(
766    input: &[u8],
767    key: &CVWords,
768    chunk_counter: u64,
769    flags: u8,
770    platform: Platform,
771    out: &mut [u8],
772) -> usize {
773    // Note that the single chunk case does *not* bump the SIMD degree up to 2
774    // when it is 1. This allows Rayon the option of multithreading even the
775    // 2-chunk case, which can help performance on smaller platforms.
776    if input.len() <= platform.simd_degree() * CHUNK_LEN {
777        return compress_chunks_parallel(input, key, chunk_counter, flags, platform, out);
778    }
779
780    // With more than simd_degree chunks, we need to recurse. Start by dividing
781    // the input into left and right subtrees. (Note that this is only optimal
782    // as long as the SIMD degree is a power of 2. If we ever get a SIMD degree
783    // of 3 or something, we'll need a more complicated strategy.)
784    debug_assert_eq!(platform.simd_degree().count_ones(), 1, "power of 2");
785    let (left, right) = input.split_at(left_len(input.len()));
786    let right_chunk_counter = chunk_counter + (left.len() / CHUNK_LEN) as u64;
787
788    // Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2 to
789    // account for the special case of returning 2 outputs when the SIMD degree
790    // is 1.
791    let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
792    let degree = if left.len() == CHUNK_LEN {
793        // The "simd_degree=1 and we're at the leaf nodes" case.
794        debug_assert_eq!(platform.simd_degree(), 1);
795        1
796    } else {
797        cmp::max(platform.simd_degree(), 2)
798    };
799    let (left_out, right_out) = cv_array.split_at_mut(degree * OUT_LEN);
800
801    // Recurse! For update_rayon(), this is where we take advantage of RayonJoin and use multiple
802    // threads.
803    let (left_n, right_n) = J::join(
804        || compress_subtree_wide::<J>(left, key, chunk_counter, flags, platform, left_out),
805        || compress_subtree_wide::<J>(right, key, right_chunk_counter, flags, platform, right_out),
806    );
807
808    // The special case again. If simd_degree=1, then we'll have left_n=1 and
809    // right_n=1. Rather than compressing them into a single output, return
810    // them directly, to make sure we always have at least two outputs.
811    debug_assert_eq!(left_n, degree);
812    debug_assert!(right_n >= 1 && right_n <= left_n);
813    if left_n == 1 {
814        out[..2 * OUT_LEN].copy_from_slice(&cv_array[..2 * OUT_LEN]);
815        return 2;
816    }
817
818    // Otherwise, do one layer of parent node compression.
819    let num_children = left_n + right_n;
820    compress_parents_parallel(
821        &cv_array[..num_children * OUT_LEN],
822        key,
823        flags,
824        platform,
825        out,
826    )
827}
828
829// Hash a subtree with compress_subtree_wide(), and then condense the resulting
830// list of chaining values down to a single parent node. Don't compress that
831// last parent node, however. Instead, return its message bytes (the
832// concatenated chaining values of its children). This is necessary when the
833// first call to update() supplies a complete subtree, because the topmost
834// parent node of that subtree could end up being the root. It's also necessary
835// for extended output in the general case.
836//
837// As with compress_subtree_wide(), this function is not used on inputs of 1
838// chunk or less. That's a different codepath.
839fn compress_subtree_to_parent_node<J: join::Join>(
840    input: &[u8],
841    key: &CVWords,
842    chunk_counter: u64,
843    flags: u8,
844    platform: Platform,
845) -> [u8; BLOCK_LEN] {
846    debug_assert!(input.len() > CHUNK_LEN);
847    let mut cv_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
848    let mut num_cvs =
849        compress_subtree_wide::<J>(input, &key, chunk_counter, flags, platform, &mut cv_array);
850    debug_assert!(num_cvs >= 2);
851
852    // If MAX_SIMD_DEGREE is greater than 2 and there's enough input,
853    // compress_subtree_wide() returns more than 2 chaining values. Condense
854    // them into 2 by forming parent nodes repeatedly.
855    let mut out_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN / 2];
856    while num_cvs > 2 {
857        let cv_slice = &cv_array[..num_cvs * OUT_LEN];
858        num_cvs = compress_parents_parallel(cv_slice, key, flags, platform, &mut out_array);
859        cv_array[..num_cvs * OUT_LEN].copy_from_slice(&out_array[..num_cvs * OUT_LEN]);
860    }
861    *array_ref!(cv_array, 0, 2 * OUT_LEN)
862}
863
864// Hash a complete input all at once. Unlike compress_subtree_wide() and
865// compress_subtree_to_parent_node(), this function handles the 1 chunk case.
866fn hash_all_at_once<J: join::Join>(input: &[u8], key: &CVWords, flags: u8) -> Output {
867    let platform = Platform::detect();
868
869    // If the whole subtree is one chunk, hash it directly with a ChunkState.
870    if input.len() <= CHUNK_LEN {
871        return ChunkState::new(key, 0, flags, platform)
872            .update(input)
873            .output();
874    }
875
876    // Otherwise construct an Output object from the parent node returned by
877    // compress_subtree_to_parent_node().
878    Output {
879        input_chaining_value: *key,
880        block: compress_subtree_to_parent_node::<J>(input, key, 0, flags, platform),
881        block_len: BLOCK_LEN as u8,
882        counter: 0,
883        flags: flags | PARENT,
884        platform,
885    }
886}
887
888/// The default hash function.
889///
890/// For an incremental version that accepts multiple writes, see [`Hasher::new`],
891/// [`Hasher::update`], and [`Hasher::finalize`]. These two lines are equivalent:
892///
893/// ```
894/// let hash = blake3::hash(b"foo");
895/// # let hash1 = hash;
896///
897/// let hash = blake3::Hasher::new().update(b"foo").finalize();
898/// # let hash2 = hash;
899/// # assert_eq!(hash1, hash2);
900/// ```
901///
902/// For output sizes other than 32 bytes, see [`Hasher::finalize_xof`] and
903/// [`OutputReader`].
904///
905/// This function is always single-threaded. For multithreading support, see
906/// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
907pub fn hash(input: &[u8]) -> Hash {
908    hash_all_at_once::<join::SerialJoin>(input, IV, 0).root_hash()
909}
910
911/// The keyed hash function.
912///
913/// This is suitable for use as a message authentication code, for example to
914/// replace an HMAC instance. In that use case, the constant-time equality
915/// checking provided by [`Hash`](struct.Hash.html) is almost always a security
916/// requirement, and callers need to be careful not to compare MACs as raw
917/// bytes.
918///
919/// For an incremental version that accepts multiple writes, see [`Hasher::new_keyed`],
920/// [`Hasher::update`], and [`Hasher::finalize`]. These two lines are equivalent:
921///
922/// ```
923/// # const KEY: &[u8; 32] = &[0; 32];
924/// let mac = blake3::keyed_hash(KEY, b"foo");
925/// # let mac1 = mac;
926///
927/// let mac = blake3::Hasher::new_keyed(KEY).update(b"foo").finalize();
928/// # let mac2 = mac;
929/// # assert_eq!(mac1, mac2);
930/// ```
931///
932/// For output sizes other than 32 bytes, see [`Hasher::finalize_xof`], and [`OutputReader`].
933///
934/// This function is always single-threaded. For multithreading support, see
935/// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
936pub fn keyed_hash(key: &[u8; KEY_LEN], input: &[u8]) -> Hash {
937    let key_words = platform::words_from_le_bytes_32(key);
938    hash_all_at_once::<join::SerialJoin>(input, &key_words, KEYED_HASH).root_hash()
939}
940
941/// The key derivation function.
942///
943/// Given cryptographic key material of any length and a context string of any
944/// length, this function outputs a 32-byte derived subkey. **The context string
945/// should be hardcoded, globally unique, and application-specific.** A good
946/// default format for such strings is `"[application] [commit timestamp]
947/// [purpose]"`, e.g., `"example.com 2019-12-25 16:18:03 session tokens v1"`.
948///
949/// Key derivation is important when you want to use the same key in multiple
950/// algorithms or use cases. Using the same key with different cryptographic
951/// algorithms is generally forbidden, and deriving a separate subkey for each
952/// use case protects you from bad interactions. Derived keys also mitigate the
953/// damage from one part of your application accidentally leaking its key.
954///
955/// As a rare exception to that general rule, however, it is possible to use
956/// `derive_key` itself with key material that you are already using with
957/// another algorithm. You might need to do this if you're adding features to
958/// an existing application that does not yet use key derivation internally.
959/// However, you still must not share key material with algorithms that forbid
960/// key reuse entirely, like a one-time pad. For more on this, see sections 6.2
961/// and 7.8 of the [BLAKE3 paper](https://github.com/BLAKE3-team/BLAKE3-specs/blob/master/blake3.pdf).
962///
963/// Note that BLAKE3 is not a password hash, and **`derive_key` should never be
964/// used with passwords.** Instead, use a dedicated password hash like
965/// [Argon2]. Password hashes are entirely different from generic hash
966/// functions, with opposite design requirements.
967///
968/// For an incremental version that accepts multiple writes, see [`Hasher::new_derive_key`],
969/// [`Hasher::update`], and [`Hasher::finalize`]. These two statements are equivalent:
970///
971/// ```
972/// # const CONTEXT: &str = "example.com 2019-12-25 16:18:03 session tokens v1";
973/// let key = blake3::derive_key(CONTEXT, b"key material, not a password");
974/// # let key1 = key;
975///
976/// let key: [u8; 32] = blake3::Hasher::new_derive_key(CONTEXT)
977///     .update(b"key material, not a password")
978///     .finalize()
979///     .into();
980/// # let key2 = key;
981/// # assert_eq!(key1, key2);
982/// ```
983///
984/// For output sizes other than 32 bytes, see [`Hasher::finalize_xof`], and [`OutputReader`].
985///
986/// This function is always single-threaded. For multithreading support, see
987/// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
988///
989/// [Argon2]: https://en.wikipedia.org/wiki/Argon2
990pub fn derive_key(context: &str, key_material: &[u8]) -> [u8; OUT_LEN] {
991    let context_key =
992        hash_all_at_once::<join::SerialJoin>(context.as_bytes(), IV, DERIVE_KEY_CONTEXT)
993            .root_hash();
994    let context_key_words = platform::words_from_le_bytes_32(context_key.as_bytes());
995    hash_all_at_once::<join::SerialJoin>(key_material, &context_key_words, DERIVE_KEY_MATERIAL)
996        .root_hash()
997        .0
998}
999
1000fn parent_node_output(
1001    left_child: &CVBytes,
1002    right_child: &CVBytes,
1003    key: &CVWords,
1004    flags: u8,
1005    platform: Platform,
1006) -> Output {
1007    let mut block = [0; BLOCK_LEN];
1008    block[..32].copy_from_slice(left_child);
1009    block[32..].copy_from_slice(right_child);
1010    Output {
1011        input_chaining_value: *key,
1012        block,
1013        block_len: BLOCK_LEN as u8,
1014        counter: 0,
1015        flags: flags | PARENT,
1016        platform,
1017    }
1018}
1019
1020/// An incremental hash state that can accept any number of writes.
1021///
1022/// The `rayon` and `mmap` Cargo features enable additional methods on this
1023/// type related to multithreading and memory-mapped IO.
1024///
1025/// When the `traits-preview` Cargo feature is enabled, this type implements
1026/// several commonly used traits from the
1027/// [`digest`](https://crates.io/crates/digest) crate. However, those
1028/// traits aren't stable, and they're expected to change in incompatible ways
1029/// before that crate reaches 1.0. For that reason, this crate makes no SemVer
1030/// guarantees for this feature, and callers who use it should expect breaking
1031/// changes between patch versions.
1032///
1033/// # Examples
1034///
1035/// ```
1036/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
1037/// // Hash an input incrementally.
1038/// let mut hasher = blake3::Hasher::new();
1039/// hasher.update(b"foo");
1040/// hasher.update(b"bar");
1041/// hasher.update(b"baz");
1042/// assert_eq!(hasher.finalize(), blake3::hash(b"foobarbaz"));
1043///
1044/// // Extended output. OutputReader also implements Read and Seek.
1045/// # #[cfg(feature = "std")] {
1046/// let mut output = [0; 1000];
1047/// let mut output_reader = hasher.finalize_xof();
1048/// output_reader.fill(&mut output);
1049/// assert_eq!(&output[..32], blake3::hash(b"foobarbaz").as_bytes());
1050/// # }
1051/// # Ok(())
1052/// # }
1053/// ```
1054#[derive(Clone)]
1055pub struct Hasher {
1056    key: CVWords,
1057    chunk_state: ChunkState,
1058    // The stack size is MAX_DEPTH + 1 because we do lazy merging. For example,
1059    // with 7 chunks, we have 3 entries in the stack. Adding an 8th chunk
1060    // requires a 4th entry, rather than merging everything down to 1, because
1061    // we don't know whether more input is coming. This is different from how
1062    // the reference implementation does things.
1063    cv_stack: ArrayVec<CVBytes, { MAX_DEPTH + 1 }>,
1064}
1065
1066impl Hasher {
1067    fn new_internal(key: &CVWords, flags: u8) -> Self {
1068        Self {
1069            key: *key,
1070            chunk_state: ChunkState::new(key, 0, flags, Platform::detect()),
1071            cv_stack: ArrayVec::new(),
1072        }
1073    }
1074
1075    /// Construct a new `Hasher` for the regular hash function.
1076    pub fn new() -> Self {
1077        Self::new_internal(IV, 0)
1078    }
1079
1080    /// Construct a new `Hasher` for the keyed hash function. See
1081    /// [`keyed_hash`].
1082    ///
1083    /// [`keyed_hash`]: fn.keyed_hash.html
1084    pub fn new_keyed(key: &[u8; KEY_LEN]) -> Self {
1085        let key_words = platform::words_from_le_bytes_32(key);
1086        Self::new_internal(&key_words, KEYED_HASH)
1087    }
1088
1089    /// Construct a new `Hasher` for the key derivation function. See
1090    /// [`derive_key`]. The context string should be hardcoded, globally
1091    /// unique, and application-specific.
1092    ///
1093    /// [`derive_key`]: fn.derive_key.html
1094    pub fn new_derive_key(context: &str) -> Self {
1095        let context_key =
1096            hash_all_at_once::<join::SerialJoin>(context.as_bytes(), IV, DERIVE_KEY_CONTEXT)
1097                .root_hash();
1098        let context_key_words = platform::words_from_le_bytes_32(context_key.as_bytes());
1099        Self::new_internal(&context_key_words, DERIVE_KEY_MATERIAL)
1100    }
1101
1102    /// Reset the `Hasher` to its initial state.
1103    ///
1104    /// This is functionally the same as overwriting the `Hasher` with a new
1105    /// one, using the same key or context string if any.
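    ///
    /// For example, a brief sketch of reusing one `Hasher` for two inputs:
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update(b"first input");
    /// let first = hasher.finalize();
    /// hasher.reset();
    /// hasher.update(b"second input");
    /// assert_eq!(hasher.finalize(), blake3::hash(b"second input"));
    /// assert_eq!(first, blake3::hash(b"first input"));
    /// ```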
1106    pub fn reset(&mut self) -> &mut Self {
1107        self.chunk_state = ChunkState::new(
1108            &self.key,
1109            0,
1110            self.chunk_state.flags,
1111            self.chunk_state.platform,
1112        );
1113        self.cv_stack.clear();
1114        self
1115    }
1116
1117    // As described in push_cv() below, we do "lazy merging", delaying merges
1118    // until right before the next CV is about to be added. This is different
1119    // from the reference implementation. Another difference is that we aren't
1120    // always merging 1 chunk at a time. Instead, each CV might represent any
1121    // power-of-two number of chunks, as long as the smaller-above-larger stack
1122    // order is maintained. Instead of the "count the trailing 0-bits"
1123    // algorithm described in the spec, we use a "count the total number of
1124    // 1-bits" variant that doesn't require us to retain the subtree size of
1125    // the CV on top of the stack. The principle is the same: each CV that
1126    // should remain in the stack is represented by a 1-bit in the total number
1127    // of chunks (or bytes) so far.
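    //
    // An illustrative walk-through: after 7 complete chunks, 7 = 0b111 has
    // three 1-bits, so the stack holds 3 CVs. Once the total reaches 8 chunks,
    // 8 = 0b1000 has a single 1-bit, so the next merge_cv_stack() call merges
    // everything in the stack down to a single CV.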
1128    fn merge_cv_stack(&mut self, total_len: u64) {
1129        let post_merge_stack_len = total_len.count_ones() as usize;
1130        while self.cv_stack.len() > post_merge_stack_len {
1131            let right_child = self.cv_stack.pop().unwrap();
1132            let left_child = self.cv_stack.pop().unwrap();
1133            let parent_output = parent_node_output(
1134                &left_child,
1135                &right_child,
1136                &self.key,
1137                self.chunk_state.flags,
1138                self.chunk_state.platform,
1139            );
1140            self.cv_stack.push(parent_output.chaining_value());
1141        }
1142    }
1143
1144    // In reference_impl.rs, we merge the new CV with existing CVs from the
1145    // stack before pushing it. We can do that because we know more input is
1146    // coming, so we know none of the merges are root.
1147    //
1148    // This setting is different. We want to feed as much input as possible to
1149    // compress_subtree_wide(), without setting aside anything for the
1150    // chunk_state. If the user gives us 64 KiB, we want to parallelize over
1151    // all 64 KiB at once as a single subtree, if at all possible.
1152    //
1153    // This leads to two problems:
1154    // 1) This 64 KiB input might be the only call that ever gets made to
1155    //    update. In this case, the root node of the 64 KiB subtree would be
1156    //    the root node of the whole tree, and it would need to be ROOT
1157    //    finalized. We can't compress it until we know.
1158    // 2) This 64 KiB input might complete a larger tree, whose root node is
1159    //    similarly going to be the root of the whole tree. For example,
1160    //    maybe we have 192 KiB (that is, 128 + 64) hashed so far. We can't
1161    //    compress the node at the root of the 256 KiB subtree until we know
1162    //    how to finalize it.
1163    //
1164    // The second problem is solved with "lazy merging". That is, when we're
1165    // about to add a CV to the stack, we don't merge it with anything first,
1166    // as the reference impl does. Instead we do merges using the *previous* CV
1167    // that was added, which is sitting on top of the stack, and we put the new
1168    // CV (unmerged) on top of the stack afterwards. This guarantees that we
1169    // never merge the root node until finalize().
1170    //
1171    // Solving the first problem requires an additional tool,
1172    // compress_subtree_to_parent_node(). That function always returns the top
1173    // *two* chaining values of the subtree it's compressing. We then do lazy
1174    // merging with each of them separately, so that the second CV will always
1175    // remain unmerged. (That also helps us support extendable output when
1176    // we're hashing an input all-at-once.)
1177    fn push_cv(&mut self, new_cv: &CVBytes, chunk_counter: u64) {
1178        self.merge_cv_stack(chunk_counter);
1179        self.cv_stack.push(*new_cv);
1180    }
1181
1182    /// Add input bytes to the hash state. You can call this any number of times.
1183    ///
1184    /// This method is always single-threaded. For multithreading support, see
1185    /// [`update_rayon`](#method.update_rayon) (enabled with the `rayon` Cargo feature).
1186    ///
1187    /// Note that the degree of SIMD parallelism that `update` can use is limited by the size of
1188    /// this input buffer. See [`update_reader`](#method.update_reader).
1189    pub fn update(&mut self, input: &[u8]) -> &mut Self {
1190        self.update_with_join::<join::SerialJoin>(input)
1191    }
1192
1193    fn update_with_join<J: join::Join>(&mut self, mut input: &[u8]) -> &mut Self {
1194        // If we have some partial chunk bytes in the internal chunk_state, we
1195        // need to finish that chunk first.
1196        if self.chunk_state.len() > 0 {
1197            let want = CHUNK_LEN - self.chunk_state.len();
1198            let take = cmp::min(want, input.len());
1199            self.chunk_state.update(&input[..take]);
1200            input = &input[take..];
1201            if !input.is_empty() {
1202                // We've filled the current chunk, and there's more input
1203                // coming, so we know it's not the root and we can finalize it.
1204                // Then we'll proceed to hashing whole chunks below.
1205                debug_assert_eq!(self.chunk_state.len(), CHUNK_LEN);
1206                let chunk_cv = self.chunk_state.output().chaining_value();
1207                self.push_cv(&chunk_cv, self.chunk_state.chunk_counter);
1208                self.chunk_state = ChunkState::new(
1209                    &self.key,
1210                    self.chunk_state.chunk_counter + 1,
1211                    self.chunk_state.flags,
1212                    self.chunk_state.platform,
1213                );
1214            } else {
1215                return self;
1216            }
1217        }
1218
1219        // Now the chunk_state is clear, and we have more input. If there's
1220        // more than a single chunk (so, definitely not the root chunk), hash
1221        // the largest whole subtree we can, with the full benefits of SIMD and
1222        // multithreading parallelism. Two restrictions:
1223        // - The subtree has to be a power-of-2 number of chunks. Only subtrees
1224        //   along the right edge can be incomplete, and we don't know where
1225        //   the right edge is going to be until we get to finalize().
1226        // - The subtree must evenly divide the total number of chunks up until
1227        //   this point (if total is not 0). If the current incomplete subtree
1228        //   is only waiting for 1 more chunk, we can't hash a subtree of 4
1229        //   chunks. We have to complete the current subtree first.
1230        // Because we might need to break up the input to form powers of 2, or
1231        // to evenly divide what we already have, this part runs in a loop.
1232        while input.len() > CHUNK_LEN {
1233            debug_assert_eq!(self.chunk_state.len(), 0, "no partial chunk data");
1234            debug_assert_eq!(CHUNK_LEN.count_ones(), 1, "power of 2 chunk len");
1235            let mut subtree_len = largest_power_of_two_leq(input.len());
1236            let count_so_far = self.chunk_state.chunk_counter * CHUNK_LEN as u64;
1237            // Shrink the subtree_len until it evenly divides the count so far.
1238            // We know that subtree_len itself is a power of 2, so we can use a
1239            // bitmasking trick instead of an actual remainder operation. (Note
1240            // that if the caller consistently passes power-of-2 inputs of the
1241            // same size, as is hopefully typical, this loop condition will
1242            // always fail, and subtree_len will always be the full length of
1243            // the input.)
1244            //
1245            // An aside: We don't have to shrink subtree_len quite this much.
1246            // For example, if count_so_far is 1, we could pass 2 chunks to
1247            // compress_subtree_to_parent_node. Since we'll get 2 CVs back,
1248            // we'll still get the right answer in the end, and we might get to
1249            // use 2-way SIMD parallelism. The problem with this optimization,
1250            // is that it gets us stuck always hashing 2 chunks. The total
1251            // number of chunks will remain odd, and we'll never graduate to
1252            // higher degrees of parallelism. See
1253            // https://github.com/BLAKE3-team/BLAKE3/issues/69.
1254            while (subtree_len - 1) as u64 & count_so_far != 0 {
1255                subtree_len /= 2;
1256            }
1257            // The shrunken subtree_len might now be 1 chunk long. If so, hash
1258            // that one chunk by itself. Otherwise, compress the subtree into a
1259            // pair of CVs.
1260            let subtree_chunks = (subtree_len / CHUNK_LEN) as u64;
1261            if subtree_len <= CHUNK_LEN {
1262                debug_assert_eq!(subtree_len, CHUNK_LEN);
1263                self.push_cv(
1264                    &ChunkState::new(
1265                        &self.key,
1266                        self.chunk_state.chunk_counter,
1267                        self.chunk_state.flags,
1268                        self.chunk_state.platform,
1269                    )
1270                    .update(&input[..subtree_len])
1271                    .output()
1272                    .chaining_value(),
1273                    self.chunk_state.chunk_counter,
1274                );
1275            } else {
1276                // This is the high-performance happy path, though getting here
1277                // depends on the caller giving us a long enough input.
1278                let cv_pair = compress_subtree_to_parent_node::<J>(
1279                    &input[..subtree_len],
1280                    &self.key,
1281                    self.chunk_state.chunk_counter,
1282                    self.chunk_state.flags,
1283                    self.chunk_state.platform,
1284                );
1285                let left_cv = array_ref!(cv_pair, 0, 32);
1286                let right_cv = array_ref!(cv_pair, 32, 32);
1287                // Push the two CVs we received into the CV stack in order. Because
1288                // the stack merges lazily, this guarantees we aren't merging the
1289                // root.
1290                self.push_cv(left_cv, self.chunk_state.chunk_counter);
1291                self.push_cv(
1292                    right_cv,
1293                    self.chunk_state.chunk_counter + (subtree_chunks / 2),
1294                );
1295            }
1296            self.chunk_state.chunk_counter += subtree_chunks;
1297            input = &input[subtree_len..];
1298        }
1299
1300        // What remains is 1 chunk or less. Add it to the chunk state.
1301        debug_assert!(input.len() <= CHUNK_LEN);
1302        if !input.is_empty() {
1303            self.chunk_state.update(input);
1304            // Having added some input to the chunk_state, we know what's in
1305            // the CV stack won't become the root node, and we can do an extra
1306            // merge. This simplifies finalize().
1307            self.merge_cv_stack(self.chunk_state.chunk_counter);
1308        }
1309
1310        self
1311    }
1312
1313    fn final_output(&self) -> Output {
1314        // If the current chunk is the only chunk, that makes it the root node
1315        // also. Convert it directly into an Output. Otherwise, we need to
1316        // merge subtrees below.
1317        if self.cv_stack.is_empty() {
1318            debug_assert_eq!(self.chunk_state.chunk_counter, 0);
1319            return self.chunk_state.output();
1320        }
1321
1322        // If there are any bytes in the ChunkState, finalize that chunk and
1323        // merge its CV with everything in the CV stack. In that case, the work
1324        // we did at the end of update() above guarantees that the stack
1325        // doesn't contain any unmerged subtrees that need to be merged first.
1326        // (This is important, because if there were two chunk hashes sitting
1327        // on top of the stack, they would need to merge with each other, and
1328        // merging a new chunk hash into them would be incorrect.)
1329        //
1330        // If there are no bytes in the ChunkState, we'll merge what's already
1331        // in the stack. In this case it's fine if there are unmerged chunks on
1332        // top, because we'll merge them with each other. Note that the case of
1333        // the empty chunk is taken care of above.
1334        let mut output: Output;
1335        let mut num_cvs_remaining = self.cv_stack.len();
1336        if self.chunk_state.len() > 0 {
1337            debug_assert_eq!(
1338                self.cv_stack.len(),
1339                self.chunk_state.chunk_counter.count_ones() as usize,
1340                "cv stack does not need a merge"
1341            );
1342            output = self.chunk_state.output();
1343        } else {
1344            debug_assert!(self.cv_stack.len() >= 2);
1345            output = parent_node_output(
1346                &self.cv_stack[num_cvs_remaining - 2],
1347                &self.cv_stack[num_cvs_remaining - 1],
1348                &self.key,
1349                self.chunk_state.flags,
1350                self.chunk_state.platform,
1351            );
1352            num_cvs_remaining -= 2;
1353        }
1354        while num_cvs_remaining > 0 {
1355            output = parent_node_output(
1356                &self.cv_stack[num_cvs_remaining - 1],
1357                &output.chaining_value(),
1358                &self.key,
1359                self.chunk_state.flags,
1360                self.chunk_state.platform,
1361            );
1362            num_cvs_remaining -= 1;
1363        }
1364        output
1365    }
1366
1367    /// Finalize the hash state and return the [`Hash`](struct.Hash.html) of
1368    /// the input.
1369    ///
1370    /// This method is idempotent. Calling it twice will give the same result.
1371    /// You can also add more input and finalize again.
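    ///
    /// # Example
    ///
    /// A brief sketch of the idempotence described above (the inputs here are
    /// arbitrary):
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update(b"foo");
    /// let hash1 = hasher.finalize();
    /// // Finalizing again gives the same result.
    /// assert_eq!(hash1, hasher.finalize());
    /// // Adding more input changes the hash.
    /// hasher.update(b"bar");
    /// assert_ne!(hash1, hasher.finalize());
    /// ```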
1372    pub fn finalize(&self) -> Hash {
1373        self.final_output().root_hash()
1374    }
1375
1376    /// Finalize the hash state and return an [`OutputReader`], which can
1377    /// supply any number of output bytes.
1378    ///
1379    /// This method is idempotent. Calling it twice will give the same result.
1380    /// You can also add more input and finalize again.
1381    ///
1382    /// [`OutputReader`]: struct.OutputReader.html
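    ///
    /// # Example
    ///
    /// A brief sketch of requesting 64 bytes of output (the length here is
    /// arbitrary; see the [`OutputReader`] docs for notes on output length
    /// and security):
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update(b"some input");
    /// let mut output = [0u8; 64];
    /// hasher.finalize_xof().fill(&mut output);
    /// ```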
1383    pub fn finalize_xof(&self) -> OutputReader {
1384        OutputReader::new(self.final_output())
1385    }
1386
1387    /// Return the total number of bytes hashed so far.
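    ///
    /// # Example
    ///
    /// A quick sketch (the input sizes are arbitrary):
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// assert_eq!(hasher.count(), 0);
    /// hasher.update(&[0; 100]);
    /// hasher.update(&[0; 2000]);
    /// assert_eq!(hasher.count(), 2100);
    /// ```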
1388    pub fn count(&self) -> u64 {
1389        self.chunk_state.chunk_counter * CHUNK_LEN as u64 + self.chunk_state.len() as u64
1390    }
1391
1392    /// As [`update`](Hasher::update), but reading from a
1393    /// [`std::io::Read`](https://doc.rust-lang.org/std/io/trait.Read.html) implementation.
1394    ///
1395    /// [`Hasher`] implements
1396    /// [`std::io::Write`](https://doc.rust-lang.org/std/io/trait.Write.html), so it's possible to
1397    /// use [`std::io::copy`](https://doc.rust-lang.org/std/io/fn.copy.html) to update a [`Hasher`]
1398    /// from any reader. Unfortunately, this standard approach can limit performance, because
1399    /// `copy` currently uses an internal 8 KiB buffer that isn't big enough to take advantage of
1400    /// all SIMD instruction sets. (In particular, [AVX-512](https://en.wikipedia.org/wiki/AVX-512)
1401    /// needs a 16 KiB buffer.) `update_reader` avoids this performance problem and is slightly
1402    /// more convenient.
1403    ///
1404    /// The internal buffer size this method uses may change at any time, and it may be different
1405    /// for different targets. The only guarantee is that it will be large enough for all of this
1406    /// crate's SIMD implementations on the current platform.
1407    ///
1408    /// The most common implementer of
1409    /// [`std::io::Read`](https://doc.rust-lang.org/std/io/trait.Read.html) might be
1410    /// [`std::fs::File`](https://doc.rust-lang.org/std/fs/struct.File.html), but note that memory
1411    /// mapping can be faster than this method for hashing large files. See
1412    /// [`update_mmap`](Hasher::update_mmap) and [`update_mmap_rayon`](Hasher::update_mmap_rayon),
1413    /// which require the `mmap` and (for the latter) `rayon` Cargo features.
1414    ///
1415    /// This method requires the `std` Cargo feature, which is enabled by default.
1416    ///
1417    /// # Example
1418    ///
1419    /// ```no_run
1420    /// # use std::fs::File;
1421    /// # use std::io;
1422    /// # fn main() -> io::Result<()> {
1423    /// // Hash standard input.
1424    /// let mut hasher = blake3::Hasher::new();
1425    /// hasher.update_reader(std::io::stdin().lock())?;
1426    /// println!("{}", hasher.finalize());
1427    /// # Ok(())
1428    /// # }
1429    /// ```
1430    #[cfg(feature = "std")]
1431    pub fn update_reader(&mut self, reader: impl std::io::Read) -> std::io::Result<&mut Self> {
1432        io::copy_wide(reader, self)?;
1433        Ok(self)
1434    }
1435
1436    /// As [`update`](Hasher::update), but using Rayon-based multithreading
1437    /// internally.
1438    ///
1439    /// This method is gated by the `rayon` Cargo feature, which is disabled by
1440    /// default but enabled on [docs.rs](https://docs.rs).
1441    ///
1442    /// To get any performance benefit from multithreading, the input buffer
1443    /// needs to be large. As a rule of thumb on x86_64, `update_rayon` is
1444    /// _slower_ than `update` for inputs under 128 KiB. That threshold varies
1445    /// quite a lot across different processors, and it's important to benchmark
1446    /// your specific use case. See also the performance warning associated with
1447    /// [`update_mmap_rayon`](Hasher::update_mmap_rayon).
1448    ///
1449    /// If you already have a large buffer in memory, and you want to hash it
1450    /// with multiple threads, this method is a good option. However, reading a
1451    /// file into memory just to call this method can be a performance mistake,
1452    /// both because it requires lots of memory and because single-threaded
1453    /// reads can be slow. For hashing whole files, see
1454    /// [`update_mmap_rayon`](Hasher::update_mmap_rayon), which is gated by both
1455    /// the `rayon` and `mmap` Cargo features.
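    ///
    /// # Example
    ///
    /// A rough sketch; the 1 MiB buffer below is only a stand-in for "a large
    /// input that's already in memory":
    ///
    /// ```no_run
    /// # #[cfg(feature = "rayon")]
    /// # {
    /// let big_input = vec![0u8; 1 << 20];
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update_rayon(&big_input);
    /// println!("{}", hasher.finalize());
    /// # }
    /// ```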
1456    #[cfg(feature = "rayon")]
1457    pub fn update_rayon(&mut self, input: &[u8]) -> &mut Self {
1458        self.update_with_join::<join::RayonJoin>(input)
1459    }
1460
1461    /// As [`update`](Hasher::update), but reading the contents of a file using memory mapping.
1462    ///
1463    /// Not all files can be memory mapped, and memory mapping small files can be slower than
1464    /// reading them the usual way. In those cases, this method will fall back to standard file IO.
1465    /// The heuristic for whether to use memory mapping is currently very simple (file size >=
1466    /// 16 KiB), and it might change at any time.
1467    ///
1468    /// Like [`update`](Hasher::update), this method is single-threaded. In this author's
1469    /// experience, memory mapping improves single-threaded performance by ~10% for large files
1470    /// that are already in cache. This probably varies between platforms, and as always it's a
1471    /// good idea to benchmark your own use case. In comparison, the multithreaded
1472    /// [`update_mmap_rayon`](Hasher::update_mmap_rayon) method can have a much larger impact on
1473    /// performance.
1474    ///
1475    /// There's a correctness reason that this method takes
1476    /// [`Path`](https://doc.rust-lang.org/stable/std/path/struct.Path.html) instead of
1477    /// [`File`](https://doc.rust-lang.org/std/fs/struct.File.html): reading from a memory-mapped
1478    /// file ignores the seek position of the original file handle (it neither respects the current
1479    /// position nor updates the position). This difference in behavior would've caused
1480    /// `update_mmap` and [`update_reader`](Hasher::update_reader) to give different answers and
1481    /// have different side effects in some cases. Taking a
1482    /// [`Path`](https://doc.rust-lang.org/stable/std/path/struct.Path.html) avoids this problem by
1483    /// making it clear that a new [`File`](https://doc.rust-lang.org/std/fs/struct.File.html) is
1484    /// opened internally.
1485    ///
1486    /// This method requires the `mmap` Cargo feature, which is disabled by default but enabled on
1487    /// [docs.rs](https://docs.rs).
1488    ///
1489    /// # Example
1490    ///
1491    /// ```no_run
1492    /// # use std::io;
1493    /// # use std::path::Path;
1494    /// # fn main() -> io::Result<()> {
1495    /// let path = Path::new("file.dat");
1496    /// let mut hasher = blake3::Hasher::new();
1497    /// hasher.update_mmap(path)?;
1498    /// println!("{}", hasher.finalize());
1499    /// # Ok(())
1500    /// # }
1501    /// ```
1502    #[cfg(feature = "mmap")]
1503    pub fn update_mmap(&mut self, path: impl AsRef<std::path::Path>) -> std::io::Result<&mut Self> {
1504        let file = std::fs::File::open(path.as_ref())?;
1505        if let Some(mmap) = io::maybe_mmap_file(&file)? {
1506            self.update(&mmap);
1507        } else {
1508            io::copy_wide(&file, self)?;
1509        }
1510        Ok(self)
1511    }
1512
1513    /// As [`update_rayon`](Hasher::update_rayon), but reading the contents of a file using
1514    /// memory mapping. This is the default behavior of `b3sum`.
1515    ///
1516    /// For large files that are likely to be in cache, this can be much faster than
1517    /// single-threaded hashing. When benchmarks report that BLAKE3 is 10x or 20x faster than other
1518    /// cryptographic hashes, this is usually what they're measuring. However...
1519    ///
1520    /// **Performance Warning:** There are cases where multithreading hurts performance. The worst
1521    /// case is [a large file on a spinning disk](https://github.com/BLAKE3-team/BLAKE3/issues/31),
1522    /// where simultaneous reads from multiple threads can cause "thrashing" (i.e. the disk spends
1523    /// more time seeking around than reading data). Windows tends to be somewhat worse about this,
1524    /// in part because it's less likely than Linux to keep very large files in cache. More
1525    /// generally, if your CPU cores are already busy, then multithreading will add overhead
1526    /// without improving performance. If your code runs in different environments that you don't
1527    /// control and can't measure, then unfortunately there's no one-size-fits-all answer for
1528    /// whether multithreading is a good idea.
1529    ///
1530    /// The memory mapping behavior of this function is the same as
1531    /// [`update_mmap`](Hasher::update_mmap), and the heuristic for when to fall back to standard
1532    /// file IO might change at any time.
1533    ///
1534    /// This method requires both the `mmap` and `rayon` Cargo features, which are disabled by
1535    /// default but enabled on [docs.rs](https://docs.rs).
1536    ///
1537    /// # Example
1538    ///
1539    /// ```no_run
1540    /// # use std::io;
1541    /// # use std::path::Path;
1542    /// # fn main() -> io::Result<()> {
1543    /// # #[cfg(feature = "rayon")]
1544    /// # {
1545    /// let path = Path::new("big_file.dat");
1546    /// let mut hasher = blake3::Hasher::new();
1547    /// hasher.update_mmap_rayon(path)?;
1548    /// println!("{}", hasher.finalize());
1549    /// # }
1550    /// # Ok(())
1551    /// # }
1552    /// ```
1553    #[cfg(feature = "mmap")]
1554    #[cfg(feature = "rayon")]
1555    pub fn update_mmap_rayon(
1556        &mut self,
1557        path: impl AsRef<std::path::Path>,
1558    ) -> std::io::Result<&mut Self> {
1559        let file = std::fs::File::open(path.as_ref())?;
1560        if let Some(mmap) = io::maybe_mmap_file(&file)? {
1561            self.update_rayon(&mmap);
1562        } else {
1563            io::copy_wide(&file, self)?;
1564        }
1565        Ok(self)
1566    }
1567}
1568
1569// Don't derive(Debug), because the state may be secret.
1570impl fmt::Debug for Hasher {
1571    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1572        f.debug_struct("Hasher")
1573            .field("flags", &self.chunk_state.flags)
1574            .field("platform", &self.chunk_state.platform)
1575            .finish()
1576    }
1577}
1578
1579impl Default for Hasher {
1580    #[inline]
1581    fn default() -> Self {
1582        Self::new()
1583    }
1584}
1585
1586#[cfg(feature = "std")]
1587impl std::io::Write for Hasher {
1588    /// This is equivalent to [`update`](#method.update).
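    ///
    /// # Example
    ///
    /// A minimal sketch of hashing a reader via `std::io::copy`. For large
    /// inputs, [`update_reader`](Hasher::update_reader) may be faster, for
    /// the buffer size reasons described there.
    ///
    /// ```no_run
    /// # fn main() -> std::io::Result<()> {
    /// let mut hasher = blake3::Hasher::new();
    /// std::io::copy(&mut std::io::stdin().lock(), &mut hasher)?;
    /// println!("{}", hasher.finalize());
    /// # Ok(())
    /// # }
    /// ```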
1589    #[inline]
1590    fn write(&mut self, input: &[u8]) -> std::io::Result<usize> {
1591        self.update(input);
1592        Ok(input.len())
1593    }
1594
1595    #[inline]
1596    fn flush(&mut self) -> std::io::Result<()> {
1597        Ok(())
1598    }
1599}
1600
1601#[cfg(feature = "zeroize")]
1602impl Zeroize for Hasher {
1603    fn zeroize(&mut self) {
1604        // Destructuring to trigger compile error as a reminder to update this impl.
1605        let Self {
1606            key,
1607            chunk_state,
1608            cv_stack,
1609        } = self;
1610
1611        key.zeroize();
1612        chunk_state.zeroize();
1613        cv_stack.zeroize();
1614    }
1615}
1616
1617/// An incremental reader for extended output, returned by
1618/// [`Hasher::finalize_xof`](struct.Hasher.html#method.finalize_xof).
1619///
1620/// Shorter BLAKE3 outputs are prefixes of longer ones, and explicitly requesting a short output is
1621/// equivalent to truncating the default-length output. Note that this is a difference between
1622/// BLAKE2 and BLAKE3.
1623///
1624/// # Security notes
1625///
1626/// Outputs shorter than the default length of 32 bytes (256 bits) provide less security. An N-bit
1627/// BLAKE3 output is intended to provide N bits of first and second preimage resistance and N/2
1628/// bits of collision resistance, for any N up to 256. Longer outputs don't provide any additional
1629/// security.
1630///
1631/// Avoid relying on the secrecy of the output offset, that is, the number of output bytes read or
1632/// the arguments to [`seek`](struct.OutputReader.html#method.seek) or
1633/// [`set_position`](struct.OutputReader.html#method.set_position). [_Block-Cipher-Based Tree
1634/// Hashing_ by Aldo Gunsing](https://eprint.iacr.org/2022/283) shows that an attacker who knows
1635/// both the message and the key (if any) can easily determine the offset of an extended output.
1636/// For comparison, AES-CTR has a similar property: if you know the key, you can decrypt a block
1637/// from an unknown position in the output stream to recover its block index. Callers with strong
1638/// secret keys aren't affected in practice, but secret offsets are a [design
1639/// smell](https://en.wikipedia.org/wiki/Design_smell) in any case.
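///
/// # Example
///
/// A small sketch of the prefix property described above: a short output is
/// the prefix of a longer one.
///
/// ```
/// let mut hasher = blake3::Hasher::new();
/// hasher.update(b"example input");
///
/// let mut short = [0u8; 16];
/// hasher.finalize_xof().fill(&mut short);
///
/// let mut long = [0u8; 100];
/// hasher.finalize_xof().fill(&mut long);
///
/// assert_eq!(short, long[..16]);
/// ```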
1640#[derive(Clone)]
1641pub struct OutputReader {
1642    inner: Output,
1643    position_within_block: u8,
1644}
1645
1646impl OutputReader {
1647    fn new(inner: Output) -> Self {
1648        Self {
1649            inner,
1650            position_within_block: 0,
1651        }
1652    }
1653
1654    // This helper function handles both the case where the output buffer is
1655    // shorter than one block, and the case where our position_within_block is
1656    // non-zero.
1657    fn fill_one_block(&mut self, buf: &mut &mut [u8]) {
1658        let output_block: [u8; BLOCK_LEN] = self.inner.root_output_block();
1659        let output_bytes = &output_block[self.position_within_block as usize..];
1660        let take = cmp::min(buf.len(), output_bytes.len());
1661        buf[..take].copy_from_slice(&output_bytes[..take]);
1662        self.position_within_block += take as u8;
1663        if self.position_within_block == BLOCK_LEN as u8 {
1664            self.inner.counter += 1;
1665            self.position_within_block = 0;
1666        }
1667        // Advance the dest buffer. mem::take() is a borrowck workaround.
1668        *buf = &mut core::mem::take(buf)[take..];
1669    }
1670
1671    /// Fill a buffer with output bytes and advance the position of the
1672    /// `OutputReader`. This is equivalent to [`Read::read`], except that it
1673    /// doesn't return a `Result`. Both methods always fill the entire buffer.
1674    ///
1675    /// Note that `OutputReader` doesn't buffer output bytes internally, so
1676    /// calling `fill` repeatedly with a short-length or odd-length slice will
1677    /// end up performing the same compression multiple times. If you're
1678    /// reading output in a loop, prefer a slice length that's a multiple of
1679    /// 64.
1680    ///
1681    /// The maximum output size of BLAKE3 is 2<sup>64</sup>-1 bytes. If you try
1682    /// to extract more than that, for example by seeking near the end and
1683    /// reading further, the behavior is unspecified.
1684    ///
1685    /// [`Read::read`]: #method.read
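    ///
    /// # Example
    ///
    /// A small sketch of the loop described above, reading output in
    /// 64-byte blocks (the block count is arbitrary):
    ///
    /// ```
    /// let mut reader = blake3::Hasher::new().update(b"some input").finalize_xof();
    /// let mut block = [0u8; 64];
    /// for _ in 0..4 {
    ///     reader.fill(&mut block);
    ///     // Use each block of output here...
    /// }
    /// assert_eq!(reader.position(), 256);
    /// ```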
1686    pub fn fill(&mut self, mut buf: &mut [u8]) {
1687        if buf.is_empty() {
1688            return;
1689        }
1690
1691        // If we're partway through a block, try to get to a block boundary.
1692        if self.position_within_block != 0 {
1693            self.fill_one_block(&mut buf);
1694        }
1695
1696        let full_blocks = buf.len() / BLOCK_LEN;
1697        let full_blocks_len = full_blocks * BLOCK_LEN;
1698        if full_blocks > 0 {
1699            debug_assert_eq!(0, self.position_within_block);
1700            self.inner.platform.xof_many(
1701                &self.inner.input_chaining_value,
1702                &self.inner.block,
1703                self.inner.block_len,
1704                self.inner.counter,
1705                self.inner.flags | ROOT,
1706                &mut buf[..full_blocks_len],
1707            );
1708            self.inner.counter += full_blocks as u64;
1709            buf = &mut buf[full_blocks_len..];
1710        }
1711
1712        if !buf.is_empty() {
1713            debug_assert!(buf.len() < BLOCK_LEN);
1714            self.fill_one_block(&mut buf);
1715            debug_assert!(buf.is_empty());
1716        }
1717    }
1718
1719    /// Return the current read position in the output stream. This is
1720    /// equivalent to [`Seek::stream_position`], except that it doesn't return
1721    /// a `Result`. The position of a new `OutputReader` starts at 0, and each
1722    /// call to [`fill`] or [`Read::read`] moves the position forward by the
1723    /// number of bytes read.
1724    ///
1725    /// [`Seek::stream_position`]: #method.stream_position
1726    /// [`fill`]: #method.fill
1727    /// [`Read::read`]: #method.read
1728    pub fn position(&self) -> u64 {
1729        self.inner.counter * BLOCK_LEN as u64 + self.position_within_block as u64
1730    }
1731
1732    /// Seek to a new read position in the output stream. This is equivalent to
1733    /// calling [`Seek::seek`] with [`SeekFrom::Start`], except that it doesn't
1734    /// return a `Result`.
1735    ///
1736    /// [`Seek::seek`]: #method.seek
1737    /// [`SeekFrom::Start`]: https://doc.rust-lang.org/std/io/enum.SeekFrom.html
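    ///
    /// # Example
    ///
    /// A short sketch of seeking: output read from position 100 matches the
    /// corresponding slice of a longer output read from the start.
    ///
    /// ```
    /// let mut reader = blake3::Hasher::new().update(b"input").finalize_xof();
    /// let mut from_start = [0u8; 200];
    /// reader.fill(&mut from_start);
    ///
    /// reader.set_position(100);
    /// let mut from_offset = [0u8; 100];
    /// reader.fill(&mut from_offset);
    ///
    /// assert_eq!(from_offset[..], from_start[100..]);
    /// ```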
1738    pub fn set_position(&mut self, position: u64) {
1739        self.position_within_block = (position % BLOCK_LEN as u64) as u8;
1740        self.inner.counter = position / BLOCK_LEN as u64;
1741    }
1742}
1743
1744// Don't derive(Debug), because the state may be secret.
1745impl fmt::Debug for OutputReader {
1746    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1747        f.debug_struct("OutputReader")
1748            .field("position", &self.position())
1749            .finish()
1750    }
1751}
1752
1753#[cfg(feature = "std")]
1754impl std::io::Read for OutputReader {
1755    #[inline]
1756    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
1757        self.fill(buf);
1758        Ok(buf.len())
1759    }
1760}
1761
1762#[cfg(feature = "std")]
1763impl std::io::Seek for OutputReader {
1764    fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> {
1765        let max_position = u64::MAX as i128;
1766        let target_position: i128 = match pos {
1767            std::io::SeekFrom::Start(x) => x as i128,
1768            std::io::SeekFrom::Current(x) => self.position() as i128 + x as i128,
1769            std::io::SeekFrom::End(_) => {
1770                return Err(std::io::Error::new(
1771                    std::io::ErrorKind::InvalidInput,
1772                    "seek from end not supported",
1773                ));
1774            }
1775        };
1776        if target_position < 0 {
1777            return Err(std::io::Error::new(
1778                std::io::ErrorKind::InvalidInput,
1779                "seek before start",
1780            ));
1781        }
1782        self.set_position(cmp::min(target_position, max_position) as u64);
1783        Ok(self.position())
1784    }
1785}
1786
1787#[cfg(feature = "zeroize")]
1788impl Zeroize for OutputReader {
1789    fn zeroize(&mut self) {
1790        // Destructuring to trigger compile error as a reminder to update this impl.
1791        let Self {
1792            inner,
1793            position_within_block,
1794        } = self;
1795
1796        inner.zeroize();
1797        position_within_block.zeroize();
1798    }
1799}