1// Copyright 2018 Developers of the Rand project.
2//
3// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
4// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
5// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
6// option. This file may not be copied, modified, or distributed
7// except according to those terms.
89//! The `BlockRngCore` trait and implementation helpers
10//!
11//! The [`BlockRngCore`] trait exists to assist in the implementation of RNGs
12//! which generate a block of data in a cache instead of returning generated
13//! values directly.
14//!
15//! Usage of this trait is optional, but provides two advantages:
16//! implementations only need to concern themselves with generation of the
17//! block, not the various [`RngCore`] methods (especially [`fill_bytes`], where
//! the optimal implementations are not trivial), and this allows
//! `ReseedingRng` (see [`rand`](https://docs.rs/rand) crate) to perform
//! periodic reseeding with very low overhead.
21//!
22//! # Example
23//!
24//! ```no_run
25//! use rand_core::{RngCore, SeedableRng};
26//! use rand_core::block::{BlockRngCore, BlockRng};
27//!
28//! struct MyRngCore;
29//!
30//! impl BlockRngCore for MyRngCore {
31//! type Item = u32;
32//! type Results = [u32; 16];
33//!
34//! fn generate(&mut self, results: &mut Self::Results) {
35//! unimplemented!()
36//! }
37//! }
38//!
39//! impl SeedableRng for MyRngCore {
40//! type Seed = [u8; 32];
41//! fn from_seed(seed: Self::Seed) -> Self {
42//! unimplemented!()
43//! }
44//! }
45//!
46//! // optionally, also implement CryptoRng for MyRngCore
47//!
48//! // Final RNG.
49//! let mut rng = BlockRng::<MyRngCore>::seed_from_u64(0);
50//! println!("First value: {}", rng.next_u32());
51//! ```
52//!
53//! [`BlockRngCore`]: crate::block::BlockRngCore
54//! [`fill_bytes`]: RngCore::fill_bytes
5556use crate::impls::{fill_via_u32_chunks, fill_via_u64_chunks};
57use crate::{CryptoRng, Error, RngCore, SeedableRng};
58use core::convert::AsRef;
59use core::fmt;
60#[cfg(feature = "serde1")]
61use serde::{Deserialize, Serialize};
/// A trait for RNGs which do not generate random numbers individually, but in
/// blocks (typically `[u32; N]`). This technique is commonly used by
/// cryptographic RNGs to improve performance.
///
/// See the [module][crate::block] documentation for details.
pub trait BlockRngCore {
    /// Results element type, e.g. `u32`.
    type Item;

    /// Results type. This is the 'block' an RNG implementing `BlockRngCore`
    /// generates, which will usually be an array like `[u32; 16]`.
    type Results: AsRef<[Self::Item]> + AsMut<[Self::Item]> + Default;

    /// Generate a new block of results, overwriting `results` entirely with
    /// freshly generated values.
    fn generate(&mut self, results: &mut Self::Results);
}
/// A wrapper type implementing [`RngCore`] for some type implementing
/// [`BlockRngCore`] with `u32` array buffer; i.e. this can be used to implement
/// a full RNG from just a `generate` function.
///
/// The `core` field may be accessed directly but the results buffer may not.
/// PRNG implementations can simply use a type alias
/// (`pub type MyRng = BlockRng<MyRngCore>;`) but might prefer to use a
/// wrapper type (`pub struct MyRng(BlockRng<MyRngCore>);`); the latter must
/// re-implement `RngCore` but hides the implementation details and allows
/// extra functionality to be defined on the RNG
/// (e.g. `impl MyRng { fn set_stream(...){...} }`).
///
/// `BlockRng` has heavily optimized implementations of the [`RngCore`] methods
/// reading values from the results buffer, as well as
/// calling [`BlockRngCore::generate`] directly on the output array when
/// [`fill_bytes`] / [`try_fill_bytes`] is called on a large array. These methods
/// also handle the bookkeeping of when to generate a new batch of values.
///
/// No whole generated `u32` values are thrown away and all values are consumed
/// in-order. [`next_u32`] simply takes the next available `u32` value.
/// [`next_u64`] is implemented by combining two `u32` values, least
/// significant first. [`fill_bytes`] and [`try_fill_bytes`] consume a whole
/// number of `u32` values, converting each `u32` to a byte slice in
/// little-endian order. If the requested byte length is not a multiple of 4,
/// some bytes will be discarded.
///
/// See also [`BlockRng64`] which uses `u64` array buffers. Currently there is
/// no direct support for other buffer types.
///
/// For easy initialization `BlockRng` also implements [`SeedableRng`].
///
/// [`next_u32`]: RngCore::next_u32
/// [`next_u64`]: RngCore::next_u64
/// [`fill_bytes`]: RngCore::fill_bytes
/// [`try_fill_bytes`]: RngCore::try_fill_bytes
#[derive(Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[cfg_attr(
    feature = "serde1",
    serde(
        bound = "for<'x> R: Serialize + Deserialize<'x> + Sized, for<'x> R::Results: Serialize + Deserialize<'x>"
    )
)]
pub struct BlockRng<R: BlockRngCore + ?Sized> {
    // Buffer of generated values; entries at `index..` are not yet consumed.
    results: R::Results,
    // Position of the next unconsumed value; `index >= len` means "empty".
    index: usize,
    /// The *core* part of the RNG, implementing the `generate` function.
    pub core: R,
}
129130// Custom Debug implementation that does not expose the contents of `results`.
131impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng<R> {
132fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
133 fmt.debug_struct("BlockRng")
134 .field("core", &self.core)
135 .field("result_len", &self.results.as_ref().len())
136 .field("index", &self.index)
137 .finish()
138 }
139}
140141impl<R: BlockRngCore> BlockRng<R> {
142/// Create a new `BlockRng` from an existing RNG implementing
143 /// `BlockRngCore`. Results will be generated on first use.
144#[inline]
145pub fn new(core: R) -> BlockRng<R> {
146let results_empty = R::Results::default();
147 BlockRng {
148 core,
149 index: results_empty.as_ref().len(),
150 results: results_empty,
151 }
152 }
153154/// Get the index into the result buffer.
155 ///
156 /// If this is equal to or larger than the size of the result buffer then
157 /// the buffer is "empty" and `generate()` must be called to produce new
158 /// results.
159#[inline(always)]
160pub fn index(&self) -> usize {
161self.index
162 }
163164/// Reset the number of available results.
165 /// This will force a new set of results to be generated on next use.
166#[inline]
167pub fn reset(&mut self) {
168self.index = self.results.as_ref().len();
169 }
170171/// Generate a new set of results immediately, setting the index to the
172 /// given value.
173#[inline]
174pub fn generate_and_set(&mut self, index: usize) {
175assert!(index < self.results.as_ref().len());
176self.core.generate(&mut self.results);
177self.index = index;
178 }
179}
impl<R: BlockRngCore<Item = u32>> RngCore for BlockRng<R>
where
    <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>,
{
    #[inline]
    fn next_u32(&mut self) -> u32 {
        // Refill the buffer once all values have been consumed.
        if self.index >= self.results.as_ref().len() {
            self.generate_and_set(0);
        }

        let value = self.results.as_ref()[self.index];
        self.index += 1;
        value
    }

    #[inline]
    fn next_u64(&mut self) -> u64 {
        // Combine two consecutive u32 values, least significant first.
        let read_u64 = |results: &[u32], index| {
            let data = &results[index..=index + 1];
            u64::from(data[1]) << 32 | u64::from(data[0])
        };

        let len = self.results.as_ref().len();

        let index = self.index;
        if index < len - 1 {
            // Common case: at least two values available.
            self.index += 2;
            // Read an u64 from the current index
            read_u64(self.results.as_ref(), index)
        } else if index >= len {
            // Buffer empty: regenerate and consume the first two values.
            self.generate_and_set(2);
            read_u64(self.results.as_ref(), 0)
        } else {
            // Exactly one value left: use it as the low half, regenerate,
            // then use the first new value as the high half (no value is
            // ever discarded).
            let x = u64::from(self.results.as_ref()[len - 1]);
            self.generate_and_set(1);
            let y = u64::from(self.results.as_ref()[0]);
            (y << 32) | x
        }
    }

    #[inline]
    fn fill_bytes(&mut self, dest: &mut [u8]) {
        let mut read_len = 0;
        while read_len < dest.len() {
            // Refill whenever the buffer runs out mid-fill.
            if self.index >= self.results.as_ref().len() {
                self.generate_and_set(0);
            }
            // Copy whole u32 values into `dest` as little-endian bytes;
            // a trailing partial u32 (dest.len() % 4 != 0) is discarded.
            let (consumed_u32, filled_u8) =
                fill_via_u32_chunks(&self.results.as_ref()[self.index..], &mut dest[read_len..]);

            self.index += consumed_u32;
            read_len += filled_u8;
        }
    }

    #[inline(always)]
    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
        // Infallible: simply delegates to `fill_bytes`.
        self.fill_bytes(dest);
        Ok(())
    }
}
242243impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng<R> {
244type Seed = R::Seed;
245246#[inline(always)]
247fn from_seed(seed: Self::Seed) -> Self {
248Self::new(R::from_seed(seed))
249 }
250251#[inline(always)]
252fn seed_from_u64(seed: u64) -> Self {
253Self::new(R::seed_from_u64(seed))
254 }
255256#[inline(always)]
257fn from_rng<S: RngCore>(rng: S) -> Result<Self, Error> {
258Ok(Self::new(R::from_rng(rng)?))
259 }
260}
/// A wrapper type implementing [`RngCore`] for some type implementing
/// [`BlockRngCore`] with `u64` array buffer; i.e. this can be used to implement
/// a full RNG from just a `generate` function.
///
/// This is similar to [`BlockRng`], but specialized for algorithms that operate
/// on `u64` values.
///
/// No whole generated `u64` values are thrown away and all values are consumed
/// in-order. [`next_u64`] simply takes the next available `u64` value.
/// [`next_u32`] is however a bit special: half of a `u64` is consumed, leaving
/// the other half in the buffer. If the next function called is [`next_u32`]
/// then the other half is then consumed, however both [`next_u64`] and
/// [`fill_bytes`] discard the rest of any half-consumed `u64`s when called.
///
/// [`fill_bytes`] and [`try_fill_bytes`] consume a whole number of `u64`
/// values. If the requested length is not a multiple of 8, some bytes will be
/// discarded.
///
/// [`next_u32`]: RngCore::next_u32
/// [`next_u64`]: RngCore::next_u64
/// [`fill_bytes`]: RngCore::fill_bytes
/// [`try_fill_bytes`]: RngCore::try_fill_bytes
#[derive(Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct BlockRng64<R: BlockRngCore + ?Sized> {
    // Buffer of generated values; entries at `index..` are not yet consumed.
    results: R::Results,
    // Position of the next unconsumed value; `index >= len` means "empty".
    index: usize,
    half_used: bool, // true if only half of the previous result is used
    /// The *core* part of the RNG, implementing the `generate` function.
    pub core: R,
}
293294// Custom Debug implementation that does not expose the contents of `results`.
295impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng64<R> {
296fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
297 fmt.debug_struct("BlockRng64")
298 .field("core", &self.core)
299 .field("result_len", &self.results.as_ref().len())
300 .field("index", &self.index)
301 .field("half_used", &self.half_used)
302 .finish()
303 }
304}
305306impl<R: BlockRngCore> BlockRng64<R> {
307/// Create a new `BlockRng` from an existing RNG implementing
308 /// `BlockRngCore`. Results will be generated on first use.
309#[inline]
310pub fn new(core: R) -> BlockRng64<R> {
311let results_empty = R::Results::default();
312 BlockRng64 {
313 core,
314 index: results_empty.as_ref().len(),
315 half_used: false,
316 results: results_empty,
317 }
318 }
319320/// Get the index into the result buffer.
321 ///
322 /// If this is equal to or larger than the size of the result buffer then
323 /// the buffer is "empty" and `generate()` must be called to produce new
324 /// results.
325#[inline(always)]
326pub fn index(&self) -> usize {
327self.index
328 }
329330/// Reset the number of available results.
331 /// This will force a new set of results to be generated on next use.
332#[inline]
333pub fn reset(&mut self) {
334self.index = self.results.as_ref().len();
335self.half_used = false;
336 }
337338/// Generate a new set of results immediately, setting the index to the
339 /// given value.
340#[inline]
341pub fn generate_and_set(&mut self, index: usize) {
342assert!(index < self.results.as_ref().len());
343self.core.generate(&mut self.results);
344self.index = index;
345self.half_used = false;
346 }
347}
348349impl<R: BlockRngCore<Item = u64>> RngCore for BlockRng64<R>
350where
351<R as BlockRngCore>::Results: AsRef<[u64]> + AsMut<[u64]>,
352{
353#[inline]
354fn next_u32(&mut self) -> u32 {
355let mut index = self.index - self.half_used as usize;
356if index >= self.results.as_ref().len() {
357self.core.generate(&mut self.results);
358self.index = 0;
359 index = 0;
360// `self.half_used` is by definition `false`
361self.half_used = false;
362 }
363364let shift = 32 * (self.half_used as usize);
365366self.half_used = !self.half_used;
367self.index += self.half_used as usize;
368369 (self.results.as_ref()[index] >> shift) as u32
370 }
371372#[inline]
373fn next_u64(&mut self) -> u64 {
374if self.index >= self.results.as_ref().len() {
375self.core.generate(&mut self.results);
376self.index = 0;
377 }
378379let value = self.results.as_ref()[self.index];
380self.index += 1;
381self.half_used = false;
382 value
383 }
384385#[inline]
386fn fill_bytes(&mut self, dest: &mut [u8]) {
387let mut read_len = 0;
388self.half_used = false;
389while read_len < dest.len() {
390if self.index as usize >= self.results.as_ref().len() {
391self.core.generate(&mut self.results);
392self.index = 0;
393 }
394395let (consumed_u64, filled_u8) = fill_via_u64_chunks(
396&self.results.as_ref()[self.index as usize..],
397&mut dest[read_len..],
398 );
399400self.index += consumed_u64;
401 read_len += filled_u8;
402 }
403 }
404405#[inline(always)]
406fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
407self.fill_bytes(dest);
408Ok(())
409 }
410}
411412impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng64<R> {
413type Seed = R::Seed;
414415#[inline(always)]
416fn from_seed(seed: Self::Seed) -> Self {
417Self::new(R::from_seed(seed))
418 }
419420#[inline(always)]
421fn seed_from_u64(seed: u64) -> Self {
422Self::new(R::seed_from_u64(seed))
423 }
424425#[inline(always)]
426fn from_rng<S: RngCore>(rng: S) -> Result<Self, Error> {
427Ok(Self::new(R::from_rng(rng)?))
428 }
429}
430431impl<R: BlockRngCore + CryptoRng> CryptoRng for BlockRng<R> {}
#[cfg(test)]
mod test {
    use crate::{SeedableRng, RngCore};
    use crate::block::{BlockRng, BlockRng64, BlockRngCore};

    // Deterministic u32 test core: each generated block is a simple
    // counter sequence, so outputs from different call interleavings can
    // be compared value-for-value.
    #[derive(Debug, Clone)]
    struct DummyRng {
        counter: u32,
    }

    impl BlockRngCore for DummyRng {
        type Item = u32;

        type Results = [u32; 16];

        fn generate(&mut self, results: &mut Self::Results) {
            for r in results {
                *r = self.counter;
                // Arbitrary odd increment; wrapping keeps generation total.
                self.counter = self.counter.wrapping_add(3511615421);
            }
        }
    }

    impl SeedableRng for DummyRng {
        type Seed = [u8; 4];

        fn from_seed(seed: Self::Seed) -> Self {
            DummyRng { counter: u32::from_le_bytes(seed) }
        }
    }

    // `BlockRng` consumes whole `u32` values strictly in order, so any
    // interleaving of next_u32/next_u64 must yield the same byte stream.
    #[test]
    fn blockrng_next_u32_vs_next_u64() {
        let mut rng1 = BlockRng::<DummyRng>::from_seed([1, 2, 3, 4]);
        let mut rng2 = rng1.clone();
        let mut rng3 = rng1.clone();

        let mut a = [0; 16];
        (&mut a[..4]).copy_from_slice(&rng1.next_u32().to_le_bytes());
        (&mut a[4..12]).copy_from_slice(&rng1.next_u64().to_le_bytes());
        (&mut a[12..]).copy_from_slice(&rng1.next_u32().to_le_bytes());

        let mut b = [0; 16];
        (&mut b[..4]).copy_from_slice(&rng2.next_u32().to_le_bytes());
        (&mut b[4..8]).copy_from_slice(&rng2.next_u32().to_le_bytes());
        (&mut b[8..]).copy_from_slice(&rng2.next_u64().to_le_bytes());
        assert_eq!(a, b);

        let mut c = [0; 16];
        (&mut c[..8]).copy_from_slice(&rng3.next_u64().to_le_bytes());
        (&mut c[8..12]).copy_from_slice(&rng3.next_u32().to_le_bytes());
        (&mut c[12..]).copy_from_slice(&rng3.next_u32().to_le_bytes());
        assert_eq!(a, c);
    }

    // u64 counterpart of `DummyRng`.
    #[derive(Debug, Clone)]
    struct DummyRng64 {
        counter: u64,
    }

    impl BlockRngCore for DummyRng64 {
        type Item = u64;

        type Results = [u64; 8];

        fn generate(&mut self, results: &mut Self::Results) {
            for r in results {
                *r = self.counter;
                // Arbitrary odd increment; wrapping keeps generation total.
                self.counter = self.counter.wrapping_add(2781463553396133981);
            }
        }
    }

    impl SeedableRng for DummyRng64 {
        type Seed = [u8; 8];

        fn from_seed(seed: Self::Seed) -> Self {
            DummyRng64 { counter: u64::from_le_bytes(seed) }
        }
    }

    // `BlockRng64` discards a half-consumed u64 when next_u64 is called,
    // so mixed-width sequences diverge exactly where a half is dropped.
    #[test]
    fn blockrng64_next_u32_vs_next_u64() {
        let mut rng1 = BlockRng64::<DummyRng64>::from_seed([1, 2, 3, 4, 5, 6, 7, 8]);
        let mut rng2 = rng1.clone();
        let mut rng3 = rng1.clone();

        let mut a = [0; 16];
        (&mut a[..4]).copy_from_slice(&rng1.next_u32().to_le_bytes());
        (&mut a[4..12]).copy_from_slice(&rng1.next_u64().to_le_bytes());
        (&mut a[12..]).copy_from_slice(&rng1.next_u32().to_le_bytes());

        let mut b = [0; 16];
        (&mut b[..4]).copy_from_slice(&rng2.next_u32().to_le_bytes());
        (&mut b[4..8]).copy_from_slice(&rng2.next_u32().to_le_bytes());
        (&mut b[8..]).copy_from_slice(&rng2.next_u64().to_le_bytes());
        // rng1 dropped half a u64 before its next_u64; rng2 consumed it.
        assert_ne!(a, b);
        assert_eq!(&a[..4], &b[..4]);
        assert_eq!(&a[4..12], &b[8..]);

        let mut c = [0; 16];
        (&mut c[..8]).copy_from_slice(&rng3.next_u64().to_le_bytes());
        (&mut c[8..12]).copy_from_slice(&rng3.next_u32().to_le_bytes());
        (&mut c[12..]).copy_from_slice(&rng3.next_u32().to_le_bytes());
        assert_eq!(b, c);
    }
}