#![doc = include_str!("../doc/store.md")]

use core::{
	cell::Cell,
	fmt::Debug,
};

use funty::Integral;

use crate::{
	access::*,
	index::BitIdx,
	mem::{
		self,
		BitRegister,
	},
	order::BitOrder,
};

#[doc = include_str!("../doc/store/BitStore.md")]
pub trait BitStore: 'static + Debug {
	/// The register type that the implementor describes. This is always one
	/// of the unsigned integer fundamentals.
	type Mem: BitRegister + BitStore<Mem = Self::Mem>;
	/// A type that selects the appropriate load/store instructions when
	/// accessing the memory bus.
	type Access: BitAccess<Item = Self::Mem> + BitStore<Mem = Self::Mem>;
	/// A sibling implementor that is safe to use when the memory is known to
	/// be aliased by multiple handles.
	type Alias: BitStore<Mem = Self::Mem>;
	/// The inverse of `Alias`: the implementor to use once aliasing ends.
	type Unalias: BitStore<Mem = Self::Mem>;

	/// The zero constant.
	const ZERO: Self;

	/// Wraps a raw memory value in the implementing type.
	fn new(value: Self::Mem) -> Self;

	/// Loads a value out of the memory system according to the `Access`
	/// rules.
	fn load_value(&self) -> Self::Mem;

	/// Stores a value into the memory system. This requires an exclusive
	/// reference, so it can write directly without synchronization.
	fn store_value(&mut self, value: Self::Mem);

	/// Reads a single bit out of the loaded value, as selected by an index
	/// and a bit-ordering.
	#[inline]
	fn get_bit<O>(&self, index: BitIdx<Self::Mem>) -> bool
	where O: BitOrder {
		self.load_value() & index.select::<O>().into_inner()
			!= <Self::Mem as Integral>::ZERO
	}

	/// All implementors are required to have their alignment match their
	/// size.
	const ALIGNED_TO_SIZE: [(); 1];

	/// All implementors are required to have `Self` and `Self::Alias` be
	/// equal in representation.
	const ALIAS_WIDTH: [(); 1];
}

/// Generates `BitStore` implementations for the integer fundamentals, their
/// `BitSafe` wrappers, and their `Cell` wrappers.
macro_rules! store {
	($($base:ty => $safe:ty);+ $(;)?) => { $(
		impl BitStore for $base {
			type Mem = Self;
			/// Plain integers are only used as `BitStore` parameters for
			/// handles to unaliased memory, so they can use `Cell` accessors
			/// without synchronization cost.
			type Access = Cell<Self>;
			type Alias = $safe;
			type Unalias = Self;

			const ZERO: Self = 0;

			#[inline]
			fn new(value: Self::Mem) -> Self { value }

			#[inline]
			fn load_value(&self) -> Self::Mem {
				*self
			}

			#[inline]
			fn store_value(&mut self, value: Self::Mem) {
				*self = value;
			}

			const ALIGNED_TO_SIZE: [(); 1]
				= [(); mem::aligned_to_size::<Self>() as usize];

			const ALIAS_WIDTH: [(); 1]
				= [(); mem::layout_eq::<Self, Self::Alias>() as usize];
		}

		impl BitStore for $safe {
			type Mem = $base;
			type Access = <Self as BitSafe>::Rad;
			type Alias = Self;
			type Unalias = $base;

			const ZERO: Self = <Self as BitSafe>::ZERO;

			#[inline]
			fn new(value: Self::Mem) -> Self { <Self>::new(value) }

			#[inline]
			fn load_value(&self) -> Self::Mem {
				self.load()
			}

			#[inline]
			fn store_value(&mut self, value: Self::Mem) {
				*self = Self::new(value);
			}

			const ALIGNED_TO_SIZE: [(); 1]
				= [(); mem::aligned_to_size::<Self>() as usize];

			const ALIAS_WIDTH: [(); 1] = [()];
		}

		impl BitStore for Cell<$base> {
			type Mem = $base;
			type Access = Self;
			type Alias = Self;
			type Unalias = Self;

			const ZERO: Self = Self::new(0);

			#[inline]
			fn new(value: Self::Mem) -> Self { <Self>::new(value) }

			#[inline]
			fn load_value(&self) -> Self::Mem {
				self.get()
			}

			#[inline]
			fn store_value(&mut self, value: Self::Mem) {
				*self = Self::new(value);
			}

			const ALIGNED_TO_SIZE: [(); 1]
				= [(); mem::aligned_to_size::<Self>() as usize];

			const ALIAS_WIDTH: [(); 1] = [()];
		}
	)+ };
}

store! {
	u8 => BitSafeU8;
	u16 => BitSafeU16;
	u32 => BitSafeU32;
}

#[cfg(target_pointer_width = "64")]
store!(u64 => BitSafeU64);

store!(usize => BitSafeUsize);

/// Generates `BitStore` implementations for the atomic integers, gated on
/// their availability on the target architecture.
macro_rules! atomic {
	($($size:tt, $base:ty => $atom:ident);+ $(;)?) => { $(
		radium::if_atomic!(if atomic($size) {
			use core::sync::atomic::$atom;

			impl BitStore for $atom {
				type Mem = $base;
				type Access = Self;
				type Alias = Self;
				type Unalias = Self;

				const ZERO: Self = <Self>::new(0);

				#[inline]
				fn new(value: Self::Mem) -> Self { <Self>::new(value) }

				#[inline]
				fn load_value(&self) -> Self::Mem {
					self.load(core::sync::atomic::Ordering::Relaxed)
				}

				#[inline]
				fn store_value(&mut self, value: Self::Mem) {
					*self = Self::new(value);
				}

				const ALIGNED_TO_SIZE: [(); 1]
					= [(); mem::aligned_to_size::<Self>() as usize];

				const ALIAS_WIDTH: [(); 1] = [()];
			}
		});
	)+ };
}

atomic! {
	8, u8 => AtomicU8;
	16, u16 => AtomicU16;
	32, u32 => AtomicU32;
}

#[cfg(target_pointer_width = "64")]
atomic!(64, u64 => AtomicU64);

atomic!(size, usize => AtomicUsize);

#[cfg(test)]
mod tests {
	use static_assertions::*;

	use super::*;
	use crate::prelude::*;

	#[test]
	fn load_store() {
		let mut word = 0usize;

		word.store_value(39);
		assert_eq!(word.load_value(), 39);

		let mut safe = BitSafeUsize::new(word);
		safe.store_value(57);
		assert_eq!(safe.load_value(), 57);

		let mut cell = Cell::new(0usize);
		cell.store_value(39);
		assert_eq!(cell.load_value(), 39);

		radium::if_atomic!(if atomic(size) {
			let mut atom = AtomicUsize::new(0);
			atom.store_value(57);
			assert_eq!(atom.load_value(), 57);
		});
	}
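
	/// Added sketch (not part of the upstream suite): exercises the
	/// `get_bit` default method. With `Lsb0` ordering, index `n` selects
	/// the `1 << n` bit of the register.
	#[test]
	fn get_bit_lsb0() {
		let word = 0b0101u8;
		assert!(word.get_bit::<Lsb0>(BitIdx::new(0).unwrap()));
		assert!(!word.get_bit::<Lsb0>(BitIdx::new(1).unwrap()));
		assert!(word.get_bit::<Lsb0>(BitIdx::new(2).unwrap()));
		assert!(!word.get_bit::<Lsb0>(BitIdx::new(3).unwrap()));
	}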

	/// Unaliased `BitSlice`s are universally threadsafe, because they
	/// satisfy Rust's unsynchronized-mutation rules.
	#[test]
	fn unaliased_send_sync() {
		assert_impl_all!(BitSlice<u8, LocalBits>: Send, Sync);
		assert_impl_all!(BitSlice<u16, LocalBits>: Send, Sync);
		assert_impl_all!(BitSlice<u32, LocalBits>: Send, Sync);
		assert_impl_all!(BitSlice<usize, LocalBits>: Send, Sync);

		#[cfg(target_pointer_width = "64")]
		assert_impl_all!(BitSlice<u64, LocalBits>: Send, Sync);
	}
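
	/// Added sketch (hypothetical, not in the upstream suite): the `Alias`
	/// and `Unalias` associated types are inverses for the integer
	/// implementors, and identities for `Cell`s.
	#[test]
	fn alias_round_trip() {
		assert_type_eq_all!(<u8 as BitStore>::Alias, BitSafeU8);
		assert_type_eq_all!(<BitSafeU8 as BitStore>::Unalias, u8);
		assert_type_eq_all!(<Cell<u32> as BitStore>::Alias, Cell<u32>);
	}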

	/// `Cell`s are never threadsafe, so `BitSlice`s over them are not
	/// either.
	#[test]
	fn cell_unsend_unsync() {
		assert_not_impl_any!(BitSlice<Cell<u8>, LocalBits>: Send, Sync);
		assert_not_impl_any!(BitSlice<Cell<u16>, LocalBits>: Send, Sync);
		assert_not_impl_any!(BitSlice<Cell<u32>, LocalBits>: Send, Sync);
		assert_not_impl_any!(BitSlice<Cell<usize>, LocalBits>: Send, Sync);

		#[cfg(target_pointer_width = "64")]
		assert_not_impl_any!(BitSlice<Cell<u64>, LocalBits>: Send, Sync);
	}
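
	/// Added sketch (hypothetical): `ZERO` and `new` agree across
	/// implementors; a zeroed store loads back as the register's zero.
	#[test]
	fn zero_and_new() {
		assert_eq!(<u32 as BitStore>::ZERO.load_value(), 0);
		assert_eq!(<Cell<u8> as BitStore>::ZERO.load_value(), 0);
		assert_eq!(<u16 as BitStore>::new(7).load_value(), 7);
	}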

	/// When atomics are disabled, the `BitSafe` wrappers fall back to `Cell`
	/// accessors, so aliased `BitSlice`s lose their thread-safety.
	#[test]
	#[cfg(not(feature = "atomic"))]
	fn aliased_non_atomic_unsend_unsync() {
		assert_not_impl_any!(BitSlice<BitSafeU8, LocalBits>: Send, Sync);
		assert_not_impl_any!(BitSlice<BitSafeU16, LocalBits>: Send, Sync);
		assert_not_impl_any!(BitSlice<BitSafeU32, LocalBits>: Send, Sync);
		assert_not_impl_any!(BitSlice<BitSafeUsize, LocalBits>: Send, Sync);

		#[cfg(target_pointer_width = "64")]
		assert_not_impl_any!(BitSlice<BitSafeU64, LocalBits>: Send, Sync);
	}

	/// When atomics are enabled, aliased `BitSlice`s use atomic accessors
	/// and so remain threadsafe.
	#[test]
	#[cfg(feature = "atomic")]
	fn aliased_atomic_send_sync() {
		assert_impl_all!(BitSlice<AtomicU8, LocalBits>: Send, Sync);
		assert_impl_all!(BitSlice<AtomicU16, LocalBits>: Send, Sync);
		assert_impl_all!(BitSlice<AtomicU32, LocalBits>: Send, Sync);
		assert_impl_all!(BitSlice<AtomicUsize, LocalBits>: Send, Sync);

		#[cfg(target_pointer_width = "64")]
		assert_impl_all!(BitSlice<AtomicU64, LocalBits>: Send, Sync);
	}
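
	/// Added sketch (hypothetical): any implementor can be driven through
	/// the generic trait surface; `store_value` requires `&mut` exclusivity
	/// while `load_value` reads through the `Access` rules.
	#[test]
	fn generic_roundtrip() {
		fn roundtrip<S: BitStore>(mut store: S, value: S::Mem) -> S::Mem {
			store.store_value(value);
			store.load_value()
		}

		assert_eq!(roundtrip(0u8, 0xA5), 0xA5);
		assert_eq!(roundtrip(Cell::new(0u16), 0x1234), 0x1234);
	}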
}