// heapless/pool/singleton/arc.rs
1//! Like [`std::sync::Arc`](https://doc.rust-lang.org/std/sync/struct.Arc.html) but backed by a
2//! memory [`Pool`](trait.Pool.html) rather than `#[global_allocator]`
3//!
4//! Note that the same limitations that apply to ["Box" pool] also apply to the "Arc" pool.
5//!
6//! ["Box" pool]: ../../index.html
7//!
8//! # Examples
9//!
10//! ``` ignore
11//! use heapless::{arc_pool, Arc};
12//!
13//! pub struct BigStruct { // <- does NOT implement Clone
14//! data: [u8; 128],
15//! // ..
16//! }
17//!
18//! // declare a memory pool
19//! arc_pool!(P: BigStruct);
20//!
21//!
22//! #[cortex_m_rt::entry]
23//! fn main() -> ! {
24//! static mut MEMORY: [u8; 1024] = [0; 1024];
25//!
26//! // give some static memory to the pool
27//! P::grow(MEMORY);
28//!
29//! let x: Arc<P> = P::alloc(BigStruct::new()).ok().expect("OOM");
30//! // ^ NOTE: this is the Pool type, not the data type
31//!
32//! // cloning is cheap; it increases the refcount
33//! let y = x.clone();
34//!
35//! // same data address
36//! assert_eq!(&*x as *const _, &*y as *const _);
37//!
38//! // auto-deref
39//! let data: &[u8] = &x.data;
40//!
41//! // decrease refcount
42//! drop(x);
43//!
44//! // refcount decreased to 0; memory is returned to the pool
45//! drop(y);
46//!
47//! // ..
48//! }
49//! ```
50//!
51//! The `grow_exact` API is also available on the "Arc pool". It requires using
52//! `Node<ArcInner<Type>>` as the array element type. Example below:
53//!
54//! ``` ignore
55//! use heapless::pool::{singleton::arc::ArcInner, Node};
56//!
57//! pub struct BigStruct { /* .. */ }
58//!
59//! arc_pool!(P: BigStruct);
60//!
61//! #[cortex_m_rt::entry]
62//! fn main() -> ! {
63//! static mut MEMORY: MaybeUninit<[Node<ArcInner<BigStruct>>; 2]> = MaybeUninit::uninit();
64//!
65//! P::grow_exact(MEMORY);
66//!
67//! // 2 allocations are guaranteed to work
68//! let x = P::alloc(BigStruct::new()).ok().expect("OOM");
69//! let y = P::alloc(BigStruct::new()).ok().expect("OOM");
70//!
71//! // ..
72//! }
73//! ```
74
75use core::{
76 cmp, fmt,
77 hash::{Hash, Hasher},
78 marker::PhantomData,
79 ops::Deref,
80 ptr,
81 sync::atomic,
82};
83
84#[cfg(cas_atomic_polyfill)]
85use atomic_polyfill::{AtomicUsize, Ordering};
86
87#[cfg(not(cas_atomic_polyfill))]
88use core::sync::atomic::{AtomicUsize, Ordering};
89
90use crate::pool::{self, stack::Ptr, Node};
91
/// Instantiates a pool of Arc pointers as a global singleton
// NOTE(any(test)) makes testing easier (no need to enable Cargo features for testing)
#[cfg(any(
    armv6m,
    armv7a,
    armv7r,
    armv7m,
    armv8m_main,
    all(
        any(target_arch = "x86_64", target_arch = "x86"),
        feature = "x86-sync-pool"
    ),
    test
))]
#[macro_export]
macro_rules! arc_pool {
    ($(#[$($attr:tt)*])* $ident:ident: $ty:ty) => {
        pub struct $ident;

        impl $crate::pool::singleton::arc::Pool for $ident {
            type Data = $ty;

            fn ptr() -> &'static $crate::pool::Pool<$crate::pool::singleton::arc::ArcInner<$ty>> {
                $(#[$($attr)*])*
                static POOL: $crate::pool::Pool<$crate::pool::singleton::arc::ArcInner<$ty>> =
                    $crate::pool::Pool::new();

                &POOL
            }
        }

        impl $ident {
            /// Allocates a new `Arc` and writes `data` to it
            ///
            /// Returns an `Err`or if the backing memory pool is empty
            pub fn alloc(data: $ty) -> Result<$crate::Arc<Self>, $ty>
            where
                Self: Sized,
            {
                $crate::Arc::new(data)
            }

            /// Increases the capacity of the pool
            ///
            /// This method might *not* fully utilize the given memory block due to alignment requirements
            ///
            /// This method returns the number of *new* blocks that can be allocated.
            pub fn grow(memory: &'static mut [u8]) -> usize {
                <Self as $crate::pool::singleton::arc::Pool>::ptr().grow(memory)
            }

            /// Increases the capacity of the pool
            ///
            /// Unlike `grow`, this method fully utilizes the given memory block
            // NOTE `MaybeUninit` is fully qualified because `macro_rules!` paths resolve at the
            // *expansion* site; an unqualified name would force every caller of `arc_pool!` to
            // have `use core::mem::MaybeUninit;` in scope
            pub fn grow_exact<A>(memory: &'static mut ::core::mem::MaybeUninit<A>) -> usize
            where
                A: AsMut<[$crate::pool::Node<$crate::pool::singleton::arc::ArcInner<$ty>>]>,
            {
                <Self as $crate::pool::singleton::arc::Pool>::ptr().grow_exact(memory)
            }
        }
    };
}
155
/// Pool of Arc pointers
///
/// Implemented by the singleton types generated by the [`arc_pool!`] macro; user code is not
/// expected to implement this trait by hand.
pub trait Pool {
    /// The data behind the Arc pointer
    type Data: 'static;

    // returns the global memory pool that backs this singleton (see `arc_pool!`)
    #[doc(hidden)]
    fn ptr() -> &'static pool::Pool<ArcInner<Self::Data>>;
}
164
// mostly a verbatim copy of liballoc(/src/sync.rs) as of v1.54.0 minus the `Weak` API
// anything that diverges has been marked with `XXX`

/// `std::sync::Arc` but backed by a memory [`Pool`] rather than `#[global_allocator]`
///
/// [`Pool`]: trait.Pool.html
///
/// An example and more details can be found in the [module level documentation](index.html).
// XXX `Pool::Data` is not `?Sized` -- `Unsize` coercions cannot be implemented on stable
pub struct Arc<P>
where
    P: Pool,
{
    // tells dropck that dropping an `Arc` may drop an `ArcInner<P::Data>` (see `drop_slow`)
    phantom: PhantomData<ArcInner<P::Data>>,
    // pool node that holds the `ArcInner` (refcount + data); initialized in `new`,
    // returned to the pool in `drop_slow`
    ptr: Ptr<Node<ArcInner<P::Data>>>,
    // ties this handle to the singleton pool `P` without storing any runtime data
    pool: PhantomData<P>,
}
182
impl<P> Arc<P>
where
    P: Pool,
{
    /// Constructs a new `Arc`
    ///
    /// Returns an `Err`or -- handing `data` back to the caller -- if the backing memory pool
    /// is empty
    // XXX original API is "infallible"
    pub fn new(data: P::Data) -> Result<Self, P::Data> {
        if let Some(node) = P::ptr().stack.try_pop() {
            // SAFETY: `node` was just popped off the pool's free stack, so no one else aliases
            // it; `ptr::write` initializes the slot without reading/dropping its (possibly
            // uninitialized) previous contents
            unsafe {
                ptr::write(
                    node.as_ref().data.get(),
                    ArcInner {
                        // refcount starts at 1: this handle is the sole owner so far
                        strong: AtomicUsize::new(1),
                        data,
                    },
                )
            }

            Ok(Self {
                phantom: PhantomData,
                pool: PhantomData,
                ptr: node,
            })
        } else {
            // pool exhausted
            Err(data)
        }
    }

    /// Shared access to the refcount + data stored in the pool node
    fn inner(&self) -> &ArcInner<P::Data> {
        // SAFETY: `self.ptr` points at a node initialized by `new` and not yet returned to the
        // pool -- that only happens in `drop_slow`, after the last handle is gone
        unsafe { &*self.ptr.as_ref().data.get() }
    }

    /// Builds an `Arc` from a node pointer *without* touching the refcount
    /// (used by `Clone`, which has already incremented it)
    fn from_inner(ptr: Ptr<Node<ArcInner<P::Data>>>) -> Self {
        Self {
            phantom: PhantomData,
            pool: PhantomData,
            ptr,
        }
    }

    // SAFETY (caller): no other `Arc` may alias the same node while the returned `&mut` lives
    // (in practice: the refcount must be about to reach / have reached 0)
    unsafe fn get_mut_unchecked(this: &mut Self) -> &mut P::Data {
        &mut (*this.ptr.as_ref().data.get()).data
    }

    // Destroys the data and returns the node to the pool; only called once the refcount hit 0.
    // NOTE(inline(never)) keeps this cold path out of the `Drop` fast path
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        // run `P::Data`'s destructor
        ptr::drop_in_place(Self::get_mut_unchecked(self));

        // XXX memory pool instead of `#[global_allocator]`
        // return memory to pool
        P::ptr().stack.push(self.ptr);
    }
}
240
// Refcounts above this trigger a panic in `Clone`, guarding against overflow of the counter;
// same limit as liballoc's `Arc`
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
242
243impl<P> AsRef<P::Data> for Arc<P>
244where
245 P: Pool,
246{
247 fn as_ref(&self) -> &P::Data {
248 &**self
249 }
250}
251
252// XXX no `Borrow` implementation due to 'conflicting implementations of trait' error
253
impl<P> Clone for Arc<P>
where
    P: Pool,
{
    fn clone(&self) -> Self {
        // Relaxed is sufficient for the increment: a new handle can only be created through an
        // existing one, so there is nothing to synchronize with yet (same reasoning as
        // liballoc's `Arc`)
        let old_size = self.inner().strong.fetch_add(1, Ordering::Relaxed);

        // guard against refcount overflow (e.g. `mem::forget` of clones in a loop);
        // `MAX_REFCOUNT` leaves enough headroom that concurrent increments cannot wrap the
        // counter before this check fires
        if old_size > MAX_REFCOUNT {
            // XXX original code calls `intrinsics::abort` which is unstable API
            panic!();
        }

        // refcount already bumped above, so construct the handle without touching it again
        Self::from_inner(self.ptr)
    }
}
269
270impl<P> fmt::Debug for Arc<P>
271where
272 P: Pool,
273 P::Data: fmt::Debug,
274{
275 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
276 fmt::Debug::fmt(&**self, f)
277 }
278}
279
impl<P> Deref for Arc<P>
where
    P: Pool,
{
    type Target = P::Data;

    // enables auto-deref (`arc.field`, `&*arc`) straight into the pooled data
    fn deref(&self) -> &P::Data {
        &self.inner().data
    }
}
290
291impl<P> fmt::Display for Arc<P>
292where
293 P: Pool,
294 P::Data: fmt::Display,
295{
296 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
297 fmt::Display::fmt(&**self, f)
298 }
299}
300
// XXX original uses `#[may_dangle]` which is an unstable language feature
impl<P> Drop for Arc<P>
where
    P: Pool,
{
    fn drop(&mut self) {
        // Release on the decrement publishes every write made through this handle to whichever
        // thread ends up dropping the last one
        if self.inner().strong.fetch_sub(1, Ordering::Release) != 1 {
            // other handles are still alive; nothing more to do
            return;
        }

        // this Acquire fence pairs with the Release decrements above, so this thread observes
        // all writes made through the other (already dropped) handles before destroying the data
        atomic::fence(Ordering::Acquire);

        // SAFETY: the refcount just reached 0, so this is the last handle; no aliases remain
        unsafe {
            self.drop_slow();
        }
    }
}
318
// marker impl: equality is total whenever `P::Data`'s is (delegates to the `PartialEq` impl)
impl<P> Eq for Arc<P>
where
    P: Pool,
    P::Data: Eq,
{
}
325
326impl<P> Hash for Arc<P>
327where
328 P: Pool,
329 P::Data: Hash,
330{
331 fn hash<H>(&self, state: &mut H)
332 where
333 H: Hasher,
334 {
335 (**self).hash(state)
336 }
337}
338
339impl<P> Ord for Arc<P>
340where
341 P: Pool,
342 P::Data: Ord,
343{
344 fn cmp(&self, other: &Self) -> cmp::Ordering {
345 (**self).cmp(&**other)
346 }
347}
348
349impl<P> PartialEq for Arc<P>
350where
351 P: Pool,
352 P::Data: PartialEq,
353{
354 fn eq(&self, other: &Self) -> bool {
355 // XXX missing pointer equality specialization, which uses an unstable language feature
356 (**self).eq(&**other)
357 }
358}
359
360impl<P> PartialOrd for Arc<P>
361where
362 P: Pool,
363 P::Data: PartialOrd,
364{
365 fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
366 (**self).partial_cmp(&**other)
367 }
368}
369
// SAFETY: sending an `Arc<P>` to another thread exposes `&P::Data` there (via `Deref`) and may
// drop `P::Data` there, so the data must be both `Sync` and `Send` -- same bounds as
// `std::sync::Arc`
unsafe impl<P> Send for Arc<P>
where
    P: Pool,
    P::Data: Sync + Send,
{
}
376
// SAFETY: a shared `&Arc<P>` can be cloned, and the clone sent to and dropped on another thread,
// so the data must be both `Sync` and `Send` -- same bounds as `std::sync::Arc`
unsafe impl<P> Sync for Arc<P>
where
    P: Pool,
    P::Data: Sync + Send,
{
}
383
// moving the `Arc` handle never moves the pooled data it points at, so `Unpin` holds
// unconditionally
impl<P> Unpin for Arc<P> where P: Pool {}
385
/// Unfortunate implementation detail required to use the `grow_exact` API
///
/// The per-allocation payload stored in each pool node: the user's value plus its strong
/// reference count.
pub struct ArcInner<T> {
    // the user's value; dropped in place when the last `Arc` goes away
    data: T,
    // number of `Arc` handles currently pointing at `data`
    strong: AtomicUsize,
    // XXX `Weak` API not implemented
    // weak: AtomicUsize,
}