// grafos_collections/map.rs

1//! A hash map stored in leased fabric memory with open addressing.
2//!
3//! [`FabricHashMap<K,V>`] stores key-value pairs in a remote memory region
4//! obtained via [`MemLease`]. Collision resolution uses linear probing.
5//! Keys are hashed with FNV-1a after postcard serialization.
6//!
7//! # Memory layout
8//!
9//! ```text
10//! Offset 0:        [header]  count: u64 (8) | bucket_count: u64 (8) | key_stride: u64 (8) | val_stride: u64 (8)
11//! Offset 32:       [buckets] bucket 0 | bucket 1 | ...
12//!
13//! Each bucket (1 + 8 + 4 + key_stride + 4 + val_stride bytes):
14//!   occupied: u8 (1) | hash: u64 (8) | key_len: u32 (4) | key_data: key_stride bytes | val_len: u32 (4) | val_data: val_stride bytes
15//! ```
16//!
17//! The `occupied` byte is `0` for empty, `1` for occupied, `2` for
18//! tombstoned (deleted but still part of the probe chain).
19//!
20//! # Example
21//!
22//! ```rust
23//! use grafos_collections::map::FabricHashMap;
24//! use grafos_std::mem::MemBuilder;
25//!
26//! # grafos_std::host::reset_mock();
27//! # grafos_std::host::mock_set_fbmu_arena_size(65536);
28//! let lease = MemBuilder::new().min_bytes(65536).acquire()?;
29//! let mut map: FabricHashMap<String, u64> = FabricHashMap::new(lease, 32, 16)?;
30//!
31//! map.insert(&"key".into(), &42)?;
32//! assert_eq!(map.get(&"key".into())?, Some(42));
33//! # Ok::<(), grafos_std::FabricError>(())
34//! ```
35
36extern crate alloc;
37use alloc::vec;
38use alloc::vec::Vec;
39
40use grafos_std::error::{FabricError, Result};
41use grafos_std::mem::{MemBuilder, MemLease};
42
43use serde::{de::DeserializeOwned, Serialize};
44
/// Size in bytes of the on-fabric header: count, bucket_count, key_stride,
/// val_stride — four little-endian u64 fields.
const HEADER_SIZE: u64 = 32;

/// Bucket state tag: slot has never held an entry; probe chains stop here.
const BUCKET_EMPTY: u8 = 0;
/// Bucket state tag: slot holds a live key-value pair.
const BUCKET_OCCUPIED: u8 = 1;
/// Bucket state tag: entry was deleted; lookups must keep probing past it.
const BUCKET_TOMBSTONE: u8 = 2;
50
/// A hash map stored in leased fabric memory with open addressing.
///
/// Keys and values are serialized via postcard into fixed-size slots.
/// Collision resolution uses linear probing. The map owns its [`MemLease`]
/// and releases it on drop.
///
/// The maximum load factor is 75%. Inserting beyond this threshold returns
/// [`FabricError::CapacityExceeded`].
///
/// # Type parameters
///
/// - `K`: Key type. Must implement `Serialize + DeserializeOwned + PartialEq`.
/// - `V`: Value type. Must implement `Serialize + DeserializeOwned`.
///
/// # Example
///
/// ```rust
/// use grafos_collections::map::FabricHashMap;
/// use grafos_std::mem::MemBuilder;
///
/// # grafos_std::host::reset_mock();
/// # grafos_std::host::mock_set_fbmu_arena_size(65536);
/// let mut map: FabricHashMap<u32, u32> = FabricHashMap::with_capacity(64, 8, 8)?;
/// map.insert(&1, &100)?;
/// map.insert(&2, &200)?;
/// assert_eq!(map.get(&1)?, Some(100));
/// assert_eq!(map.remove(&2)?, Some(200));
/// # Ok::<(), grafos_std::FabricError>(())
/// ```
pub struct FabricHashMap<K, V> {
    // Owned lease on the remote memory region backing the map.
    lease: MemLease,
    // Number of live entries; mirrored into the on-fabric header on every
    // mutation so from_lease() can recover it.
    count: u64,
    // Total number of bucket slots available in the arena.
    bucket_count: u64,
    // Maximum serialized key size in bytes (fixed slot width).
    key_stride: u64,
    // Maximum serialized value size in bytes (fixed slot width).
    val_stride: u64,
    // Size of one bucket record in bytes, derived from the strides.
    bucket_size: u64,
    // Zero-sized marker tying the key/value types to the struct.
    _marker: core::marker::PhantomData<(K, V)>,
}
89
90impl<K: Serialize + DeserializeOwned + PartialEq, V: Serialize + DeserializeOwned>
91    FabricHashMap<K, V>
92{
93    /// Create a new hash map taking ownership of the given lease.
94    ///
95    /// `key_stride` and `val_stride` are the maximum serialized sizes (in
96    /// bytes) for keys and values respectively. The number of buckets is
97    /// derived from `(arena_size - 32) / bucket_size`.
98    ///
99    /// All buckets are zeroed (marked empty) during construction.
100    ///
101    /// # Errors
102    ///
103    /// Returns [`FabricError::CapacityExceeded`] if the arena is too small
104    /// to hold the 32-byte header plus at least one bucket.
105    pub fn new(lease: MemLease, key_stride: usize, val_stride: usize) -> Result<Self> {
106        let arena = lease.mem().arena_size()?;
107        let bucket_size = Self::compute_bucket_size(key_stride as u64, val_stride as u64);
108        if arena < HEADER_SIZE + bucket_size {
109            return Err(FabricError::CapacityExceeded);
110        }
111        let bucket_count = (arena - HEADER_SIZE) / bucket_size;
112        let map = FabricHashMap {
113            lease,
114            count: 0,
115            bucket_count,
116            key_stride: key_stride as u64,
117            val_stride: val_stride as u64,
118            bucket_size,
119            _marker: core::marker::PhantomData,
120        };
121        map.write_header()?;
122        // Zero out all buckets (mark as empty)
123        let zero_bucket = vec![0u8; bucket_size as usize];
124        for i in 0..bucket_count {
125            let offset = HEADER_SIZE + i * bucket_size;
126            map.lease.mem().write(offset, &zero_bucket)?;
127        }
128        Ok(map)
129    }
130
131    /// Recover a hash map from an existing memory lease.
132    ///
133    /// Reads the 32-byte header to discover `count`, `bucket_count`,
134    /// `key_stride`, and `val_stride`. This enables crash recovery: a
135    /// replacement tasklet can call `MemBuilder::attach(lease_id)` and
136    /// then `FabricHashMap::from_lease()` to reconnect to a surviving
137    /// hot-tier hash map without re-inserting data.
138    ///
139    /// # Errors
140    ///
141    /// - [`FabricError::CapacityExceeded`] if the header is inconsistent
142    ///   with the arena size.
143    /// - [`FabricError::Disconnected`] if the read fails.
144    pub fn from_lease(lease: MemLease) -> Result<Self> {
145        let arena = lease.mem().arena_size()?;
146        if arena < HEADER_SIZE {
147            return Err(FabricError::CapacityExceeded);
148        }
149        let hdr = lease.mem().read(0, HEADER_SIZE as u32)?;
150        let count = u64::from_le_bytes([
151            hdr[0], hdr[1], hdr[2], hdr[3], hdr[4], hdr[5], hdr[6], hdr[7],
152        ]);
153        let bucket_count = u64::from_le_bytes([
154            hdr[8], hdr[9], hdr[10], hdr[11], hdr[12], hdr[13], hdr[14], hdr[15],
155        ]);
156        let key_stride = u64::from_le_bytes([
157            hdr[16], hdr[17], hdr[18], hdr[19], hdr[20], hdr[21], hdr[22], hdr[23],
158        ]);
159        let val_stride = u64::from_le_bytes([
160            hdr[24], hdr[25], hdr[26], hdr[27], hdr[28], hdr[29], hdr[30], hdr[31],
161        ]);
162        let bucket_size = Self::compute_bucket_size(key_stride, val_stride);
163        if arena < HEADER_SIZE + bucket_count * bucket_size {
164            return Err(FabricError::CapacityExceeded);
165        }
166        Ok(FabricHashMap {
167            lease,
168            count,
169            bucket_count,
170            key_stride,
171            val_stride,
172            bucket_size,
173            _marker: core::marker::PhantomData,
174        })
175    }
176
177    /// Create a new hash map by acquiring a lease sized for `bucket_count`
178    /// buckets.
179    ///
180    /// This is a convenience constructor that acquires a lease via
181    /// `MemBuilder` and then calls [`FabricHashMap::new`].
182    ///
183    /// # Errors
184    ///
185    /// Returns [`FabricError::CapacityExceeded`] if the host cannot provide
186    /// an arena large enough.
187    pub fn with_capacity(
188        bucket_count: usize,
189        key_stride: usize,
190        val_stride: usize,
191    ) -> Result<Self> {
192        let bucket_size = Self::compute_bucket_size(key_stride as u64, val_stride as u64);
193        let needed = HEADER_SIZE + (bucket_count as u64) * bucket_size;
194        let lease = MemBuilder::new().min_bytes(needed).acquire()?;
195        Self::new(lease, key_stride, val_stride)
196    }
197
198    /// Insert a key-value pair. Returns the previous value if the key
199    /// already existed.
200    ///
201    /// If the key is already present, the value is overwritten and the old
202    /// value is returned as `Some(old)`. If the key is new, returns `None`.
203    ///
204    /// # Errors
205    ///
206    /// - [`FabricError::CapacityExceeded`] if inserting would exceed the
207    ///   75% load factor, or if the serialized key/value exceeds their
208    ///   respective strides.
209    /// - [`FabricError::IoError`] if postcard serialization fails.
210    pub fn insert(&mut self, key: &K, value: &V) -> Result<Option<V>> {
211        let key_bytes = postcard::to_allocvec(key).map_err(|_| FabricError::IoError(-1))?;
212        let val_bytes = postcard::to_allocvec(value).map_err(|_| FabricError::IoError(-1))?;
213
214        if key_bytes.len() as u64 > self.key_stride || val_bytes.len() as u64 > self.val_stride {
215            return Err(FabricError::CapacityExceeded);
216        }
217
218        let hash = Self::hash_bytes(&key_bytes);
219        let mut idx = (hash % self.bucket_count) as usize;
220        let mut first_tombstone: Option<usize> = None;
221
222        for _ in 0..self.bucket_count {
223            let bucket = self.read_bucket(idx)?;
224            match bucket.occupied {
225                BUCKET_EMPTY => {
226                    // New entry — check load factor (75% max)
227                    if (self.count + 1) * 4 > self.bucket_count * 3 {
228                        return Err(FabricError::CapacityExceeded);
229                    }
230                    let target = first_tombstone.unwrap_or(idx);
231                    self.write_bucket(target, BUCKET_OCCUPIED, hash, &key_bytes, &val_bytes)?;
232                    self.count += 1;
233                    self.write_header()?;
234                    return Ok(None);
235                }
236                BUCKET_TOMBSTONE => {
237                    if first_tombstone.is_none() {
238                        first_tombstone = Some(idx);
239                    }
240                }
241                BUCKET_OCCUPIED => {
242                    if bucket.hash == hash {
243                        let existing_key: K =
244                            postcard::from_bytes(&bucket.key_data[..bucket.key_len as usize])
245                                .map_err(|_| FabricError::IoError(-1))?;
246                        if &existing_key == key {
247                            let old_val: V =
248                                postcard::from_bytes(&bucket.val_data[..bucket.val_len as usize])
249                                    .map_err(|_| FabricError::IoError(-1))?;
250                            self.write_bucket(idx, BUCKET_OCCUPIED, hash, &key_bytes, &val_bytes)?;
251                            return Ok(Some(old_val));
252                        }
253                    }
254                }
255                _ => {}
256            }
257            idx = (idx + 1) % self.bucket_count as usize;
258        }
259
260        // All buckets probed, key not found. Use a tombstone if available.
261        if let Some(target) = first_tombstone {
262            if (self.count + 1) * 4 > self.bucket_count * 3 {
263                return Err(FabricError::CapacityExceeded);
264            }
265            self.write_bucket(target, BUCKET_OCCUPIED, hash, &key_bytes, &val_bytes)?;
266            self.count += 1;
267            self.write_header()?;
268            return Ok(None);
269        }
270
271        Err(FabricError::CapacityExceeded)
272    }
273
274    /// Look up a value by key.
275    ///
276    /// Returns `Some(value)` if the key exists, `None` otherwise.
277    ///
278    /// # Errors
279    ///
280    /// Returns [`FabricError::IoError`] if serialization or remote read fails.
281    pub fn get(&self, key: &K) -> Result<Option<V>> {
282        let key_bytes = postcard::to_allocvec(key).map_err(|_| FabricError::IoError(-1))?;
283        let hash = Self::hash_bytes(&key_bytes);
284        let mut idx = (hash % self.bucket_count) as usize;
285
286        for _ in 0..self.bucket_count {
287            let bucket = self.read_bucket(idx)?;
288            match bucket.occupied {
289                BUCKET_EMPTY => return Ok(None),
290                BUCKET_OCCUPIED => {
291                    if bucket.hash == hash {
292                        let existing_key: K =
293                            postcard::from_bytes(&bucket.key_data[..bucket.key_len as usize])
294                                .map_err(|_| FabricError::IoError(-1))?;
295                        if &existing_key == key {
296                            let val: V =
297                                postcard::from_bytes(&bucket.val_data[..bucket.val_len as usize])
298                                    .map_err(|_| FabricError::IoError(-1))?;
299                            return Ok(Some(val));
300                        }
301                    }
302                }
303                _ => {} // tombstone: keep probing
304            }
305            idx = (idx + 1) % self.bucket_count as usize;
306        }
307
308        Ok(None)
309    }
310
311    /// Remove a key-value pair. Returns the removed value if the key existed.
312    ///
313    /// The bucket is tombstoned rather than cleared to preserve the linear
314    /// probing chain. Tombstoned buckets are reused by subsequent inserts.
315    ///
316    /// # Errors
317    ///
318    /// Returns [`FabricError::IoError`] if serialization or remote I/O fails.
319    pub fn remove(&mut self, key: &K) -> Result<Option<V>> {
320        let key_bytes = postcard::to_allocvec(key).map_err(|_| FabricError::IoError(-1))?;
321        let hash = Self::hash_bytes(&key_bytes);
322        let mut idx = (hash % self.bucket_count) as usize;
323
324        for _ in 0..self.bucket_count {
325            let bucket = self.read_bucket(idx)?;
326            match bucket.occupied {
327                BUCKET_EMPTY => return Ok(None),
328                BUCKET_OCCUPIED => {
329                    if bucket.hash == hash {
330                        let existing_key: K =
331                            postcard::from_bytes(&bucket.key_data[..bucket.key_len as usize])
332                                .map_err(|_| FabricError::IoError(-1))?;
333                        if &existing_key == key {
334                            let val: V =
335                                postcard::from_bytes(&bucket.val_data[..bucket.val_len as usize])
336                                    .map_err(|_| FabricError::IoError(-1))?;
337                            // Tombstone the bucket
338                            let zero_key = vec![0u8; 0];
339                            let zero_val = vec![0u8; 0];
340                            self.write_bucket(idx, BUCKET_TOMBSTONE, 0, &zero_key, &zero_val)?;
341                            self.count -= 1;
342                            self.write_header()?;
343                            return Ok(Some(val));
344                        }
345                    }
346                }
347                _ => {} // tombstone: keep probing
348            }
349            idx = (idx + 1) % self.bucket_count as usize;
350        }
351
352        Ok(None)
353    }
354
355    /// Returns `true` if the map contains the given key.
356    ///
357    /// Equivalent to `self.get(key)?.is_some()`.
358    pub fn contains_key(&self, key: &K) -> Result<bool> {
359        Ok(self.get(key)?.is_some())
360    }
361
362    /// Returns the number of entries in the map.
363    pub fn len(&self) -> usize {
364        self.count as usize
365    }
366
367    /// Returns `true` if the map is empty.
368    pub fn is_empty(&self) -> bool {
369        self.count == 0
370    }
371
372    /// Returns the lease ID of the memory lease for external renewal
373    /// management (e.g. via [`grafos_leasekit::RenewalManager`]).
374    pub fn lease_id(&self) -> u128 {
375        self.lease.lease_id()
376    }
377
378    /// Returns the expiry time (unix seconds) of the memory lease for
379    /// external renewal management.
380    pub fn expires_at_unix_secs(&self) -> u64 {
381        self.lease.expires_at_unix_secs()
382    }
383
384    /// Returns an iterator over all key-value pairs.
385    ///
386    /// The iterator scans all buckets, skipping empty and tombstoned slots.
387    /// Iteration order is determined by bucket layout, not insertion order.
388    pub fn iter(&self) -> FabricHashMapIter<'_, K, V> {
389        FabricHashMapIter {
390            map: self,
391            bucket_idx: 0,
392        }
393    }
394
395    fn compute_bucket_size(key_stride: u64, val_stride: u64) -> u64 {
396        // occupied(1) + hash(8) + key_len(4) + key_data(key_stride) + val_len(4) + val_data(val_stride)
397        1 + 8 + 4 + key_stride + 4 + val_stride
398    }
399
400    fn bucket_offset(&self, idx: usize) -> u64 {
401        HEADER_SIZE + (idx as u64) * self.bucket_size
402    }
403
404    fn read_bucket(&self, idx: usize) -> Result<BucketData> {
405        let offset = self.bucket_offset(idx);
406        let raw = self.lease.mem().read(offset, self.bucket_size as u32)?;
407        let occupied = raw[0];
408        let hash = u64::from_le_bytes([
409            raw[1], raw[2], raw[3], raw[4], raw[5], raw[6], raw[7], raw[8],
410        ]);
411        let key_len = u32::from_le_bytes([raw[9], raw[10], raw[11], raw[12]]);
412        let key_start = 13usize;
413        let key_end = key_start + self.key_stride as usize;
414        let key_data = raw[key_start..key_end].to_vec();
415        let val_len_start = key_end;
416        let val_len = u32::from_le_bytes([
417            raw[val_len_start],
418            raw[val_len_start + 1],
419            raw[val_len_start + 2],
420            raw[val_len_start + 3],
421        ]);
422        let val_start = val_len_start + 4;
423        let val_end = val_start + self.val_stride as usize;
424        let val_data = raw[val_start..val_end].to_vec();
425
426        Ok(BucketData {
427            occupied,
428            hash,
429            key_len,
430            key_data,
431            val_len,
432            val_data,
433        })
434    }
435
436    fn write_bucket(
437        &self,
438        idx: usize,
439        occupied: u8,
440        hash: u64,
441        key_bytes: &[u8],
442        val_bytes: &[u8],
443    ) -> Result<()> {
444        let mut buf = vec![0u8; self.bucket_size as usize];
445        buf[0] = occupied;
446        buf[1..9].copy_from_slice(&hash.to_le_bytes());
447        let key_len = key_bytes.len() as u32;
448        buf[9..13].copy_from_slice(&key_len.to_le_bytes());
449        buf[13..13 + key_bytes.len()].copy_from_slice(key_bytes);
450        let val_len_start = 13 + self.key_stride as usize;
451        let val_len = val_bytes.len() as u32;
452        buf[val_len_start..val_len_start + 4].copy_from_slice(&val_len.to_le_bytes());
453        buf[val_len_start + 4..val_len_start + 4 + val_bytes.len()].copy_from_slice(val_bytes);
454        let offset = self.bucket_offset(idx);
455        self.lease.mem().write(offset, &buf)
456    }
457
458    fn write_header(&self) -> Result<()> {
459        let mut hdr = [0u8; HEADER_SIZE as usize];
460        hdr[0..8].copy_from_slice(&self.count.to_le_bytes());
461        hdr[8..16].copy_from_slice(&self.bucket_count.to_le_bytes());
462        hdr[16..24].copy_from_slice(&self.key_stride.to_le_bytes());
463        hdr[24..32].copy_from_slice(&self.val_stride.to_le_bytes());
464        self.lease.mem().write(0, &hdr)
465    }
466
467    /// FNV-1a hash of a byte slice.
468    fn hash_bytes(data: &[u8]) -> u64 {
469        let mut hash: u64 = 0xcbf29ce484222325;
470        for &b in data {
471            hash ^= b as u64;
472            hash = hash.wrapping_mul(0x100000001b3);
473        }
474        hash
475    }
476}
477
/// In-memory decoded form of a single bucket record read from fabric memory.
struct BucketData {
    // Bucket state: BUCKET_EMPTY, BUCKET_OCCUPIED, or BUCKET_TOMBSTONE.
    occupied: u8,
    // FNV-1a hash of the serialized key (meaningful only when occupied).
    hash: u64,
    // Number of meaningful bytes at the start of `key_data`.
    key_len: u32,
    // Full key slot contents (key_stride bytes; padding past key_len).
    key_data: Vec<u8>,
    // Number of meaningful bytes at the start of `val_data`.
    val_len: u32,
    // Full value slot contents (val_stride bytes; padding past val_len).
    val_data: Vec<u8>,
}
486
/// Iterator over key-value pairs in a [`FabricHashMap`].
///
/// Scans all buckets sequentially, reading each from remote memory and
/// yielding only occupied entries. Empty and tombstoned buckets are
/// skipped. Each `next()` call may read multiple buckets before finding
/// the next occupied entry.
pub struct FabricHashMapIter<'a, K, V> {
    // Map being iterated; buckets are read through its lease.
    map: &'a FabricHashMap<K, V>,
    // Index of the next bucket to examine.
    bucket_idx: u64,
}
497
498impl<'a, K: Serialize + DeserializeOwned + PartialEq, V: Serialize + DeserializeOwned> Iterator
499    for FabricHashMapIter<'a, K, V>
500{
501    type Item = Result<(K, V)>;
502
503    fn next(&mut self) -> Option<Self::Item> {
504        while self.bucket_idx < self.map.bucket_count {
505            let idx = self.bucket_idx as usize;
506            self.bucket_idx += 1;
507            match self.map.read_bucket(idx) {
508                Ok(bucket) if bucket.occupied == BUCKET_OCCUPIED => {
509                    let key: K =
510                        match postcard::from_bytes(&bucket.key_data[..bucket.key_len as usize]) {
511                            Ok(k) => k,
512                            Err(_) => return Some(Err(FabricError::IoError(-1))),
513                        };
514                    let val: V =
515                        match postcard::from_bytes(&bucket.val_data[..bucket.val_len as usize]) {
516                            Ok(v) => v,
517                            Err(_) => return Some(Err(FabricError::IoError(-1))),
518                        };
519                    return Some(Ok((key, val)));
520                }
521                Err(e) => return Some(Err(e)),
522                _ => continue,
523            }
524        }
525        None
526    }
527}
528
// Unit tests exercising the map against the in-process mock fabric host.
#[cfg(test)]
mod tests {
    use super::*;
    use alloc::string::String;
    use grafos_std::host;

    // Reset the mock host, size its arena, and acquire a fresh lease.
    fn setup(arena_size: u64) -> MemLease {
        host::reset_mock();
        host::mock_set_fbmu_arena_size(arena_size);
        grafos_std::mem::MemBuilder::new()
            .acquire()
            .expect("acquire")
    }

    // Distinct keys round-trip through insert/get; a missing key is None.
    #[test]
    fn insert_get_roundtrip() {
        let lease = setup(65536);
        let mut map: FabricHashMap<u32, u32> = FabricHashMap::new(lease, 8, 8).expect("new");

        assert!(map.is_empty());
        assert_eq!(map.insert(&1, &100).expect("insert"), None);
        assert_eq!(map.insert(&2, &200).expect("insert"), None);
        assert_eq!(map.insert(&3, &300).expect("insert"), None);

        assert_eq!(map.len(), 3);
        assert_eq!(map.get(&1).expect("get"), Some(100));
        assert_eq!(map.get(&2).expect("get"), Some(200));
        assert_eq!(map.get(&3).expect("get"), Some(300));
        assert_eq!(map.get(&4).expect("get"), None);
    }

    // Re-inserting an existing key returns the old value and keeps len at 1.
    #[test]
    fn insert_overwrites_existing() {
        let lease = setup(65536);
        let mut map: FabricHashMap<u32, u32> = FabricHashMap::new(lease, 8, 8).expect("new");

        assert_eq!(map.insert(&1, &100).expect("insert"), None);
        assert_eq!(map.insert(&1, &999).expect("insert"), Some(100));
        assert_eq!(map.len(), 1);
        assert_eq!(map.get(&1).expect("get"), Some(999));
    }

    // remove() yields the stored value and leaves other entries intact.
    #[test]
    fn remove_returns_value() {
        let lease = setup(65536);
        let mut map: FabricHashMap<u32, u32> = FabricHashMap::new(lease, 8, 8).expect("new");

        map.insert(&1, &100).expect("insert");
        map.insert(&2, &200).expect("insert");

        assert_eq!(map.remove(&1).expect("remove"), Some(100));
        assert_eq!(map.len(), 1);
        assert_eq!(map.get(&1).expect("get"), None);
        assert_eq!(map.get(&2).expect("get"), Some(200));
    }

    // Removing a key that was never inserted is a no-op returning None.
    #[test]
    fn remove_nonexistent_returns_none() {
        let lease = setup(65536);
        let mut map: FabricHashMap<u32, u32> = FabricHashMap::new(lease, 8, 8).expect("new");
        assert_eq!(map.remove(&999).expect("remove"), None);
    }

    // contains_key() mirrors get().is_some() for present and absent keys.
    #[test]
    fn contains_key() {
        let lease = setup(65536);
        let mut map: FabricHashMap<u32, u32> = FabricHashMap::new(lease, 8, 8).expect("new");
        map.insert(&42, &1).expect("insert");
        assert!(map.contains_key(&42).expect("contains"));
        assert!(!map.contains_key(&99).expect("contains"));
    }

    // Variable-length (String) keys serialize into the fixed key stride.
    #[test]
    fn string_keys() {
        let lease = setup(65536);
        let mut map: FabricHashMap<String, u64> = FabricHashMap::new(lease, 32, 16).expect("new");

        map.insert(&String::from("alpha"), &1).expect("insert");
        map.insert(&String::from("beta"), &2).expect("insert");
        map.insert(&String::from("gamma"), &3).expect("insert");

        assert_eq!(map.get(&String::from("beta")).expect("get"), Some(2));
    }

    #[test]
    fn collision_handling() {
        // Use a small number of buckets to force collisions
        // bucket_size = 1 + 8 + 4 + 8 + 4 + 8 = 33
        // 8 buckets = header(32) + 8 * 33 = 296 bytes
        let lease = setup(296);
        let mut map: FabricHashMap<u32, u32> = FabricHashMap::new(lease, 8, 8).expect("new");
        assert_eq!(map.bucket_count, 8);

        // Insert up to 75% load factor (6 out of 8)
        for i in 0..6u32 {
            map.insert(&i, &(i * 10)).expect("insert");
        }
        assert_eq!(map.len(), 6);

        // Verify all are retrievable
        for i in 0..6u32 {
            assert_eq!(map.get(&i).expect("get"), Some(i * 10));
        }
    }

    // iter() visits every live entry exactly once (order is bucket order,
    // so sort before comparing).
    #[test]
    fn iter_all_entries() {
        let lease = setup(65536);
        let mut map: FabricHashMap<u32, u32> = FabricHashMap::new(lease, 8, 8).expect("new");

        map.insert(&1, &10).expect("insert");
        map.insert(&2, &20).expect("insert");
        map.insert(&3, &30).expect("insert");

        let mut entries: Vec<(u32, u32)> = map.iter().map(|r| r.expect("iter")).collect();
        entries.sort_by_key(|&(k, _)| k);
        assert_eq!(entries, alloc::vec![(1, 10), (2, 20), (3, 30)]);
    }

    // Insert → remove → insert of the same key works through the tombstone.
    #[test]
    fn insert_after_remove_reuses_tombstone() {
        let lease = setup(65536);
        let mut map: FabricHashMap<u32, u32> = FabricHashMap::new(lease, 8, 8).expect("new");

        map.insert(&1, &100).expect("insert");
        map.remove(&1).expect("remove");
        assert_eq!(map.len(), 0);

        map.insert(&1, &200).expect("re-insert");
        assert_eq!(map.get(&1).expect("get"), Some(200));
        assert_eq!(map.len(), 1);
    }

    // with_capacity() acquires a lease large enough for the requested
    // bucket count (the host may grant more).
    #[test]
    fn with_capacity_works() {
        host::reset_mock();
        host::mock_set_fbmu_arena_size(65536);
        let map: FabricHashMap<u32, u32> =
            FabricHashMap::with_capacity(64, 8, 8).expect("with_capacity");
        assert!(map.bucket_count >= 64);
    }

    #[test]
    fn multi_bucket_distribution() {
        // Insert N distinct keys into a map with 64 buckets. Verify they all
        // come back correctly via get() and iter(), which exercises the hash
        // distribution and linear probing across multiple bucket positions.
        host::reset_mock();
        host::mock_set_fbmu_arena_size(65536);
        let mut map: FabricHashMap<u32, u32> =
            FabricHashMap::with_capacity(64, 8, 8).expect("with_capacity");

        let n = 40u32; // 40 / 64 < 75% load factor
        for i in 0..n {
            assert_eq!(map.insert(&i, &(i * 100)).expect("insert"), None);
        }
        assert_eq!(map.len(), n as usize);

        // All keys retrievable
        for i in 0..n {
            assert_eq!(map.get(&i).expect("get"), Some(i * 100));
        }

        // Scan raw buckets and count how many distinct positions are occupied.
        // With 40 keys in 64 buckets, we expect occupation in many different
        // bucket indices — not all clumped in one region.
        let mut occupied_indices: Vec<usize> = Vec::new();
        for idx in 0..map.bucket_count as usize {
            let bucket = map.read_bucket(idx).expect("read_bucket");
            if bucket.occupied == BUCKET_OCCUPIED {
                occupied_indices.push(idx);
            }
        }
        assert_eq!(occupied_indices.len(), n as usize);

        // Verify spread: occupied buckets should span more than half the
        // bucket range (not all clumped in a small window).
        let min_idx = *occupied_indices.first().unwrap();
        let max_idx = *occupied_indices.last().unwrap();
        let span = max_idx - min_idx + 1;
        assert!(
            span > map.bucket_count as usize / 2,
            "expected occupied buckets to span > half the range, got span={} out of {}",
            span,
            map.bucket_count
        );
    }

    #[test]
    fn load_factor_limit_enforced() {
        // With 8 buckets, the 75% load factor allows 6 entries.
        // The 7th insert should fail because (6+1)*4 = 28 > 8*3 = 24.
        let bucket_size = FabricHashMap::<u32, u32>::compute_bucket_size(8, 8);
        let arena = HEADER_SIZE + 8 * bucket_size;
        let lease = setup(arena);
        let mut map: FabricHashMap<u32, u32> = FabricHashMap::new(lease, 8, 8).expect("new");
        assert_eq!(map.bucket_count, 8);

        for i in 0..6u32 {
            map.insert(&i, &(i * 10)).expect("insert");
        }
        assert_eq!(map.len(), 6);
        assert_eq!(
            map.insert(&6, &60).unwrap_err(),
            FabricError::CapacityExceeded
        );
    }

    #[test]
    fn remove_then_get_across_probe_chain() {
        // Verify that tombstoning preserves the linear probing chain.
        // If keys A, B, C hash to the same bucket (or adjacent), removing B
        // must not break lookup of C.
        host::reset_mock();
        host::mock_set_fbmu_arena_size(65536);
        let mut map: FabricHashMap<u32, u32> =
            FabricHashMap::with_capacity(64, 8, 8).expect("with_capacity");

        // Insert several keys to build up probe chains
        for i in 0..30u32 {
            map.insert(&i, &(i + 1000)).expect("insert");
        }

        // Remove every other key
        for i in (0..30u32).step_by(2) {
            assert_eq!(map.remove(&i).expect("remove"), Some(i + 1000));
        }
        assert_eq!(map.len(), 15);

        // Remaining keys (odd) must still be findable despite tombstones
        for i in (1..30u32).step_by(2) {
            assert_eq!(
                map.get(&i).expect("get after removals"),
                Some(i + 1000),
                "key {} missing after removing even keys",
                i
            );
        }

        // Removed keys must return None
        for i in (0..30u32).step_by(2) {
            assert_eq!(map.get(&i).expect("get removed"), None);
        }
    }

    // Tombstoned entries never appear in iteration results.
    #[test]
    fn iter_skips_tombstones() {
        let lease = setup(65536);
        let mut map: FabricHashMap<u32, u32> = FabricHashMap::new(lease, 8, 8).expect("new");

        map.insert(&1, &10).expect("insert");
        map.insert(&2, &20).expect("insert");
        map.insert(&3, &30).expect("insert");
        map.remove(&2).expect("remove");

        let mut entries: Vec<(u32, u32)> = map.iter().map(|r| r.expect("iter")).collect();
        entries.sort_by_key(|&(k, _)| k);
        assert_eq!(entries, alloc::vec![(1, 10), (3, 30)]);
    }

    // Lease accessors expose the underlying lease for renewal management.
    #[test]
    fn map_lease_accessors() {
        let lease = setup(65536);
        let map: FabricHashMap<u32, u32> = FabricHashMap::new(lease, 8, 8).expect("new");
        assert_ne!(map.lease_id(), 0);
        assert!(map.expires_at_unix_secs() > 0);
    }

    #[test]
    fn insert_reuses_tombstoned_bucket() {
        // Insert key, remove it (creating tombstone), insert same key again.
        // The map should reuse the tombstone slot.
        let lease = setup(65536);
        let mut map: FabricHashMap<u32, u32> = FabricHashMap::new(lease, 8, 8).expect("new");

        map.insert(&42, &100).expect("insert");
        map.remove(&42).expect("remove");
        assert_eq!(map.len(), 0);

        map.insert(&42, &200).expect("re-insert");
        assert_eq!(map.len(), 1);
        assert_eq!(map.get(&42).expect("get"), Some(200));

        // Iterate should yield exactly one entry
        let entries: Vec<(u32, u32)> = map.iter().map(|r| r.expect("iter")).collect();
        assert_eq!(entries.len(), 1);
    }
}