miri/alloc/isolated_alloc.rs
use std::alloc::Layout;
use std::ptr::NonNull;

use nix::sys::mman;
use rustc_index::bit_set::DenseBitSet;

/// How many bytes of memory each bit in the bitset represents.
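/// This must evenly divide the host page size (checked by an assertion in
/// `add_page`); a larger factor shrinks the per-page bitset at the cost of a
/// coarser minimum allocation granularity.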
const COMPRESSION_FACTOR: usize = 4;
9
10/// A dedicated allocator for interpreter memory contents, ensuring they are stored on dedicated
11/// pages (not mixed with Miri's own memory). This is used in native-lib mode.
12#[derive(Debug)]
13pub struct IsolatedAlloc {
14 /// Pointers to page-aligned memory that has been claimed by the allocator.
15 /// Every pointer here must point to a page-sized allocation claimed via
16 /// mmap. These pointers are used for "small" allocations.
17 page_ptrs: Vec<NonNull<u8>>,
18 /// Metadata about which bytes have been allocated on each page. The length
19 /// of this vector must be the same as that of `page_ptrs`, and the domain
20 /// size of the bitset must be exactly `page_size / COMPRESSION_FACTOR`.
21 ///
22 /// Conceptually, each bit of the bitset represents the allocation status of
23 /// one n-byte chunk on the corresponding element of `page_ptrs`. Thus,
24 /// indexing into it should be done with a value one-nth of the corresponding
25 /// offset on the matching `page_ptrs` element (n = `COMPRESSION_FACTOR`).
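    ///
    /// For example, with `COMPRESSION_FACTOR = 4`, an allocation occupying
    /// bytes 8..24 of a page is tracked by bits 2..6 of that page's bitset.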
    page_infos: Vec<DenseBitSet<usize>>,
    /// Pointers to multiple-page-sized allocations. These must also be page-aligned,
    /// with their size stored as the second element of the vector.
    huge_ptrs: Vec<(NonNull<u8>, usize)>,
    /// The host (not emulated) page size.
    page_size: usize,
}

impl IsolatedAlloc {
    /// Creates an empty allocator.
    pub fn new() -> Self {
        Self {
            page_ptrs: Vec::new(),
            huge_ptrs: Vec::new(),
            page_infos: Vec::new(),
            // SAFETY: `sysconf(_SC_PAGESIZE)` is always safe to call at runtime
            // See https://www.man7.org/linux/man-pages/man3/sysconf.3.html
            page_size: unsafe { libc::sysconf(libc::_SC_PAGESIZE).try_into().unwrap() },
        }
    }

    /// For simplicity, we serve small allocations in multiples of COMPRESSION_FACTOR
    /// bytes with at least that alignment.
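    ///
    /// A sketch of the normalization, assuming `COMPRESSION_FACTOR == 4` (not
    /// compiled as a doctest, since this is a private helper):
    ///
    /// ```ignore
    /// let layout = Layout::from_size_align(5, 1).unwrap();
    /// let norm = IsolatedAlloc::normalized_layout(layout);
    /// // Size rounds up to a multiple of 4; align is raised to at least 4.
    /// assert_eq!((norm.size(), norm.align()), (8, 4));
    /// ```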
    #[inline]
    fn normalized_layout(layout: Layout) -> Layout {
        let align =
            if layout.align() < COMPRESSION_FACTOR { COMPRESSION_FACTOR } else { layout.align() };
        let size = layout.size().next_multiple_of(COMPRESSION_FACTOR);
        Layout::from_size_align(size, align).unwrap()
    }

    /// For greater-than-page-sized allocations, returns the allocation size we need to request
    /// including the slack we need to satisfy the alignment request.
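    ///
    /// For example, with 4 KiB pages, a request for 10000 bytes at 8192-byte
    /// alignment rounds the size up to 12288 bytes (3 pages) and adds one slack
    /// page for the alignment, returning 16384 (a worked example under the
    /// assumption `page_size == 4096`).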
    #[inline]
    fn huge_normalized_layout(&self, layout: Layout) -> usize {
        // Allocate in page-sized chunks
        let size = layout.size().next_multiple_of(self.page_size);
        // And make sure the align is at least one page
        let align = std::cmp::max(layout.align(), self.page_size);
        // pg_count gives us the # of pages needed to satisfy the size. For
        // align > page_size where align = n * page_size, a sufficiently-aligned
        // address must exist somewhere in the range of
        // some_page_aligned_address..some_page_aligned_address + (n-1) * page_size
        // (since if some_page_aligned_address + n * page_size is sufficiently aligned,
        // then so is some_page_aligned_address itself per the definition of n, so we
        // can avoid using that 1 extra page).
        // Thus we allocate n-1 extra pages
        let pg_count = size.div_ceil(self.page_size);
        let extra_pages = align.strict_div(self.page_size).saturating_sub(1);

        pg_count.strict_add(extra_pages).strict_mul(self.page_size)
    }

    /// Determines whether a given normalized (size, align) should be sent to
    /// `alloc_huge` / `dealloc_huge`.
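    ///
    /// E.g. with 4 KiB pages, any request of 2048 bytes or more, or with an
    /// alignment above 2048, is "huge"; everything else is served from the
    /// shared small-allocation pages.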
    #[inline]
    fn is_huge_alloc(&self, layout: &Layout) -> bool {
        layout.align() > self.page_size / 2 || layout.size() >= self.page_size / 2
    }

    /// Allocates memory as described in `Layout`. This memory should be deallocated
    /// by calling `dealloc` on this same allocator.
    ///
    /// SAFETY: See `alloc::alloc()`.
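    ///
    /// A minimal usage sketch (illustrative only, not compiled as a doctest):
    ///
    /// ```ignore
    /// let mut isolated = IsolatedAlloc::new();
    /// let layout = Layout::from_size_align(64, 8).unwrap();
    /// // SAFETY: `layout` has nonzero size.
    /// let ptr = unsafe { isolated.alloc(layout) };
    /// // ... use `ptr` ...
    /// // SAFETY: `ptr` was allocated above with the same `layout`.
    /// unsafe { isolated.dealloc(ptr, layout) };
    /// ```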
    pub unsafe fn alloc(&mut self, layout: Layout) -> *mut u8 {
        // SAFETY: Upheld by caller
        unsafe { self.allocate(layout, false) }
    }

    /// Same as `alloc`, but zeroes out the memory.
    ///
    /// SAFETY: See `alloc::alloc_zeroed()`.
    pub unsafe fn alloc_zeroed(&mut self, layout: Layout) -> *mut u8 {
        // SAFETY: Upheld by caller
        unsafe { self.allocate(layout, true) }
    }

    /// Abstracts over the logic of `alloc_zeroed` vs `alloc`, as determined by
    /// the `zeroed` argument.
    ///
    /// SAFETY: See `alloc::alloc()`.
    unsafe fn allocate(&mut self, layout: Layout, zeroed: bool) -> *mut u8 {
        let layout = IsolatedAlloc::normalized_layout(layout);
        if self.is_huge_alloc(&layout) {
            // SAFETY: Validity of `layout` upheld by caller; we checked that
            // the size and alignment are appropriate for being a huge alloc
            unsafe { self.alloc_huge(layout) }
        } else {
            for (&mut page, pinfo) in std::iter::zip(&mut self.page_ptrs, &mut self.page_infos) {
                // SAFETY: The value in `self.page_size` is used to allocate
                // `page`, with page alignment
                if let Some(ptr) =
                    unsafe { Self::alloc_small(self.page_size, layout, page, pinfo, zeroed) }
                {
                    return ptr;
                }
            }

            // We get here only if there's no space in our existing pages
            let page_size = self.page_size;
            // Add another page and allocate from it; this cannot fail since the
            // new page is empty and we already asserted it fits into a page
            let (page, pinfo) = self.add_page();

            // SAFETY: See the safety comment on `alloc_small` below
            unsafe { Self::alloc_small(page_size, layout, page, pinfo, zeroed).unwrap() }
        }
    }

    /// Used internally by `allocate` to abstract over some logic.
    ///
    /// SAFETY: `page` must be a page-aligned pointer to an allocated page,
    /// where the allocation is (at least) `page_size` bytes.
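    ///
    /// For example, a (normalized) 16-byte request with 8-byte alignment scans
    /// offsets 0, 8, 16, ... and claims the first run of 4 free bits
    /// (16 / COMPRESSION_FACTOR) it finds, returning a pointer at the matching
    /// offset into the page.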
    unsafe fn alloc_small(
        page_size: usize,
        layout: Layout,
        page: NonNull<u8>,
        pinfo: &mut DenseBitSet<usize>,
        zeroed: bool,
    ) -> Option<*mut u8> {
        // Check every alignment-sized block and see if there exists a `size`
        // chunk of empty space, i.e. forall idx . !pinfo.contains(idx / n)
        for offset in (0..page_size).step_by(layout.align()) {
            let offset_pinfo = offset / COMPRESSION_FACTOR;
            let size_pinfo = layout.size() / COMPRESSION_FACTOR;
            // DenseBitSet::contains() panics if the index is out of bounds
            if pinfo.domain_size() < offset_pinfo + size_pinfo {
                break;
            }
            if !pinfo.contains_any(offset_pinfo..offset_pinfo + size_pinfo) {
                pinfo.insert_range(offset_pinfo..offset_pinfo + size_pinfo);
                // SAFETY: We checked the available bytes after `offset` in the
                // call to `domain_size` above and asserted there are at least
                // `offset + layout.size()` bytes available and unallocated
                // after it. `page` must point to the start of the page, so
                // adding `offset` is safe per the above.
                unsafe {
                    let ptr = page.add(offset);
                    if zeroed {
                        // Only write the bytes we were specifically asked to
                        // zero out, even if we allocated more
                        ptr.write_bytes(0, layout.size());
                    }
                    return Some(ptr.as_ptr());
                }
            }
        }
        None
    }

    /// Expands the available memory pool by adding one page.
    fn add_page(&mut self) -> (NonNull<u8>, &mut DenseBitSet<usize>) {
        // SAFETY: mmap is always safe to call when requesting anonymous memory
        let page_ptr = unsafe {
            libc::mmap(
                std::ptr::null_mut(),
                self.page_size,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
                -1,
                0,
            )
            .cast::<u8>()
        };
        assert_ne!(page_ptr.addr(), usize::MAX, "mmap failed");
        // `page_infos` has to have one bit for each `COMPRESSION_FACTOR`-sized chunk of bytes in the page.
        assert!(self.page_size.is_multiple_of(COMPRESSION_FACTOR));
        self.page_infos.push(DenseBitSet::new_empty(self.page_size / COMPRESSION_FACTOR));
        self.page_ptrs.push(NonNull::new(page_ptr).unwrap());
        (NonNull::new(page_ptr).unwrap(), self.page_infos.last_mut().unwrap())
    }

    /// Allocates in multiples of one page on the host system.
    /// Will always be zeroed.
    ///
    /// SAFETY: Same as `alloc()`.
    unsafe fn alloc_huge(&mut self, layout: Layout) -> *mut u8 {
        let size = self.huge_normalized_layout(layout);
        // SAFETY: mmap is always safe to call when requesting anonymous memory
        let ret = unsafe {
            libc::mmap(
                std::ptr::null_mut(),
                size,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
                -1,
                0,
            )
            .cast::<u8>()
        };
        assert_ne!(ret.addr(), usize::MAX, "mmap failed");
        self.huge_ptrs.push((NonNull::new(ret).unwrap(), size));
        // huge_normalized_layout ensures that we've overallocated enough space
        // for this to be valid.
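        // E.g. for align = 2 * page_size: mmap returns a page-aligned address,
        // which is either already 2-page-aligned or becomes so after skipping
        // one page, and the slack page requested above covers that skip.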
        ret.map_addr(|a| a.next_multiple_of(layout.align()))
    }

    /// Deallocates a pointer from this allocator.
    ///
    /// SAFETY: This pointer must have been allocated by calling `alloc()` (or
    /// `alloc_zeroed()`) with the same layout as the one passed on this same
    /// `IsolatedAlloc`.
    pub unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
        let layout = IsolatedAlloc::normalized_layout(layout);

        if self.is_huge_alloc(&layout) {
            // SAFETY: Partly upheld by caller, and we checked the size and
            // alignment, meaning this must have been allocated via `alloc_huge`
            unsafe {
                self.dealloc_huge(ptr, layout);
            }
        } else {
            // SAFETY: It's not a huge allocation, therefore it is a small one.
            let idx = unsafe { self.dealloc_small(ptr, layout) };

            // This may have been the last allocation on this page. If so, free the entire page.
            // FIXME: this can lead to threshold effects, we should probably add some form
            // of hysteresis.
            if self.page_infos[idx].is_empty() {
                self.page_infos.remove(idx);
                let page_ptr = self.page_ptrs.remove(idx);
                // SAFETY: We checked that there are no outstanding allocations
                // from us pointing to this page, and we know it was allocated
                // in add_page as exactly a single page.
                unsafe {
                    assert_eq!(libc::munmap(page_ptr.as_ptr().cast(), self.page_size), 0);
                }
            }
        }
    }

    /// Returns the index of the page that this was deallocated from.
    ///
    /// SAFETY: the pointer must have been allocated with `alloc_small`.
    unsafe fn dealloc_small(&mut self, ptr: *mut u8, layout: Layout) -> usize {
        // Offset of the pointer in the current page
        let offset = ptr.addr() % self.page_size;
        // And then the page's base address
        let page_addr = ptr.addr() - offset;
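        // E.g. with 4 KiB pages, a pointer at address 0x1234 has offset 0x234
        // within the page starting at 0x1000. (This relies on pages being
        // page-aligned, which mmap guarantees.)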

        // Find the page this allocation belongs to.
        // This could be made faster if the list was sorted -- the allocator isn't fully optimized at the moment.
        let pinfo = std::iter::zip(&mut self.page_ptrs, &mut self.page_infos)
            .enumerate()
            .find(|(_, (page, _))| page.addr().get() == page_addr);
        let Some((idx_of_pinfo, (_, pinfo))) = pinfo else {
            panic!("Freeing in an unallocated page: {ptr:?}\nHolding pages {:?}", self.page_ptrs)
        };
        // Mark this range as available in the page.
        let ptr_idx_pinfo = offset / COMPRESSION_FACTOR;
        let size_pinfo = layout.size() / COMPRESSION_FACTOR;
        for idx in ptr_idx_pinfo..ptr_idx_pinfo + size_pinfo {
            pinfo.remove(idx);
        }
        idx_of_pinfo
    }

    /// SAFETY: Same as `dealloc()`, with the added requirement that `layout`
    /// must denote a huge allocation (i.e. one for which `is_huge_alloc`
    /// returns true).
    unsafe fn dealloc_huge(&mut self, ptr: *mut u8, layout: Layout) {
        let size = self.huge_normalized_layout(layout);
        // Find the huge allocation containing `ptr`.
        let idx = self
            .huge_ptrs
            .iter()
            .position(|&(pg, size)| {
                pg.addr().get() <= ptr.addr() && ptr.addr() < pg.addr().get().strict_add(size)
            })
            .expect("Freeing unallocated pages");
        // And kick it from the list
        let (un_offset_ptr, size2) = self.huge_ptrs.remove(idx);
        assert_eq!(size, size2, "got wrong layout in dealloc");
        // SAFETY: huge_ptrs contains allocations made with mmap with the size recorded there.
        unsafe {
            let ret = libc::munmap(un_offset_ptr.as_ptr().cast(), size);
            assert_eq!(ret, 0);
        }
    }

    /// Returns a vector of page addresses managed by the allocator.
    pub fn pages(&self) -> Vec<usize> {
        let mut pages: Vec<usize> =
            self.page_ptrs.iter().map(|p| p.expose_provenance().get()).collect();
        for (ptr, size) in self.huge_ptrs.iter() {
            for i in 0..size / self.page_size {
                pages.push(ptr.expose_provenance().get().strict_add(i * self.page_size));
            }
        }
        pages
    }

    /// Protects all owned memory as `PROT_NONE`, preventing accesses.
    ///
    /// SAFETY: Accessing memory after this point will result in a segfault
    /// unless it is first unprotected.
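    ///
    /// A sketch of the intended call pattern around a native call (hypothetical
    /// caller code, not compiled as a doctest):
    ///
    /// ```ignore
    /// // SAFETY: nothing accesses interpreter memory until `unprep_ffi` below.
    /// unsafe { alloc.prepare_ffi() }?;
    /// // ... run native code; stray accesses to these pages now fault ...
    /// alloc.unprep_ffi(); // restore read/write access
    /// ```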
    pub unsafe fn prepare_ffi(&mut self) -> Result<(), nix::errno::Errno> {
        let prot = mman::ProtFlags::PROT_NONE;
        unsafe { self.mprotect(prot) }
    }

    /// Unprotects all owned memory by setting it to RW. Erroring here is very
    /// likely unrecoverable, so it may panic if applying those permissions
    /// fails.
    pub fn unprep_ffi(&mut self) {
        let prot = mman::ProtFlags::PROT_READ | mman::ProtFlags::PROT_WRITE;
        unsafe {
            self.mprotect(prot).unwrap();
        }
    }

    /// Applies `prot` to every page managed by the allocator.
    ///
    /// SAFETY: Accessing memory in violation of the protection flags will
    /// trigger a segfault.
    unsafe fn mprotect(&mut self, prot: mman::ProtFlags) -> Result<(), nix::errno::Errno> {
        for &pg in &self.page_ptrs {
            unsafe {
                mman::mprotect(pg.cast(), self.page_size, prot)?;
            }
        }
        for &(hpg, size) in &self.huge_ptrs {
            unsafe {
                mman::mprotect(hpg.cast(), size.next_multiple_of(self.page_size), prot)?;
            }
        }
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    /// Helper function to assert that all bytes from `ptr` to `ptr.add(layout.size())`
    /// are zeroes.
    ///
    /// SAFETY: `ptr` must have been allocated with `layout`.
    unsafe fn assert_zeroes(ptr: *mut u8, layout: Layout) {
        // SAFETY: Caller ensures this is valid
        unsafe {
            for ofs in 0..layout.size() {
                assert_eq!(0, ptr.add(ofs).read());
            }
        }
    }

    /// Check that small (sub-pagesize) allocations are properly zeroed out.
    #[test]
    fn small_zeroes() {
        let mut alloc = IsolatedAlloc::new();
        // 256 should be less than the pagesize on *any* system
        let layout = Layout::from_size_align(256, 32).unwrap();
        // SAFETY: layout size is the constant above, not 0
        let ptr = unsafe { alloc.alloc_zeroed(layout) };
        // SAFETY: `ptr` was just allocated with `layout`
        unsafe {
            assert_zeroes(ptr, layout);
            alloc.dealloc(ptr, layout);
        }
    }

    /// Check that huge (> 1 page) allocations are properly zeroed out also.
    #[test]
    fn huge_zeroes() {
        let mut alloc = IsolatedAlloc::new();
        // 16k is about as big as pages get, e.g. on macOS aarch64
        let layout = Layout::from_size_align(16 * 1024, 128).unwrap();
        // SAFETY: layout size is the constant above, not 0
        let ptr = unsafe { alloc.alloc_zeroed(layout) };
        // SAFETY: `ptr` was just allocated with `layout`
        unsafe {
            assert_zeroes(ptr, layout);
            alloc.dealloc(ptr, layout);
        }
    }

    /// Check that repeatedly reallocating the same memory will still zero out
    /// everything properly
    #[test]
    fn repeated_allocs() {
        let mut alloc = IsolatedAlloc::new();
        // Try both sub-pagesize allocs and those larger than / equal to a page
        for sz in (1..=(16 * 1024)).step_by(128) {
            let layout = Layout::from_size_align(sz, 1).unwrap();
            // SAFETY: all sizes in the range above are nonzero as we start from 1
            let ptr = unsafe { alloc.alloc_zeroed(layout) };
            // SAFETY: `ptr` was just allocated with `layout`, which was used
            // to bound the access size
            unsafe {
                assert_zeroes(ptr, layout);
                ptr.write_bytes(255, sz);
                alloc.dealloc(ptr, layout);
            }
        }
    }

    /// Checks that allocations of different sizes do not overlap, then checks
    /// for memory leaks that might have occurred.
    #[test]
    fn check_leaks_and_overlaps() {
        let mut alloc = IsolatedAlloc::new();

        // Some random sizes and aligns
        let mut sizes = vec![32; 10];
        sizes.append(&mut vec![15; 4]);
        sizes.append(&mut vec![256; 12]);
        // Give it some multi-page ones too
        sizes.append(&mut vec![32 * 1024; 4]);
        sizes.push(4 * 1024);

        // Matching aligns for the sizes
        let mut aligns = vec![16; 12];
        aligns.append(&mut vec![256; 2]);
        aligns.append(&mut vec![64; 12]);
        aligns.append(&mut vec![4096; 4]);
        // And one that requests align > page_size
        aligns.push(64 * 1024);

        // Make sure we didn't mess up in the test itself!
        assert_eq!(sizes.len(), aligns.len());

        // Aggregate the sizes and aligns into a vec of layouts, then allocate them
        let layouts: Vec<_> = std::iter::zip(sizes, aligns)
            .map(|(sz, al)| Layout::from_size_align(sz, al).unwrap())
            .collect();
        // SAFETY: all sizes specified in `sizes` are nonzero
        let ptrs: Vec<_> =
            layouts.iter().map(|layout| unsafe { alloc.alloc_zeroed(*layout) }).collect();

        for (&ptr, &layout) in std::iter::zip(&ptrs, &layouts) {
            // We requested zeroed allocations, so check that that's true.
            // Then write to the end of the current size, so if the allocs
            // overlap (or the zeroing is wrong) then `assert_zeroes` will panic.
            // Also check that the alignment we asked for was respected.
            assert_eq!(ptr.addr().strict_rem(layout.align()), 0);
            // SAFETY: each `ptr` was allocated with its corresponding `layout`,
            // which is used to bound the access size
            unsafe {
                assert_zeroes(ptr, layout);
                ptr.write_bytes(255, layout.size());
                alloc.dealloc(ptr, layout);
            }
        }

        // And then verify that no memory was leaked after all that
        assert!(alloc.page_ptrs.is_empty() && alloc.huge_ptrs.is_empty());
    }
}
473}