ClawSeven commented on code in PR #436:
URL: https://github.com/apache/incubator-teaclave-sgx-sdk/pull/436#discussion_r1410328465
##########
sgx_trts/src/emm/alloc.rs:
##########
@@ -0,0 +1,542 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License..
+
+use buddy_system_allocator::LockedHeap;
+use intrusive_collections::intrusive_adapter;
+use intrusive_collections::singly_linked_list::CursorMut;
+use intrusive_collections::singly_linked_list::{Link, SinglyLinkedList};
+use intrusive_collections::UnsafeRef;
+use sgx_tlibc_sys::ENOMEM;
+
+use core::alloc::{AllocError, Allocator, Layout};
+use core::mem::size_of;
+use core::mem::transmute;
+use core::mem::MaybeUninit;
+use core::ptr::NonNull;
+use spin::{Mutex, Once};
+
+use super::ema::EmaOptions;
+use super::page::AllocFlags;
+use super::vmmgr::{RangeType, VMMGR};
+use super::{PageInfo, PageType, ProtFlags};
+use sgx_types::error::OsResult;
+
+// The size of fixed static memory for Static Allocator
+const STATIC_MEM_SIZE: usize = 65536;
+
+// The size of initial reserve memory for Reserve Allocator
+const INIT_MEM_SIZE: usize = 65536;
+
+// The size of guard pages
+const GUARD_SIZE: usize = 0x8000;
+
+// The max allocated size of Reserve Allocator
+const MAX_EMALLOC_SIZE: usize = 0x10000000;
+
+const ALLOC_MASK: usize = 1;
+const SIZE_MASK: usize = !(EXACT_MATCH_INCREMENT - 1);
+
+/// Static memory for allocation
+static mut STATIC_MEM: [u8; STATIC_MEM_SIZE] = [0; STATIC_MEM_SIZE];
+
+/// Lowest level: Allocator for static memory
+///
+/// TODO: reimplement static allocator with monotone increasing policies
+static STATIC: Once<LockedHeap<32>> = Once::new();
+
+/// Second level: Allocator for reserve memory
+static RSRV_ALLOCATOR: Once<Mutex<Reserve>> = Once::new();
+
+/// Init lowest level static memory allocator
+pub fn init_static_alloc() {
+    STATIC.call_once(|| {
+        let static_alloc = LockedHeap::empty();
+        unsafe {
+            static_alloc
+                .lock()
+                .init(STATIC_MEM.as_ptr() as usize, STATIC_MEM_SIZE)
+        };
+        static_alloc
+    });
+}
+
+/// Init reserve memory allocator
+/// init_reserve_alloc() need to be called after init_static_alloc()
+pub fn init_reserve_alloc() {
+    RSRV_ALLOCATOR.call_once(|| Mutex::new(Reserve::new(INIT_MEM_SIZE)));
+}
+
+/// AllocType layout memory from reserve memory region
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub struct RsrvAlloc;
+
+unsafe impl Allocator for RsrvAlloc {
+    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+        let size = layout.size();
+        RSRV_ALLOCATOR
+            .get()
+            .unwrap()
+            .lock()
+            .emalloc(size)
+            .map(|addr| NonNull::slice_from_raw_parts(NonNull::new(addr as *mut u8).unwrap(), size))
+            .map_err(|_| AllocError)
+    }
+
+    #[inline]
+    unsafe fn deallocate(&self, ptr: NonNull<u8>, _layout: Layout) {
+        RSRV_ALLOCATOR.get().unwrap().lock().efree(ptr.addr().get())
+    }
+}
+
+/// AllocType layout memory from static memory region
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub struct StaticAlloc;
+
+unsafe impl Allocator for StaticAlloc {
+    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+        STATIC
+            .get()
+            .unwrap()
+            .lock()
+            .alloc(layout)
+            .map(|addr| NonNull::slice_from_raw_parts(addr, layout.size()))
+            .map_err(|_| AllocError)
+    }
+
+    #[inline]
+    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+        STATIC.get().unwrap().lock().dealloc(ptr, layout);
+    }
+}
+
+// Enum for allocator types
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+#[repr(u8)]
+pub enum AllocType {
+    Static(StaticAlloc),
+    Reserve(RsrvAlloc),
+}
+
+impl AllocType {
+    pub fn new_static() -> Self {
+        Self::Static(StaticAlloc)
+    }
+
+    pub fn new_rsrv() -> Self {
+        Self::Reserve(RsrvAlloc)
+    }
+}
+// Chunk manages memory range.
+// The Chunk structure is filled into the layout before the base pointer.
+#[derive(Debug)]
+struct Chunk {
+    base: usize,
+    size: usize,
+    used: usize,
+    link: Link, // singly intrusive linkedlist
+}
+
+impl Chunk {
+    fn new(base: usize, size: usize) -> Self {
+        Self {
+            base,
+            size,
+            used: 0,
+            link: Link::new(),
+        }
+    }
+}
+
+intrusive_adapter!(ChunkAda = UnsafeRef<Chunk>: Chunk { link: Link });
+
+const NUM_EXACT_LIST: usize = 0x100;
+const HEADER_SIZE: usize = size_of::<usize>();
+const EXACT_MATCH_INCREMENT: usize = 0x8;
+const MIN_BLOCK_SIZE: usize = 0x10;
+const MAX_EXACT_SIZE: usize = MIN_BLOCK_SIZE + EXACT_MATCH_INCREMENT * (NUM_EXACT_LIST - 1);
+
+// Free block for allocating memory with exact size
+#[repr(C)]
+#[derive(Debug)]
+struct BlockFree {
+    size: usize,
+    link: Link, // singly intrusive linkedlist
+}
+
+// Used block for tracking allocated size and base pointer
+#[repr(C)]
+#[derive(Debug)]
+struct BlockUsed {
+    size: usize,
+    payload: usize,
+}
+
+impl BlockFree {
+    fn new(size: usize) -> Self {
+        Self {
+            size,
+            link: Link::new(),
+        }
+    }
+
+    fn set_size(&mut self, size: usize) {
+        self.size = size;
+    }
+
+    fn block_size(&self) -> usize {
+        self.size & SIZE_MASK
+    }
+}
+
+impl BlockUsed {
+    fn new(size: usize) -> Self {
+        Self { size, payload: 0 }
+    }
+
+    fn set_size(&mut self, size: usize) {
+        self.size = size;
+    }
+
+    fn block_size(&self) -> usize {
+        self.size & SIZE_MASK
+    }
+
+    fn is_alloced(&self) -> bool {
+        self.size & ALLOC_MASK == 0
+    }
+
+    fn set_alloced(&mut self) {
+        self.size |= ALLOC_MASK;
+    }
+
+    fn clear_alloced(&mut self) {
+        self.size &= SIZE_MASK;
+    }
+}
+
+intrusive_adapter!(BlockFreeAda = UnsafeRef<BlockFree>: BlockFree { link: Link });
+
+/// Interior allocator for reserve memory management
+///
+/// TODO: implement slab allocator mechanism
+pub struct Reserve {
+    exact_blocks: [SinglyLinkedList<BlockFreeAda>; 256],
+    large_blocks: SinglyLinkedList<BlockFreeAda>,
+    chunks: SinglyLinkedList<ChunkAda>,
+    // The size of memory increment
+    incr_size: usize,
+    // statistics
+    allocated: usize,
+    total: usize,
+}
+
+impl Reserve {
+    fn new(size: usize) -> Self {
+        let exact_blocks: [SinglyLinkedList<BlockFreeAda>; 256] = {
+            let mut exact_blocks: [MaybeUninit<SinglyLinkedList<BlockFreeAda>>; 256] =
+                MaybeUninit::uninit_array();
+            for block in &mut exact_blocks {
+                block.write(SinglyLinkedList::new(BlockFreeAda::new()));
+            }
+            unsafe { transmute(exact_blocks) }

Review Comment:
   Done, nice suggestion!

--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
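For context on the hunk the comment anchors to: `Reserve::new` fills the `exact_blocks` array through `MaybeUninit::uninit_array` plus `transmute`. The actual suggestion being acknowledged is not quoted in this message; the snippet below is only a minimal sketch of one way such an array can be built in safe code, assuming `core::array::from_fn` (stabilized in Rust 1.63) is acceptable for the crate's toolchain and reusing the `BlockFree`/`BlockFreeAda` shapes from the patch above.

    // Illustrative sketch, not the committed change: build the per-size free
    // lists without MaybeUninit or transmute.
    use intrusive_collections::singly_linked_list::{Link, SinglyLinkedList};
    use intrusive_collections::{intrusive_adapter, UnsafeRef};

    const NUM_EXACT_LIST: usize = 0x100;

    // Mirrors the BlockFree node type declared in the patch above.
    #[repr(C)]
    struct BlockFree {
        size: usize,
        link: Link, // singly intrusive linked list
    }

    intrusive_adapter!(BlockFreeAda = UnsafeRef<BlockFree>: BlockFree { link: Link });

    // core::array::from_fn calls the closure once per index, so every slot is
    // initialized before the array is observed; no uninitialized memory is exposed.
    fn new_exact_blocks() -> [SinglyLinkedList<BlockFreeAda>; NUM_EXACT_LIST] {
        core::array::from_fn(|_| SinglyLinkedList::new(BlockFreeAda::new()))
    }

Either route produces the same 256-entry array of empty lists; the difference is only whether the initialization goes through unsafe code.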