volcano0dr commented on code in PR #436: URL: https://github.com/apache/incubator-teaclave-sgx-sdk/pull/436#discussion_r1384720346
########## sgx_trts/src/emm/alloc.rs: ########## @@ -0,0 +1,542 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.. + +use buddy_system_allocator::LockedHeap; +use intrusive_collections::intrusive_adapter; +use intrusive_collections::singly_linked_list::CursorMut; +use intrusive_collections::singly_linked_list::{Link, SinglyLinkedList}; +use intrusive_collections::UnsafeRef; +use sgx_tlibc_sys::ENOMEM; + +use core::alloc::{AllocError, Allocator, Layout}; +use core::mem::size_of; +use core::mem::transmute; +use core::mem::MaybeUninit; +use core::ptr::NonNull; +use spin::{Mutex, Once}; + +use super::ema::EmaOptions; +use super::page::AllocFlags; +use super::vmmgr::{RangeType, VMMGR}; +use super::{PageInfo, PageType, ProtFlags}; +use sgx_types::error::OsResult; + +// The size of fixed static memory for Static Allocator +const STATIC_MEM_SIZE: usize = 65536; + +// The size of initial reserve memory for Reserve Allocator +const INIT_MEM_SIZE: usize = 65536; + +// The size of guard pages +const GUARD_SIZE: usize = 0x8000; + +// The max allocated size of Reserve Allocator +const MAX_EMALLOC_SIZE: usize = 0x10000000; + +const ALLOC_MASK: usize = 1; +const SIZE_MASK: usize = !(EXACT_MATCH_INCREMENT - 1); + +/// Static memory for allocation +static mut STATIC_MEM: [u8; STATIC_MEM_SIZE] = [0; STATIC_MEM_SIZE]; + +/// Lowest level: Allocator for static memory +/// +/// TODO: reimplement static allocator with monotone increasing policies +static STATIC: Once<LockedHeap<32>> = Once::new(); + +/// Second level: Allocator for reserve memory +static RSRV_ALLOCATOR: Once<Mutex<Reserve>> = Once::new(); + +/// Init lowest level static memory allocator +pub fn init_static_alloc() { + STATIC.call_once(|| { + let static_alloc = LockedHeap::empty(); + unsafe { + static_alloc + .lock() + .init(STATIC_MEM.as_ptr() as usize, STATIC_MEM_SIZE) + }; + static_alloc + }); +} + +/// Init reserve memory allocator +/// init_reserve_alloc() need to be called after init_static_alloc() +pub fn init_reserve_alloc() { + RSRV_ALLOCATOR.call_once(|| Mutex::new(Reserve::new(INIT_MEM_SIZE))); +} + +/// AllocType layout memory from reserve memory region +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub struct RsrvAlloc; + +unsafe impl Allocator for RsrvAlloc { + fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> { + let size = layout.size(); + RSRV_ALLOCATOR + .get() + .unwrap() + .lock() + .emalloc(size) + .map(|addr| NonNull::slice_from_raw_parts(NonNull::new(addr as *mut u8).unwrap(), size)) + .map_err(|_| AllocError) + } + + #[inline] + unsafe fn deallocate(&self, ptr: NonNull<u8>, _layout: Layout) { + RSRV_ALLOCATOR.get().unwrap().lock().efree(ptr.addr().get()) + } +} + +/// AllocType layout memory from static memory region +#[derive(Clone, Copy, Debug, 
PartialEq, Eq)] +pub struct StaticAlloc; + +unsafe impl Allocator for StaticAlloc { + fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> { + STATIC + .get() + .unwrap() + .lock() + .alloc(layout) + .map(|addr| NonNull::slice_from_raw_parts(addr, layout.size())) + .map_err(|_| AllocError) + } + + #[inline] + unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) { + STATIC.get().unwrap().lock().dealloc(ptr, layout); + } +} + +// Enum for allocator types +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[repr(u8)] +pub enum AllocType { + Static(StaticAlloc), + Reserve(RsrvAlloc), +} + +impl AllocType { + pub fn new_static() -> Self { + Self::Static(StaticAlloc) + } + + pub fn new_rsrv() -> Self { + Self::Reserve(RsrvAlloc) + } +} +// Chunk manages memory range. +// The Chunk structure is filled into the layout before the base pointer. +#[derive(Debug)] +struct Chunk { + base: usize, + size: usize, + used: usize, + link: Link, // singly intrusive linkedlist +} + +impl Chunk { + fn new(base: usize, size: usize) -> Self { + Self { + base, + size, + used: 0, + link: Link::new(), + } + } +} + +intrusive_adapter!(ChunkAda = UnsafeRef<Chunk>: Chunk { link: Link }); + +const NUM_EXACT_LIST: usize = 0x100; +const HEADER_SIZE: usize = size_of::<usize>(); +const EXACT_MATCH_INCREMENT: usize = 0x8; +const MIN_BLOCK_SIZE: usize = 0x10; +const MAX_EXACT_SIZE: usize = MIN_BLOCK_SIZE + EXACT_MATCH_INCREMENT * (NUM_EXACT_LIST - 1); + +// Free block for allocating memory with exact size +#[repr(C)] +#[derive(Debug)] +struct BlockFree { Review Comment: ``` struct Payload { ptr: Option<NonNull<u8>>, } union BlockPtr { link: Link, payload: Payload, } struct Block { size: usize, ptr: BlockPtr } ``` ########## sgx_trts/src/emm/init.rs: ########## @@ -0,0 +1,256 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.. + +use super::alloc::{init_reserve_alloc, init_static_alloc}; +use super::vmmgr::init_vmmgr; + +pub fn init_emm() { + init_vmmgr(); + init_static_alloc(); + init_reserve_alloc(); +} + +cfg_if! 
{ + if #[cfg(not(any(feature = "sim", feature = "hyper")))] { + pub use hw::*; + } else { + pub use sw::*; + } +} + +#[cfg(not(any(feature = "sim", feature = "hyper")))] +mod hw { + use crate::arch::{self, Layout, LayoutEntry}; + use crate::elf::program::Type; + use crate::emm::ema::EmaOptions; + use crate::emm::layout::LayoutTable; + use crate::emm::page::AllocFlags; + use crate::emm::vmmgr::{mm_init_static_region, EMA_PROT_MASK}; + use crate::emm::{ + mm_alloc_rts, mm_commit, mm_dealloc, mm_modify_perms, PageInfo, PageType, ProtFlags, + }; + use crate::enclave::parse; + use crate::enclave::MmLayout; + use sgx_types::error::{SgxResult, SgxStatus}; + + pub fn init_rts_emas() -> SgxResult { + init_segment_emas()?; + + let layout = arch::Global::get().layout_table(); + init_rts_contexts_emas(layout, 0)?; + Ok(()) + } + + fn init_rts_contexts_emas(table: &[Layout], offset: usize) -> SgxResult { + unsafe { + for (i, layout) in table.iter().enumerate() { + if is_group_id!(layout.group.id) { + let mut step = 0_usize; + for _ in 0..layout.group.load_times { + step += layout.group.load_step as usize; + init_rts_contexts_emas( + &table[i - layout.group.entry_count as usize..i], + step, + )?; + } + } else { + build_rts_context_emas(&layout.entry, offset)?; + } + } + Ok(()) + } + } + + fn build_rts_context_emas(entry: &LayoutEntry, offset: usize) -> SgxResult { + if entry.id == arch::LAYOUT_ID_USER_REGION { + return Ok(()); + } + + let rva = offset + (entry.rva as usize); + assert!(is_page_aligned!(rva)); + + // TODO: not sure get_enclave_base() equal to elrange_base or image_base + let addr = MmLayout::image_base() + rva; + let size = (entry.page_count << arch::SE_PAGE_SHIFT) as usize; + + // entry is guard page or has EREMOVE, build a reserved ema + if (entry.si_flags == 0) || (entry.attributes & arch::PAGE_ATTR_EREMOVE != 0) { + let mut options = + EmaOptions::new(Some(addr), size, AllocFlags::RESERVED | AllocFlags::SYSTEM); + options.info(PageInfo { + typ: PageType::None, + prot: ProtFlags::NONE, + }); + mm_init_static_region(&options).map_err(|_| SgxStatus::Unexpected)?; + return Ok(()); + } + + let post_remove = (entry.attributes & arch::PAGE_ATTR_POST_REMOVE) != 0; + let post_add = (entry.attributes & arch::PAGE_ATTR_POST_ADD) != 0; + let static_min = ((entry.attributes & arch::PAGE_ATTR_EADD) != 0) && !post_remove; + + if post_remove { + // TODO: maybe AllocFlags need more flags or PageType is not None + let mut options = EmaOptions::new(Some(addr), size, AllocFlags::SYSTEM); + options.info(PageInfo { + typ: PageType::None, + prot: ProtFlags::RW, + }); + mm_init_static_region(&options).map_err(|_| SgxStatus::Unexpected)?; + + mm_dealloc(addr, size).map_err(|_| SgxStatus::Unexpected)?; + } + + if post_add { + let commit_direction = if entry.id == arch::LAYOUT_ID_STACK_MAX + || entry.id == arch::LAYOUT_ID_STACK_DYN_MAX + || entry.id == arch::LAYOUT_ID_STACK_DYN_MIN + { + AllocFlags::GROWSDOWN + } else { + AllocFlags::GROWSUP + }; + + let options = EmaOptions::new( + Some(addr), + size, + AllocFlags::COMMIT_ON_DEMAND + | commit_direction + | AllocFlags::SYSTEM + | AllocFlags::FIXED, + ); + + mm_alloc_rts(&options).map_err(|_| SgxStatus::Unexpected)?; + } else if static_min { + let info = if entry.id == arch::LAYOUT_ID_TCS { + PageInfo { + typ: PageType::Tcs, + prot: ProtFlags::NONE, + } + } else { + PageInfo { + typ: PageType::Reg, + prot: ProtFlags::from_bits_truncate( + (entry.si_flags as usize & EMA_PROT_MASK) as u8, + ), + } + }; + let mut options = EmaOptions::new(Some(addr), size, 
AllocFlags::SYSTEM); + + options.info(info); + mm_init_static_region(&options).map_err(|_| SgxStatus::Unexpected)?; + } + + Ok(()) + } + + pub fn expand_stack_epc_pages(addr: usize, count: usize) -> SgxResult { + ensure!(addr != 0 && count != 0, SgxStatus::InvalidParameter); + + LayoutTable::new() + .check_dyn_range(addr, count, None) + .ok_or(SgxStatus::InvalidParameter)?; + + mm_commit(addr, count << arch::SE_PAGE_SHIFT).map_err(|_| SgxStatus::Unexpected)?; + + Ok(()) + } + + pub fn change_perm() -> SgxResult { + let elf = parse::new_elf()?; + let text_relo = parse::has_text_relo()?; + + let base = MmLayout::image_base(); + for phdr in elf.program_iter() { + let typ = phdr.get_type().unwrap_or(Type::Null); + if typ == Type::Load && text_relo && !phdr.flags().is_write() { + let mut perm = 0_u64; + let start = base + trim_to_page!(phdr.virtual_addr() as usize); + let end = + base + round_to_page!(phdr.virtual_addr() as usize + phdr.mem_size() as usize); + let size = end - start; + + if phdr.flags().is_read() { + perm |= arch::SGX_EMA_PROT_READ; + } + if phdr.flags().is_execute() { + perm |= arch::SGX_EMA_PROT_EXEC; + } + + let prot = ProtFlags::from_bits_truncate(perm as u8); + mm_modify_perms(start, size, prot).map_err(|_| SgxStatus::Unexpected)?; + } + if typ == Type::GnuRelro { + let start = base + trim_to_page!(phdr.virtual_addr() as usize); + let end = + base + round_to_page!(phdr.virtual_addr() as usize + phdr.mem_size() as usize); + let size = end - start; + + if size > 0 { + mm_modify_perms(start, size, ProtFlags::R) + .map_err(|_| SgxStatus::Unexpected)?; + } + } + } + + let layout_table = arch::Global::get().layout_table(); + if let Some(layout) = layout_table.iter().find(|layout| unsafe { + (layout.entry.id == arch::LAYOUT_ID_RSRV_MIN) + && (layout.entry.si_flags == arch::SI_FLAGS_RWX) + && (layout.entry.page_count > 0) + }) { + let start = base + unsafe { layout.entry.rva as usize }; + let size = unsafe { layout.entry.page_count as usize } << arch::SE_PAGE_SHIFT; + + mm_modify_perms(start, size, ProtFlags::R).map_err(|_| SgxStatus::Unexpected)?; + } + Ok(()) + } + + pub fn init_segment_emas() -> SgxResult { + let elf = parse::new_elf()?; + let text_relo = parse::has_text_relo()?; + + let base = MmLayout::image_base(); + for phdr in elf.program_iter() { Review Comment: ``` for phdr in elf.program_iter().filter(|phdr| phdr.get_type().unwrap_or(Type::Null) == Type::Load) ``` ########## sgx_trts/src/emm/init.rs: ########## @@ -0,0 +1,256 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.. + +use super::alloc::{init_reserve_alloc, init_static_alloc}; +use super::vmmgr::init_vmmgr; + +pub fn init_emm() { + init_vmmgr(); + init_static_alloc(); + init_reserve_alloc(); +} + +cfg_if! 
{ + if #[cfg(not(any(feature = "sim", feature = "hyper")))] { + pub use hw::*; + } else { + pub use sw::*; Review Comment: The `sw` mod is not defined. ########## sgx_trts/src/emm/page.rs: ########## @@ -18,40 +18,64 @@ use crate::arch::{SecInfo, SE_PAGE_SHIFT, SE_PAGE_SIZE}; use crate::enclave::is_within_enclave; use crate::inst::EncluInst; +use bitflags::bitflags; use core::num::NonZeroUsize; -use sgx_types::error::{SgxResult, SgxStatus}; +use sgx_tlibc_sys::{EFAULT, EINVAL}; +use sgx_types::error::OsResult; use sgx_types::marker::ContiguousMemory; +bitflags! { Review Comment: Please use `impl_bitflags!` instead of `bitflags!`. ########## sgx_trts/src/enclave/mem.rs: ########## @@ -406,6 +406,40 @@ impl UserRegionMem { } } +pub fn is_within_rts_range(start: usize, len: usize) -> bool { Review Comment: This function is `pub`, yet it only checks that the address range is not in the user region. It should first check that the address range is within the enclave. ########## sgx_trts/src/emm/alloc.rs: ########## @@ -0,0 +1,542 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License..
+ +use buddy_system_allocator::LockedHeap; +use intrusive_collections::intrusive_adapter; +use intrusive_collections::singly_linked_list::CursorMut; +use intrusive_collections::singly_linked_list::{Link, SinglyLinkedList}; +use intrusive_collections::UnsafeRef; +use sgx_tlibc_sys::ENOMEM; + +use core::alloc::{AllocError, Allocator, Layout}; +use core::mem::size_of; +use core::mem::transmute; +use core::mem::MaybeUninit; +use core::ptr::NonNull; +use spin::{Mutex, Once}; + +use super::ema::EmaOptions; +use super::page::AllocFlags; +use super::vmmgr::{RangeType, VMMGR}; +use super::{PageInfo, PageType, ProtFlags}; +use sgx_types::error::OsResult; + +// The size of fixed static memory for Static Allocator +const STATIC_MEM_SIZE: usize = 65536; + +// The size of initial reserve memory for Reserve Allocator +const INIT_MEM_SIZE: usize = 65536; + +// The size of guard pages +const GUARD_SIZE: usize = 0x8000; + +// The max allocated size of Reserve Allocator +const MAX_EMALLOC_SIZE: usize = 0x10000000; + +const ALLOC_MASK: usize = 1; +const SIZE_MASK: usize = !(EXACT_MATCH_INCREMENT - 1); + +/// Static memory for allocation +static mut STATIC_MEM: [u8; STATIC_MEM_SIZE] = [0; STATIC_MEM_SIZE]; + +/// Lowest level: Allocator for static memory +/// +/// TODO: reimplement static allocator with monotone increasing policies +static STATIC: Once<LockedHeap<32>> = Once::new(); + +/// Second level: Allocator for reserve memory +static RSRV_ALLOCATOR: Once<Mutex<Reserve>> = Once::new(); + +/// Init lowest level static memory allocator +pub fn init_static_alloc() { + STATIC.call_once(|| { + let static_alloc = LockedHeap::empty(); + unsafe { + static_alloc + .lock() + .init(STATIC_MEM.as_ptr() as usize, STATIC_MEM_SIZE) + }; + static_alloc + }); +} + +/// Init reserve memory allocator +/// init_reserve_alloc() need to be called after init_static_alloc() +pub fn init_reserve_alloc() { + RSRV_ALLOCATOR.call_once(|| Mutex::new(Reserve::new(INIT_MEM_SIZE))); +} + +/// AllocType layout memory from reserve memory region +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub struct RsrvAlloc; + +unsafe impl Allocator for RsrvAlloc { + fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> { + let size = layout.size(); + RSRV_ALLOCATOR + .get() + .unwrap() + .lock() + .emalloc(size) + .map(|addr| NonNull::slice_from_raw_parts(NonNull::new(addr as *mut u8).unwrap(), size)) + .map_err(|_| AllocError) + } + + #[inline] + unsafe fn deallocate(&self, ptr: NonNull<u8>, _layout: Layout) { + RSRV_ALLOCATOR.get().unwrap().lock().efree(ptr.addr().get()) + } +} + +/// AllocType layout memory from static memory region +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub struct StaticAlloc; + +unsafe impl Allocator for StaticAlloc { + fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> { + STATIC + .get() + .unwrap() + .lock() + .alloc(layout) + .map(|addr| NonNull::slice_from_raw_parts(addr, layout.size())) + .map_err(|_| AllocError) + } + + #[inline] + unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) { + STATIC.get().unwrap().lock().dealloc(ptr, layout); + } +} + +// Enum for allocator types +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[repr(u8)] +pub enum AllocType { + Static(StaticAlloc), + Reserve(RsrvAlloc), +} + +impl AllocType { + pub fn new_static() -> Self { + Self::Static(StaticAlloc) + } + + pub fn new_rsrv() -> Self { + Self::Reserve(RsrvAlloc) + } +} +// Chunk manages memory range. +// The Chunk structure is filled into the layout before the base pointer. 
+#[derive(Debug)] +struct Chunk { + base: usize, + size: usize, + used: usize, + link: Link, // singly intrusive linkedlist +} + +impl Chunk { + fn new(base: usize, size: usize) -> Self { + Self { + base, + size, + used: 0, + link: Link::new(), + } + } +} + +intrusive_adapter!(ChunkAda = UnsafeRef<Chunk>: Chunk { link: Link }); + +const NUM_EXACT_LIST: usize = 0x100; +const HEADER_SIZE: usize = size_of::<usize>(); +const EXACT_MATCH_INCREMENT: usize = 0x8; +const MIN_BLOCK_SIZE: usize = 0x10; +const MAX_EXACT_SIZE: usize = MIN_BLOCK_SIZE + EXACT_MATCH_INCREMENT * (NUM_EXACT_LIST - 1); + +// Free block for allocating memory with exact size +#[repr(C)] +#[derive(Debug)] +struct BlockFree { + size: usize, + link: Link, // singly intrusive linkedlist +} + +// Used block for tracking allocated size and base pointer +#[repr(C)] +#[derive(Debug)] +struct BlockUsed { + size: usize, + payload: usize, +} + +impl BlockFree { + fn new(size: usize) -> Self { + Self { + size, + link: Link::new(), + } + } + + fn set_size(&mut self, size: usize) { + self.size = size; + } + + fn block_size(&self) -> usize { + self.size & SIZE_MASK + } +} + +impl BlockUsed { + fn new(size: usize) -> Self { + Self { size, payload: 0 } + } + + fn set_size(&mut self, size: usize) { + self.size = size; + } + + fn block_size(&self) -> usize { + self.size & SIZE_MASK + } + + fn is_alloced(&self) -> bool { + self.size & ALLOC_MASK == 0 + } + + fn set_alloced(&mut self) { + self.size |= ALLOC_MASK; + } + + fn clear_alloced(&mut self) { + self.size &= SIZE_MASK; + } +} + +intrusive_adapter!(BlockFreeAda = UnsafeRef<BlockFree>: BlockFree { link: Link }); + +/// Interior allocator for reserve memory management +/// +/// TODO: implement slab allocator mechanism +pub struct Reserve { + exact_blocks: [SinglyLinkedList<BlockFreeAda>; 256], + large_blocks: SinglyLinkedList<BlockFreeAda>, + chunks: SinglyLinkedList<ChunkAda>, + // The size of memory increment + incr_size: usize, + // statistics + allocated: usize, + total: usize, +} + +impl Reserve { + fn new(size: usize) -> Self { + let exact_blocks: [SinglyLinkedList<BlockFreeAda>; 256] = { + let mut exact_blocks: [MaybeUninit<SinglyLinkedList<BlockFreeAda>>; 256] = + MaybeUninit::uninit_array(); + for block in &mut exact_blocks { + block.write(SinglyLinkedList::new(BlockFreeAda::new())); + } + unsafe { transmute(exact_blocks) } Review Comment: `unsafe { MaybeUninit::array_assume_init(exact_blocks)}` ########## sgx_trts/src/enclave/uninit.rs: ########## @@ -79,9 +81,17 @@ pub fn rtuninit(tc: ThreadControl) -> SgxResult { #[cfg(not(any(feature = "sim", feature = "hyper")))] { - if SysFeatures::get().is_edmm() && edmm::tcs::accept_trim_tcs(tcs).is_err() { - state::set_state(State::Crashed); - bail!(SgxStatus::Unexpected); + if SysFeatures::get().is_edmm() { Review Comment: This can be encapsulated into the `trim_tcs` function. ########## sgx_trts/src/emm/alloc.rs: ########## @@ -0,0 +1,542 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.. + +use buddy_system_allocator::LockedHeap; +use intrusive_collections::intrusive_adapter; +use intrusive_collections::singly_linked_list::CursorMut; +use intrusive_collections::singly_linked_list::{Link, SinglyLinkedList}; +use intrusive_collections::UnsafeRef; +use sgx_tlibc_sys::ENOMEM; + +use core::alloc::{AllocError, Allocator, Layout}; +use core::mem::size_of; +use core::mem::transmute; +use core::mem::MaybeUninit; +use core::ptr::NonNull; +use spin::{Mutex, Once}; + +use super::ema::EmaOptions; +use super::page::AllocFlags; +use super::vmmgr::{RangeType, VMMGR}; +use super::{PageInfo, PageType, ProtFlags}; +use sgx_types::error::OsResult; + +// The size of fixed static memory for Static Allocator +const STATIC_MEM_SIZE: usize = 65536; + +// The size of initial reserve memory for Reserve Allocator +const INIT_MEM_SIZE: usize = 65536; + +// The size of guard pages +const GUARD_SIZE: usize = 0x8000; + +// The max allocated size of Reserve Allocator +const MAX_EMALLOC_SIZE: usize = 0x10000000; + +const ALLOC_MASK: usize = 1; +const SIZE_MASK: usize = !(EXACT_MATCH_INCREMENT - 1); + +/// Static memory for allocation +static mut STATIC_MEM: [u8; STATIC_MEM_SIZE] = [0; STATIC_MEM_SIZE]; + +/// Lowest level: Allocator for static memory +/// +/// TODO: reimplement static allocator with monotone increasing policies +static STATIC: Once<LockedHeap<32>> = Once::new(); + +/// Second level: Allocator for reserve memory +static RSRV_ALLOCATOR: Once<Mutex<Reserve>> = Once::new(); + +/// Init lowest level static memory allocator +pub fn init_static_alloc() { + STATIC.call_once(|| { + let static_alloc = LockedHeap::empty(); + unsafe { + static_alloc + .lock() + .init(STATIC_MEM.as_ptr() as usize, STATIC_MEM_SIZE) + }; + static_alloc + }); +} + +/// Init reserve memory allocator +/// init_reserve_alloc() need to be called after init_static_alloc() +pub fn init_reserve_alloc() { + RSRV_ALLOCATOR.call_once(|| Mutex::new(Reserve::new(INIT_MEM_SIZE))); +} + +/// AllocType layout memory from reserve memory region +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub struct RsrvAlloc; + +unsafe impl Allocator for RsrvAlloc { + fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> { + let size = layout.size(); + RSRV_ALLOCATOR + .get() + .unwrap() + .lock() + .emalloc(size) + .map(|addr| NonNull::slice_from_raw_parts(NonNull::new(addr as *mut u8).unwrap(), size)) + .map_err(|_| AllocError) + } + + #[inline] + unsafe fn deallocate(&self, ptr: NonNull<u8>, _layout: Layout) { + RSRV_ALLOCATOR.get().unwrap().lock().efree(ptr.addr().get()) + } +} + +/// AllocType layout memory from static memory region +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub struct StaticAlloc; + +unsafe impl Allocator for StaticAlloc { + fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> { + STATIC + .get() + .unwrap() + .lock() + .alloc(layout) + .map(|addr| NonNull::slice_from_raw_parts(addr, layout.size())) + .map_err(|_| AllocError) + } + + #[inline] + unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) { + STATIC.get().unwrap().lock().dealloc(ptr, layout); + } +} + 
+// Enum for allocator types +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[repr(u8)] +pub enum AllocType { + Static(StaticAlloc), + Reserve(RsrvAlloc), +} + +impl AllocType { + pub fn new_static() -> Self { + Self::Static(StaticAlloc) + } + + pub fn new_rsrv() -> Self { + Self::Reserve(RsrvAlloc) + } +} +// Chunk manages memory range. +// The Chunk structure is filled into the layout before the base pointer. +#[derive(Debug)] +struct Chunk { + base: usize, + size: usize, + used: usize, + link: Link, // singly intrusive linkedlist +} + +impl Chunk { + fn new(base: usize, size: usize) -> Self { + Self { + base, + size, + used: 0, + link: Link::new(), + } + } +} + +intrusive_adapter!(ChunkAda = UnsafeRef<Chunk>: Chunk { link: Link }); + +const NUM_EXACT_LIST: usize = 0x100; +const HEADER_SIZE: usize = size_of::<usize>(); +const EXACT_MATCH_INCREMENT: usize = 0x8; +const MIN_BLOCK_SIZE: usize = 0x10; +const MAX_EXACT_SIZE: usize = MIN_BLOCK_SIZE + EXACT_MATCH_INCREMENT * (NUM_EXACT_LIST - 1); + +// Free block for allocating memory with exact size +#[repr(C)] +#[derive(Debug)] +struct BlockFree { + size: usize, + link: Link, // singly intrusive linkedlist +} + +// Used block for tracking allocated size and base pointer +#[repr(C)] +#[derive(Debug)] +struct BlockUsed { + size: usize, + payload: usize, +} + +impl BlockFree { + fn new(size: usize) -> Self { + Self { + size, + link: Link::new(), + } + } + + fn set_size(&mut self, size: usize) { + self.size = size; + } + + fn block_size(&self) -> usize { + self.size & SIZE_MASK + } +} + +impl BlockUsed { + fn new(size: usize) -> Self { + Self { size, payload: 0 } + } + + fn set_size(&mut self, size: usize) { + self.size = size; + } + + fn block_size(&self) -> usize { + self.size & SIZE_MASK + } + + fn is_alloced(&self) -> bool { + self.size & ALLOC_MASK == 0 + } + + fn set_alloced(&mut self) { + self.size |= ALLOC_MASK; + } + + fn clear_alloced(&mut self) { + self.size &= SIZE_MASK; + } +} + +intrusive_adapter!(BlockFreeAda = UnsafeRef<BlockFree>: BlockFree { link: Link }); + +/// Interior allocator for reserve memory management +/// +/// TODO: implement slab allocator mechanism +pub struct Reserve { + exact_blocks: [SinglyLinkedList<BlockFreeAda>; 256], + large_blocks: SinglyLinkedList<BlockFreeAda>, + chunks: SinglyLinkedList<ChunkAda>, + // The size of memory increment + incr_size: usize, + // statistics + allocated: usize, + total: usize, +} + +impl Reserve { + fn new(size: usize) -> Self { + let exact_blocks: [SinglyLinkedList<BlockFreeAda>; 256] = { + let mut exact_blocks: [MaybeUninit<SinglyLinkedList<BlockFreeAda>>; 256] = + MaybeUninit::uninit_array(); + for block in &mut exact_blocks { + block.write(SinglyLinkedList::new(BlockFreeAda::new())); + } + unsafe { transmute(exact_blocks) } + }; + + let mut reserve = Self { + exact_blocks, + large_blocks: SinglyLinkedList::new(BlockFreeAda::new()), + chunks: SinglyLinkedList::new(ChunkAda::new()), + incr_size: 65536, + allocated: 0, + total: 0, + }; + + // We shouldn't handle the allocation error of reserve memory when initializing, + // If it returns error, the sdk should panic and crash. 
+ unsafe { + reserve.add_chunks(size).unwrap(); + } + reserve + } + + // Find the available free block for memory allocation, + // and bsize must be round to eight + fn get_free_block(&mut self, bsize: usize) -> Option<UnsafeRef<BlockFree>> { + if bsize <= MAX_EXACT_SIZE { + // TODO: for exact size block, maybe we can reuse larger block + // rather than allocating block from chunk + return self.get_exact_block(bsize); + } + + // Loop and find the most available large block + let list = &mut self.large_blocks; + let mut cursor = list.front_mut(); + let mut suit_block: Option<*const BlockFree> = None; + let mut suit_block_size = 0; + while !cursor.is_null() { + let curr_block = cursor.get().unwrap(); + if curr_block.size >= bsize + && (suit_block.is_none() || (suit_block_size > curr_block.size)) + { + suit_block = Some(curr_block as *const BlockFree); + suit_block_size = curr_block.block_size(); + } + cursor.move_next(); + } + + suit_block?; + + cursor = list.front_mut(); + + let mut curr_block_ptr = cursor.get().unwrap() as *const BlockFree; + if curr_block_ptr == suit_block.unwrap() { + return list.pop_front(); + } + + let mut cursor_next = cursor.peek_next(); + while !cursor_next.is_null() { Review Comment: Why do we need to traverse the linked list twice? ########## sgx_trts/src/emm/alloc.rs: ########## @@ -0,0 +1,542 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.. 
+ +use buddy_system_allocator::LockedHeap; +use intrusive_collections::intrusive_adapter; +use intrusive_collections::singly_linked_list::CursorMut; +use intrusive_collections::singly_linked_list::{Link, SinglyLinkedList}; +use intrusive_collections::UnsafeRef; +use sgx_tlibc_sys::ENOMEM; + +use core::alloc::{AllocError, Allocator, Layout}; +use core::mem::size_of; +use core::mem::transmute; +use core::mem::MaybeUninit; +use core::ptr::NonNull; +use spin::{Mutex, Once}; + +use super::ema::EmaOptions; +use super::page::AllocFlags; +use super::vmmgr::{RangeType, VMMGR}; +use super::{PageInfo, PageType, ProtFlags}; +use sgx_types::error::OsResult; + +// The size of fixed static memory for Static Allocator +const STATIC_MEM_SIZE: usize = 65536; + +// The size of initial reserve memory for Reserve Allocator +const INIT_MEM_SIZE: usize = 65536; + +// The size of guard pages +const GUARD_SIZE: usize = 0x8000; + +// The max allocated size of Reserve Allocator +const MAX_EMALLOC_SIZE: usize = 0x10000000; + +const ALLOC_MASK: usize = 1; +const SIZE_MASK: usize = !(EXACT_MATCH_INCREMENT - 1); + +/// Static memory for allocation +static mut STATIC_MEM: [u8; STATIC_MEM_SIZE] = [0; STATIC_MEM_SIZE]; + +/// Lowest level: Allocator for static memory +/// +/// TODO: reimplement static allocator with monotone increasing policies +static STATIC: Once<LockedHeap<32>> = Once::new(); + +/// Second level: Allocator for reserve memory +static RSRV_ALLOCATOR: Once<Mutex<Reserve>> = Once::new(); + +/// Init lowest level static memory allocator +pub fn init_static_alloc() { + STATIC.call_once(|| { + let static_alloc = LockedHeap::empty(); + unsafe { + static_alloc + .lock() + .init(STATIC_MEM.as_ptr() as usize, STATIC_MEM_SIZE) + }; + static_alloc + }); +} + +/// Init reserve memory allocator +/// init_reserve_alloc() need to be called after init_static_alloc() +pub fn init_reserve_alloc() { + RSRV_ALLOCATOR.call_once(|| Mutex::new(Reserve::new(INIT_MEM_SIZE))); +} + +/// AllocType layout memory from reserve memory region +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub struct RsrvAlloc; + +unsafe impl Allocator for RsrvAlloc { + fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> { + let size = layout.size(); + RSRV_ALLOCATOR + .get() + .unwrap() + .lock() + .emalloc(size) + .map(|addr| NonNull::slice_from_raw_parts(NonNull::new(addr as *mut u8).unwrap(), size)) + .map_err(|_| AllocError) + } + + #[inline] + unsafe fn deallocate(&self, ptr: NonNull<u8>, _layout: Layout) { + RSRV_ALLOCATOR.get().unwrap().lock().efree(ptr.addr().get()) + } +} + +/// AllocType layout memory from static memory region +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub struct StaticAlloc; + +unsafe impl Allocator for StaticAlloc { + fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> { + STATIC + .get() + .unwrap() + .lock() + .alloc(layout) + .map(|addr| NonNull::slice_from_raw_parts(addr, layout.size())) + .map_err(|_| AllocError) + } + + #[inline] + unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) { + STATIC.get().unwrap().lock().dealloc(ptr, layout); + } +} + +// Enum for allocator types +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[repr(u8)] +pub enum AllocType { Review Comment: ``` pub enum AllocType { Static, Reserve, } impl AllocType { pub fn allocator(&self) -> &'static dyn Allocator { match self { AllocType::Static => &StaticAlloc, AllocType::Reserve => &RsrvAlloc, } } } ``` ########## sgx_trts/src/emm/alloc.rs: ########## @@ -0,0 +1,542 @@ +// Licensed to the Apache
Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.. + +use buddy_system_allocator::LockedHeap; +use intrusive_collections::intrusive_adapter; +use intrusive_collections::singly_linked_list::CursorMut; +use intrusive_collections::singly_linked_list::{Link, SinglyLinkedList}; +use intrusive_collections::UnsafeRef; +use sgx_tlibc_sys::ENOMEM; + +use core::alloc::{AllocError, Allocator, Layout}; +use core::mem::size_of; +use core::mem::transmute; +use core::mem::MaybeUninit; +use core::ptr::NonNull; +use spin::{Mutex, Once}; Review Comment: `use crate::sync::{SpinMutex, Once};` We need to re-implement `Once` to support `Once<T>`. ########## sgx_trts/src/emm/ema.rs: ########## @@ -0,0 +1,719 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.. + +use crate::arch::{SE_PAGE_SHIFT, SE_PAGE_SIZE}; +use crate::emm::{PageInfo, PageRange, PageType, ProtFlags}; +use crate::enclave::is_within_enclave; +use alloc::boxed::Box; +use intrusive_collections::{intrusive_adapter, LinkedListLink, UnsafeRef}; +use sgx_tlibc_sys::{c_void, EACCES, EFAULT, EINVAL}; +use sgx_types::error::OsResult; + +use super::alloc::AllocType; +use super::alloc::{RsrvAlloc, StaticAlloc}; +use super::bitmap::BitArray; +use super::ocall; +use super::page::AllocFlags; +use super::pfhandler::PfHandler; + +/// Enclave Management Area +/// +/// Question: should we replace BitArray with pointer +/// to split struct into two pieces of 80 bytes and 32 bytes or an entity of 104 bytes? +#[repr(C)] Review Comment: remove #[repr(C)] ########## sgx_trts/src/emm/ema.rs: ########## @@ -0,0 +1,719 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.. + +use crate::arch::{SE_PAGE_SHIFT, SE_PAGE_SIZE}; +use crate::emm::{PageInfo, PageRange, PageType, ProtFlags}; +use crate::enclave::is_within_enclave; +use alloc::boxed::Box; +use intrusive_collections::{intrusive_adapter, LinkedListLink, UnsafeRef}; +use sgx_tlibc_sys::{c_void, EACCES, EFAULT, EINVAL}; +use sgx_types::error::OsResult; + +use super::alloc::AllocType; +use super::alloc::{RsrvAlloc, StaticAlloc}; +use super::bitmap::BitArray; +use super::ocall; +use super::page::AllocFlags; +use super::pfhandler::PfHandler; + +/// Enclave Management Area +/// +/// Question: should we replace BitArray with pointer +/// to split struct into two pieces of 80 bytes and 32 bytes or an entity of 104 bytes? +#[repr(C)] +pub(crate) struct Ema { + // page aligned start address + start: usize, + // bytes, round to page bytes + length: usize, + alloc_flags: AllocFlags, + info: PageInfo, + // bitmap for EACCEPT status + // FIXME: replace BitArray with pointer + eaccept_map: Option<BitArray>, + // custom PF handler + handler: Option<PfHandler>, + // private data for PF handler + priv_data: Option<*mut c_void>, + alloc: AllocType, + // intrusive linkedlist + link: LinkedListLink, +} + +// Implement ema adapter for the operations of intrusive linkedlist +intrusive_adapter!(pub(crate) EmaAda = UnsafeRef<Ema>: Ema { link: LinkedListLink }); + +#[derive(Clone, Copy)] +/// Options for allocating Emas. +pub struct EmaOptions { + pub addr: Option<usize>, + pub length: usize, + pub alloc_flags: AllocFlags, + pub alloc: AllocType, + info: PageInfo, + handler: Option<PfHandler>, + priv_data: Option<*mut c_void>, +} + +// TODO: remove send and sync +unsafe impl Send for Ema {} +unsafe impl Sync for Ema {} + +impl Ema { + /// Initialize Emanode with null eaccept map, + /// and start address must be page aligned + pub fn new( Review Comment: ``` impl Clone for Ema { } ``` ########## sgx_trts/src/emm/bitmap.rs: ########## @@ -0,0 +1,182 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.. + +use alloc::boxed::Box; +use alloc::vec; +use core::alloc::Allocator; +use core::alloc::Layout; +use core::ptr::NonNull; +use sgx_tlibc_sys::EACCES; +use sgx_types::error::OsResult; + +use super::alloc::AllocType; + +const BYTE_SIZE: usize = 8; +macro_rules! 
bytes_num { + ($num:expr) => { + ($num + BYTE_SIZE - 1) / BYTE_SIZE + }; +} + +#[repr(C)] +#[derive(Debug)] +pub struct BitArray { Review Comment: ``` pub struct BitArray<'a> { bits: usize, data: Box<[u8], &'a dyn Allocator>, } ########## sgx_trts/src/emm/init.rs: ########## @@ -0,0 +1,256 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.. + +use super::alloc::{init_reserve_alloc, init_static_alloc}; +use super::vmmgr::init_vmmgr; + +pub fn init_emm() { + init_vmmgr(); + init_static_alloc(); + init_reserve_alloc(); +} + +cfg_if! { + if #[cfg(not(any(feature = "sim", feature = "hyper")))] { + pub use hw::*; + } else { + pub use sw::*; + } +} + +#[cfg(not(any(feature = "sim", feature = "hyper")))] +mod hw { + use crate::arch::{self, Layout, LayoutEntry}; + use crate::elf::program::Type; + use crate::emm::ema::EmaOptions; + use crate::emm::layout::LayoutTable; + use crate::emm::page::AllocFlags; + use crate::emm::vmmgr::{mm_init_static_region, EMA_PROT_MASK}; + use crate::emm::{ + mm_alloc_rts, mm_commit, mm_dealloc, mm_modify_perms, PageInfo, PageType, ProtFlags, + }; + use crate::enclave::parse; + use crate::enclave::MmLayout; + use sgx_types::error::{SgxResult, SgxStatus}; + + pub fn init_rts_emas() -> SgxResult { + init_segment_emas()?; + + let layout = arch::Global::get().layout_table(); + init_rts_contexts_emas(layout, 0)?; + Ok(()) + } + + fn init_rts_contexts_emas(table: &[Layout], offset: usize) -> SgxResult { + unsafe { + for (i, layout) in table.iter().enumerate() { + if is_group_id!(layout.group.id) { + let mut step = 0_usize; + for _ in 0..layout.group.load_times { + step += layout.group.load_step as usize; + init_rts_contexts_emas( + &table[i - layout.group.entry_count as usize..i], + step, + )?; + } + } else { + build_rts_context_emas(&layout.entry, offset)?; Review Comment: ``` if entry.id != arch::LAYOUT_ID_USER_REGION { build_rts_context_emas(&layout.entry, offset)?; } ``` ########## sgx_trts/src/emm/bitmap.rs: ########## @@ -0,0 +1,182 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License.. + +use alloc::boxed::Box; +use alloc::vec; +use core::alloc::Allocator; +use core::alloc::Layout; +use core::ptr::NonNull; +use sgx_tlibc_sys::EACCES; +use sgx_types::error::OsResult; + +use super::alloc::AllocType; + +const BYTE_SIZE: usize = 8; +macro_rules! bytes_num { + ($num:expr) => { + ($num + BYTE_SIZE - 1) / BYTE_SIZE + }; +} + +#[repr(C)] Review Comment: remove #[repr(C)] ########## sgx_trts/src/emm/ema.rs: ########## @@ -0,0 +1,719 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.. + +use crate::arch::{SE_PAGE_SHIFT, SE_PAGE_SIZE}; +use crate::emm::{PageInfo, PageRange, PageType, ProtFlags}; +use crate::enclave::is_within_enclave; +use alloc::boxed::Box; +use intrusive_collections::{intrusive_adapter, LinkedListLink, UnsafeRef}; +use sgx_tlibc_sys::{c_void, EACCES, EFAULT, EINVAL}; +use sgx_types::error::OsResult; + +use super::alloc::AllocType; +use super::alloc::{RsrvAlloc, StaticAlloc}; +use super::bitmap::BitArray; +use super::ocall; +use super::page::AllocFlags; +use super::pfhandler::PfHandler; + +/// Enclave Management Area +/// +/// Question: should we replace BitArray with pointer +/// to split struct into two pieces of 80 bytes and 32 bytes or an entity of 104 bytes? +#[repr(C)] +pub(crate) struct Ema { + // page aligned start address + start: usize, + // bytes, round to page bytes + length: usize, + alloc_flags: AllocFlags, + info: PageInfo, + // bitmap for EACCEPT status + // FIXME: replace BitArray with pointer + eaccept_map: Option<BitArray>, + // custom PF handler + handler: Option<PfHandler>, + // private data for PF handler + priv_data: Option<*mut c_void>, + alloc: AllocType, + // intrusive linkedlist + link: LinkedListLink, +} + +// Implement ema adapter for the operations of intrusive linkedlist +intrusive_adapter!(pub(crate) EmaAda = UnsafeRef<Ema>: Ema { link: LinkedListLink }); + +#[derive(Clone, Copy)] +/// Options for allocating Emas. 
+pub struct EmaOptions { + pub addr: Option<usize>, + pub length: usize, + pub alloc_flags: AllocFlags, + pub alloc: AllocType, + info: PageInfo, + handler: Option<PfHandler>, + priv_data: Option<*mut c_void>, +} + +// TODO: remove send and sync +unsafe impl Send for Ema {} +unsafe impl Sync for Ema {} + +impl Ema { + /// Initialize Emanode with null eaccept map, + /// and start address must be page aligned + pub fn new( + start: usize, + length: usize, + alloc_flags: AllocFlags, + info: PageInfo, + handler: Option<PfHandler>, + priv_data: Option<*mut c_void>, + alloc: AllocType, + ) -> Self { + Self { + start, + length, + alloc_flags, + info, + eaccept_map: None, + handler, + priv_data, + link: LinkedListLink::new(), + alloc, + } + } + + pub fn new_options(options: &EmaOptions) -> OsResult<Self> { Review Comment: `new_options` rename to `new` ########## sgx_trts/src/emm/bitmap.rs: ########## @@ -0,0 +1,182 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.. + +use alloc::boxed::Box; +use alloc::vec; +use core::alloc::Allocator; +use core::alloc::Layout; +use core::ptr::NonNull; +use sgx_tlibc_sys::EACCES; +use sgx_types::error::OsResult; + +use super::alloc::AllocType; + +const BYTE_SIZE: usize = 8; +macro_rules! 
bytes_num { + ($num:expr) => { + ($num + BYTE_SIZE - 1) / BYTE_SIZE + }; +} + +#[repr(C)] +#[derive(Debug)] +pub struct BitArray { + bits: usize, + bytes: usize, + data: *mut u8, + alloc: AllocType, +} + +impl BitArray { + /// Init BitArray with all zero bits + pub fn new(bits: usize, alloc: AllocType) -> OsResult<Self> { + let bytes = bytes_num!(bits); + + // FIXME: return error if OOM + let data = match alloc { + AllocType::Reserve(allocator) => { + // Set bits to all zeros + let data = vec::from_elem_in(0_u8, bytes, allocator).into_boxed_slice(); + Box::into_raw(data) as *mut u8 + } + AllocType::Static(allocator) => { + let data = vec::from_elem_in(0_u8, bytes, allocator).into_boxed_slice(); + Box::into_raw(data) as *mut u8 + } + }; + + Ok(Self { + bits, + bytes, + data, + alloc, + }) + } + + /// Get the value of the bit at a given index + pub fn get(&self, index: usize) -> OsResult<bool> { + if index >= self.bits { + return Err(EACCES); + } + + let byte_index = index / BYTE_SIZE; + let bit_index = index % BYTE_SIZE; + let bit_mask = 1 << bit_index; + let data = unsafe { core::slice::from_raw_parts_mut(self.data, self.bytes) }; + Ok((data.get(byte_index).unwrap() & bit_mask) != 0) + } + + /// Check whether all bits are set true + pub fn all_true(&self) -> bool { + for pos in 0..self.bits { + if !self.get(pos).unwrap() { + return false; + } + } + true + } + + /// Set the value of the bit at the specified index + pub fn set(&mut self, index: usize, value: bool) -> OsResult { + if index >= self.bits { + return Err(EACCES); + } + let byte_index = index / BYTE_SIZE; + let bit_index = index % BYTE_SIZE; + let bit_mask = 1 << bit_index; + + let data = unsafe { core::slice::from_raw_parts_mut(self.data, self.bytes) }; + + if value { + data[byte_index] |= bit_mask; + } else { + data[byte_index] &= !bit_mask; + } + Ok(()) + } + + /// Set all the bits to true + pub fn set_full(&mut self) { + let data = unsafe { core::slice::from_raw_parts_mut(self.data, self.bytes) }; + data.fill(0xFF); + } + + /// Clear all the bits + pub fn clear(&mut self) { + let data = unsafe { core::slice::from_raw_parts_mut(self.data, self.bytes) }; + data.fill(0); + } + + /// Split current bit array at specified position, return a new allocated bit array + /// corresponding to the bits at the range of [pos, end). + /// And the current bit array manages the bits at the range of [0, pos). + pub fn split(&mut self, pos: usize) -> OsResult<BitArray> { Review Comment: There is a waste of memory -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: notifications-unsubscr...@teaclave.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org --------------------------------------------------------------------- To unsubscribe, e-mail: notifications-unsubscr...@teaclave.apache.org For additional commands, e-mail: notifications-h...@teaclave.apache.org