This is an automated email from the ASF dual-hosted git repository.
rduan pushed a commit to branch v2.0.0-preview
in repository https://gitbox.apache.org/repos/asf/incubator-teaclave-sgx-sdk.git
The following commit(s) were added to refs/heads/v2.0.0-preview by this push:
new 5158a6f1 Implementing sync primitives with futex-S
5158a6f1 is described below
commit 5158a6f1a0bcb7ba6c75a9e305ad5b4d02daa1e9
Author: volcano <[email protected]>
AuthorDate: Wed Nov 30 16:00:31 2022 +0800
Implementing sync primitives with futex-S
---
sgx_protected_fs/tfs/src/sys/metadata.rs | 8 -
sgx_sync/src/capi.rs | 6 +-
sgx_sync/src/condvar.rs | 24 +-
sgx_sync/src/condvar/check.rs | 16 +-
sgx_sync/src/lib.rs | 2 +-
sgx_sync/src/mutex.rs | 22 +-
sgx_sync/src/{remutex.rs => remutex/generic.rs} | 109 ++++--
sgx_sync/src/{remutex.rs => remutex/legacy.rs} | 2 +-
.../thread_parker => sgx_sync/src/remutex}/mod.rs | 5 +-
sgx_sync/src/rwlock.rs | 41 +--
sgx_sync/src/sys/futex.rs | 4 +-
sgx_sync/src/{ => sys}/lazy_box.rs | 0
sgx_sync/src/sys/locks/futex/condvar.rs | 77 +++++
sgx_sync/src/sys/{ => locks/futex}/mod.rs | 4 +-
sgx_sync/src/sys/locks/futex/mutex.rs | 119 +++++++
sgx_sync/src/sys/locks/futex/rwlock.rs | 365 +++++++++++++++++++++
.../src/sys/locks/futex/wait.rs | 27 +-
sgx_sync/src/sys/{ => locks/generic}/condvar.rs | 4 +-
sgx_sync/src/sys/{ => locks/generic}/mod.rs | 2 -
sgx_sync/src/sys/{ => locks/generic}/mutex.rs | 2 +-
sgx_sync/src/sys/{ => locks/generic}/rwlock.rs | 2 +-
sgx_sync/src/sys/{ => locks}/mod.rs | 9 +-
sgx_sync/src/sys/mod.rs | 5 +-
sgx_sync/src/sys/ocall/mod.rs | 60 ++--
sgx_tstd/src/sync/rwlock/tests.rs | 1 +
sgx_tstd/src/sys/futex.rs | 10 +-
sgx_tstd/src/sys_common/once/mod.rs | 7 +-
sgx_tstd/src/sys_common/thread_parker/mod.rs | 5 +-
sgx_urts/src/ocall/sync.rs | 47 ++-
29 files changed, 784 insertions(+), 201 deletions(-)
diff --git a/sgx_protected_fs/tfs/src/sys/metadata.rs
b/sgx_protected_fs/tfs/src/sys/metadata.rs
index a8231f7f..7f0e0f4a 100644
--- a/sgx_protected_fs/tfs/src/sys/metadata.rs
+++ b/sgx_protected_fs/tfs/src/sys/metadata.rs
@@ -30,12 +30,6 @@ pub const SGX_FILE_ID: u64 = 0x5347_585F_4649_4C45;
pub const SGX_FILE_MAJOR_VERSION: u8 = 0x01;
pub const SGX_FILE_MINOR_VERSION: u8 = 0x00;
-#[derive(Copy, Clone, Debug, Default)]
-#[repr(C)]
-pub struct McUuid {
- mc_uuid: [u8; 16],
-}
-
impl_enum! {
#[repr(u8)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
@@ -110,8 +104,6 @@ pub const FULLNAME_MAX_LEN: usize = PATHNAME_MAX_LEN +
FILENAME_MAX_LEN;
pub struct MetadataEncrypted {
pub file_name: [u8; FILENAME_MAX_LEN],
pub size: usize,
- pub mc_uuid: McUuid,
- pub mc_value: u32,
pub mht_key: Key128bit,
pub mht_gmac: Mac128bit,
pub data: [u8; MD_USER_DATA_SIZE],
diff --git a/sgx_sync/src/capi.rs b/sgx_sync/src/capi.rs
index 9625723a..85405794 100644
--- a/sgx_sync/src/capi.rs
+++ b/sgx_sync/src/capi.rs
@@ -16,9 +16,9 @@
// under the License..
use crate::spin::SpinMutex;
-use crate::sys::condvar::{Condvar, MovableCondvar};
-use crate::sys::mutex::{MovableMutex, MovableReentrantMutex, Mutex,
MutexControl};
-use crate::sys::rwlock::{MovableRwLock, RwLock};
+use crate::sys::locks::generic::condvar::{Condvar, MovableCondvar};
+use crate::sys::locks::generic::mutex::{MovableMutex, MovableReentrantMutex,
Mutex, MutexControl};
+use crate::sys::locks::generic::rwlock::{MovableRwLock, RwLock};
use alloc::boxed::Box;
use core::mem;
use core::mem::ManuallyDrop;
diff --git a/sgx_sync/src/condvar.rs b/sgx_sync/src/condvar.rs
index 4c1452fa..e6a3da3b 100644
--- a/sgx_sync/src/condvar.rs
+++ b/sgx_sync/src/condvar.rs
@@ -16,10 +16,9 @@
// under the License..
use crate::mutex::MovableMutex;
-use crate::sys::condvar as imp;
-use crate::sys::mutex as mutex_imp;
+use crate::sys::locks::condvar as imp;
+use crate::sys::locks::mutex as mutex_imp;
use core::time::Duration;
-use sgx_types::error::errno::ETIMEDOUT;
mod check;
@@ -44,15 +43,13 @@ impl Condvar {
/// Signals one waiter on this condition variable to wake up.
#[inline]
pub fn notify_one(&self) {
- let r = unsafe { self.inner.notify_one() };
- debug_assert_eq!(r, Ok(()));
+ unsafe { self.inner.notify_one() };
}
/// Awakens all current waiters on this condition variable.
#[inline]
pub fn notify_all(&self) {
- let r = unsafe { self.inner.notify_all() };
- debug_assert_eq!(r, Ok(()));
+ unsafe { self.inner.notify_all() };
}
/// Waits for a signal on the specified mutex.
@@ -62,9 +59,9 @@ impl Condvar {
/// May panic if used with more than one mutex.
#[inline]
pub unsafe fn wait(&self, mutex: &MovableMutex) {
- self.check.verify(mutex);
- let r = self.inner.wait(mutex.raw());
- debug_assert_eq!(r, Ok(()));
+ let mutex_raw = mutex.raw();
+ self.check.verify(mutex_raw);
+ self.inner.wait(mutex_raw)
}
/// Waits for a signal on the specified mutex with a timeout duration
@@ -75,10 +72,9 @@ impl Condvar {
/// May panic if used with more than one mutex.
#[inline]
pub unsafe fn wait_timeout(&self, mutex: &MovableMutex, dur: Duration) ->
bool {
- self.check.verify(mutex);
- let r = self.inner.wait_timeout(mutex.raw(), dur);
- debug_assert!(r == Err(ETIMEDOUT) || r == Ok(()));
- r == Ok(())
+ let mutex_raw = mutex.raw();
+ self.check.verify(mutex_raw);
+ self.inner.wait_timeout(mutex_raw, dur)
}
}
diff --git a/sgx_sync/src/condvar/check.rs b/sgx_sync/src/condvar/check.rs
index 0f00e9b7..69097602 100644
--- a/sgx_sync/src/condvar/check.rs
+++ b/sgx_sync/src/condvar/check.rs
@@ -14,10 +14,9 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
-
-use crate::lazy_box::{LazyBox, LazyInit};
-use crate::mutex::MovableMutex;
-use crate::sys::mutex as imp;
+use crate::sys::lazy_box::{LazyBox, LazyInit};
+use crate::sys::locks::futex;
+use crate::sys::locks::generic;
use core::ptr;
use core::sync::atomic::{AtomicPtr, Ordering};
@@ -42,8 +41,9 @@ impl SameMutexCheck {
addr: AtomicPtr::new(ptr::null_mut()),
}
}
- pub fn verify(&self, mutex: &MovableMutex) {
- let addr = mutex.raw() as *const imp::Mutex as *const () as *mut _;
+ pub fn verify(&self, mutex: &generic::mutex::MovableMutex) {
+ let raw: &generic::mutex::Mutex = mutex;
+ let addr = raw as *const generic::mutex::Mutex as *const () as *mut _;
// Relaxed is okay here because we never read through `self.addr`, and
only use it to
// compare addresses.
match self.addr.compare_exchange(
@@ -61,7 +61,7 @@ impl SameMutexCheck {
/// Unboxed mutexes may move, so `Condvar` can not require its address to stay
/// constant.
-impl CondvarCheck for imp::Mutex {
+impl CondvarCheck for futex::mutex::Mutex {
type Check = NoCheck;
}
@@ -72,5 +72,5 @@ impl NoCheck {
pub const fn new() -> Self {
Self
}
- pub fn verify(&self, _: &MovableMutex) {}
+ pub fn verify(&self, _: &futex::mutex::MovableMutex) {}
}
diff --git a/sgx_sync/src/lib.rs b/sgx_sync/src/lib.rs
index 76d3818a..c86ec359 100644
--- a/sgx_sync/src/lib.rs
+++ b/sgx_sync/src/lib.rs
@@ -25,6 +25,7 @@
#![feature(linked_list_remove)]
#![feature(negative_impls)]
#![feature(never_type)]
+#![allow(dead_code)]
#![allow(clippy::missing_safety_doc)]
#![allow(non_camel_case_types)]
@@ -37,7 +38,6 @@ extern crate sgx_types;
mod barrier;
mod condvar;
mod futex;
-mod lazy_box;
mod lazy_lock;
mod lock_api;
mod mutex;
diff --git a/sgx_sync/src/mutex.rs b/sgx_sync/src/mutex.rs
index 922e7d50..3ba60cc9 100644
--- a/sgx_sync/src/mutex.rs
+++ b/sgx_sync/src/mutex.rs
@@ -16,8 +16,7 @@
// under the License..
use crate::lock_api::RawMutex;
-use crate::sys::mutex as imp;
-use sgx_types::error::errno::EBUSY;
+use crate::sys::locks::mutex as imp;
/// An SGX-based mutual exclusion lock, meant for use in static variables.
///
@@ -46,9 +45,7 @@ impl StaticMutex {
/// same thread.
#[inline]
pub unsafe fn lock(&'static self) -> StaticMutexGuard {
- let r = self.0.lock();
- debug_assert_eq!(r, Ok(()));
-
+ self.0.lock();
StaticMutexGuard(&self.0)
}
}
@@ -59,8 +56,9 @@ pub struct StaticMutexGuard(&'static imp::Mutex);
impl Drop for StaticMutexGuard {
#[inline]
fn drop(&mut self) {
- let r = unsafe { self.0.unlock() };
- debug_assert_eq!(r, Ok(()));
+ unsafe {
+ self.0.unlock();
+ }
}
}
@@ -92,17 +90,14 @@ impl MovableMutex {
/// Locks the mutex blocking the current thread until it is available.
#[inline]
pub fn raw_lock(&self) {
- let r = unsafe { self.0.lock() };
- debug_assert_eq!(r, Ok(()));
+ unsafe { self.0.lock() }
}
/// Attempts to lock the mutex without blocking, returning whether it was
/// successfully acquired or not.
#[inline]
pub fn try_lock(&self) -> bool {
- let r = unsafe { self.0.try_lock() };
- debug_assert!(r == Err(EBUSY) || r == Ok(()));
- r == Ok(())
+ unsafe { self.0.try_lock() }
}
/// Unlocks the mutex.
@@ -111,8 +106,7 @@ impl MovableMutex {
/// mutex.
#[inline]
pub unsafe fn raw_unlock(&self) {
- let r = self.0.unlock();
- debug_assert_eq!(r, Ok(()));
+ self.0.unlock()
}
}
diff --git a/sgx_sync/src/remutex.rs b/sgx_sync/src/remutex/generic.rs
similarity index 54%
copy from sgx_sync/src/remutex.rs
copy to sgx_sync/src/remutex/generic.rs
index c0f692c8..77da8776 100644
--- a/sgx_sync/src/remutex.rs
+++ b/sgx_sync/src/remutex/generic.rs
@@ -15,18 +15,48 @@
// specific language governing permissions and limitations
// under the License..
-use crate::sys::mutex as sys;
-use core::fmt;
+use crate::mutex as sys;
+use core::cell::UnsafeCell;
use core::ops::Deref;
use core::panic::{RefUnwindSafe, UnwindSafe};
+use core::sync::atomic::{AtomicUsize, Ordering::Relaxed};
+use sgx_trts::tcs;
/// A re-entrant mutual exclusion
///
/// This mutex will block *other* threads waiting for the lock to become
/// available. The thread which has already locked the mutex can lock it
/// multiple times without blocking, preventing a common source of deadlocks.
+///
+/// This is used by stdout().lock() and friends.
+///
+/// ## Implementation details
+///
+/// The 'owner' field tracks which thread has locked the mutex.
+///
+/// We use current_thread_unique_ptr() as the thread identifier,
+/// which is just the address of a thread local variable.
+///
+/// If `owner` is set to the identifier of the current thread,
+/// we assume the mutex is already locked and instead of locking it again,
+/// we increment `lock_count`.
+///
+/// When unlocking, we decrement `lock_count`, and only unlock the mutex when
+/// it reaches zero.
+///
+/// `lock_count` is protected by the mutex and only accessed by the thread
that has
+/// locked the mutex, so needs no synchronization.
+///
+/// `owner` can be checked by other threads that want to see if they already
+/// hold the lock, so needs to be atomic. If it compares equal, we're on the
+/// same thread that holds the mutex and memory access can use relaxed ordering
+/// since we're not dealing with multiple threads. If it compares unequal,
+/// synchronization is left to the mutex, making relaxed memory ordering for
+/// the `owner` field fine in all cases.
pub struct ReentrantMutex<T> {
- inner: sys::MovableReentrantMutex,
+ mutex: sys::MovableMutex,
+ owner: AtomicUsize,
+ lock_count: UnsafeCell<u32>,
data: T,
}
@@ -57,10 +87,11 @@ impl<T> !Send for ReentrantMutexGuard<'_, T> {}
impl<T> ReentrantMutex<T> {
/// Creates a new reentrant mutex in an unlocked state.
- ///
pub const fn new(t: T) -> ReentrantMutex<T> {
ReentrantMutex {
- inner: sys::MovableReentrantMutex::new(),
+ mutex: sys::MovableMutex::new(),
+ owner: AtomicUsize::new(0),
+ lock_count: UnsafeCell::new(0),
data: t,
}
}
@@ -78,8 +109,17 @@ impl<T> ReentrantMutex<T> {
/// this call will return failure if the mutex would otherwise be
/// acquired.
pub fn lock(&self) -> ReentrantMutexGuard<'_, T> {
+ let this_thread = current_thread_unique_ptr();
+ // Safety: We only touch lock_count when we own the lock.
unsafe {
- let _ = self.inner.lock();
+ if self.owner.load(Relaxed) == this_thread {
+ self.increment_lock_count();
+ } else {
+ self.mutex.raw_lock();
+ self.owner.store(this_thread, Relaxed);
+ debug_assert_eq!(*self.lock_count.get(), 0);
+ *self.lock_count.get() = 1;
+ }
}
ReentrantMutexGuard { lock: self }
}
@@ -97,34 +137,27 @@ impl<T> ReentrantMutex<T> {
/// this call will return failure if the mutex would otherwise be
/// acquired.
pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, T>> {
- if unsafe { self.inner.try_lock().is_ok() } {
- Some(ReentrantMutexGuard { lock: self })
- } else {
- None
+ let this_thread = current_thread_unique_ptr();
+ // Safety: We only touch lock_count when we own the lock.
+ unsafe {
+ if self.owner.load(Relaxed) == this_thread {
+ self.increment_lock_count();
+ Some(ReentrantMutexGuard { lock: self })
+ } else if self.mutex.try_lock() {
+ self.owner.store(this_thread, Relaxed);
+ debug_assert_eq!(*self.lock_count.get(), 0);
+ *self.lock_count.get() = 1;
+ Some(ReentrantMutexGuard { lock: self })
+ } else {
+ None
+ }
}
}
-}
-impl<T: fmt::Debug + 'static> fmt::Debug for ReentrantMutex<T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self.try_lock() {
- Some(guard) => f
- .debug_struct("ReentrantMutex")
- .field("data", &*guard)
- .finish(),
- None => {
- struct LockedPlaceholder;
- impl fmt::Debug for LockedPlaceholder {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.write_str("<locked>")
- }
- }
-
- f.debug_struct("ReentrantMutex")
- .field("data", &LockedPlaceholder)
- .finish()
- }
- }
+ unsafe fn increment_lock_count(&self) {
+ *self.lock_count.get() = (*self.lock_count.get())
+ .checked_add(1)
+ .expect("lock count overflow in reentrant mutex");
}
}
@@ -139,8 +172,20 @@ impl<T> Deref for ReentrantMutexGuard<'_, T> {
impl<T> Drop for ReentrantMutexGuard<'_, T> {
#[inline]
fn drop(&mut self) {
+ // Safety: We own the lock.
unsafe {
- let _ = self.lock.inner.unlock();
+ *self.lock.lock_count.get() -= 1;
+ if *self.lock.lock_count.get() == 0 {
+ self.lock.owner.store(0, Relaxed);
+ self.lock.mutex.raw_unlock();
+ }
}
}
}
+
+/// Get a tcsid that is unique per running thread.
+///
+/// This can be used as a non-null usize-sized ID.
+pub fn current_thread_unique_ptr() -> usize {
+ tcs::current().id().as_usize()
+}
diff --git a/sgx_sync/src/remutex.rs b/sgx_sync/src/remutex/legacy.rs
similarity index 99%
rename from sgx_sync/src/remutex.rs
rename to sgx_sync/src/remutex/legacy.rs
index c0f692c8..275a9a8b 100644
--- a/sgx_sync/src/remutex.rs
+++ b/sgx_sync/src/remutex/legacy.rs
@@ -15,7 +15,7 @@
// specific language governing permissions and limitations
// under the License..
-use crate::sys::mutex as sys;
+use crate::sys::locks::generic::mutex as sys;
use core::fmt;
use core::ops::Deref;
use core::panic::{RefUnwindSafe, UnwindSafe};
diff --git a/sgx_tstd/src/sys_common/thread_parker/mod.rs
b/sgx_sync/src/remutex/mod.rs
similarity index 86%
copy from sgx_tstd/src/sys_common/thread_parker/mod.rs
copy to sgx_sync/src/remutex/mod.rs
index d7be5e8d..babdd013 100644
--- a/sgx_tstd/src/sys_common/thread_parker/mod.rs
+++ b/sgx_sync/src/remutex/mod.rs
@@ -16,6 +16,7 @@
// under the License..
mod generic;
-mod futex;
+pub use generic::{ReentrantMutex, ReentrantMutexGuard};
-pub use generic::Parker;
+// mod legacy;
+// pub use legacy::{ReentrantMutex, ReentrantMutexGuard};
diff --git a/sgx_sync/src/rwlock.rs b/sgx_sync/src/rwlock.rs
index c417ce75..7782585a 100644
--- a/sgx_sync/src/rwlock.rs
+++ b/sgx_sync/src/rwlock.rs
@@ -16,8 +16,8 @@
// under the License..
use crate::lock_api::RawRwLock;
-use crate::sys::rwlock as imp;
-use sgx_types::error::errno::EBUSY;
+use crate::sys::locks::rwlock as imp;
+// use sgx_types::error::errno::EBUSY;
/// An SGX-based reader-writer lock., meant for use in static variables.
///
@@ -41,9 +41,7 @@ impl StaticRwLock {
pub fn read(&'static self) -> StaticRwLockReadGuard {
// Safety: All methods require static references, therefore self
// cannot be moved between invocations.
- let r = unsafe { self.0.read() };
- debug_assert_eq!(r, Ok(()));
-
+ unsafe { self.0.read() };
StaticRwLockReadGuard(&self.0)
}
@@ -55,8 +53,7 @@ impl StaticRwLock {
pub fn write(&'static self) -> StaticRwLockWriteGuard {
// Safety: All methods require static references, therefore self
// cannot be moved between invocations.
- let r = unsafe { self.0.write() };
- debug_assert_eq!(r, Ok(()));
+ unsafe { self.0.write() };
StaticRwLockWriteGuard(&self.0)
}
@@ -67,8 +64,9 @@ pub struct StaticRwLockReadGuard(&'static imp::RwLock);
impl Drop for StaticRwLockReadGuard {
fn drop(&mut self) {
- let r = unsafe { self.0.read_unlock() };
- debug_assert_eq!(r, Ok(()));
+ unsafe {
+ self.0.read_unlock();
+ }
}
}
@@ -77,8 +75,9 @@ pub struct StaticRwLockWriteGuard(&'static imp::RwLock);
impl Drop for StaticRwLockWriteGuard {
fn drop(&mut self) {
- let r = unsafe { self.0.write_unlock() };
- debug_assert_eq!(r, Ok(()));
+ unsafe {
+ self.0.write_unlock();
+ }
}
}
@@ -107,8 +106,7 @@ impl MovableRwLock {
/// thread to do so.
#[inline]
pub fn read(&self) {
- let r = unsafe { self.0.read() };
- debug_assert_eq!(r, Ok(()));
+ unsafe { self.0.read() }
}
/// Attempts to acquire shared access to this lock, returning whether it
@@ -117,9 +115,7 @@ impl MovableRwLock {
/// This function does not block the current thread.
#[inline]
pub fn try_read(&self) -> bool {
- let r = unsafe { self.0.try_read() };
- debug_assert!(r == Err(EBUSY) || r == Ok(()));
- r == Ok(())
+ unsafe { self.0.try_read() }
}
/// Acquires write access to the underlying lock, blocking the current
thread
@@ -129,8 +125,7 @@ impl MovableRwLock {
/// previous method call.
#[inline]
pub fn write(&self) {
- let r = unsafe { self.0.write() };
- debug_assert_eq!(r, Ok(()));
+ unsafe { self.0.write() }
}
/// Attempts to acquire exclusive access to this lock, returning whether it
@@ -142,9 +137,7 @@ impl MovableRwLock {
/// previous method call.
#[inline]
pub fn try_write(&self) -> bool {
- let r = unsafe { self.0.try_write() };
- debug_assert!(r == Err(EBUSY) || r == Ok(()));
- r == Ok(())
+ unsafe { self.0.try_write() }
}
/// Unlocks previously acquired shared access to this lock.
@@ -152,8 +145,7 @@ impl MovableRwLock {
/// Behavior is undefined if the current thread does not have shared
access.
#[inline]
pub unsafe fn read_unlock(&self) {
- let r = self.0.read_unlock();
- debug_assert_eq!(r, Ok(()));
+ self.0.read_unlock()
}
/// Unlocks previously acquired exclusive access to this lock.
@@ -162,8 +154,7 @@ impl MovableRwLock {
/// exclusive access.
#[inline]
pub unsafe fn write_unlock(&self) {
- let r = self.0.write_unlock();
- debug_assert_eq!(r, Ok(()));
+ self.0.write_unlock()
}
}
diff --git a/sgx_sync/src/sys/futex.rs b/sgx_sync/src/sys/futex.rs
index 0fb732f6..119d4ec6 100644
--- a/sgx_sync/src/sys/futex.rs
+++ b/sgx_sync/src/sys/futex.rs
@@ -226,7 +226,9 @@ impl Bucket {
}
});
- Item::batch_wake(&items_to_wake);
+ if !items_to_wake.is_empty() {
+ Item::batch_wake(&items_to_wake);
+ }
count
}
diff --git a/sgx_sync/src/lazy_box.rs b/sgx_sync/src/sys/lazy_box.rs
similarity index 100%
rename from sgx_sync/src/lazy_box.rs
rename to sgx_sync/src/sys/lazy_box.rs
diff --git a/sgx_sync/src/sys/locks/futex/condvar.rs
b/sgx_sync/src/sys/locks/futex/condvar.rs
new file mode 100644
index 00000000..5858bd88
--- /dev/null
+++ b/sgx_sync/src/sys/locks/futex/condvar.rs
@@ -0,0 +1,77 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License..
+
+use super::mutex::Mutex;
+use super::wait::{futex_wait, futex_wake, futex_wake_all};
+use core::sync::atomic::{AtomicU32, Ordering::Relaxed};
+use core::time::Duration;
+
+pub type MovableCondvar = Condvar;
+
+pub struct Condvar {
+ // The value of this atomic is simply incremented on every notification.
+ // This is used by `.wait()` to not miss any notifications after
+ // unlocking the mutex and before waiting for notifications.
+ futex: AtomicU32,
+}
+
+impl Condvar {
+ #[inline]
+ pub const fn new() -> Self {
+ Self {
+ futex: AtomicU32::new(0),
+ }
+ }
+
+ // All the memory orderings here are `Relaxed`,
+ // because synchronization is done by unlocking and locking the mutex.
+
+ pub unsafe fn notify_one(&self) {
+ self.futex.fetch_add(1, Relaxed);
+ futex_wake(&self.futex);
+ }
+
+ pub unsafe fn notify_all(&self) {
+ self.futex.fetch_add(1, Relaxed);
+ futex_wake_all(&self.futex);
+ }
+
+ pub unsafe fn wait(&self, mutex: &Mutex) {
+ self.wait_optional_timeout(mutex, None);
+ }
+
+ pub unsafe fn wait_timeout(&self, mutex: &Mutex, timeout: Duration) ->
bool {
+ self.wait_optional_timeout(mutex, Some(timeout))
+ }
+
+ unsafe fn wait_optional_timeout(&self, mutex: &Mutex, timeout:
Option<Duration>) -> bool {
+ // Examine the notification counter _before_ we unlock the mutex.
+ let futex_value = self.futex.load(Relaxed);
+
+ // Unlock the mutex before going to sleep.
+ mutex.unlock();
+
+ // Wait, but only if there hasn't been any
+ // notification since we unlocked the mutex.
+ let r = futex_wait(&self.futex, futex_value, timeout);
+
+ // Lock the mutex again.
+ mutex.lock();
+
+ r
+ }
+}
diff --git a/sgx_sync/src/sys/mod.rs b/sgx_sync/src/sys/locks/futex/mod.rs
similarity index 96%
copy from sgx_sync/src/sys/mod.rs
copy to sgx_sync/src/sys/locks/futex/mod.rs
index 89b6ad2a..60c09347 100644
--- a/sgx_sync/src/sys/mod.rs
+++ b/sgx_sync/src/sys/locks/futex/mod.rs
@@ -15,8 +15,8 @@
// specific language governing permissions and limitations
// under the License..
+mod wait;
+
pub mod condvar;
-pub mod futex;
pub mod mutex;
-pub mod ocall;
pub mod rwlock;
diff --git a/sgx_sync/src/sys/locks/futex/mutex.rs
b/sgx_sync/src/sys/locks/futex/mutex.rs
new file mode 100644
index 00000000..7caa5b6b
--- /dev/null
+++ b/sgx_sync/src/sys/locks/futex/mutex.rs
@@ -0,0 +1,119 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License..
+
+use super::wait::{futex_wait, futex_wake};
+use core::sync::atomic::{
+ AtomicU32,
+ Ordering::{Acquire, Relaxed, Release},
+};
+
+pub type MovableMutex = Mutex;
+
+pub struct Mutex {
+ /// 0: unlocked
+ /// 1: locked, no other threads waiting
+ /// 2: locked, and other threads waiting (contended)
+ futex: AtomicU32,
+}
+
+impl Mutex {
+ #[inline]
+ pub const fn new() -> Self {
+ Self {
+ futex: AtomicU32::new(0),
+ }
+ }
+
+ #[inline]
+ pub unsafe fn try_lock(&self) -> bool {
+ self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_ok()
+ }
+
+ #[inline]
+ pub unsafe fn lock(&self) {
+ if self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_err() {
+ self.lock_contended();
+ }
+ }
+
+ #[cold]
+ fn lock_contended(&self) {
+ // Spin first to speed things up if the lock is released quickly.
+ let mut state = self.spin();
+
+ // If it's unlocked now, attempt to take the lock
+ // without marking it as contended.
+ if state == 0 {
+ match self.futex.compare_exchange(0, 1, Acquire, Relaxed) {
+ Ok(_) => return, // Locked!
+ Err(s) => state = s,
+ }
+ }
+
+ loop {
+ // Put the lock in contended state.
+            // We avoid an unnecessary write if it is already set to 2,
+ // to be friendlier for the caches.
+ if state != 2 && self.futex.swap(2, Acquire) == 0 {
+ // We changed it from 0 to 2, so we just successfully locked
it.
+ return;
+ }
+
+ // Wait for the futex to change state, assuming it is still 2.
+ futex_wait(&self.futex, 2, None);
+
+ // Spin again after waking up.
+ state = self.spin();
+ }
+ }
+
+ fn spin(&self) -> u32 {
+        // In Rust's implementation, this is `100`. Considering more overhead
in SGX environment,
+ // here we make it bigger.
+ let mut spin = 1000;
+ loop {
+ // We only use `load` (and not `swap` or `compare_exchange`)
+ // while spinning, to be easier on the caches.
+ let state = self.futex.load(Relaxed);
+
+ // We stop spinning when the mutex is unlocked (0),
+ // but also when it's contended (2).
+ if state != 1 || spin == 0 {
+ return state;
+ }
+
+ core::hint::spin_loop();
+ spin -= 1;
+ }
+ }
+
+ #[inline]
+ pub unsafe fn unlock(&self) {
+ if self.futex.swap(0, Release) == 2 {
+ // We only wake up one thread. When that thread locks the mutex, it
+ // will mark the mutex as contended (2) (see lock_contended above),
+ // which makes sure that any other waiting threads will also be
+ // woken up eventually.
+ self.wake();
+ }
+ }
+
+ #[cold]
+ fn wake(&self) {
+ futex_wake(&self.futex);
+ }
+}
diff --git a/sgx_sync/src/sys/locks/futex/rwlock.rs
b/sgx_sync/src/sys/locks/futex/rwlock.rs
new file mode 100644
index 00000000..654d3744
--- /dev/null
+++ b/sgx_sync/src/sys/locks/futex/rwlock.rs
@@ -0,0 +1,365 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License..
+
+use super::wait::{futex_wait, futex_wake, futex_wake_all};
+use core::sync::atomic::{
+ AtomicU32,
+ Ordering::{Acquire, Relaxed, Release},
+};
+
+pub type MovableRwLock = RwLock;
+
+pub struct RwLock {
+ // The state consists of a 30-bit reader counter, a 'readers waiting'
flag, and a 'writers waiting' flag.
+ // Bits 0..30:
+ // 0: Unlocked
+ // 1..=0x3FFF_FFFE: Locked by N readers
+ // 0x3FFF_FFFF: Write locked
+ // Bit 30: Readers are waiting on this futex.
+ // Bit 31: Writers are waiting on the writer_notify futex.
+ state: AtomicU32,
+ // The 'condition variable' to notify writers through.
+ // Incremented on every signal.
+ writer_notify: AtomicU32,
+}
+
+const READ_LOCKED: u32 = 1;
+const MASK: u32 = (1 << 30) - 1;
+const WRITE_LOCKED: u32 = MASK;
+const MAX_READERS: u32 = MASK - 1;
+const READERS_WAITING: u32 = 1 << 30;
+const WRITERS_WAITING: u32 = 1 << 31;
+
+#[inline]
+fn is_unlocked(state: u32) -> bool {
+ state & MASK == 0
+}
+
+#[inline]
+fn is_write_locked(state: u32) -> bool {
+ state & MASK == WRITE_LOCKED
+}
+
+#[inline]
+fn has_readers_waiting(state: u32) -> bool {
+ state & READERS_WAITING != 0
+}
+
+#[inline]
+fn has_writers_waiting(state: u32) -> bool {
+ state & WRITERS_WAITING != 0
+}
+
+#[inline]
+fn is_read_lockable(state: u32) -> bool {
+ // This also returns false if the counter could overflow if we tried to
read lock it.
+ //
+ // We don't allow read-locking if there's readers waiting, even if the
lock is unlocked
+ // and there's no writers waiting. The only situation when this happens is
after unlocking,
+ // at which point the unlocking thread might be waking up writers, which
have priority over readers.
+ // The unlocking thread will clear the readers waiting bit and wake up
readers, if necessary.
+ state & MASK < MAX_READERS && !has_readers_waiting(state) &&
!has_writers_waiting(state)
+}
+
+#[inline]
+fn has_reached_max_readers(state: u32) -> bool {
+ state & MASK == MAX_READERS
+}
+
+impl RwLock {
+ #[inline]
+ pub const fn new() -> Self {
+ Self {
+ state: AtomicU32::new(0),
+ writer_notify: AtomicU32::new(0),
+ }
+ }
+
+ #[inline]
+ pub unsafe fn try_read(&self) -> bool {
+ self.state
+ .fetch_update(Acquire, Relaxed, |s| {
+ is_read_lockable(s).then_some(s + READ_LOCKED)
+ })
+ .is_ok()
+ }
+
+ #[inline]
+ pub unsafe fn read(&self) {
+ let state = self.state.load(Relaxed);
+ if !is_read_lockable(state)
+ || self
+ .state
+ .compare_exchange_weak(state, state + READ_LOCKED, Acquire,
Relaxed)
+ .is_err()
+ {
+ self.read_contended();
+ }
+ }
+
+ #[inline]
+ pub unsafe fn read_unlock(&self) {
+ let state = self.state.fetch_sub(READ_LOCKED, Release) - READ_LOCKED;
+
+ // It's impossible for a reader to be waiting on a read-locked RwLock,
+ // except if there is also a writer waiting.
+ debug_assert!(!has_readers_waiting(state) ||
has_writers_waiting(state));
+
+ // Wake up a writer if we were the last reader and there's a writer
waiting.
+ if is_unlocked(state) && has_writers_waiting(state) {
+ self.wake_writer_or_readers(state);
+ }
+ }
+
+ #[cold]
+ fn read_contended(&self) {
+ let mut state = self.spin_read();
+
+ loop {
+ // If we can lock it, lock it.
+ if is_read_lockable(state) {
+ match self
+ .state
+ .compare_exchange_weak(state, state + READ_LOCKED,
Acquire, Relaxed)
+ {
+ Ok(_) => return, // Locked!
+ Err(s) => {
+ state = s;
+ continue;
+ }
+ }
+ }
+
+ // Check for overflow.
+ if has_reached_max_readers(state) {
+ panic!("too many active read locks on RwLock");
+ }
+
+ // Make sure the readers waiting bit is set before we go to sleep.
+ if !has_readers_waiting(state) {
+ if let Err(s) =
+ self.state
+ .compare_exchange(state, state | READERS_WAITING,
Relaxed, Relaxed)
+ {
+ state = s;
+ continue;
+ }
+ }
+
+ // Wait for the state to change.
+ futex_wait(&self.state, state | READERS_WAITING, None);
+
+ // Spin again after waking up.
+ state = self.spin_read();
+ }
+ }
+
+ #[inline]
+ pub unsafe fn try_write(&self) -> bool {
+ self.state
+ .fetch_update(Acquire, Relaxed, |s| {
+ is_unlocked(s).then_some(s + WRITE_LOCKED)
+ })
+ .is_ok()
+ }
+
+ #[inline]
+ pub unsafe fn write(&self) {
+ if self
+ .state
+ .compare_exchange_weak(0, WRITE_LOCKED, Acquire, Relaxed)
+ .is_err()
+ {
+ self.write_contended();
+ }
+ }
+
+ #[inline]
+ pub unsafe fn write_unlock(&self) {
+ let state = self.state.fetch_sub(WRITE_LOCKED, Release) - WRITE_LOCKED;
+
+ debug_assert!(is_unlocked(state));
+
+ if has_writers_waiting(state) || has_readers_waiting(state) {
+ self.wake_writer_or_readers(state);
+ }
+ }
+
+ #[cold]
+ fn write_contended(&self) {
+ let mut state = self.spin_write();
+
+ let mut other_writers_waiting = 0;
+
+ loop {
+ // If it's unlocked, we try to lock it.
+ if is_unlocked(state) {
+ match self.state.compare_exchange_weak(
+ state,
+ state | WRITE_LOCKED | other_writers_waiting,
+ Acquire,
+ Relaxed,
+ ) {
+ Ok(_) => return, // Locked!
+ Err(s) => {
+ state = s;
+ continue;
+ }
+ }
+ }
+
+ // Set the waiting bit indicating that we're waiting on it.
+ if !has_writers_waiting(state) {
+ if let Err(s) =
+ self.state
+ .compare_exchange(state, state | WRITERS_WAITING,
Relaxed, Relaxed)
+ {
+ state = s;
+ continue;
+ }
+ }
+
+ // Other writers might be waiting now too, so we should make sure
+ // we keep that bit on once we manage lock it.
+ other_writers_waiting = WRITERS_WAITING;
+
+ // Examine the notification counter before we check if `state` has
changed,
+ // to make sure we don't miss any notifications.
+ let seq = self.writer_notify.load(Acquire);
+
+ // Don't go to sleep if the lock has become available,
+ // or if the writers waiting bit is no longer set.
+ state = self.state.load(Relaxed);
+ if is_unlocked(state) || !has_writers_waiting(state) {
+ continue;
+ }
+
+ // Wait for the state to change.
+ futex_wait(&self.writer_notify, seq, None);
+
+ // Spin again after waking up.
+ state = self.spin_write();
+ }
+ }
+
+ /// Wake up waiting threads after unlocking.
+ ///
+ /// If both are waiting, this will wake up only one writer, but will fall
+ /// back to waking up readers if there was no writer to wake up.
+ #[allow(clippy::collapsible_if)]
+ #[cold]
+ fn wake_writer_or_readers(&self, mut state: u32) {
+ assert!(is_unlocked(state));
+
+ // The readers waiting bit might be turned on at any point now,
+ // since readers will block when there's anything waiting.
+ // Writers will just lock the lock though, regardless of the waiting bits,
+ // so we don't have to worry about the writer waiting bit.
+ //
+ // If the lock gets locked in the meantime, we don't have to do
+ // anything, because then the thread that locked the lock will take
+ // care of waking up waiters when it unlocks.
+
+ // If only writers are waiting, wake one of them up.
+ if state == WRITERS_WAITING {
+ match self.state.compare_exchange(state, 0, Relaxed, Relaxed) {
+ Ok(_) => {
+ self.wake_writer();
+ return;
+ }
+ Err(s) => {
+ // Maybe some readers are now waiting too. So, continue to the next `if`.
+ state = s;
+ }
+ }
+ }
+
+ // If both writers and readers are waiting, leave the readers waiting
+ // and only wake up one writer.
+ if state == READERS_WAITING + WRITERS_WAITING {
+ if self
+ .state
+ .compare_exchange(state, READERS_WAITING, Relaxed, Relaxed)
+ .is_err()
+ {
+ // The lock got locked. Not our problem anymore.
+ return;
+ }
+ if self.wake_writer() {
+ return;
+ }
+ // No writers were actually blocked on futex_wait, so we continue
+ // to wake up readers instead, since we can't be sure if we notified a writer.
+ state = READERS_WAITING;
+ }
+
+ // If readers are waiting, wake them all up.
+ if state == READERS_WAITING {
+ if self
+ .state
+ .compare_exchange(state, 0, Relaxed, Relaxed)
+ .is_ok()
+ {
+ futex_wake_all(&self.state);
+ }
+ }
+ }
+
+ /// This wakes one writer and returns true if we woke up a writer that was
+ /// blocked on futex_wait.
+ ///
+ /// If this returns false, it might still be the case that we notified a
+ /// writer that was about to go to sleep.
+ fn wake_writer(&self) -> bool {
+ self.writer_notify.fetch_add(1, Release);
+ futex_wake(&self.writer_notify)
+ // Note that FreeBSD and DragonFlyBSD don't tell us whether they woke
+ // up any threads or not, and always return `false` here. That still
+ // results in correct behaviour: it just means readers get woken up as
+ // well in case both readers and writers were waiting.
+ }
+
+ /// Spin for a while, but stop directly at the given condition.
+ #[inline]
+ fn spin_until(&self, f: impl Fn(u32) -> bool) -> u32 {
+ // In Rust's implementation, this is `100`. Considering the extra overhead
+ // in an SGX environment, we make it bigger here.
+ let mut spin = 1000; // Chosen by fair dice roll.
+ loop {
+ let state = self.state.load(Relaxed);
+ if f(state) || spin == 0 {
+ return state;
+ }
+ core::hint::spin_loop();
+ spin -= 1;
+ }
+ }
+
+ #[inline]
+ fn spin_write(&self) -> u32 {
+ // Stop spinning when it's unlocked or when there are waiting writers, to keep things somewhat fair.
+ self.spin_until(|state| is_unlocked(state) || has_writers_waiting(state))
+ }
+
+ #[inline]
+ fn spin_read(&self) -> u32 {
+ // Stop spinning when it's unlocked or read locked, or when there are waiting threads.
+ self.spin_until(|state| {
+ !is_write_locked(state) || has_readers_waiting(state) || has_writers_waiting(state)
+ })
+ }
+}
diff --git a/sgx_tstd/src/sys/futex.rs b/sgx_sync/src/sys/locks/futex/wait.rs
similarity index 77%
copy from sgx_tstd/src/sys/futex.rs
copy to sgx_sync/src/sys/locks/futex/wait.rs
index da2a98a9..5255e35d 100644
--- a/sgx_tstd/src/sys/futex.rs
+++ b/sgx_sync/src/sys/locks/futex/wait.rs
@@ -15,9 +15,10 @@
// specific language governing permissions and limitations
// under the License..
-use crate::sync::atomic::AtomicU32;
-use crate::time::Duration;
-use sgx_sync::Futex;
+use crate::sys::futex::Futex;
+use core::sync::atomic::AtomicU32;
+use core::sync::atomic::Ordering::Relaxed;
+use core::time::Duration;
use sgx_types::error::errno::{EINTR, ETIMEDOUT};
/// Wait for a futex_wake operation to wake us.
@@ -25,10 +26,8 @@ use sgx_types::error::errno::{EINTR, ETIMEDOUT};
/// Returns directly if the futex doesn't hold the expected value.
///
/// Returns false on timeout, and true in all other cases.
-pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
- use crate::sync::atomic::Ordering::Relaxed;
-
- let futex_obj = Futex::new(futex);
+pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
+ let futex_obj = Futex::new(futex as *const _ as usize);
loop {
// No need to wait if the value already changed.
if futex.load(Relaxed) != expected {
@@ -50,12 +49,16 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32,
timeout: Option<Duration>) -
///
/// On some platforms, this always returns false.
pub fn futex_wake(futex: &AtomicU32) -> bool {
- let futex = Futex::new(futex);
- futex.wake(1).is_ok()
+ let futex = Futex::new(futex as *const _ as usize);
+ match futex.wake(1) {
+ Ok(0) => false,
+ Ok(_) => true,
+ Err(_) => false,
+ }
}
/// Wake up all threads that are waiting on futex_wait on this futex.
-pub fn futex_wake_all(futex: &AtomicU32) -> bool {
- let futex = Futex::new(futex);
- futex.wake(i32::MAX).is_ok()
+pub fn futex_wake_all(futex: &AtomicU32) {
+ let futex = Futex::new(futex as *const _ as usize);
+ let _ = futex.wake(i32::MAX as usize);
}
diff --git a/sgx_sync/src/sys/condvar.rs
b/sgx_sync/src/sys/locks/generic/condvar.rs
similarity index 98%
rename from sgx_sync/src/sys/condvar.rs
rename to sgx_sync/src/sys/locks/generic/condvar.rs
index 95bcf275..c60c94d5 100644
--- a/sgx_sync/src/sys/condvar.rs
+++ b/sgx_sync/src/sys/locks/generic/condvar.rs
@@ -15,8 +15,8 @@
// specific language governing permissions and limitations
// under the License..
-use crate::lazy_box::{LazyBox, LazyInit};
-use crate::sys::mutex::Mutex;
+use super::mutex::Mutex;
+use crate::sys::lazy_box::{LazyBox, LazyInit};
use crate::sys::ocall;
use alloc::boxed::Box;
use alloc::collections::LinkedList;
diff --git a/sgx_sync/src/sys/mod.rs b/sgx_sync/src/sys/locks/generic/mod.rs
similarity index 96%
copy from sgx_sync/src/sys/mod.rs
copy to sgx_sync/src/sys/locks/generic/mod.rs
index 89b6ad2a..bf1bab89 100644
--- a/sgx_sync/src/sys/mod.rs
+++ b/sgx_sync/src/sys/locks/generic/mod.rs
@@ -16,7 +16,5 @@
// under the License..
pub mod condvar;
-pub mod futex;
pub mod mutex;
-pub mod ocall;
pub mod rwlock;
diff --git a/sgx_sync/src/sys/mutex.rs b/sgx_sync/src/sys/locks/generic/mutex.rs
similarity index 99%
rename from sgx_sync/src/sys/mutex.rs
rename to sgx_sync/src/sys/locks/generic/mutex.rs
index 85cc835d..4aed9dc4 100644
--- a/sgx_sync/src/sys/mutex.rs
+++ b/sgx_sync/src/sys/locks/generic/mutex.rs
@@ -15,7 +15,7 @@
// specific language governing permissions and limitations
// under the License..
-use crate::lazy_box::{LazyBox, LazyInit};
+use crate::sys::lazy_box::{LazyBox, LazyInit};
use crate::sys::ocall;
use alloc::boxed::Box;
use alloc::collections::LinkedList;
diff --git a/sgx_sync/src/sys/rwlock.rs
b/sgx_sync/src/sys/locks/generic/rwlock.rs
similarity index 99%
rename from sgx_sync/src/sys/rwlock.rs
rename to sgx_sync/src/sys/locks/generic/rwlock.rs
index 5532eef9..d97c5066 100644
--- a/sgx_sync/src/sys/rwlock.rs
+++ b/sgx_sync/src/sys/locks/generic/rwlock.rs
@@ -15,7 +15,7 @@
// specific language governing permissions and limitations
// under the License..
-use crate::lazy_box::{LazyBox, LazyInit};
+use crate::sys::lazy_box::{LazyBox, LazyInit};
use crate::sys::ocall;
use alloc::boxed::Box;
use alloc::collections::LinkedList;
diff --git a/sgx_sync/src/sys/mod.rs b/sgx_sync/src/sys/locks/mod.rs
similarity index 90%
copy from sgx_sync/src/sys/mod.rs
copy to sgx_sync/src/sys/locks/mod.rs
index 89b6ad2a..2b0be11d 100644
--- a/sgx_sync/src/sys/mod.rs
+++ b/sgx_sync/src/sys/locks/mod.rs
@@ -15,8 +15,9 @@
// specific language governing permissions and limitations
// under the License..
-pub mod condvar;
pub mod futex;
-pub mod mutex;
-pub mod ocall;
-pub mod rwlock;
+pub mod generic;
+
+pub use futex::condvar;
+pub use futex::mutex;
+pub use futex::rwlock;
diff --git a/sgx_sync/src/sys/mod.rs b/sgx_sync/src/sys/mod.rs
index 89b6ad2a..ee5e8e1e 100644
--- a/sgx_sync/src/sys/mod.rs
+++ b/sgx_sync/src/sys/mod.rs
@@ -15,8 +15,7 @@
// specific language governing permissions and limitations
// under the License..
-pub mod condvar;
pub mod futex;
-pub mod mutex;
+pub mod lazy_box;
+pub mod locks;
pub mod ocall;
-pub mod rwlock;
diff --git a/sgx_sync/src/sys/ocall/mod.rs b/sgx_sync/src/sys/ocall/mod.rs
index 7683b717..1d3fcca5 100644
--- a/sgx_sync/src/sys/ocall/mod.rs
+++ b/sgx_sync/src/sys/ocall/mod.rs
@@ -264,36 +264,6 @@ impl Timeout {
}
}
-extern "C" {
- pub fn u_thread_wait_event_ocall(
- result: *mut i32,
- error: *mut i32,
- tcs: usize,
- timeout: *const timespec,
- clockid: i32,
- absolute_time: i32,
- ) -> SgxStatus;
-
- pub fn u_thread_set_event_ocall(result: *mut i32, error: *mut i32, tcs:
usize) -> SgxStatus;
-
- pub fn u_thread_set_multiple_events_ocall(
- result: *mut i32,
- error: *mut i32,
- tcss: *const usize,
- total: usize,
- ) -> SgxStatus;
-
- pub fn u_thread_setwait_events_ocall(
- result: *mut i32,
- error: *mut i32,
- wait_tcs: usize,
- self_tcs: usize,
- timeout: *const timespec,
- clockid: i32,
- absolute_time: i32,
- ) -> SgxStatus;
-}
-
pub fn thread_wait_event(tcs: TcsId, dur: Option<Duration>) -> OsResult {
let mut result: i32 = 0;
let mut error: i32 = 0;
@@ -407,3 +377,33 @@ pub fn thread_wait_event_ex(tcs: TcsId, timeout:
Option<Timeout>) -> OsResult {
ensure!(result == 0, error);
Ok(())
}
+
+extern "C" {
+ pub fn u_thread_wait_event_ocall(
+ result: *mut i32,
+ error: *mut i32,
+ tcs: usize,
+ timeout: *const timespec,
+ clockid: i32,
+ absolute_time: i32,
+ ) -> SgxStatus;
+
+ pub fn u_thread_set_event_ocall(result: *mut i32, error: *mut i32, tcs:
usize) -> SgxStatus;
+
+ pub fn u_thread_set_multiple_events_ocall(
+ result: *mut i32,
+ error: *mut i32,
+ tcss: *const usize,
+ total: usize,
+ ) -> SgxStatus;
+
+ pub fn u_thread_setwait_events_ocall(
+ result: *mut i32,
+ error: *mut i32,
+ wait_tcs: usize,
+ self_tcs: usize,
+ timeout: *const timespec,
+ clockid: i32,
+ absolute_time: i32,
+ ) -> SgxStatus;
+}
diff --git a/sgx_tstd/src/sync/rwlock/tests.rs
b/sgx_tstd/src/sync/rwlock/tests.rs
index d96db9c6..06025441 100644
--- a/sgx_tstd/src/sync/rwlock/tests.rs
+++ b/sgx_tstd/src/sync/rwlock/tests.rs
@@ -102,6 +102,7 @@ fn test_rw_arc_no_poison_rr() {
let lock = arc.read().unwrap();
assert_eq!(*lock, 1);
}
+
#[test_case]
fn test_rw_arc_no_poison_rw() {
let arc = Arc::new(RwLock::new(1));
diff --git a/sgx_tstd/src/sys/futex.rs b/sgx_tstd/src/sys/futex.rs
index da2a98a9..e1b86da5 100644
--- a/sgx_tstd/src/sys/futex.rs
+++ b/sgx_tstd/src/sys/futex.rs
@@ -51,11 +51,15 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32,
timeout: Option<Duration>) -
/// On some platforms, this always returns false.
pub fn futex_wake(futex: &AtomicU32) -> bool {
let futex = Futex::new(futex);
- futex.wake(1).is_ok()
+ match futex.wake(1) {
+ Ok(0) => false,
+ Ok(_) => true,
+ Err(_) => false,
+ }
}
/// Wake up all threads that are waiting on futex_wait on this futex.
-pub fn futex_wake_all(futex: &AtomicU32) -> bool {
+pub fn futex_wake_all(futex: &AtomicU32) {
let futex = Futex::new(futex);
- futex.wake(i32::MAX).is_ok()
+ let _ = futex.wake(i32::MAX);
}
diff --git a/sgx_tstd/src/sys_common/once/mod.rs
b/sgx_tstd/src/sys_common/once/mod.rs
index 4f42e257..94b3e65f 100644
--- a/sgx_tstd/src/sys_common/once/mod.rs
+++ b/sgx_tstd/src/sys_common/once/mod.rs
@@ -40,7 +40,8 @@
// All in all, this is instead implemented with atomics and lock-free
// operations! Whee!
-//mod futex;
-mod generic;
+mod futex;
+pub use futex::{Once, OnceState};
-pub use generic::{Once, OnceState};
+// mod generic;
+// pub use generic::{Once, OnceState};
diff --git a/sgx_tstd/src/sys_common/thread_parker/mod.rs
b/sgx_tstd/src/sys_common/thread_parker/mod.rs
index d7be5e8d..e910b3d9 100644
--- a/sgx_tstd/src/sys_common/thread_parker/mod.rs
+++ b/sgx_tstd/src/sys_common/thread_parker/mod.rs
@@ -15,7 +15,8 @@
// specific language governing permissions and limitations
// under the License..
-mod generic;
mod futex;
+pub use futex::Parker;
-pub use generic::Parker;
+// mod generic;
+// pub use generic::Parker;
diff --git a/sgx_urts/src/ocall/sync.rs b/sgx_urts/src/ocall/sync.rs
index a9c02302..5942868e 100644
--- a/sgx_urts/src/ocall/sync.rs
+++ b/sgx_urts/src/ocall/sync.rs
@@ -17,6 +17,7 @@
use crate::ocall::util::*;
use libc::{self, c_int, size_t, timespec};
+use std::collections::VecDeque;
use std::io::Error;
use std::slice;
use std::sync::atomic::{AtomicI32, Ordering};
@@ -35,10 +36,12 @@ impl SeEvent {
}
pub fn wait_timeout(&self, timeout: &timespec, clockid: c_int, absolute_time: c_int) -> i32 {
- let (wait_op, clockid) = if absolute_time == 1 {
- (libc::FUTEX_WAIT_BITSET, clockid)
+ const FUTEX_BITSET_MATCH_ANY: u32 = 0xFFFF_FFFF;
+
+ let (wait_op, clockid, bitset) = if absolute_time == 1 {
+ (libc::FUTEX_WAIT_BITSET, clockid, FUTEX_BITSET_MATCH_ANY)
} else {
- (libc::FUTEX_WAIT, 0)
+ (libc::FUTEX_WAIT, 0, 0)
};
if self.0.fetch_add(-1, Ordering::SeqCst) == 0 {
@@ -50,19 +53,14 @@ impl SeEvent {
-1,
timeout as *const timespec,
0,
- 0,
+ bitset,
)
};
+ let _ = self
+ .0
+ .compare_exchange(-1, 0, Ordering::SeqCst, Ordering::SeqCst);
if ret < 0 {
- match Error::last_os_error().raw_os_error() {
- Some(e) if e == libc::ETIMEDOUT || e == libc::EAGAIN || e
== libc::EINTR => {
- let _ = self
- .0
- .compare_exchange(-1, 0, Ordering::SeqCst,
Ordering::SeqCst);
- return -1;
- }
- _ => (),
- };
+ return -1;
}
}
0
@@ -81,16 +79,11 @@ impl SeEvent {
0,
)
};
+ let _ = self
+ .0
+ .compare_exchange(-1, 0, Ordering::SeqCst, Ordering::SeqCst);
if ret < 0 {
- match Error::last_os_error().raw_os_error() {
- Some(e) if e == libc::EAGAIN || e == libc::EINTR => {
- let _ = self
- .0
- .compare_exchange(-1, 0, Ordering::SeqCst,
Ordering::SeqCst);
- return -1;
- }
- _ => (),
- };
+ return -1;
}
}
0
@@ -126,13 +119,13 @@ struct TcsEvent<'a> {
}
struct TcsEventCache<'a> {
- cache: Mutex<Vec<TcsEvent<'a>>>,
+ cache: Mutex<VecDeque<TcsEvent<'a>>>,
}
impl<'a> TcsEventCache<'a> {
fn new() -> TcsEventCache<'a> {
TcsEventCache {
- cache: Mutex::new(Vec::new()),
+ cache: Mutex::new(VecDeque::with_capacity(16)),
}
}
@@ -142,7 +135,7 @@ impl<'a> TcsEventCache<'a> {
Some(e) => e.event,
None => {
let event = Box::leak(Box::new(SeEvent::new()));
- cahce_guard.push(TcsEvent { tcs, event });
+ cahce_guard.push_back(TcsEvent { tcs, event });
event
}
}
@@ -217,7 +210,7 @@ pub unsafe extern "C" fn u_thread_set_multiple_events_ocall(
tcss: *const size_t,
total: size_t,
) -> c_int {
- if tcss.is_null() {
+ if tcss.is_null() || total == 0 {
set_error(error, libc::EINVAL);
return -1;
}
@@ -225,7 +218,7 @@ pub unsafe extern "C" fn u_thread_set_multiple_events_ocall(
let tcss_slice = slice::from_raw_parts(tcss, total);
let mut errno = 0;
let mut result = 0;
- for tcs in tcss_slice.iter() {
+ for tcs in tcss_slice.iter().filter(|&&tcs| tcs != 0) {
result = get_tcs_event(*tcs).wake();
if result != 0 {
errno = Error::last_os_error().raw_os_error().unwrap_or(0);
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]