This is an automated email from the ASF dual-hosted git repository.

acassis pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git

commit d56e6de62832c7613d276155f08c8dc06e79d07e
Author: Tiago Medicci Serrano <tiago.medi...@espressif.com>
AuthorDate: Thu Mar 21 17:57:01 2024 -0300

    esp32: Update libc stubs to properly acquire/release locks.
    
    Avoid using a single static mutex and recursive mutex as the resources
    to be acquired/released. Instead, allocate a dedicated lock for each
    call if it does not already exist.
---
 arch/xtensa/src/esp32/esp32_libc_stubs.c | 98 +++++++++++++++++++++++---------
 1 file changed, 70 insertions(+), 28 deletions(-)

diff --git a/arch/xtensa/src/esp32/esp32_libc_stubs.c b/arch/xtensa/src/esp32/esp32_libc_stubs.c
index 66f8047230..2cd2bf0695 100644
--- a/arch/xtensa/src/esp32/esp32_libc_stubs.c
+++ b/arch/xtensa/src/esp32/esp32_libc_stubs.c
@@ -38,18 +38,12 @@
 
 #include "rom/esp32_libc_stubs.h"
 
-/****************************************************************************
- * Pre-processor Definitions
- ****************************************************************************/
-
-#define _lock_t int
-
 /****************************************************************************
  * Private Types
  ****************************************************************************/
 
-static mutex_t g_nxlock_common;
-static mutex_t g_nxlock_recursive;
+static mutex_t g_nxlock_common    = NXMUTEX_INITIALIZER;
+static mutex_t g_nxlock_recursive = NXMUTEX_INITIALIZER;
 
 /* Forward declaration */
 
@@ -171,64 +165,112 @@ void _raise_r(struct _reent *r)
 
 void _lock_init(_lock_t *lock)
 {
-  nxmutex_init(&g_nxlock_common);
-  nxsem_get_value(&g_nxlock_common.sem, lock);
+  *lock = 0;
+
+  mutex_t *mutex = (mutex_t *)kmm_malloc(sizeof(mutex_t));
+
+  nxmutex_init(mutex);
+
+  *lock = (_lock_t)mutex;
 }
 
 void _lock_init_recursive(_lock_t *lock)
 {
-  nxmutex_init(&g_nxlock_recursive);
-  nxsem_get_value(&g_nxlock_recursive.sem, lock);
+  *lock = 0;
+
+  rmutex_t *rmutex = (rmutex_t *)kmm_malloc(sizeof(rmutex_t));
+
+  nxrmutex_init(rmutex);
+
+  *lock = (_lock_t)rmutex;
 }
 
 void _lock_close(_lock_t *lock)
 {
-  nxmutex_destroy(&g_nxlock_common);
+  mutex_t *mutex = (mutex_t *)(*lock);
+
+  nxmutex_destroy(mutex);
+  kmm_free((void *)(*lock));
   *lock = 0;
 }
 
 void _lock_close_recursive(_lock_t *lock)
 {
-  nxmutex_destroy(&g_nxlock_recursive);
+  rmutex_t *rmutex = (rmutex_t *)(*lock);
+
+  nxrmutex_destroy(rmutex);
+  kmm_free((void *)(*lock));
   *lock = 0;
 }
 
 void _lock_acquire(_lock_t *lock)
 {
-  nxmutex_lock(&g_nxlock_common);
-  nxsem_get_value(&g_nxlock_common.sem, lock);
+  if ((*lock) == 0)
+    {
+      mutex_t *mutex = (mutex_t *)kmm_malloc(sizeof(mutex_t));
+
+      nxmutex_init(mutex);
+
+      *lock = (_lock_t)mutex;
+    }
+
+  nxmutex_lock((mutex_t *)(*lock));
 }
 
 void _lock_acquire_recursive(_lock_t *lock)
 {
-  nxmutex_lock(&g_nxlock_recursive);
-  nxsem_get_value(&g_nxlock_recursive.sem, lock);
+  if ((*lock) == 0)
+    {
+      rmutex_t *rmutex = (rmutex_t *)kmm_malloc(sizeof(rmutex_t));
+
+      nxrmutex_init(rmutex);
+
+      *lock = (_lock_t)rmutex;
+    }
+
+  nxrmutex_lock((rmutex_t *)(*lock));
 }
 
 int _lock_try_acquire(_lock_t *lock)
 {
-  nxmutex_trylock(&g_nxlock_common);
-  nxsem_get_value(&g_nxlock_common.sem, lock);
-  return 0;
+  if ((*lock) == 0)
+    {
+      mutex_t *mutex = (mutex_t *)kmm_malloc(sizeof(mutex_t));
+
+      nxmutex_init(mutex);
+
+      *lock = (_lock_t)mutex;
+    }
+
+  return nxmutex_trylock((mutex_t *)(*lock));
 }
 
 int _lock_try_acquire_recursive(_lock_t *lock)
 {
-  nxmutex_trylock(&g_nxlock_recursive);
-  nxsem_get_value(&g_nxlock_recursive.sem, lock);
-  return 0;
+  if ((*lock) == 0)
+    {
+      rmutex_t *rmutex = (rmutex_t *)kmm_malloc(sizeof(rmutex_t));
+
+      nxrmutex_init(rmutex);
+
+      *lock = (_lock_t)rmutex;
+    }
+
+  return nxrmutex_trylock((rmutex_t *)(*lock));
 }
 
 void _lock_release(_lock_t *lock)
 {
-  nxmutex_unlock(&g_nxlock_common);
-  nxsem_get_value(&g_nxlock_common.sem, lock);
+  mutex_t *mutex = (mutex_t *)(*lock);
+
+  nxmutex_unlock(mutex);
 }
 
 void _lock_release_recursive(_lock_t *lock)
 {
-  nxmutex_unlock(&g_nxlock_recursive);
-  nxsem_get_value(&g_nxlock_recursive.sem, lock);
+  rmutex_t *rmutex = (rmutex_t *)(*lock);
+
+  nxrmutex_unlock(rmutex);
 }
 
 struct _reent *__getreent(void)
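
For reference, below is a minimal standalone sketch of the lazy lock-allocation pattern the commit applies: the `_lock_t` handle starts at zero, the first acquire allocates and initializes a dedicated lock and stores its address in the handle, and later calls reuse it. This is not the NuttX implementation; it substitutes POSIX primitives (`pthread_mutex_t`, `malloc`/`free`) and hypothetical names (`my_lock_t`, `my_lock_acquire`, etc.) for the `nxmutex`/`kmm` calls shown in the diff.

/* Sketch of lazy per-handle lock allocation, using POSIX primitives
 * in place of NuttX's nxmutex/kmm APIs.  Build with -lpthread.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

typedef intptr_t my_lock_t;          /* stand-in for newlib's _lock_t */

static void my_lock_acquire(my_lock_t *lock)
{
  if (*lock == 0)                    /* lazily create the lock on first use */
    {
      pthread_mutex_t *m = malloc(sizeof(pthread_mutex_t));
      pthread_mutex_init(m, NULL);
      *lock = (my_lock_t)m;
    }

  pthread_mutex_lock((pthread_mutex_t *)*lock);
}

static void my_lock_release(my_lock_t *lock)
{
  pthread_mutex_unlock((pthread_mutex_t *)*lock);
}

static void my_lock_close(my_lock_t *lock)
{
  pthread_mutex_destroy((pthread_mutex_t *)*lock);
  free((void *)*lock);
  *lock = 0;                         /* handle reads as "uninitialized" again */
}

int main(void)
{
  my_lock_t lock = 0;                /* zero means "no lock allocated yet" */

  my_lock_acquire(&lock);            /* first call allocates the mutex */
  my_lock_release(&lock);

  my_lock_acquire(&lock);            /* later calls reuse the same mutex */
  my_lock_release(&lock);

  my_lock_close(&lock);
  printf("done\n");
  return 0;
}

As in the patch, the handle itself doubles as the "initialized" flag, so no separate static state is needed per lock.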
