xiaoxiang781216 commented on code in PR #8026:
URL: https://github.com/apache/nuttx/pull/8026#discussion_r1061323025


##########
mm/map/mm_map.c:
##########
@@ -0,0 +1,337 @@
+/****************************************************************************
+ * mm/map/mm_map.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <nuttx/queue.h>
+#include <nuttx/sched.h>
+#include <nuttx/semaphore.h>
+#include <nuttx/mm/map.h>
+#include <nuttx/kmalloc.h>
+#include <sys/mman.h>
+#include <assert.h>
+
+#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
+
+/****************************************************************************
+ * Private Functions
+ ****************************************************************************/
+
+static bool vaddr_in_area(FAR const void *addr, FAR const void *start,
+                          size_t length)
+{
+  uintptr_t u_addr = (uintptr_t)addr;
+  uintptr_t u_start = (uintptr_t)start;
+  return (u_addr >= u_start) && (u_addr < u_start + length);
+}
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: mm_map_initialize
+ *
+ * Description:
+ *   Allocates a task group specific mm_map structure. Called when the group
+ *   is initialized
+ *
+ ****************************************************************************/
+
void mm_map_initialize(FAR struct mm_map_s *mm)
{
  /* Zero the whole structure first so every field starts from a known
   * state, then set up the (empty) mapping list and the mutex that
   * serializes access to it. The order matters: the memset must not
   * run after the sub-objects have been initialized.
   *
   * NOTE(review): memset requires <string.h>, which is not in this
   * file's visible include list — presumably pulled in transitively;
   * confirm and include it explicitly.
   */

  memset(mm, 0, sizeof(struct mm_map_s));
  sq_init(&mm->mm_map_sq);
  nxmutex_init(&mm->mm_map_mutex);
}
+
+/****************************************************************************
+ * Name: mm_map_destroy
+ *
+ * Description:
+ *   De-allocates a task group specific mm_map structure and the mm_map_mutex
+ *
+ ****************************************************************************/
+
+void mm_map_destroy(FAR struct mm_map_s *mm)
+{
+  FAR struct mm_map_entry_s *map;
+
+  while ((map = (FAR struct mm_map_entry_s *)sq_remfirst(&mm->mm_map_sq)))
+    {
+      /* Pass null as group argument to indicate that actual MMU mappings
+       * must not be touched. The process is being deleted and we don't
+       * know in which context we are. Only kernel memory allocations
+       * need to be freed by drivers
+       */
+
+      if (map->munmap)
+        {
+          /* Unmap the whole region */
+
+          if (map->munmap(NULL, map, (void *)map->vaddr, map->length) < 0)
+            {
+              /* This is a bug. A driver provides munmap, but can't munmap
+               * the region. Just delete the map entry.
+               */
+
+              kmm_free(map);
+            }
+        }
+      else
+        {
+          /* Mappping has been added, but there is no unmap. Just delete the
+           * map entry.
+           */
+
+          kmm_free(map);
+        }
+    }
+
+  nxmutex_destroy(&mm->mm_map_mutex);
+}
+
+/****************************************************************************
+ * Name: mm_map_add
+ *
+ * Description:
+ *   Add a mapping to task group's mm_map list
+ *
+ ****************************************************************************/
+
+int mm_map_add(FAR struct mm_map_entry_s *entry)
+{
+  FAR struct tcb_s *tcb = nxsched_self();
+  FAR struct task_group_s *group = tcb->group;
+  FAR struct mm_map_s *mm = &group->tg_mm_map;
+  FAR struct mm_map_entry_s *map = kmm_zalloc(sizeof(struct mm_map_entry_s));
+  int ret;
+
+  if (!entry)
+    {
+      return -EINVAL;
+    }
+
+  if (!map)
+    {
+      return -ENOMEM;
+    }
+
+  /* Copy the provided mapping and add to the list */
+
+  *map = *entry;
+
+  ret = nxmutex_lock(&mm->mm_map_mutex);
+  if (ret < 0)
+    {
+      kmm_free(map);
+      return ret;
+    }
+
+  sq_addfirst((sq_entry_t *)map, &mm->mm_map_sq);
+
+  nxmutex_unlock(&mm->mm_map_mutex);
+
+  return OK;
+}
+
+/****************************************************************************
+ * Name: mm_map_next
+ *
+ * Description:
+ *   Returns the next mapping in the list.
+ *
+ ****************************************************************************/
+
+FAR struct mm_map_entry_s *mm_map_next(
+                           FAR const struct mm_map_entry_s *entry)
+{
+  FAR struct tcb_s *tcb = nxsched_self();
+  FAR struct task_group_s *group = tcb->group;
+  FAR struct mm_map_s *mm = &group->tg_mm_map;
+  FAR struct mm_map_entry_s *map = NULL;
+
+  if (nxmutex_lock(&mm->mm_map_mutex) == OK)
+    {
+      if (entry == NULL)
+        {
+          map = (struct mm_map_entry_s *)sq_peek(&mm->mm_map_sq);
+        }
+      else
+        {
+          map = (struct mm_map_entry_s *)sq_next(((sq_entry_t *)entry));
+        }
+
+      nxmutex_unlock(&mm->mm_map_mutex);
+    }
+
+  return map;

Review Comment:
   > the lock tries to protect the sq modifications. In a multi-threaded 
application separate threads can do mmap/munmap simultaneously.
   > 
   
   Yes, but it's unsafe to return the entry after unlocking. The caller of 
mm_map_find_contains which touch the returned value will suffer the race 
condition.
   
   > But it is right that since mm_map_remove takes a direct reference to map, 
it needs to be locked also above in some cases to protect the queue.
   > 
   > I'll fix the locking
   
   



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@nuttx.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

Reply via email to