[PATCH 1/6] ACPI/OSL: Split memory operation region implementations to a separate file.

2014-10-22 Thread Lv Zheng
This patch moves SystemMemory operation region implementations to a
separate file before doing cleanups. No functional changes.

Signed-off-by: Lv Zheng 
Tested-by: Fei Yang 
---
 drivers/acpi/Makefile |2 +-
 drivers/acpi/mem.c|  395 +
 drivers/acpi/osl.c|  381 ---
 3 files changed, 396 insertions(+), 382 deletions(-)
 create mode 100644 drivers/acpi/mem.c

diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 505d4d7..802b887 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -18,7 +18,7 @@ obj-y += acpi.o \
acpica/
 
 # All the builtin files are in the "acpi." module_param namespace.
-acpi-y += osl.o utils.o reboot.o
+acpi-y += mem.o osl.o utils.o reboot.o
 acpi-y += nvs.o
 
 # Power management related files
diff --git a/drivers/acpi/mem.c b/drivers/acpi/mem.c
new file mode 100644
index 000..722241e
--- /dev/null
+++ b/drivers/acpi/mem.c
@@ -0,0 +1,395 @@
+/*
+ * ACPI system memory implementation
+ *
+ * Copyright (C) 2014, Intel Corporation
+ *   Author: Lv Zheng 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "internal.h"
+
+
+/*
+ * This list of permanent mappings is for memory that may be accessed from
+ * interrupt context, where we can't do the ioremap().
+ */
+struct acpi_ioremap {
+   struct list_head list;
+   void __iomem *virt;
+   acpi_physical_address phys;
+   acpi_size size;
+   unsigned long refcount;
+};
+
+static LIST_HEAD(acpi_ioremaps);
+static DEFINE_MUTEX(acpi_ioremap_lock);
+
+static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
+{
+   if (!--map->refcount)
+   list_del_rcu(&map->list);
+}
+
+/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
+static struct acpi_ioremap *
+acpi_map_lookup(acpi_physical_address phys, acpi_size size)
+{
+   struct acpi_ioremap *map;
+
+   list_for_each_entry_rcu(map, &acpi_ioremaps, list)
+   if (map->phys <= phys &&
+   phys + size <= map->phys + map->size)
+   return map;
+
+   return NULL;
+}
+
+/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
+static struct acpi_ioremap *
+acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
+{
+   struct acpi_ioremap *map;
+
+   list_for_each_entry_rcu(map, &acpi_ioremaps, list)
+   if (map->virt <= virt &&
+   virt + size <= map->virt + map->size)
+   return map;
+
+   return NULL;
+}
+
+/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
+static void __iomem *
+acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
+{
+   struct acpi_ioremap *map;
+
+   map = acpi_map_lookup(phys, size);
+   if (map)
+   return map->virt + (phys - map->phys);
+
+   return NULL;
+}
+
+#ifndef CONFIG_IA64
+#define should_use_kmap(pfn)   page_is_ram(pfn)
+#else
+/* ioremap will take care of cache attributes */
+#define should_use_kmap(pfn)   0
+#endif
+
+static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long 
pg_sz)
+{
+   unsigned long pfn;
+
+   pfn = pg_off >> PAGE_SHIFT;
+   if (should_use_kmap(pfn)) {
+   if (pg_sz > PAGE_SIZE)
+   return NULL;
+   return (void __iomem __force *)kmap(pfn_to_page(pfn));
+   }
+   return acpi_os_ioremap(pg_off, pg_sz);
+}
+
+static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
+{
+   unsigned long pfn;
+
+   pfn = pg_off >> PAGE_SHIFT;
+   if (should_use_kmap(pfn))
+   kunmap(pfn_to_page(pfn));
+   else
+   iounmap(vaddr);
+}
+
+static void acpi_os_map_cleanup(struct acpi_ioremap *map)
+{
+   if (!map->refcount) {
+   synchronize_rcu();
+   acpi_unmap(map->phys, map->virt);
+   kfree(map);
+   }
+}
+
+void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
+{
+   struct acpi_ioremap *map;
+   void __iomem *virt = NULL;
+
+   mutex_lock(&acpi_ioremap_lock);
+   map = acpi_map_lookup(phys, size);
+   if (map) {
+   virt = map->virt + (phys - map->phys);
+   map->refcount++;
+   }
+   mutex_unlock(&acpi_ioremap_lock);
+
+   return virt;
+}
+EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
+
+void __iomem *__init_refok
+acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
+{
+   struct acpi_ioremap *map;
+   void __iomem *virt;
+   acpi_physical_address pg_off;
+   acpi_size pg_sz;
+
+   if (phys > 

[PATCH 1/6] ACPI/OSL: Split memory operation region implementations to a separate file.

2014-10-22 Thread Lv Zheng
This patch moves SystemMemory operation region implementations to a
separate file before doing cleanups. No functional changes.

Signed-off-by: Lv Zheng lv.zh...@intel.com
Tested-by: Fei Yang fei.y...@intel.com
---
 drivers/acpi/Makefile |2 +-
 drivers/acpi/mem.c|  395 +
 drivers/acpi/osl.c|  381 ---
 3 files changed, 396 insertions(+), 382 deletions(-)
 create mode 100644 drivers/acpi/mem.c

diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 505d4d7..802b887 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -18,7 +18,7 @@ obj-y += acpi.o \
acpica/
 
 # All the builtin files are in the "acpi." module_param namespace.
-acpi-y += osl.o utils.o reboot.o
+acpi-y += mem.o osl.o utils.o reboot.o
 acpi-y += nvs.o
 
 # Power management related files
diff --git a/drivers/acpi/mem.c b/drivers/acpi/mem.c
new file mode 100644
index 000..722241e
--- /dev/null
+++ b/drivers/acpi/mem.c
@@ -0,0 +1,395 @@
+/*
+ * ACPI system memory implementation
+ *
+ * Copyright (C) 2014, Intel Corporation
+ *   Author: Lv Zheng lv.zh...@intel.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+
+#include "internal.h"
+
+
+/*
+ * This list of permanent mappings is for memory that may be accessed from
+ * interrupt context, where we can't do the ioremap().
+ */
+struct acpi_ioremap {
+   struct list_head list;
+   void __iomem *virt;
+   acpi_physical_address phys;
+   acpi_size size;
+   unsigned long refcount;
+};
+
+static LIST_HEAD(acpi_ioremaps);
+static DEFINE_MUTEX(acpi_ioremap_lock);
+
+static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
+{
+   if (!--map->refcount)
+   list_del_rcu(&map->list);
+}
+
+/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
+static struct acpi_ioremap *
+acpi_map_lookup(acpi_physical_address phys, acpi_size size)
+{
+   struct acpi_ioremap *map;
+
+   list_for_each_entry_rcu(map, &acpi_ioremaps, list)
+   if (map->phys <= phys &&
+   phys + size <= map->phys + map->size)
+   return map;
+
+   return NULL;
+}
+
+/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
+static struct acpi_ioremap *
+acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
+{
+   struct acpi_ioremap *map;
+
+   list_for_each_entry_rcu(map, &acpi_ioremaps, list)
+   if (map->virt <= virt &&
+   virt + size <= map->virt + map->size)
+   return map;
+
+   return NULL;
+}
+
+/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
+static void __iomem *
+acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
+{
+   struct acpi_ioremap *map;
+
+   map = acpi_map_lookup(phys, size);
+   if (map)
+   return map->virt + (phys - map->phys);
+
+   return NULL;
+}
+
+#ifndef CONFIG_IA64
+#define should_use_kmap(pfn)   page_is_ram(pfn)
+#else
+/* ioremap will take care of cache attributes */
+#define should_use_kmap(pfn)   0
+#endif
+
+static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long 
pg_sz)
+{
+   unsigned long pfn;
+
+   pfn = pg_off >> PAGE_SHIFT;
+   if (should_use_kmap(pfn)) {
+   if (pg_sz > PAGE_SIZE)
+   return NULL;
+   return (void __iomem __force *)kmap(pfn_to_page(pfn));
+   }
+   return acpi_os_ioremap(pg_off, pg_sz);
+}
+
+static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
+{
+   unsigned long pfn;
+
+   pfn = pg_off >> PAGE_SHIFT;
+   if (should_use_kmap(pfn))
+   kunmap(pfn_to_page(pfn));
+   else
+   iounmap(vaddr);
+}
+
+static void acpi_os_map_cleanup(struct acpi_ioremap *map)
+{
+   if (!map->refcount) {
+   synchronize_rcu();
+   acpi_unmap(map->phys, map->virt);
+   kfree(map);
+   }
+}
+
+void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
+{
+   struct acpi_ioremap *map;
+   void __iomem *virt = NULL;
+
+   mutex_lock(&acpi_ioremap_lock);
+   map = acpi_map_lookup(phys, size);
+   if (map) {
+   virt = map->virt + (phys - map->phys);
+   map->refcount++;
+   }
+   mutex_unlock(&acpi_ioremap_lock);
+
+   return virt;
+}
+EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
+
+void __iomem *__init_refok
+acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
+{
+   struct acpi_ioremap *map;
+   void