Introduce a linker section and a marker for statically allocated
write-rare data. The marker is named "__wr_after_init".
As the name implies, once the init phase has completed, this section
can be modified only by invoking write-rare functions.
The section must occupy a whole number of pages.
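
For illustration, a statically allocated object could be tagged as in
the minimal sketch below. The accessor name wr_assign() is only a
placeholder for whatever helper the write-rare API provides; it is not
introduced by this patch:

  static int max_widgets __wr_after_init = 16;

  /*
   * After mark_rodata_ro() a plain assignment such as
   * "max_widgets = 32;" would fault; the object has to be updated
   * through a write-rare helper instead, e.g.:
   */
  wr_assign(max_widgets, 32);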

Signed-off-by: Igor Stoppa <igor.sto...@huawei.com>

CC: Andy Lutomirski <l...@amacapital.net>
CC: Nadav Amit <nadav.a...@gmail.com>
CC: Matthew Wilcox <wi...@infradead.org>
CC: Peter Zijlstra <pet...@infradead.org>
CC: Kees Cook <keesc...@chromium.org>
CC: Dave Hansen <dave.han...@linux.intel.com>
CC: linux-integr...@vger.kernel.org
CC: kernel-harden...@lists.openwall.com
CC: linux...@kvack.org
CC: linux-kernel@vger.kernel.org
---
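Not part of the patch itself: the sketch below only illustrates how the
boundary symbols exported by the new linker section could be consumed
once the init phase is over. The function name mark_wr_after_init_ro()
is made up, and set_memory_ro() is used merely as an example of a
page-granular protection primitive, whose availability depends on the
architecture.

  extern char __start_wr_after_init[], __end_wr_after_init[];

  /* Write-protect the whole write-rare section after init. */
  static void mark_wr_after_init_ro(void)
  {
          unsigned long start = (unsigned long)&__start_wr_after_init;
          unsigned long end = (unsigned long)&__end_wr_after_init;

          /*
           * set_memory_ro() works on whole pages, which is why the
           * linker script pads the section to page boundaries.
           */
          set_memory_ro(start, (end - start) >> PAGE_SHIFT);
  }
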
 include/asm-generic/vmlinux.lds.h | 20 ++++++++++++++++++++
 include/linux/cache.h             | 17 +++++++++++++++++
 2 files changed, 37 insertions(+)

diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 3d7a6a9c2370..b711dbe6999f 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -311,6 +311,25 @@
        KEEP(*(__jump_table))                                           \
        __stop___jump_table = .;
 
+/*
+ * Allow architectures to handle wr_after_init data on their
+ * own by defining an empty WR_AFTER_INIT_DATA.
+ * However, pages containing write-rare data must not hold anything
+ * else, to avoid accidentally unprotecting something that is meant
+ * to stay read-only at all times, or write-protecting something
+ * that is meant to stay writable at all times.
+ */
+#ifndef WR_AFTER_INIT_DATA
+#define WR_AFTER_INIT_DATA(align)                                      \
+       . = ALIGN(PAGE_SIZE);                                           \
+       __start_wr_after_init = .;                                      \
+       . = ALIGN(align);                                               \
+       *(.data..wr_after_init)                                         \
+       . = ALIGN(PAGE_SIZE);                                           \
+       __end_wr_after_init = .;                                        \
+       . = ALIGN(align);
+#endif
+
 /*
  * Allow architectures to handle ro_after_init data on their
  * own by defining an empty RO_AFTER_INIT_DATA.
@@ -332,6 +351,7 @@
                __start_rodata = .;                                     \
                *(.rodata) *(.rodata.*)                                 \
                RO_AFTER_INIT_DATA      /* Read only after init */      \
+               WR_AFTER_INIT_DATA(align) /* wr after init */   \
                KEEP(*(__vermagic))     /* Kernel version magic */      \
                . = ALIGN(8);                                           \
                __start___tracepoints_ptrs = .;                         \
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 750621e41d1c..9a7e7134b887 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -31,6 +31,23 @@
 #define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
 #endif
 
+/*
+ * __wr_after_init marks objects that cannot be modified directly
+ * after init (i.e. after mark_rodata_ro() has been called).
+ * Such objects become effectively read-only as far as direct writes,
+ * like plain variable assignments, are concerned.
+ * However, they can still be altered through a dedicated function.
+ * The marker is intended for objects that are occasionally modified
+ * after init, but so seldom that the extra cost of the indirect
+ * modification is either negligible or worth paying for the sake of
+ * the protection gained.
+ */
+#ifndef __wr_after_init
+#define __wr_after_init \
+               __attribute__((__section__(".data..wr_after_init")))
+#endif
+
+
 #ifndef ____cacheline_aligned
 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
 #endif
-- 
2.19.1
