Hey,
this is my first post to the -hackers lists, so be merciful ;-)
I created a patch which implements MAP_HUGETLB for sysv shared memory segments
(PGSharedMemoryCreate). It is based on tests of Tom Lane and Andres Freund, I
added error handling, huge page size detection and a GUC variable.
Performance improvements differ from about 1% in the worst case to about 13% in
the best case. Benchmarking results are as follows:
pgbench -i -s 100 test
Patched:
pgbench -n -S -j 64 -c 64 -T 10 -M prepared test
tps avg: 51879.2
Unpatched:
pgbench -n -S -j 64 -c 64 -T 10 -M prepared test
tps avg: 45321.6
tps increase: 6557.6, 12.6%
Patched:
pgbench -n -S -j 64 -c 64 -T 180 -M prepared test (patched)
number of transactions actually processed: 8767510
tps = 48705.159196 (including connections establishing)
tps = 48749.761241 (excluding connections establishing)
Unpatched:
pgbench -n -S -j 64 -c 64 -T 120 -M prepared test (unpatched)
number of transactions actually processed: 8295439
tps = 46083.559187 (including connections establishing)
tps = 46097.763939 (excluding connections establishing)
tps diff: 2652, 5%
create table large (a int, b int);
insert into large (a, b) select s, s + 10 from generate_series(1, 10000000) s;
5 times executed, with \timing on:
SELECT sum(a), sum(b) from large;
Time: 1143.880 ms unpatched
Time: 1125.644 ms patched
about 1% difference
The patch is attached. Any comments?
Greetings,
CK
diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index b4fcbaf..66ed10f 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -1049,6 +1049,37 @@ include 'filename'
</listitem>
</varlistentry>
+ <varlistentry id="guc-huge-tlb-pages" xreflabel="huge_tlb_pages">
+ <term><varname>huge_tlb_pages</varname> (<type>enum</type>)</term>
+ <indexterm>
+ <primary><varname>huge_tlb_pages</> configuration parameter</primary>
+ </indexterm>
+ <listitem>
+ <para>
+ Enables/disables the use of huge TLB pages. Valid values are
+ <literal>on</literal>, <literal>off</literal> and <literal>try</literal>.
+ The default value is <literal>try</literal>.
+ </para>
+
+ <para>
+ With <varname>huge_tlb_pages</varname> set to <literal>on</literal>
+ <symbol>mmap()</symbol> will be called with <symbol>MAP_HUGETLB</symbol>.
+ If the call fails, server startup will fail with a FATAL error.
+ </para>
+
+ <para>
+ With <varname>huge_tlb_pages</varname> set to <literal>off</literal> we
+ will not use <symbol>MAP_HUGETLB</symbol> at all.
+ </para>
+
+ <para>
+ With <varname>huge_tlb_pages</varname> set to <literal>try</literal>
+ we will try to use <symbol>MAP_HUGETLB</symbol> and fall back to
+ <symbol>mmap()</symbol> without <symbol>MAP_HUGETLB</symbol>.
+ </para>
+ </listitem>
+ </varlistentry>
+
<varlistentry id="guc-temp-buffers" xreflabel="temp_buffers">
<term><varname>temp_buffers</varname> (<type>integer</type>)</term>
<indexterm>
diff --git a/src/backend/port/sysv_shmem.c b/src/backend/port/sysv_shmem.c
index df06312..f5d212f 100644
--- a/src/backend/port/sysv_shmem.c
+++ b/src/backend/port/sysv_shmem.c
@@ -27,6 +27,9 @@
#ifdef HAVE_SYS_SHM_H
#include <sys/shm.h>
#endif
+#ifdef MAP_HUGETLB
+#include <dirent.h>
+#endif
#include "miscadmin.h"
#include "storage/ipc.h"
@@ -67,6 +70,12 @@ void *UsedShmemSegAddr = NULL;
static Size AnonymousShmemSize;
static void *AnonymousShmem;
+#ifdef MAP_HUGETLB
+HugeTlbType huge_tlb_pages = HUGE_TLB_TRY;
+#else
+HugeTlbType huge_tlb_pages = HUGE_TLB_OFF;
+#endif
+
static void *InternalIpcMemoryCreate(IpcMemoryKey memKey, Size size);
static void IpcMemoryDetach(int status, Datum shmaddr);
static void IpcMemoryDelete(int status, Datum shmId);
@@ -342,6 +351,140 @@ PGSharedMemoryIsInUse(unsigned long id1, unsigned long id2)
}
+#ifdef MAP_HUGETLB
+#define HUGE_PAGE_INFO_DIR "/sys/kernel/mm/hugepages"
+
+/*
+ * InternalGetFreeHugepagesCount
+ *
+ * Attempt to read the number of available hugepages from
+ * /sys/kernel/mm/hugepages/hugepages-<size>/free_hugepages
+ *
+ * Returns -1 if the file could not be opened, read, or parsed,
+ * 0 if no pages are available, and > 0 if there are free pages.
+ */
+static long
+InternalGetFreeHugepagesCount(const char *name)
+{
+	int			fd;
+	char		buff[1024];
+	int			printed;
+	ssize_t		len;
+	long		result;
+	char	   *ptr;
+
+	/*
+	 * snprintf returns the length the fully-formatted string would have
+	 * needed, so truncation is signalled by any value >= sizeof(buff),
+	 * not just == sizeof(buff).
+	 */
+	printed = snprintf(buff, sizeof(buff), "%s/%s/free_hugepages",
+					   HUGE_PAGE_INFO_DIR, name);
+	if (printed < 0 || printed >= (int) sizeof(buff))
+	{
+		elog(LOG, "Filename %s/%s/free_hugepages is too long!",
+			 HUGE_PAGE_INFO_DIR, name);
+		return -1;
+	}
+
+	/* 0 is a valid file descriptor; only negative values mean failure */
+	fd = open(buff, O_RDONLY);
+	if (fd < 0)
+	{
+		elog(LOG, "open(%s) failed: %s", buff, strerror(errno));
+		return -1;
+	}
+
+	/*
+	 * Keep the length signed so a read() failure (-1) is actually caught;
+	 * storing it in a size_t would turn -1 into a huge positive value and
+	 * then buff[len] = 0 would write far out of bounds.  Read at most
+	 * sizeof(buff) - 1 bytes to leave room for the terminator; if the file
+	 * is longer than that the rest is irrelevant, we only want to know
+	 * whether any hugepages are left.
+	 */
+	len = read(fd, buff, sizeof(buff) - 1);
+	if (len <= 0)
+	{
+		elog(LOG, "Error reading file %s/%s/free_hugepages: %s",
+			 HUGE_PAGE_INFO_DIR, name, strerror(errno));
+		close(fd);
+		return -1;
+	}
+	buff[len] = 0;
+
+	close(fd);
+
+	/*
+	 * strtol never sets endptr to NULL; the "no digits consumed" case is
+	 * signalled by endptr pointing back at the start of the string.
+	 */
+	result = strtol(buff, &ptr, 10);
+	if (ptr == buff)
+	{
+		elog(LOG, "Could not convert %s/%s/free_hugepages content to number!",
+			 HUGE_PAGE_INFO_DIR, name);
+		return -1;
+	}
+
+	return result;
+}
+
+/*
+ * InternalGetHugepageSize
+ *
+ * Attempt to find a usable hugepage size by scanning the per-size
+ * subdirectories of /sys/kernel/mm/hugepages.
+ *
+ * Returns -1 if the directory could not be opened or no hugepage size
+ * with free pages was found; otherwise returns the biggest hugepage
+ * size (in bytes) that still has free pages.
+ *
+ * NOTE(review): the old comment described this as static, but the
+ * definition has external linkage — consider making it static if no
+ * other translation unit references it.
+ */
+long
+InternalGetHugepageSize(void)
+{
+	struct dirent *ent;
+	DIR		   *dir = opendir(HUGE_PAGE_INFO_DIR);
+	long		biggest_size = -1;
+	long		size;
+	char	   *ptr;
+
+	if (dir == NULL)
+	{
+		elog(LOG, "Error opening directory %s: %s",
+			 HUGE_PAGE_INFO_DIR, strerror(errno));
+		return -1;
+	}
+
+	/*
+	 * Linux supports multiple hugepage sizes if the hardware supports it;
+	 * for each possible size there is a directory in
+	 * /sys/kernel/mm/hugepages named "hugepages-" followed by the page
+	 * size, e.g. on x86_64: hugepages-2048kB
+	 */
+	while ((ent = readdir(dir)) != NULL)
+	{
+		if (strncmp(ent->d_name, "hugepages-", 10) != 0)
+			continue;
+
+		/*
+		 * strtol never sets endptr to NULL; "no digits" is signalled by
+		 * endptr pointing back at the start of the parsed text.
+		 */
+		size = strtol(ent->d_name + 10, &ptr, 10);
+		if (ptr == ent->d_name + 10)
+			continue;
+
+		/* sysfs reports hugepage sizes in kB; convert to bytes */
+		if (strcmp(ptr, "kB") == 0)
+			size *= 1024;
+
+		/* prefer the biggest page size that still has free pages */
+		if (biggest_size < size &&
+			InternalGetFreeHugepagesCount(ent->d_name) > 0)
+			biggest_size = size;
+	}
+
+	closedir(dir);
+
+	if (biggest_size == -1)
+		elog(LOG, "Could not find a valid hugepage size");
+
+	return biggest_size;
+}
+#endif
+
/*
* PGSharedMemoryCreate
*
@@ -391,7 +534,14 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port)
*/
#ifndef EXEC_BACKEND
{
+#ifdef MAP_HUGETLB
+ long pagesize = InternalGetHugepageSize();
+
+ if (pagesize <= 0)
+ pagesize = sysconf(_SC_PAGE_SIZE);
+#else
long pagesize = sysconf(_SC_PAGE_SIZE);
+#endif
/*
* Ensure request size is a multiple of pagesize.
@@ -410,8 +560,24 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port)
* to be false, we might need to add a run-time test here and do this
* only if the running kernel supports it.
*/
+
+#ifdef MAP_HUGETLB
+ if (huge_tlb_pages == HUGE_TLB_ON || huge_tlb_pages == HUGE_TLB_TRY)
+ {
+ AnonymousShmem = mmap(NULL, size, PROT_READ|PROT_WRITE,
+ PG_MMAP_FLAGS|MAP_HUGETLB, -1, 0);
+
+ elog(DEBUG3, "mmap() tried with MAP_HUGEPAGE: %p", AnonymousShmem);
+
+ if (AnonymousShmem == MAP_FAILED && huge_tlb_pages == HUGE_TLB_TRY)
+ AnonymousShmem = mmap(NULL, size, PROT_READ|PROT_WRITE, PG_MMAP_FLAGS,
+ -1, 0);
+ }
+#else
AnonymousShmem = mmap(NULL, size, PROT_READ|PROT_WRITE, PG_MMAP_FLAGS,
-1, 0);
+#endif
+
if (AnonymousShmem == MAP_FAILED)
ereport(FATAL,
(errmsg("could not map anonymous shared memory: %m"),
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 745e7be..0ed0791 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -22,6 +22,7 @@
#include <limits.h>
#include <unistd.h>
#include <sys/stat.h>
+#include <sys/mman.h>
#ifdef HAVE_SYSLOG
#include <syslog.h>
#endif
@@ -389,6 +390,22 @@ static const struct config_enum_entry synchronous_commit_options[] = {
};
/*
+ * huge_tlb_pages may be on|off|try, where try is the default
+ * on: try to mmap() with MAP_HUGETLB and fail when mmap() fails
+ * off: do not try to mmap() with MAP_HUGETLB
+ * try: try to mmap() with MAP_HUGETLB and fallback to mmap()
+ * w/o MAP_HUGETLB
+ */
+static const struct config_enum_entry huge_tlb_options[] = {
+#ifdef MAP_HUGETLB
+ {"on", HUGE_TLB_ON, false},
+ {"try", HUGE_TLB_TRY, false},
+#endif
+ {"off", HUGE_TLB_OFF, false},
+ {NULL, 0, false}
+};
+
+/*
* Options for enum values stored in other modules
*/
extern const struct config_enum_entry wal_level_options[];
@@ -3301,6 +3318,17 @@ static struct config_enum ConfigureNamesEnum[] =
NULL, NULL, NULL
},
+#ifdef MAP_HUGETLB
+ {
+ {"huge_tlb_pages", PGC_SUSET, RESOURCES_MEM,
+ gettext_noop("Enable/disable the use of the hugepages feature"),
+ NULL
+ },
+ &huge_tlb_pages,
+ HUGE_TLB_TRY, huge_tlb_options,
+ NULL, NULL, NULL
+ },
+#endif
/* End-of-list marker */
{
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index eeb9b82..e5bafec 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -113,6 +113,7 @@
#shared_buffers = 32MB # min 128kB
# (change requires restart)
+#huge_tlb_pages = try # try to map memory with MAP_HUGETLB (on, off, try)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
diff --git a/src/include/port/linux.h b/src/include/port/linux.h
index bcaa42d..07743f4 100644
--- a/src/include/port/linux.h
+++ b/src/include/port/linux.h
@@ -20,3 +20,18 @@
* filesystems, because those don't support O_DIRECT.
*/
#define PLATFORM_DEFAULT_SYNC_METHOD SYNC_METHOD_FDATASYNC
+
+/*
+ * Possible values for huge_tlb_pages; default is HUGE_TLB_TRY
+ */
+typedef enum
+{
+ HUGE_TLB_OFF,
+ HUGE_TLB_ON,
+ HUGE_TLB_TRY
+} HugeTlbType;
+
+/*
+ * configure the use of huge TLB pages
+ */
+extern HugeTlbType huge_tlb_pages;
--
Sent via pgsql-hackers mailing list ([email protected])
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers