[
https://issues.apache.org/jira/browse/ARROW-186?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16214534#comment-16214534
]
ASF GitHub Bot commented on ARROW-186:
--
wesm closed pull request #98: ARROW-186 - Make sure alignment and memory
padding conform to spec
URL: https://github.com/apache/arrow/pull/98
This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:
As this is a foreign pull request (from a fork), the diff is supplied
below (as it won't show otherwise due to GitHub magic):
diff --git
a/java/memory/src/main/java/io/netty/buffer/PooledByteBufAllocatorL.java
b/java/memory/src/main/java/io/netty/buffer/PooledByteBufAllocatorL.java
index b6de2e3aa..b22edcfbc 100644
--- a/java/memory/src/main/java/io/netty/buffer/PooledByteBufAllocatorL.java
+++ b/java/memory/src/main/java/io/netty/buffer/PooledByteBufAllocatorL.java
@@ -18,7 +18,9 @@
package io.netty.buffer;
+import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.StringUtil;
+import io.netty.util.internal.SystemPropertyUtil;
import org.apache.arrow.memory.OutOfMemoryException;
@@ -37,7 +39,109 @@
private static final org.slf4j.Logger memoryLogger =
org.slf4j.LoggerFactory.getLogger("arrow" +
".allocator");
+ private static final int DEFAULT_NUM_HEAP_ARENA;
+ private static final int DEFAULT_NUM_DIRECT_ARENA;
+
+ private static final int DEFAULT_PAGE_SIZE;
+ private static final int DEFAULT_MAX_ORDER; // 8192 << 11 = 16 MiB per chunk
+ private static final int DEFAULT_TINY_CACHE_SIZE;
+ private static final int DEFAULT_SMALL_CACHE_SIZE;
+ private static final int DEFAULT_NORMAL_CACHE_SIZE;
+ private static final int DEFAULT_MAX_CACHED_BUFFER_CAPACITY;
+ private static final int DEFAULT_CACHE_TRIM_INTERVAL;
+ private static final boolean DEFAULT_USE_CACHE_FOR_ALL_THREADS;
+ private static final int DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT = 64;
+
+ private static final int MIN_PAGE_SIZE = 4096;
+ private static final int MAX_CHUNK_SIZE = (int) (((long) Integer.MAX_VALUE +
1) / 2);
+
+ static {
+ int defaultPageSize =
SystemPropertyUtil.getInt("io.netty.allocator.pageSize", 8192);
+ Throwable pageSizeFallbackCause = null;
+ try {
+ validateAndCalculatePageShifts(defaultPageSize);
+ } catch (Throwable t) {
+ pageSizeFallbackCause = t;
+ defaultPageSize = 8192;
+ }
+ DEFAULT_PAGE_SIZE = defaultPageSize;
+
+ int defaultMaxOrder =
SystemPropertyUtil.getInt("io.netty.allocator.maxOrder", 11);
+ Throwable maxOrderFallbackCause = null;
+ try {
+ validateAndCalculateChunkSize(DEFAULT_PAGE_SIZE, defaultMaxOrder);
+ } catch (Throwable t) {
+ maxOrderFallbackCause = t;
+ defaultMaxOrder = 11;
+ }
+ DEFAULT_MAX_ORDER = defaultMaxOrder;
+
+ // Determine reasonable default for nHeapArena and nDirectArena.
+ // Assuming each arena has 3 chunks, the pool should not consume more
than 50% of max memory.
+ final Runtime runtime = Runtime.getRuntime();
+
+ // Use 2 * cores by default to reduce condition as we use 2 * cores for
the number of EventLoops
+ // in NIO and EPOLL as well. If we choose a smaller number we will run
into hotspots as allocation and
+ // deallocation needs to be synchronized on the PoolArena.
+ // See https://github.com/netty/netty/issues/3888
+ final int defaultMinNumArena = runtime.availableProcessors() * 2;
+ final int defaultChunkSize = DEFAULT_PAGE_SIZE << DEFAULT_MAX_ORDER;
+ DEFAULT_NUM_HEAP_ARENA = Math.max(0,
+ SystemPropertyUtil.getInt(
+ "io.netty.allocator.numHeapArenas",
+ (int) Math.min(
+ defaultMinNumArena,
+ runtime.maxMemory() / defaultChunkSize / 2 /
3)));
+ DEFAULT_NUM_DIRECT_ARENA = Math.max(0,
+ SystemPropertyUtil.getInt(
+ "io.netty.allocator.numDirectArenas",
+ (int) Math.min(
+ defaultMinNumArena,
+ PlatformDependent.maxDirectMemory() /
defaultChunkSize / 2 / 3)));
+
+ // cache sizes
+ DEFAULT_TINY_CACHE_SIZE =
SystemPropertyUtil.getInt("io.netty.allocator.tinyCacheSize", 512);
+ DEFAULT_SMALL_CACHE_SIZE =
SystemPropertyUtil.getInt("io.netty.allocator.smallCacheSize", 256);
+ DEFAULT_NORMAL_CACHE_SIZE =
SystemPropertyUtil.getInt("io.netty.allocator.normalCacheSize", 64);
+
+ // 32 kb is the default maximum capacity of the cached buffer. Similar
to what is explained in
+ // 'Scalable memory allocation using jemalloc'
+ DEFAULT_MAX_CACHED_BUFFER_CAPACITY = SystemPropertyUtil.getInt(
+ "io.netty.allocator.maxCachedBufferCapacity", 32 * 1024);
+
+ // the