I moved the code to 3 Linux boxes and received the out-of-memory exception there as well.

RAM: 8 GB

The command line is:

    java -Xms3096m -Xmx3096m -XX:+UseG1GC -XX:+DisableExplicitGC -jar datagrid-1.0-SNAPSHOT.jar
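For reference, I am considering the same launch with the standard heap-dump-on-OOM flags appended, so the failing heap can be inspected offline (the dump path below is just a placeholder, not part of my current setup):

    java -Xms3096m -Xmx3096m -XX:+UseG1GC -XX:+DisableExplicitGC \
         -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp/datagrid.hprof \
         -jar datagrid-1.0-SNAPSHOT.jar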
The exception is:

Caused by: java.lang.OutOfMemoryError: Java heap space
        at java.nio.HeapCharBuffer.<init>(HeapCharBuffer.java:57) ~[na:1.8.0_121]
        at java.nio.CharBuffer.allocate(CharBuffer.java:335) ~[na:1.8.0_121]
        at java.nio.charset.CharsetDecoder.decode(CharsetDecoder.java:795) ~[na:1.8.0_121]
        at java.nio.charset.Charset.decode(Charset.java:807) ~[na:1.8.0_121]
        at com.mysql.jdbc.StringUtils.toString(StringUtils.java:2253) ~[mysql-connector-java-5.1.43.jar!/:5.1.43]
        at com.mysql.jdbc.ResultSetRow.getString(ResultSetRow.java:701) ~[mysql-connector-java-5.1.43.jar!/:5.1.43]
        at com.mysql.jdbc.BufferRow.getString(BufferRow.java:527) ~[mysql-connector-java-5.1.43.jar!/:5.1.43]
        at com.mysql.jdbc.ResultSetImpl.getStringInternal(ResultSetImpl.java:5252) ~[mysql-connector-java-5.1.43.jar!/:5.1.43]
        at com.mysql.jdbc.ResultSetImpl.getString(ResultSetImpl.java:5135) ~[mysql-connector-java-5.1.43.jar!/:5.1.43]
        at org.springframework.jdbc.support.JdbcUtils.getResultSetValue(JdbcUtils.java:151) ~[spring-jdbc-4.3.10.RELEASE.jar!/:4.3.10.RELEASE]
        at org.springframework.jdbc.core.SingleColumnRowMapper.getColumnValue(SingleColumnRowMapper.java:130) ~[spring-jdbc-4.3.10.RELEASE.jar!/:4.3.10.RELEASE]
        at org.springframework.jdbc.core.SingleColumnRowMapper.mapRow(SingleColumnRowMapper.java:96) ~[spring-jdbc-4.3.10.RELEASE.jar!/:4.3.10.RELEASE]
        at org.springframework.jdbc.core.RowMapperResultSetExtractor.extractData(RowMapperResultSetExtractor.java:93) ~[spring-jdbc-4.3.10.RELEASE.jar!/:4.3.10.RELEASE]
        at org.springframework.jdbc.core.RowMapperResultSetExtractor.extractData(RowMapperResultSetExtractor.java:60) ~[spring-jdbc-4.3.10.RELEASE.jar!/:4.3.10.RELEASE]
        at org.springframework.jdbc.core.JdbcTemplate$1.doInPreparedStatement(JdbcTemplate.java:697) ~[spring-jdbc-4.3.10.RELEASE.jar!/:4.3.10.RELEASE]
        at org.springframework.jdbc.core.JdbcTemplate.execute(JdbcTemplate.java:633) ~[spring-jdbc-4.3.10.RELEASE.jar!/:4.3.10.RELEASE]
        at org.springframework.jdbc.core.JdbcTemplate.query(JdbcTemplate.java:684) ~[spring-jdbc-4.3.10.RELEASE.jar!/:4.3.10.RELEASE]
        at org.springframework.jdbc.core.JdbcTemplate.query(JdbcTemplate.java:716) ~[spring-jdbc-4.3.10.RELEASE.jar!/:4.3.10.RELEASE]
        at org.springframework.jdbc.core.JdbcTemplate.query(JdbcTemplate.java:726) ~[spring-jdbc-4.3.10.RELEASE.jar!/:4.3.10.RELEASE]
        at org.springframework.jdbc.core.JdbcTemplate.queryForObject(JdbcTemplate.java:794) ~[spring-jdbc-4.3.10.RELEASE.jar!/:4.3.10.RELEASE]
        at org.springframework.jdbc.core.JdbcTemplate.queryForObject(JdbcTemplate.java:813) ~[spring-jdbc-4.3.10.RELEASE.jar!/:4.3.10.RELEASE]
        at com.test.some.datagrid.db.DataLoader.load(DataLoader.java:35) ~[classes!/:1.0-SNAPSHOT]
        at com.test.some.datagrid.config.GridConfiguration.lambda$initGrid$0(GridConfiguration.java:112) ~[classes!/:1.0-SNAPSHOT]
        at com.test.some.datagrid.config.GridConfiguration$$Lambda$11/355518265.accept(Unknown Source) ~[na:na]
        at java.util.ArrayList.forEach(ArrayList.java:1249) ~[na:1.8.0_121]
        at com.test.some.datagrid.config.GridConfiguration.initGrid(GridConfiguration.java:112) ~[classes!/:1.0-SNAPSHOT]
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[na:1.8.0_121]
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[na:1.8.0_121]
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[na:1.8.0_121]
        at java.lang.reflect.Method.invoke(Method.java:498) ~[na:1.8.0_121]
        at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleElement.invoke(InitDestroyAnnotationBeanPostProcessor.java:366) ~[spring-beans-4.3.10.RELEASE.jar!/:4.3.10.RELEASE]
        at org.springframework.beans.factory.annotation.InitDestroyAnnotationBeanPostProcessor$LifecycleMetadata.invokeInitMethods(InitDestroyAnnotationBeanPostProcessor.java:311) ~[spring-beans-4.3.10.RELEASE.jar!/:4.3.10.RELEASE]
Full configuration class:

@Configuration
public class GridConfiguration {

    @Autowired
    private DataSource dataSource;

    @Autowired
    private DataLoader dataLoader;

    @Value("${grid.cache.warmup.startup}")
    private boolean warmupGrid;

    private static final String DEFAULT_MEMORY_POLICY = "default_memory_policy";
    private static final String CACHE_NAME = "docketCache";
    private static final String GRID_INSTANCE_NAME = "grid";

    // Disable backups because the cluster is HA anyway: even if one node fails, the
    // rebalancing feature should distribute the fallen node's data to the other nodes.
    // Memory space is saved by keeping no backups.
    private static final int NUM_OF_BACKUPS = 0;

    private static final long STARTUP_OFFHEAP_MEMORY_SIZE = 314572800L;
    private static final long METRICS_PRINT_FREQUENCY = 60000; // 1 minute

    @Bean
    public Ignite igniteInstance() {
        // Database-based discovery of other nodes. Prevents IPs from being hardcoded in each instance.
        TcpDiscoverySpi spi = new TcpDiscoverySpi()
                .setIpFinder(new TcpDiscoveryJdbcIpFinder()
                        .setDataSource(dataSource)
                        .setShared(true));

        CacheConfiguration<String, String> cacheConfiguration = new CacheConfiguration<>();
        cacheConfiguration.setName(CACHE_NAME)
                .setAtomicityMode(CacheAtomicityMode.ATOMIC)
                .setBackups(NUM_OF_BACKUPS)
                .setCacheMode(CacheMode.PARTITIONED)
                // https://apacheignite.readme.io/docs/performance-tips
                // The JCache standard requires cache providers to support store-by-value semantics,
                // which means that when you read a value from the cache you don't get the reference
                // to the object that is actually stored, but rather a copy of that object.
                // Ignite behaves this way by default, but it's possible to override this
                // behavior via the CacheConfiguration.copyOnRead configuration property:
                .setCopyOnRead(false)
                .setMemoryPolicyName(DEFAULT_MEMORY_POLICY)
                .setStatisticsEnabled(true)
                .setWriteBehindEnabled(true)
                .setWriteThrough(true)
                .setReadThrough(true)
                .setCacheStoreFactory(FactoryBuilder.factoryOf(dataLoader));

        MemoryPolicyConfiguration memoryPolicyConfiguration = new MemoryPolicyConfiguration()
                .setName(DEFAULT_MEMORY_POLICY)
                .setInitialSize(STARTUP_OFFHEAP_MEMORY_SIZE)
                .setPageEvictionMode(DataPageEvictionMode.RANDOM_2_LRU);

        MemoryConfiguration memCfg = new MemoryConfiguration()
                .setDefaultMemoryPolicyName(DEFAULT_MEMORY_POLICY)
                .setMemoryPolicies(memoryPolicyConfiguration);

        IgniteConfiguration cfg = new IgniteConfiguration();
        cfg.setIgniteInstanceName(GRID_INSTANCE_NAME)
                .setDiscoverySpi(spi)
                .setCacheConfiguration(cacheConfiguration)
                .setMemoryConfiguration(memCfg)
                .setMetricsLogFrequency(METRICS_PRINT_FREQUENCY);

        return Ignition.start(cfg);
    }

    @PostConstruct
    public void initGrid() {
        if (warmupGrid) {
            try (IgniteDataStreamer<String, String> streamer = igniteInstance().dataStreamer(CACHE_NAME)) {
                List<String> gridKeys = dataLoader.getAllKeys();
                gridKeys.forEach(e -> streamer.addData(e, dataLoader.load(e)));
            }
        }
    }
}
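One thing I am considering trying on these 8 GB boxes is capping the memory policy explicitly instead of relying on the default maximum. A minimal sketch, assuming Ignite 2.1+ where MemoryPolicyConfiguration.setMaxSize is available (the 1 GB figure is only illustrative, not something from the configuration above):

    // Sketch only: same policy as above, but with an explicit upper bound on off-heap usage.
    MemoryPolicyConfiguration boundedPolicy = new MemoryPolicyConfiguration()
            .setName(DEFAULT_MEMORY_POLICY)
            .setInitialSize(STARTUP_OFFHEAP_MEMORY_SIZE)
            .setMaxSize(1024L * 1024L * 1024L)  // illustrative 1 GB cap, leaving room for the 3 GB heap
            .setPageEvictionMode(DataPageEvictionMode.RANDOM_2_LRU);

This only bounds the off-heap data region; the OutOfMemoryError above is on the Java heap, so I do not expect it to be the whole fix, but it would keep total memory usage on the box predictable.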
Data loader class:
@Component
public class DataLoader extends CacheStoreAdapter<String, String> implements Serializable {

    @Autowired
    private transient JdbcTemplate jdbcTemplate;

    @Value("${load.one.query}")
    private String loadOneQuery;

    @Value("${get.all.keys.query}")
    private String getAllKeysQuery;

    @Value("${store.one.query}")
    private String storeOneQuery;

    @Override
    public String load(String key) {
        log.info("Loading key {}", key);
        try {
            return jdbcTemplate.queryForObject(loadOneQuery, new Object[]{key}, String.class);
        } catch (EmptyResultDataAccessException e) {
            // It is normal to not find data in the grid
            log.info("No grid data found for key {}", key);
        }
        return null;
    }

    @Override
    public void delete(Object key) {
        // coming soon...
    }

    @Override
    public void write(Cache.Entry<? extends String, ? extends String> entry) {
        log.info("Writing key {}", entry.getKey());
        jdbcTemplate.update(storeOneQuery, entry.getKey(), entry.getValue());
    }

    public List<String> getAllKeys() {
        log.info("Getting all keys to load");
        return jdbcTemplate.queryForList(getAllKeysQuery, String.class);
    }
}
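As a possible change to the warm-up itself, I am also sketching a batched load that pulls all key/value pairs in one streaming query instead of issuing one queryForObject call per key, since the trace above dies inside that per-key path. The load.all.query property and the loadAllInto method below are hypothetical and only illustrate the idea; Integer.MIN_VALUE is the MySQL Connector/J hint for row-by-row streaming, and as far as I can tell Spring JDBC 4.3 passes negative fetch sizes through to the driver.

    // Hypothetical addition to DataLoader (not in the code above): one streaming SELECT of
    // (key, value) pairs fed straight into the IgniteDataStreamer.
    @Value("${load.all.query}")  // hypothetical property: a SELECT returning key and value columns
    private String loadAllQuery;

    public void loadAllInto(IgniteDataStreamer<String, String> streamer) {
        // MySQL Connector/J streams rows only when the fetch size is Integer.MIN_VALUE.
        jdbcTemplate.setFetchSize(Integer.MIN_VALUE);
        jdbcTemplate.query(loadAllQuery, (RowCallbackHandler) rs ->
                streamer.addData(rs.getString(1), rs.getString(2)));
    }

initGrid would then call dataLoader.loadAllInto(streamer) instead of iterating over getAllKeys() and calling load once per key.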
Thanks,
Yasser
On Wed, Aug 2, 2017 at 9:08 AM, slava.koptilin <[email protected]>
wrote:
> Hi Yasser,
>
> 2) Could you please provide full Ignite and Cache configurations?
> It would be nice to see JVM options (Xms, Xmx etc) that you are using as
> well.
>
> Thanks.