Author: ivol37 at gmail.com
Date: Fri Jan 7 17:12:02 2011
New Revision: 580
Log:
[AMDATU-243] Made replication factor, read and write consistency levels configurable.
Modified:
trunk/amdatu-cassandra/cassandra-application/src/main/java/org/amdatu/cassandra/application/CassandraConfigurationService.java
trunk/amdatu-cassandra/cassandra-application/src/main/java/org/amdatu/cassandra/application/service/CassandraConfigurationServiceImpl.java
trunk/amdatu-cassandra/cassandra-application/src/main/java/org/amdatu/cassandra/application/service/CassandraDaemonServiceImpl.java
trunk/amdatu-cassandra/cassandra-persistencemanager/src/main/java/org/amdatu/cassandra/persistencemanager/osgi/Activator.java
trunk/amdatu-cassandra/cassandra-persistencemanager/src/main/java/org/amdatu/cassandra/persistencemanager/service/CassandraPersistenceManagerFactoryImpl.java
trunk/amdatu-cassandra/cassandra-persistencemanager/src/main/java/org/amdatu/cassandra/persistencemanager/service/CassandraPersistenceManagerImpl.java
trunk/amdatu-core/config-filebased/src/main/resources/conf/org.amdatu.core.cassandra.application.cfg
trunk/pom.xml
Modified: trunk/amdatu-cassandra/cassandra-application/src/main/java/org/amdatu/cassandra/application/CassandraConfigurationService.java
==============================================================================
--- trunk/amdatu-cassandra/cassandra-application/src/main/java/org/amdatu/cassandra/application/CassandraConfigurationService.java (original)
+++ trunk/amdatu-cassandra/cassandra-application/src/main/java/org/amdatu/cassandra/application/CassandraConfigurationService.java Fri Jan 7 17:12:02 2011
@@ -16,6 +16,8 @@
*/
package org.amdatu.cassandra.application;
+import org.apache.cassandra.thrift.ConsistencyLevel;
+
/**
* Interface for the Cassandra Configuration Service.
*
@@ -43,8 +45,60 @@
public static final String CONFIG_WORKDIR = "workdir";
/**
+ * Configuration key for the default replication factor
+ */
+ public static final String DEFAULT_REPLICATION_FACTOR = "default_replication_factor";
+
+ /**
+ * Configuration key for the read consistency level
+ */
+ public static final String READ_CONSISTENCY_LEVEL = "consistency_level_read";
+
+ /**
+ * Configuration key for the write consistency level
+ */
+ public static final String WRITE_CONSISTENCY_LEVEL = "consistency_level_write";
+
+ /**
* Configuration key that stores a list of IP addresses that are part of this cluster. The IP addresses
* are stored comma separated in ConfigurationAdmin.
*/
public static final String SEEDS = "seeds";
+
+ /**
+ * Returns the default replication factor for new keyspaces. The replication factor determines
+ * the number of nodes on which data is replicated. So if default_replication_factor
+ * equals 1 and you have a two-node cluster, it is very likely that you will get timeouts,
+ * as Cassandra will try to read data that is only stored on the other node (data is distributed among
+ * the two nodes). In case all data should be available on all nodes in the cluster, which
+ * is necessary in case the node should also be able to run stand-alone, the replication
+ * factor should equal the number of nodes in the cluster.
+ *
+ * @return The default replication factor
+ */
+ int getDefaultReplicationFactor();
+
+ /**
+ * Returns the read consistency level to apply. The read consistency level determines the
+ * number of nodes in the cluster that must reply to a read request with the latest
+ * version of a row before the result is returned. Note that if the read consistency
+ * level is ALL, all nodes in the cluster are effectively down as soon as one node in
+ * the cluster goes down, as Cassandra will block until it has received an answer from
+ * this (unavailable) node before returning the result.
+ *
+ * @return The read consistency level
+ */
+ ConsistencyLevel getReadConsistencyLevel();
+
+ /**
+ * Returns the write consistency level to apply. The write consistency level determines the
+ * number of nodes in the cluster that must have received the write operation
+ * before the call returns. Note that if the write consistency level is ALL, all
+ * nodes in the cluster are effectively down as soon as one node in the cluster goes
+ * down, as Cassandra will block each write operation until this (unavailable) node
+ * has answered.
+ *
+ * @return The write consistency level
+ */
+ ConsistencyLevel getWriteConsistencyLevel();
}
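For illustration, a minimal sketch of how a client component could consume the new interface once it has been injected. The ConsistencyAwareComponent class and its start() method are hypothetical and not part of this change; they merely exercise the three getters and act on the warning documented above about the ALL level:

import org.amdatu.cassandra.application.CassandraConfigurationService;
import org.apache.cassandra.thrift.ConsistencyLevel;
import org.osgi.service.log.LogService;

public class ConsistencyAwareComponent {
    // Injected by the Felix dependency manager, like the other services in this change set
    private volatile CassandraConfigurationService m_configuration;
    private volatile LogService m_logService;

    public void start() {
        int replicationFactor = m_configuration.getDefaultReplicationFactor();
        ConsistencyLevel read = m_configuration.getReadConsistencyLevel();
        ConsistencyLevel write = m_configuration.getWriteConsistencyLevel();
        // Per the Javadoc above: with ALL, a single unavailable node blocks every operation
        if (read == ConsistencyLevel.ALL || write == ConsistencyLevel.ALL) {
            m_logService.log(LogService.LOG_WARNING, "Consistency level ALL tolerates no node failures (replication factor " + replicationFactor + ")");
        }
    }
}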
Modified: trunk/amdatu-cassandra/cassandra-application/src/main/java/org/amdatu/cassandra/application/service/CassandraConfigurationServiceImpl.java
==============================================================================
--- trunk/amdatu-cassandra/cassandra-application/src/main/java/org/amdatu/cassandra/application/service/CassandraConfigurationServiceImpl.java (original)
+++ trunk/amdatu-cassandra/cassandra-application/src/main/java/org/amdatu/cassandra/application/service/CassandraConfigurationServiceImpl.java Fri Jan 7 17:12:02 2011
@@ -24,6 +24,7 @@
import org.amdatu.cassandra.application.CassandraConfigurationService;
import org.amdatu.core.config.templates.ConfigTemplateCallbackHandler;
import org.amdatu.core.config.templates.ConfigTemplateManager;
+import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.log4j.PropertyConfigurator;
import org.osgi.framework.Bundle;
import org.osgi.framework.BundleContext;
@@ -49,6 +50,9 @@
// Private members
private File m_workDir;
+ private int m_defaultReplicationFactor;
+ private ConsistencyLevel m_readConsistencyLevel;
+ private ConsistencyLevel m_writeConsistencyLevel;
/**
* The init() method is invoked by the Felix dependency manager. It allows us to initialize our service. In this
@@ -102,6 +106,9 @@
}
File workBaseDir = new File(System.getProperty("user.dir"), "work");
m_workDir = new File(workBaseDir, (String) dictionary.get(CONFIG_WORKDIR));
+ m_defaultReplicationFactor = Integer.parseInt(dictionary.get(DEFAULT_REPLICATION_FACTOR).toString());
+ m_readConsistencyLevel = ConsistencyLevel.valueOf(dictionary.get(READ_CONSISTENCY_LEVEL).toString());
+ m_writeConsistencyLevel = ConsistencyLevel.valueOf(dictionary.get(WRITE_CONSISTENCY_LEVEL).toString());
}
}
@@ -115,11 +122,23 @@
result += EOL + "- " + seed;
}
return result;
- } else {
+ }
+ else {
// Return the value as-is
return configValue.toString();
}
}
+ }
+
+ public int getDefaultReplicationFactor() {
+ return m_defaultReplicationFactor;
+ }
+
+ public ConsistencyLevel getReadConsistencyLevel() {
+ return m_readConsistencyLevel;
+ }
+ public ConsistencyLevel getWriteConsistencyLevel() {
+ return m_writeConsistencyLevel;
}
}
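The updated() callback above parses the new keys directly with Integer.parseInt and ConsistencyLevel.valueOf, so a missing or malformed value makes the update fail. A hypothetical, more defensive variant (not part of this commit; the class name, method names, and the fallback values are assumptions that mirror the old hard-coded defaults):

import java.util.Dictionary;

import org.apache.cassandra.thrift.ConsistencyLevel;

public final class ConsistencyConfigParser {
    public static int replicationFactor(Dictionary<?, ?> config, String key) {
        Object value = config.get(key);
        try {
            return value != null ? Integer.parseInt(value.toString().trim()) : 1;
        }
        catch (NumberFormatException e) {
            return 1; // assumed fallback, mirrors the old hard-coded replication factor of 1
        }
    }

    public static ConsistencyLevel level(Dictionary<?, ?> config, String key) {
        Object value = config.get(key);
        try {
            return value != null ? ConsistencyLevel.valueOf(value.toString().trim()) : ConsistencyLevel.ONE;
        }
        catch (IllegalArgumentException e) {
            return ConsistencyLevel.ONE; // assumed fallback, mirrors the old ConsistencyLevel.ONE constants
        }
    }
}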
Modified: trunk/amdatu-cassandra/cassandra-application/src/main/java/org/amdatu/cassandra/application/service/CassandraDaemonServiceImpl.java
==============================================================================
--- trunk/amdatu-cassandra/cassandra-application/src/main/java/org/amdatu/cassandra/application/service/CassandraDaemonServiceImpl.java (original)
+++ trunk/amdatu-cassandra/cassandra-application/src/main/java/org/amdatu/cassandra/application/service/CassandraDaemonServiceImpl.java Fri Jan 7 17:12:02 2011
@@ -21,6 +21,7 @@
import java.util.List;
import java.util.Map;
+import org.amdatu.cassandra.application.CassandraConfigurationService;
import org.amdatu.cassandra.application.CassandraDaemonService;
import org.apache.cassandra.avro.CassandraDaemon;
import org.apache.cassandra.db.Table;
@@ -42,18 +43,18 @@
public class CassandraDaemonServiceImpl implements CassandraDaemonService {
// The default placement strategy
private final String DEFAULT_PLACEMENT_STRATEGY = "org.apache.cassandra.locator.SimpleStrategy";
- private final int DEFAULT_REPLICATION_FACTOR = 1;
// The amount of milliseconds to wait after starting the Cassandra daemon. This gives the Cassandra the
// time to synchronize its data with other nodes in the cluster and/or perform any local migrations.
// If we wouldn't wait, this node will start adding keyspaces and columnfamilies while it is also
// synchronizing the same keyspaces and columnfamily's with other nodes in the cluster. This significantly
// increases the risk of synchronization errors and so we just wait for this amount of time before continuing.
- private final int CASSANDRA_HALT_TIMEOUT = 5000;
+ private final static int CASSANDRA_HALT_TIMEOUT = 5000;
// Service dependencies, injected by the framework
private volatile LogService m_logService;
private volatile EventAdmin m_eventAdmin;
+ private volatile CassandraConfigurationService m_configuration = null;
// The CassandraDaemon cannot be stopped/started without stopping and updating (to enforce classloader
// to be destroyed) this bundle. For that reason we block any attempts to stop/start this service since
@@ -76,7 +77,6 @@
try {
// Setup the cassandra daemon
m_daemon = new CassandraDaemon();
-
m_logService.log(LogService.LOG_INFO, getClass().getName() + " service started.");
}
catch (Throwable t) {
@@ -100,7 +100,10 @@
catch (InterruptedException e) {
}
- m_logService.log(LogService.LOG_INFO, "Cassandra Daemon started.");
+ m_logService.log(LogService.LOG_INFO, "Cassandra Daemon started with
configuration: ");
+ m_logService.log(LogService.LOG_INFO, "> Default replication factor: "
+ m_configuration.getDefaultReplicationFactor());
+ m_logService.log(LogService.LOG_INFO, "> Read consistency level: " +
m_configuration.getReadConsistencyLevel());
+ m_logService.log(LogService.LOG_INFO, "> Write consistency level: " +
m_configuration.getWriteConsistencyLevel());
}
public void stop() {
@@ -141,7 +144,7 @@
public synchronized boolean addKeyspace(String name) throws InvalidRequestException, TException {
if (!keyspaceExists(name)) {
List<CfDef> empty = new ArrayList<CfDef>();
- KsDef ksDef = new KsDef(name, DEFAULT_PLACEMENT_STRATEGY, DEFAULT_REPLICATION_FACTOR, empty);
+ KsDef ksDef = new KsDef(name, DEFAULT_PLACEMENT_STRATEGY, m_configuration.getDefaultReplicationFactor(), empty);
try {
m_cassandraServer.system_add_keyspace(ksDef);
} catch (InvalidRequestException e) {
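Since the keyspace definition now takes its replication factor from the configuration service, the configured consistency levels and replication factor together determine how many replicas must answer each request. A small illustrative helper (not part of this commit; the class, the method, and the simplified single-datacenter formulas are assumptions) makes that relation explicit for the levels discussed here:

import org.apache.cassandra.thrift.ConsistencyLevel;

public final class ConsistencyMath {
    // Rough number of replicas that must respond in a single-datacenter setup.
    // Simplified illustration only; see the Cassandra documentation for the exact rules.
    public static int repliesNeeded(ConsistencyLevel level, int replicationFactor) {
        switch (level) {
            case ONE:
                return 1;
            case QUORUM:
                return (replicationFactor / 2) + 1;
            case ALL:
                return replicationFactor; // one unavailable replica blocks the operation
            default:
                throw new IllegalArgumentException("Level not covered in this sketch: " + level);
        }
    }
}

For example, with default_replication_factor=1 and level ONE, a single reply suffices, which matches the defaults introduced in this change.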
Modified: trunk/amdatu-cassandra/cassandra-persistencemanager/src/main/java/org/amdatu/cassandra/persistencemanager/osgi/Activator.java
==============================================================================
--- trunk/amdatu-cassandra/cassandra-persistencemanager/src/main/java/org/amdatu/cassandra/persistencemanager/osgi/Activator.java (original)
+++ trunk/amdatu-cassandra/cassandra-persistencemanager/src/main/java/org/amdatu/cassandra/persistencemanager/osgi/Activator.java Fri Jan 7 17:12:02 2011
@@ -16,6 +16,7 @@
*/
package org.amdatu.cassandra.persistencemanager.osgi;
+import org.amdatu.cassandra.application.CassandraConfigurationService;
import org.amdatu.cassandra.application.CassandraDaemonService;
import org.amdatu.cassandra.persistencemanager.CassandraPersistenceManagerFactory;
import org.amdatu.cassandra.persistencemanager.service.CassandraPersistenceManagerFactoryImpl;
@@ -29,11 +30,12 @@
public void init(BundleContext context, DependencyManager manager) throws Exception {
// Create and register the tenant unaware persistence manager
manager.add(
- createComponent()
- .setInterface(CassandraPersistenceManagerFactory.class.getName(), null)
- .setImplementation(CassandraPersistenceManagerFactoryImpl.class)
- .add(createServiceDependency().setService(LogService.class).setRequired(true))
- .add(createServiceDependency().setService(CassandraDaemonService.class).setRequired(true)));
+ createComponent()
+ .setInterface(CassandraPersistenceManagerFactory.class.getName(), null)
+ .setImplementation(CassandraPersistenceManagerFactoryImpl.class)
+ .add(createServiceDependency().setService(LogService.class).setRequired(true))
+ .add(createServiceDependency().setService(CassandraConfigurationService.class).setRequired(true))
+ .add(createServiceDependency().setService(CassandraDaemonService.class).setRequired(true)));
}
@Override
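Declaring the dependency as required means the factory component is not started until a CassandraConfigurationService is registered, and the dependency manager injects the service instance into a field of matching type. A stripped-down, hypothetical consumer illustrating that convention (the corresponding field for the persistence manager itself appears in the CassandraPersistenceManagerImpl change further below, wired up by the factory in the next file):

import org.amdatu.cassandra.application.CassandraConfigurationService;
import org.osgi.service.log.LogService;

public class ConfigurationConsumer {
    // Filled in by the Felix dependency manager once the required service is available
    protected volatile CassandraConfigurationService m_configuration;
    protected volatile LogService m_logService;

    public void start() {
        m_logService.log(LogService.LOG_INFO, "Write consistency level in effect: " + m_configuration.getWriteConsistencyLevel());
    }
}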
Modified: trunk/amdatu-cassandra/cassandra-persistencemanager/src/main/java/org/amdatu/cassandra/persistencemanager/service/CassandraPersistenceManagerFactoryImpl.java
==============================================================================
--- trunk/amdatu-cassandra/cassandra-persistencemanager/src/main/java/org/amdatu/cassandra/persistencemanager/service/CassandraPersistenceManagerFactoryImpl.java (original)
+++ trunk/amdatu-cassandra/cassandra-persistencemanager/src/main/java/org/amdatu/cassandra/persistencemanager/service/CassandraPersistenceManagerFactoryImpl.java Fri Jan 7 17:12:02 2011
@@ -19,6 +19,7 @@
import java.util.Dictionary;
import java.util.Hashtable;
+import org.amdatu.cassandra.application.CassandraConfigurationService;
import org.amdatu.cassandra.application.CassandraDaemonService;
import org.amdatu.cassandra.persistencemanager.CassandraPersistenceManager;
import org.amdatu.cassandra.persistencemanager.CassandraPersistenceManagerFactory;
@@ -43,6 +44,7 @@
component.setImplementation(CassandraPersistenceManagerImpl.class);
component.setInterface(CassandraPersistenceManager.class.getName(), serviceProperties);
component.add(m_dependencyManager.createServiceDependency().setService(LogService.class).setRequired(true));
+ component.add(m_dependencyManager.createServiceDependency().setService(CassandraConfigurationService.class).setRequired(true));
component.add(m_dependencyManager.createServiceDependency().setService(CassandraDaemonService.class)
.setRequired(true));
Modified: trunk/amdatu-cassandra/cassandra-persistencemanager/src/main/java/org/amdatu/cassandra/persistencemanager/service/CassandraPersistenceManagerImpl.java
==============================================================================
--- trunk/amdatu-cassandra/cassandra-persistencemanager/src/main/java/org/amdatu/cassandra/persistencemanager/service/CassandraPersistenceManagerImpl.java (original)
+++ trunk/amdatu-cassandra/cassandra-persistencemanager/src/main/java/org/amdatu/cassandra/persistencemanager/service/CassandraPersistenceManagerImpl.java Fri Jan 7 17:12:02 2011
@@ -27,6 +27,7 @@
import java.util.Set;
import java.util.Map.Entry;
+import org.amdatu.cassandra.application.CassandraConfigurationService;
import org.amdatu.cassandra.application.CassandraDaemonService;
import org.amdatu.cassandra.persistencemanager.CassandraException;
import org.amdatu.cassandra.persistencemanager.CassandraPersistenceManager;
@@ -34,7 +35,6 @@
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.ColumnParent;
import org.apache.cassandra.thrift.ColumnPath;
-import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.cassandra.thrift.KeyRange;
import org.apache.cassandra.thrift.KeySlice;
import org.apache.cassandra.thrift.NotFoundException;
@@ -65,14 +65,6 @@
// Empty byte array
private final static ByteBuffer EMPTY = ByteBuffer.wrap(new byte[0]);
- // The consistency level to use for READ operations. Note that if we define QUORUM or ALL and the
- // cluster only consists of two nodes; all READ operations will return a timeout as soon as one
- // node goes down.
- private final static ConsistencyLevel READ_CONSISTENCY_LEVEL = ConsistencyLevel.ONE;
-
- // The consistency level to use for WRITE operations
- private final static ConsistencyLevel WRITE_CONSISTENCY_LEVEL = ConsistencyLevel.ONE;
-
// Investigation pointed out that retrying succeeds after about 10 times.
private final static int MAX_RETRIES = 10;
@@ -84,6 +76,7 @@
// Service dependencies injected by the Felix dependency manager
protected volatile CassandraDaemonService m_daemonService;
+ protected volatile CassandraConfigurationService m_configuration;
protected volatile LogService m_logService;
// Other instances injected by the Felix dependency manager
@@ -124,7 +117,7 @@
range.setEnd_key(EMPTY);
Iface cs = m_daemonService.getCassandraServer();
cs.set_keyspace(m_keyspace);
- cs.get_range_slices(columnParent, p, range, READ_CONSISTENCY_LEVEL);
+ cs.get_range_slices(columnParent, p, range, m_configuration.getReadConsistencyLevel());
return true;
}
catch (Exception e) {
@@ -149,7 +142,7 @@
range.setEnd_key(toBytes(rowKey));
Iface cs = m_daemonService.getCassandraServer();
cs.set_keyspace(m_keyspace);
- List<KeySlice> keySlices = cs.get_range_slices(columnParent, p, range, READ_CONSISTENCY_LEVEL);
+ List<KeySlice> keySlices = cs.get_range_slices(columnParent, p, range, m_configuration.getReadConsistencyLevel());
return keySlices != null && keySlices.size() > 0 &&
keySlices.get(0).getColumns().size() > 0;
}
catch (Exception e) {
@@ -171,7 +164,7 @@
columnPath.setColumn(toBytes(columnName));
Iface cs = m_daemonService.getCassandraServer();
cs.set_keyspace(m_keyspace);
- ColumnOrSuperColumn sosc = cs.get(toBytes(rowKey), columnPath, READ_CONSISTENCY_LEVEL);
+ ColumnOrSuperColumn sosc = cs.get(toBytes(rowKey), columnPath, m_configuration.getReadConsistencyLevel());
sosc.getColumn();
return true;
}
@@ -209,7 +202,7 @@
Iface cs = m_daemonService.getCassandraServer();
cs.set_keyspace(m_keyspace);
- List<KeySlice> keySlices = cs.get_range_slices(columnParent, p, range, READ_CONSISTENCY_LEVEL);
+ List<KeySlice> keySlices = cs.get_range_slices(columnParent, p, range, m_configuration.getReadConsistencyLevel());
List<String> keys = new ArrayList<String>();
for (KeySlice keySlice : keySlices) {
@@ -246,7 +239,7 @@
Iface cs = m_daemonService.getCassandraServer();
cs.set_keyspace(m_keyspace);
List<ColumnOrSuperColumn> slice =
- cs.get_slice(toBytes(rowKey), columnParent, predicate, READ_CONSISTENCY_LEVEL);
+ cs.get_slice(toBytes(rowKey), columnParent, predicate, m_configuration.getReadConsistencyLevel());
for (ColumnOrSuperColumn columnOrSuperColumn : slice) {
if (columnOrSuperColumn.isSetSuper_column()) {
@@ -284,7 +277,7 @@
Iface cs = m_daemonService.getCassandraServer();
cs.set_keyspace(m_keyspace);
List<KeySlice> getRangeSlices =
- cs.get_range_slices(columnParent, p, range, READ_CONSISTENCY_LEVEL);
+ cs.get_range_slices(columnParent, p, range, m_configuration.getReadConsistencyLevel());
return flattenSuper(getRangeSlices);
}
catch (Exception e) {
@@ -311,7 +304,7 @@
List<KeySlice> getRangeSlices;
Iface cs = m_daemonService.getCassandraServer();
cs.set_keyspace(m_keyspace);
- getRangeSlices = cs.get_range_slices(columnParent, p, range, READ_CONSISTENCY_LEVEL);
+ getRangeSlices = cs.get_range_slices(columnParent, p, range, m_configuration.getReadConsistencyLevel());
Map<String, Map<String, Map<String, byte[]>>> flattenSuper =
flattenSuper(getRangeSlices);
Set<Entry<String, Map<String, Map<String, byte[]>>>> entrySet =
flattenSuper.entrySet();
for (Entry<String, Map<String, Map<String, byte[]>>> first :
entrySet) {
@@ -344,7 +337,7 @@
List<KeySlice> getRangeSlices;
Iface cs = m_daemonService.getCassandraServer();
cs.set_keyspace(m_keyspace);
- getRangeSlices = cs.get_range_slices(columnParent, p, range, READ_CONSISTENCY_LEVEL);
+ getRangeSlices = cs.get_range_slices(columnParent, p, range, m_configuration.getReadConsistencyLevel());
Map<String, Map<String, Map<String, String>>> flattenSuper =
flattenStringSuper(getRangeSlices);
Set<Entry<String, Map<String, Map<String, String>>>> entrySet =
flattenSuper.entrySet();
for (Entry<String, Map<String, Map<String, String>>> first :
entrySet) {
@@ -373,10 +366,10 @@
cs.set_keyspace(m_keyspace);
// Perform read-repair
- cs.get(toBytes(rowKey), columnPath, READ_CONSISTENCY_LEVEL);
+ cs.get(toBytes(rowKey), columnPath, m_configuration.getReadConsistencyLevel());
// Now read
- ColumnOrSuperColumn columnOrSuperColumn = cs.get(toBytes(rowKey), columnPath, READ_CONSISTENCY_LEVEL);
+ ColumnOrSuperColumn columnOrSuperColumn = cs.get(toBytes(rowKey), columnPath, m_configuration.getReadConsistencyLevel());
byte[] value = columnOrSuperColumn.getColumn().getValue();
return value;
}
@@ -428,7 +421,7 @@
Iface cs = m_daemonService.getCassandraServer();
cs.set_keyspace(m_keyspace);
List<ColumnOrSuperColumn> slice =
- cs.get_slice(toBytes(rowKey), columnParent, predicate, READ_CONSISTENCY_LEVEL);
+ cs.get_slice(toBytes(rowKey), columnParent, predicate, m_configuration.getReadConsistencyLevel());
Map<String, byte[]> result = new HashMap<String, byte[]>();
for (ColumnOrSuperColumn columnOrSuperColumn : slice) {
String name =
toString(columnOrSuperColumn.getColumn().getName());
@@ -485,7 +478,7 @@
Column column = new Column(toBytes(columnName),
ByteBuffer.wrap(value), timestamp);
Iface cs = m_daemonService.getCassandraServer();
cs.set_keyspace(m_keyspace);
- cs.insert(toBytes(rowKey), column_parent, column, WRITE_CONSISTENCY_LEVEL);
+ cs.insert(toBytes(rowKey), column_parent, column, m_configuration.getWriteConsistencyLevel());
}
catch (Exception e) {
if (!(e instanceof RuntimeException)) {
@@ -586,7 +579,7 @@
}
Iface cs = m_daemonService.getCassandraServer();
cs.set_keyspace(m_keyspace);
- cs.remove(toBytes(rowKey), columnPath, timestamp, WRITE_CONSISTENCY_LEVEL);
+ cs.remove(toBytes(rowKey), columnPath, timestamp, m_configuration.getWriteConsistencyLevel());
}
catch (Exception e) {
if (!(e instanceof RuntimeException)) {
@@ -703,7 +696,7 @@
range.setEnd_key(toBytes(key));
Iface cs = m_daemonService.getCassandraServer();
List<ColumnOrSuperColumn> getSlice =
- cs.get_slice(toBytes(key), columnParent, p, READ_CONSISTENCY_LEVEL);
+ cs.get_slice(toBytes(key), columnParent, p, m_configuration.getReadConsistencyLevel());
// byte[][] result = new byte[getSlice.size()][0];
List<byte[]> result = new ArrayList<byte[]>(getSlice.size());
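The class comment above notes that retrying succeeds after about MAX_RETRIES attempts. A sketch of a read helper that could live in CassandraPersistenceManagerImpl and combines that retry policy with the newly configurable read consistency level (the method itself is hypothetical and not part of this commit; it reuses the existing m_daemonService, m_keyspace, m_configuration and MAX_RETRIES members):

private ColumnOrSuperColumn getWithRetries(ByteBuffer rowKey, ColumnPath columnPath) throws Exception {
    Exception last = null;
    for (int attempt = 0; attempt < MAX_RETRIES; attempt++) {
        try {
            Iface cs = m_daemonService.getCassandraServer();
            cs.set_keyspace(m_keyspace);
            // Uses the configured level instead of the removed READ_CONSISTENCY_LEVEL constant
            return cs.get(rowKey, columnPath, m_configuration.getReadConsistencyLevel());
        }
        catch (Exception e) {
            last = e; // e.g. a timeout when too few replicas answered in time
        }
    }
    throw last;
}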
Modified: trunk/amdatu-core/config-filebased/src/main/resources/conf/org.amdatu.core.cassandra.application.cfg
==============================================================================
--- trunk/amdatu-core/config-filebased/src/main/resources/conf/org.amdatu.core.cassandra.application.cfg (original)
+++ trunk/amdatu-core/config-filebased/src/main/resources/conf/org.amdatu.core.cassandra.application.cfg Fri Jan 7 17:12:02 2011
@@ -13,7 +13,7 @@
###############################
-# Clustering related properties
+# Cassandra clustering related properties
###############################
# Name of the Cassandra cluster to join. If a seed is defined in the 'seeds' property
@@ -32,4 +32,29 @@
# This IP address must thus be accessible from the other nodes in the cluster (to
# be precise; those that provided this node as one of its seeds).
# When cassandra is executed in stand-alone mode, this value should be localhost
-listen_address=${cassandra.listen_address}
\ No newline at end of file
+listen_address=${cassandra.listen_address}
+
+# The default replication factor of new keyspaces. The replication factor determines
+# the number of nodes on which data is replicated. So if default_replication_factor
+# equals 1 and you have a two-node cluster, it is very likely that you will get timeouts,
+# as Cassandra will try to read data that is only stored on the other node (data is distributed among
+# the two nodes). In case all data should be available on all nodes in the cluster, which
+# is necessary in case the node should also be able to run stand-alone, the replication
+# factor should equal the number of nodes in the cluster.
+default_replication_factor=${cassandra.default_replication_factor}
+
+# The read consistency level to apply. The read consistency level determines the
+# number of nodes in the cluster that must reply to a read request with the latest
+# version of a row before the result is returned. Note that if the read consistency
+# level is ALL, all nodes in the cluster are effectively down as soon as one node in
+# the cluster goes down, as Cassandra will block until it has received an answer from
+# this (unavailable) node before returning the result.
+consistency_level_read=${cassandra.consistency_level_read}
+
+# The write consistency level to apply. The write consistency level determines the
+# number of nodes in the cluster that must have received the write operation
+# before the call returns. Note that if the write consistency level is ALL, all
+# nodes in the cluster are effectively down as soon as one node in the cluster goes
+# down, as Cassandra will block each write operation until this (unavailable) node
+# has answered.
+consistency_level_write=${cassandra.consistency_level_write}
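For reference, a sketch of the kind of dictionary ConfigurationAdmin hands to the service's updated() callback once the placeholders above have been substituted. The class and the concrete values are illustrative only; the real values come from the ${cassandra.*} properties defined in trunk/pom.xml below:

import java.util.Dictionary;
import java.util.Hashtable;

public final class ExampleCassandraConfig {
    public static Dictionary<String, Object> example() {
        Dictionary<String, Object> config = new Hashtable<String, Object>();
        // Parsed by CassandraConfigurationServiceImpl with Integer.parseInt(...)
        config.put("default_replication_factor", "1");
        // Parsed with ConsistencyLevel.valueOf(...), so the value must match an enum constant name
        config.put("consistency_level_read", "ONE");
        config.put("consistency_level_write", "ONE");
        return config;
    }
}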
Modified: trunk/pom.xml
==============================================================================
--- trunk/pom.xml (original)
+++ trunk/pom.xml Fri Jan 7 17:12:02 2011
@@ -51,6 +51,15 @@
define this IP address in their list of seeds). For example: "172.16.11.108" -->
<cassandra.listen_address>localhost</cassandra.listen_address>
+ <!-- The default Cassandra replication factor -->
+ <cassandra.default_replication_factor>1</cassandra.default_replication_factor>
+
+ <!-- The READ consistency level -->
+ <cassandra.consistency_level_read>ONE</cassandra.consistency_level_read>
+
+ <!-- The WRITE consistency level -->
+ <cassandra.consistency_level_write>ONE</cassandra.consistency_level_write>
+
<!--
Version numbers of dependent libraries
-->