Hi Stan

Currently I am using the JDBC thin driver to connect to the Ignite cluster for
both reads and writes.
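
For reference, the read/write path today is just the thin driver pointed at one
of the cluster nodes, roughly like this (the address is one of the nodes from
the config below, on the default thin-client port):

import java.sql.Connection;
import java.sql.DriverManager;

public class ThinDriverCheck {
  public static void main(String[] args) throws Exception {
    // Connect through the Ignite JDBC thin driver (default port 10800).
    try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://10.144.114.113/")) {
      System.out.println("Connected: " + !conn.isClosed());
    }
  }
}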

Below is the config XML I am using:

<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xmlns:util="http://www.springframework.org/schema/util"
       xsi:schemaLocation="http://www.springframework.org/schema/beans
                           http://www.springframework.org/schema/beans/spring-beans.xsd
                           http://www.springframework.org/schema/util
                           http://www.springframework.org/schema/util/spring-util.xsd">

    <bean class="org.apache.ignite.configuration.IgniteConfiguration">
        <property name="clientMode" value="true"/>
        <property name="igniteInstanceName" value="IgnitePOC"/>

        <property name="dataStorageConfiguration">
            <bean class="org.apache.ignite.configuration.DataStorageConfiguration">
                <property name="defaultDataRegionConfiguration">
                    <bean class="org.apache.ignite.configuration.DataRegionConfiguration">
                        <property name="persistenceEnabled" value="true"/>
                    </bean>
                </property>
            </bean>
        </property>

        <property name="discoverySpi">
            <bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi">
                <property name="ipFinder">
                    <bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder">
                        <property name="addresses">
                            <list>
                                <value>10.144.114.113:47500..47502</value>
                                <value>10.144.114.114:47500..47502</value>
                                <value>10.144.114.115:47500..47502</value>
                            </list>
                        </property>
                    </bean>
                </property>
            </bean>
        </property>
    </bean>
</beans>

If I wanted to use Oracle as the persistence layer, what changes should I make
to the config XML?
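
For instance, is something along these lines the right direction? This is only a
rough sketch based on CacheJdbcPojoStoreFactory; the DataSource bean id, cache
name, Oracle URL and credentials are placeholders for our environment, and the
JdbcType field mappings for the PERSONS table are left out.

    <!-- Sketch: Oracle DataSource bean; URL, user and password are placeholders. -->
    <bean id="oracleDataSource" class="oracle.jdbc.pool.OracleDataSource">
        <property name="URL" value="jdbc:oracle:thin:@//db-host:1521/ORCLPDB1"/>
        <property name="user" value="ignite_user"/>
        <property name="password" value="ignite_pwd"/>
    </bean>

    <!-- Sketch: cacheConfiguration to be added inside the IgniteConfiguration bean above. -->
    <property name="cacheConfiguration">
        <list>
            <bean class="org.apache.ignite.configuration.CacheConfiguration">
                <property name="name" value="PersonCache"/>
                <property name="readThrough" value="true"/>
                <property name="writeThrough" value="true"/>
                <property name="cacheStoreFactory">
                    <bean class="org.apache.ignite.cache.store.jdbc.CacheJdbcPojoStoreFactory">
                        <property name="dataSourceBean" value="oracleDataSource"/>
                        <property name="dialect">
                            <bean class="org.apache.ignite.cache.store.jdbc.dialect.OracleDialect"/>
                        </property>
                        <!-- Plus a "types" property with the JdbcType key/value field
                             mappings for the PERSONS table (omitted here). -->
                    </bean>
                </property>
            </bean>
        </list>
    </property>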

As mentioned in the examples, do I need to implement a CacheStore for each
table, with load, write and remove operations, the same way as shown below?

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

import javax.cache.Cache;
import javax.cache.integration.CacheLoaderException;
import javax.cache.integration.CacheWriterException;

import org.apache.ignite.cache.store.CacheStoreAdapter;
import org.apache.ignite.lang.IgniteBiInClosure;

public class CacheJdbcPersonStore extends CacheStoreAdapter<Long, Person> {
  // This method is called whenever "get(...)" methods are called on IgniteCache.
  @Override public Person load(Long key) {
    try (Connection conn = connection()) {
      try (PreparedStatement st = conn.prepareStatement("select * from PERSONS where id=?")) {
        st.setLong(1, key);

        ResultSet rs = st.executeQuery();

        return rs.next() ? new Person(rs.getLong(1), rs.getString(2), rs.getString(3)) : null;
      }
    }
    catch (SQLException e) {
      throw new CacheLoaderException("Failed to load: " + key, e);
    }
  }

  // This method is called whenever "put(...)" methods are called on IgniteCache.
  @Override public void write(Cache.Entry<? extends Long, ? extends Person> entry) {
    try (Connection conn = connection()) {
      // Syntax of MERGE statement is database specific and should be adapted for your database.
      // If your database does not support MERGE, use sequential update/insert statements instead.
      try (PreparedStatement st = conn.prepareStatement(
        "merge into PERSONS (id, firstName, lastName) key (id) VALUES (?, ?, ?)")) {
        Person val = entry.getValue();

        st.setLong(1, entry.getKey());
        st.setString(2, val.getFirstName());
        st.setString(3, val.getLastName());

        st.executeUpdate();
      }
    }
    catch (SQLException e) {
      throw new CacheWriterException("Failed to write [key=" + entry.getKey() +
        ", val=" + entry.getValue() + ']', e);
    }
  }

  // This method is called whenever "remove(...)" methods are called on IgniteCache.
  @Override public void delete(Object key) {
    try (Connection conn = connection()) {
      try (PreparedStatement st = conn.prepareStatement("delete from PERSONS where id=?")) {
        st.setLong(1, (Long)key);

        st.executeUpdate();
      }
    }
    catch (SQLException e) {
      throw new CacheWriterException("Failed to delete: " + key, e);
    }
  }

  // This method is called whenever "loadCache()" and "localLoadCache()"
  // methods are called on IgniteCache. It is used for bulk-loading the cache.
  // If you don't need to bulk-load the cache, skip this method.
  @Override public void loadCache(IgniteBiInClosure<Long, Person> clo, Object... args) {
    if (args == null || args.length == 0 || args[0] == null)
      throw new CacheLoaderException("Expected entry count parameter is not provided.");

    final int entryCnt = (Integer)args[0];

    try (Connection conn = connection()) {
      try (PreparedStatement st = conn.prepareStatement("select * from PERSONS")) {
        try (ResultSet rs = st.executeQuery()) {
          int cnt = 0;

          while (cnt < entryCnt && rs.next()) {
            Person person = new Person(rs.getLong(1), rs.getString(2), rs.getString(3));

            clo.apply(person.getId(), person);

            cnt++;
          }
        }
      }
    }
    catch (SQLException e) {
      throw new CacheLoaderException("Failed to load values from cache store.", e);
    }
  }

  // Open JDBC connection.
  private Connection connection() throws SQLException {
    // Open connection to your RDBMS systems (Oracle, MySQL, Postgres, DB2, Microsoft SQL, etc.)
    // In this example we use H2 Database for simplification.
    Connection conn = DriverManager.getConnection("jdbc:h2:mem:example;DB_CLOSE_DELAY=-1");

    conn.setAutoCommit(true);

    return conn;
  }
}
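
If I go with the CacheStore approach, I assume the only change in connection()
would be to point it at Oracle instead of H2, something like the sketch below
(the host, service name and credentials are placeholders, and the Oracle JDBC
driver jar would need to be on the classpath):

  // Sketch: open a JDBC connection to Oracle instead of the H2 example above.
  // Host, service name and credentials are placeholders for our environment.
  private Connection connection() throws SQLException {
    Connection conn = DriverManager.getConnection(
      "jdbc:oracle:thin:@//db-host:1521/ORCLPDB1", "ignite_user", "ignite_pwd");

    conn.setAutoCommit(true);

    return conn;
  }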

Or are there any other ways we can implement this?
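
For example, instead of configuring everything in the Spring XML, would wiring
the cache and the store from Java code be an equivalent option? A rough sketch
of what I mean (class and file names are placeholders, and it assumes the
CacheJdbcPersonStore class above):

import javax.cache.configuration.FactoryBuilder;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.CacheConfiguration;

public class PersonCacheStartup {
  public static void main(String[] args) {
    // Start a node with the Spring config above ("ignite-config.xml" is a placeholder).
    Ignite ignite = Ignition.start("ignite-config.xml");

    CacheConfiguration<Long, Person> ccfg = new CacheConfiguration<>("PersonCache");

    // Plug in the hand-written store and enable read/write-through.
    ccfg.setCacheStoreFactory(FactoryBuilder.factoryOf(CacheJdbcPersonStore.class));
    ccfg.setReadThrough(true);   // cache misses call load()
    ccfg.setWriteThrough(true);  // put()/remove() call write()/delete()

    IgniteCache<Long, Person> cache = ignite.getOrCreateCache(ccfg);

    // Read-through example: goes to the database if the key is not cached.
    System.out.println(cache.get(1L));
  }
}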

Thanks
Naveen


