This is an automated email from the ASF dual-hosted git repository. nnag pushed a commit to branch develop in repository https://gitbox.apache.org/repos/asf/geode.git
commit 2a72fb22a698bbe1d9ffca026fea096c1e0b12b5 Author: nabarunnag <n...@cs.wisc.edu> AuthorDate: Wed Feb 21 17:17:43 2018 -0800 GEODE-4717: Refactor computeRepository * Extracted the code to reindex the region entries to a different method --- .../lucene/internal/IndexRepositoryFactory.java | 51 ++++++++++++---------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/IndexRepositoryFactory.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/IndexRepositoryFactory.java index 99ef788..1d1c77b 100644 --- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/IndexRepositoryFactory.java +++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/IndexRepositoryFactory.java @@ -15,7 +15,6 @@ package org.apache.geode.cache.lucene.internal; import java.io.IOException; -import java.io.Serializable; import java.util.HashSet; import java.util.Iterator; import java.util.Map; @@ -35,9 +34,7 @@ import org.apache.geode.cache.lucene.internal.repository.IndexRepository; import org.apache.geode.cache.lucene.internal.repository.IndexRepositoryImpl; import org.apache.geode.cache.query.internal.DefaultQuery; import org.apache.geode.distributed.DistributedLockService; -import org.apache.geode.distributed.LockServiceDestroyedException; import org.apache.geode.internal.cache.BucketRegion; -import org.apache.geode.internal.cache.ColocationHelper; import org.apache.geode.internal.cache.EntrySnapshot; import org.apache.geode.internal.cache.PartitionRegionConfig; import org.apache.geode.internal.cache.PartitionedRegion; @@ -114,26 +111,8 @@ public class IndexRepositoryFactory { success = true; return repo; } else { - Set<IndexRepository> affectedRepos = new HashSet<IndexRepository>(); - - Iterator keysIterator = dataBucket.keySet().iterator(); - while (keysIterator.hasNext()) { - Object key = keysIterator.next(); - Object value = getValue(userRegion.getEntry(key)); - 
if (value != null) { - repo.update(key, value); - } else { - repo.delete(key); - } - affectedRepos.add(repo); - } - - for (IndexRepository affectedRepo : affectedRepos) { - affectedRepo.commit(); - } - // fileRegion ops (get/put) need bucketId as a callbackArg for PartitionResolver - fileRegion.put(APACHE_GEODE_INDEX_COMPLETE, APACHE_GEODE_INDEX_COMPLETE, bucketId); - success = true; + success = + reindexUserDataRegion(bucketId, userRegion, fileRegion, dataBucket, success, repo); } return repo; } catch (IOException e) { @@ -152,6 +131,32 @@ public class IndexRepositoryFactory { } } + private boolean reindexUserDataRegion(Integer bucketId, PartitionedRegion userRegion, + PartitionedRegion fileRegion, BucketRegion dataBucket, boolean success, IndexRepository repo) + throws IOException { + Set<IndexRepository> affectedRepos = new HashSet<IndexRepository>(); + + Iterator keysIterator = dataBucket.keySet().iterator(); + while (keysIterator.hasNext()) { + Object key = keysIterator.next(); + Object value = getValue(userRegion.getEntry(key)); + if (value != null) { + repo.update(key, value); + } else { + repo.delete(key); + } + affectedRepos.add(repo); + } + + for (IndexRepository affectedRepo : affectedRepos) { + affectedRepo.commit(); + } + // fileRegion ops (get/put) need bucketId as a callbackArg for PartitionResolver + fileRegion.put(APACHE_GEODE_INDEX_COMPLETE, APACHE_GEODE_INDEX_COMPLETE, bucketId); + success = true; + return success; + } + private Object getValue(Region.Entry entry) { final EntrySnapshot es = (EntrySnapshot) entry; Object value; -- To stop receiving notification emails like this one, please contact n...@apache.org.