http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
index 3904393..91e28fd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
@@ -45,7 +45,7 @@ public class RegionServerAccounting {
   // Store the edits size during replaying WAL. Use this to roll back the
   // global memstore size once a region opening failed.
   private final ConcurrentMap<byte[], MemstoreSize> replayEditsPerRegion =
-      new ConcurrentSkipListMap<byte[], MemstoreSize>(Bytes.BYTES_COMPARATOR);
+      new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);

   private final Configuration conf;
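Every hunk in this patch applies the same mechanical cleanup: explicit type arguments on generic instantiations are replaced by the Java 7 diamond operator, which infers them from the declared type. A minimal before/after sketch of the pattern (hypothetical names, not taken from the patch):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class DiamondSketch {
      // Before: the type arguments are spelled out twice.
      List<String> namesOld = new ArrayList<String>(16);
      Map<String, Long> countsOld = new HashMap<String, Long>();

      // After: <> lets the compiler infer the arguments from the
      // declared type; the compiled bytecode is the same.
      List<String> names = new ArrayList<>(16);
      Map<String, Long> counts = new HashMap<>();
    }

Because the change is behavior-neutral, the hunks below touch only instantiation sites.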
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
index 82e6778..ea346ea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
@@ -40,7 +40,7 @@ public class RegionServicesForStores {
   private static final int POOL_SIZE = 10;
   private static final ThreadPoolExecutor INMEMORY_COMPACTION_POOL =
       new ThreadPoolExecutor(POOL_SIZE, POOL_SIZE, 60, TimeUnit.SECONDS,
-          new LinkedBlockingQueue<Runnable>(),
+          new LinkedBlockingQueue<>(),
           new ThreadFactory() {
             @Override
             public Thread newThread(Runnable r) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
index 1accae1..b1473cb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
@@ -140,8 +140,7 @@ public class SecureBulkLoadManager {
     List<BulkLoadObserver> bulkLoadObservers = getBulkLoadObservers(region);

     if (bulkLoadObservers != null && bulkLoadObservers.size() != 0) {
-      ObserverContext<RegionCoprocessorEnvironment> ctx =
-          new ObserverContext<RegionCoprocessorEnvironment>(getActiveUser());
+      ObserverContext<RegionCoprocessorEnvironment> ctx = new ObserverContext<>(getActiveUser());
       ctx.prepare((RegionCoprocessorEnvironment) region.getCoprocessorHost()
           .findCoprocessorEnvironment(BulkLoadObserver.class).get(0));
@@ -162,8 +161,7 @@ public class SecureBulkLoadManager {
     List<BulkLoadObserver> bulkLoadObservers = getBulkLoadObservers(region);

     if (bulkLoadObservers != null && bulkLoadObservers.size() != 0) {
-      ObserverContext<RegionCoprocessorEnvironment> ctx =
-          new ObserverContext<RegionCoprocessorEnvironment>(getActiveUser());
+      ObserverContext<RegionCoprocessorEnvironment> ctx = new ObserverContext<>(getActiveUser());
       ctx.prepare((RegionCoprocessorEnvironment) region.getCoprocessorHost()
           .findCoprocessorEnvironment(BulkLoadObserver.class).get(0));
@@ -177,9 +175,9 @@ public class SecureBulkLoadManager {

   public Map<byte[], List<Path>> secureBulkLoadHFiles(final Region region,
       final BulkLoadHFileRequest request) throws IOException {
-    final List<Pair<byte[], String>> familyPaths = new ArrayList<Pair<byte[], String>>(request.getFamilyPathCount());
+    final List<Pair<byte[], String>> familyPaths = new ArrayList<>(request.getFamilyPathCount());
     for(ClientProtos.BulkLoadHFileRequest.FamilyPath el : request.getFamilyPathList()) {
-      familyPaths.add(new Pair<byte[], String>(el.getFamily().toByteArray(), el.getPath()));
+      familyPaths.add(new Pair<>(el.getFamily().toByteArray(), el.getPath()));
     }

     Token userToken = null;
@@ -324,7 +322,7 @@ public class SecureBulkLoadManager {
       this.fs = fs;
       this.stagingDir = stagingDir;
       this.conf = conf;
-      this.origPermissions = new HashMap<String, FsPermission>();
+      this.origPermissions = new HashMap<>();
     }

     @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
index 8581517..11d51d8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
@@ -55,7 +55,7 @@ public abstract class Segment {
   public final static long DEEP_OVERHEAD = FIXED_OVERHEAD + ClassSize.ATOMIC_REFERENCE
       + ClassSize.CELL_SET + ClassSize.ATOMIC_LONG + ClassSize.TIMERANGE_TRACKER;

-  private AtomicReference<CellSet> cellSet= new AtomicReference<CellSet>();
+  private AtomicReference<CellSet> cellSet= new AtomicReference<>();
   private final CellComparator comparator;
   protected long minSequenceId;
   private MemStoreLAB memStoreLAB;
@@ -115,7 +115,7 @@ public abstract class Segment {
   }

   public List<KeyValueScanner> getScanners(long readPoint, long order) {
-    List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>(1);
+    List<KeyValueScanner> scanners = new ArrayList<>(1);
     scanners.add(getScanner(readPoint, order));
     return scanners;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
index 7e53026..1a8b89d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
@@ -108,7 +108,7 @@ public final class SegmentFactory {
   }

   private MemStoreLAB getMergedMemStoreLAB(Configuration conf, List<ImmutableSegment> segments) {
-    List<MemStoreLAB> mslabs = new ArrayList<MemStoreLAB>();
+    List<MemStoreLAB> mslabs = new ArrayList<>();
     if (!conf.getBoolean(MemStoreLAB.USEMSLAB_KEY, MemStoreLAB.USEMSLAB_DEFAULT)) {
       return null;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java
index 11e46a4..874ca44 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java
@@ -121,8 +121,7 @@ public class ServerNonceManager {
    * which is a realistic worst case. If it's much worse, we could use some sort of memory
    * limit and cleanup.
    */
-  private ConcurrentHashMap<NonceKey, OperationContext> nonces =
-      new ConcurrentHashMap<NonceKey, OperationContext>();
+  private ConcurrentHashMap<NonceKey, OperationContext> nonces = new ConcurrentHashMap<>();

   private int deleteNonceGracePeriod;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java
index f19f26f..bdae05a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java
@@ -58,7 +58,7 @@ public class ShutdownHook {
    * to be executed after the last regionserver referring to a given filesystem
    * stops. We keep track of the # of regionserver references in values of the map.
    */
-  private final static Map<Runnable, Integer> fsShutdownHooks = new HashMap<Runnable, Integer>();
+  private final static Map<Runnable, Integer> fsShutdownHooks = new HashMap<>();

   /**
    * Install a shutdown hook that calls stop on the passed Stoppable

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index 52811f6..ca7dfd4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -125,7 +125,7 @@ public class StoreFileScanner implements KeyValueScanner {
   public static List<StoreFileScanner> getScannersForStoreFiles(Collection<StoreFile> files,
       boolean cacheBlocks, boolean usePread, boolean isCompaction, boolean canUseDrop,
       ScanQueryMatcher matcher, long readPt, boolean isPrimaryReplica) throws IOException {
-    List<StoreFileScanner> scanners = new ArrayList<StoreFileScanner>(files.size());
+    List<StoreFileScanner> scanners = new ArrayList<>(files.size());
     List<StoreFile> sorted_files = new ArrayList<>(files);
     Collections.sort(sorted_files, StoreFile.Comparators.SEQ_ID);
     for (int i = 0; i < sorted_files.size(); i++) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java
index abfd3fc..23fae6a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java
@@ -119,7 +119,7 @@ abstract class StoreFlusher {
     ScannerContext scannerContext =
         ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();

-    List<Cell> kvs = new ArrayList<Cell>();
+    List<Cell> kvs = new ArrayList<>();
     boolean hasMore;
     String flushName = ThroughputControlUtil.getNameForThrottling(store, "flush");
     // no control on system table (such as meta, namespace, etc) flush

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 5c21a41..99ec30e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -92,7 +92,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner

   // Collects all the KVHeap that are eagerly getting closed during the
   // course of a scan
-  protected List<KeyValueHeap> heapsForDelayedClose = new ArrayList<KeyValueHeap>();
+  protected List<KeyValueHeap> heapsForDelayedClose = new ArrayList<>();

   /**
    * The number of KVs seen by the scanner. Includes explicitly skipped KVs, but not
@@ -131,9 +131,9 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
   // Indicates whether there was flush during the course of the scan
   protected volatile boolean flushed = false;
   // generally we get one file from a flush
-  protected List<StoreFile> flushedStoreFiles = new ArrayList<StoreFile>(1);
+  protected List<StoreFile> flushedStoreFiles = new ArrayList<>(1);
   // The current list of scanners
-  protected List<KeyValueScanner> currentScanners = new ArrayList<KeyValueScanner>();
+  protected List<KeyValueScanner> currentScanners = new ArrayList<>();
   // flush update lock
   private ReentrantLock flushLock = new ReentrantLock();
@@ -428,8 +428,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
       filesOnly = false;
     }

-    List<KeyValueScanner> scanners =
-        new ArrayList<KeyValueScanner>(allScanners.size());
+    List<KeyValueScanner> scanners = new ArrayList<>(allScanners.size());

     // We can only exclude store files based on TTL if minVersions is set to 0.
     // Otherwise, we might have to return KVs that have technically expired.
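Note that in the RegionServicesForStores hunk above the LinkedBlockingQueue gains a diamond while the anonymous ThreadFactory is left untouched: ThreadFactory takes no type arguments, and even for generic types the diamond cannot be combined with an anonymous class until Java 9. A short sketch of that limitation (hypothetical names, assuming the Java 7/8 language level this branch targeted):

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    public class AnonymousClassLimit {
      // Fine on Java 7+: ordinary instantiation with a diamond.
      List<String> names = new ArrayList<>();

      // Must stay explicit on Java 7/8: writing
      // new Comparator<>() { ... } here only compiles from Java 9 onward.
      Comparator<String> byLength = new Comparator<String>() {
        @Override
        public int compare(String a, String b) {
          return Integer.compare(a.length(), b.length());
        }
      };
    }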
@@ -940,8 +939,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
     if (scanners.isEmpty()) return;
     int storeFileScannerCount = scanners.size();
     CountDownLatch latch = new CountDownLatch(storeFileScannerCount);
-    List<ParallelSeekHandler> handlers =
-        new ArrayList<ParallelSeekHandler>(storeFileScannerCount);
+    List<ParallelSeekHandler> handlers = new ArrayList<>(storeFileScannerCount);
     for (KeyValueScanner scanner : scanners) {
       if (scanner instanceof StoreFileScanner) {
         ParallelSeekHandler seekHandler = new ParallelSeekHandler(scanner, kv,
@@ -972,7 +970,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
    * @return all scanners in no particular order
    */
   List<KeyValueScanner> getAllScannersForTesting() {
-    List<KeyValueScanner> allScanners = new ArrayList<KeyValueScanner>();
+    List<KeyValueScanner> allScanners = new ArrayList<>();
     KeyValueScanner current = heap.getCurrentForTesting();
     if (current != null)
       allScanners.add(current);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java
index a2a0dcc..0ec41b3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java
@@ -79,7 +79,7 @@ public class StorefileRefresherChore extends ScheduledChore {
       throw new RuntimeException(REGIONSERVER_STOREFILE_REFRESH_PERIOD +
           " should be set smaller than half of " + TimeToLiveHFileCleaner.TTL_CONF_KEY);
     }
-    lastRefreshTimes = new HashMap<String, Long>();
+    lastRefreshTimes = new HashMap<>();
   }

   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java
index 2662dd1..7392492 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java
@@ -143,7 +143,7 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter {
       byte[] majorRangeFrom, byte[] majorRangeTo) throws IOException {
     super(comparator);
     this.boundaries = targetBoundaries;
-    this.existingWriters = new ArrayList<StoreFileWriter>(this.boundaries.size() - 1);
+    this.existingWriters = new ArrayList<>(this.boundaries.size() - 1);
     // "major" range (range for which all files are included) boundaries, if any,
     // must match some target boundaries, let's find them.
     assert (majorRangeFrom == null) == (majorRangeTo == null);
@@ -283,8 +283,8 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter {
     this.left = left;
     this.right = right;
     int preallocate = Math.min(this.targetCount, 64);
-    this.existingWriters = new ArrayList<StoreFileWriter>(preallocate);
-    this.boundaries = new ArrayList<byte[]>(preallocate + 1);
+    this.existingWriters = new ArrayList<>(preallocate);
+    this.boundaries = new ArrayList<>(preallocate + 1);
   }

   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java
index 9255634..1e78ab2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java
@@ -84,7 +84,7 @@ public class StripeStoreEngine extends StoreEngine<StripeStoreFlusher,
       this.stripeRequest = compactionPolicy.selectCompaction(
           storeFileManager, filesCompacting, mayUseOffPeak);
       this.request = (this.stripeRequest == null)
-          ? new CompactionRequest(new ArrayList<StoreFile>()) : this.stripeRequest.getRequest();
+          ? new CompactionRequest(new ArrayList<>()) : this.stripeRequest.getRequest();
       return this.stripeRequest != null;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java
index 1b3c9f8..4a719f3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java
@@ -100,8 +100,7 @@ public class StripeStoreFileManager
    * same index, except the last one. Inside each list, the files are in reverse order by
    * seqNum. Note that the length of this is one higher than that of stripeEndKeys.
    */
-  public ArrayList<ImmutableList<StoreFile>> stripeFiles
-    = new ArrayList<ImmutableList<StoreFile>>();
+  public ArrayList<ImmutableList<StoreFile>> stripeFiles = new ArrayList<>();
   /** Level 0. The files are in reverse order by seqNum. */
   public ImmutableList<StoreFile> level0Files = ImmutableList.<StoreFile>of();
@@ -112,8 +111,8 @@ public class StripeStoreFileManager
   private State state = null;

   /** Cached file metadata (or overrides as the case may be) */
-  private HashMap<StoreFile, byte[]> fileStarts = new HashMap<StoreFile, byte[]>();
-  private HashMap<StoreFile, byte[]> fileEnds = new HashMap<StoreFile, byte[]>();
+  private HashMap<StoreFile, byte[]> fileStarts = new HashMap<>();
+  private HashMap<StoreFile, byte[]> fileEnds = new HashMap<>();
   /** Normally invalid key is null, but in the map null is the result for "no key"; so use
    * the following constant value in these maps instead. Note that this is a constant and
    * we use it to compare by reference when we read from the map.
   */
@@ -277,7 +276,7 @@ public class StripeStoreFileManager
   }

   private byte[] getSplitPointFromAllFiles() throws IOException {
-    ConcatenatedLists<StoreFile> sfs = new ConcatenatedLists<StoreFile>();
+    ConcatenatedLists<StoreFile> sfs = new ConcatenatedLists<>();
     sfs.addSublist(state.level0Files);
     sfs.addAllSublists(state.stripeFiles);
     if (sfs.isEmpty()) return null;
@@ -305,7 +304,7 @@ public class StripeStoreFileManager
       return state.allFilesCached; // We need to read all files.
     }

-    ConcatenatedLists<StoreFile> result = new ConcatenatedLists<StoreFile>();
+    ConcatenatedLists<StoreFile> result = new ConcatenatedLists<>();
     result.addAllSublists(state.stripeFiles.subList(firstStripe, lastStripe + 1));
     result.addSublist(state.level0Files);
     return result;
@@ -385,9 +384,8 @@ public class StripeStoreFileManager
    */
   private void loadUnclassifiedStoreFiles(List<StoreFile> storeFiles) {
     LOG.debug("Attempting to load " + storeFiles.size() + " store files.");
-    TreeMap<byte[], ArrayList<StoreFile>> candidateStripes =
-        new TreeMap<byte[], ArrayList<StoreFile>>(MAP_COMPARATOR);
-    ArrayList<StoreFile> level0Files = new ArrayList<StoreFile>();
+    TreeMap<byte[], ArrayList<StoreFile>> candidateStripes = new TreeMap<>(MAP_COMPARATOR);
+    ArrayList<StoreFile> level0Files = new ArrayList<>();
     // Separate the files into tentative stripes; then validate. Currently, we rely on metadata.
     // If needed, we could dynamically determine the stripes in future.
     for (StoreFile sf : storeFiles) {
@@ -405,7 +403,7 @@ public class StripeStoreFileManager
       } else {
         ArrayList<StoreFile> stripe = candidateStripes.get(endRow);
         if (stripe == null) {
-          stripe = new ArrayList<StoreFile>();
+          stripe = new ArrayList<>();
           candidateStripes.put(endRow, stripe);
         }
         insertFileIntoStripe(stripe, sf);
@@ -477,9 +475,9 @@ public class StripeStoreFileManager
     // Copy the results into the fields.
     State state = new State();
     state.level0Files = ImmutableList.copyOf(level0Files);
-    state.stripeFiles = new ArrayList<ImmutableList<StoreFile>>(candidateStripes.size());
+    state.stripeFiles = new ArrayList<>(candidateStripes.size());
     state.stripeEndRows = new byte[Math.max(0, candidateStripes.size() - 1)][];
-    ArrayList<StoreFile> newAllFiles = new ArrayList<StoreFile>(level0Files);
+    ArrayList<StoreFile> newAllFiles = new ArrayList<>(level0Files);
     int i = candidateStripes.size() - 1;
     for (Map.Entry<byte[], ArrayList<StoreFile>> entry : candidateStripes.entrySet()) {
       state.stripeFiles.add(ImmutableList.copyOf(entry.getValue()));
@@ -685,7 +683,7 @@ public class StripeStoreFileManager
       this.nextWasCalled = false;
       List<StoreFile> src = components.get(currentComponent);
       if (src instanceof ImmutableList<?>) {
-        src = new ArrayList<StoreFile>(src);
+        src = new ArrayList<>(src);
         components.set(currentComponent, src);
       }
       src.remove(indexWithinComponent);
@@ -711,13 +709,12 @@ public class StripeStoreFileManager
     private Collection<StoreFile> compactedFiles = null;
     private Collection<StoreFile> results = null;

-    private List<StoreFile> l0Results = new ArrayList<StoreFile>();
+    private List<StoreFile> l0Results = new ArrayList<>();
     private final boolean isFlush;

     public CompactionOrFlushMergeCopy(boolean isFlush) {
       // Create a lazy mutable copy (other fields are so lazy they start out as nulls).
-      this.stripeFiles = new ArrayList<List<StoreFile>>(
-          StripeStoreFileManager.this.state.stripeFiles);
+      this.stripeFiles = new ArrayList<>(StripeStoreFileManager.this.state.stripeFiles);
       this.isFlush = isFlush;
     }
@@ -755,15 +752,14 @@ public class StripeStoreFileManager
           : ImmutableList.copyOf(this.level0Files);
       newState.stripeEndRows = (this.stripeEndRows == null) ? oldState.stripeEndRows
           : this.stripeEndRows.toArray(new byte[this.stripeEndRows.size()][]);
-      newState.stripeFiles = new ArrayList<ImmutableList<StoreFile>>(this.stripeFiles.size());
+      newState.stripeFiles = new ArrayList<>(this.stripeFiles.size());
       for (List<StoreFile> newStripe : this.stripeFiles) {
         newState.stripeFiles.add(newStripe instanceof ImmutableList<?>
             ? (ImmutableList<StoreFile>)newStripe : ImmutableList.copyOf(newStripe));
       }

-      List<StoreFile> newAllFiles = new ArrayList<StoreFile>(oldState.allFilesCached);
-      List<StoreFile> newAllCompactedFiles =
-          new ArrayList<StoreFile>(oldState.allCompactedFilesCached);
+      List<StoreFile> newAllFiles = new ArrayList<>(oldState.allFilesCached);
+      List<StoreFile> newAllCompactedFiles = new ArrayList<>(oldState.allCompactedFilesCached);
       if (!isFlush) {
         newAllFiles.removeAll(compactedFiles);
         if (delCompactedFiles) {
@@ -803,7 +799,7 @@ public class StripeStoreFileManager
       List<StoreFile> stripeCopy = this.stripeFiles.get(index);
       ArrayList<StoreFile> result = null;
       if (stripeCopy instanceof ImmutableList<?>) {
-        result = new ArrayList<StoreFile>(stripeCopy);
+        result = new ArrayList<>(stripeCopy);
         this.stripeFiles.set(index, result);
       } else {
         result = (ArrayList<StoreFile>)stripeCopy;
@@ -816,7 +812,7 @@ public class StripeStoreFileManager
      */
     private final ArrayList<StoreFile> getLevel0Copy() {
       if (this.level0Files == null) {
-        this.level0Files = new ArrayList<StoreFile>(StripeStoreFileManager.this.state.level0Files);
+        this.level0Files = new ArrayList<>(StripeStoreFileManager.this.state.level0Files);
       }
       return this.level0Files;
     }
@@ -849,7 +845,7 @@ public class StripeStoreFileManager

       // Make a new candidate stripe.
       if (newStripes == null) {
-        newStripes = new TreeMap<byte[], StoreFile>(MAP_COMPARATOR);
+        newStripes = new TreeMap<>(MAP_COMPARATOR);
       }
       StoreFile oldSf = newStripes.put(endRow, sf);
       if (oldSf != null) {
@@ -893,8 +889,7 @@ public class StripeStoreFileManager
         TreeMap<byte[], StoreFile> newStripes) throws IOException {
       // Validate that the removed and added aggregate ranges still make for a full key space.
       boolean hasStripes = !this.stripeFiles.isEmpty();
-      this.stripeEndRows = new ArrayList<byte[]>(
-          Arrays.asList(StripeStoreFileManager.this.state.stripeEndRows));
+      this.stripeEndRows = new ArrayList<>(Arrays.asList(StripeStoreFileManager.this.state.stripeEndRows));
       int removeFrom = 0;
       byte[] firstStartRow = startOf(newStripes.firstEntry().getValue());
       byte[] lastEndRow = newStripes.lastKey();
@@ -917,7 +912,7 @@ public class StripeStoreFileManager
       int removeTo = findStripeIndexByEndRow(lastEndRow);
       if (removeTo < 0) throw new IOException("Compaction is trying to add a bad range.");
       // See if there are files in the stripes we are trying to replace.
-      ArrayList<StoreFile> conflictingFiles = new ArrayList<StoreFile>();
+      ArrayList<StoreFile> conflictingFiles = new ArrayList<>();
       for (int removeIndex = removeTo; removeIndex >= removeFrom; --removeIndex) {
         conflictingFiles.addAll(this.stripeFiles.get(removeIndex));
       }
@@ -973,7 +968,7 @@ public class StripeStoreFileManager
         }
       }
       // Add the new stripe.
-      ArrayList<StoreFile> tmp = new ArrayList<StoreFile>();
+      ArrayList<StoreFile> tmp = new ArrayList<>();
       tmp.add(newStripe.getValue());
       stripeFiles.add(insertAt, tmp);
       previousEndRow = newStripe.getKey();
@@ -992,8 +987,8 @@ public class StripeStoreFileManager

   @Override
   public List<byte[]> getStripeBoundaries() {
-    if (this.state.stripeFiles.isEmpty()) return new ArrayList<byte[]>();
-    ArrayList<byte[]> result = new ArrayList<byte[]>(this.state.stripeEndRows.length + 2);
+    if (this.state.stripeFiles.isEmpty()) return new ArrayList<>();
+    ArrayList<byte[]> result = new ArrayList<>(this.state.stripeEndRows.length + 2);
     result.add(OPEN_KEY);
     Collections.addAll(result, this.state.stripeEndRows);
     result.add(OPEN_KEY);
@@ -1033,7 +1028,7 @@ public class StripeStoreFileManager
           LOG.info("Found an expired store file: " + sf.getPath()
               + " whose maxTimeStamp is " + fileTs + ", which is below " + maxTs);
           if (expiredStoreFiles == null) {
-            expiredStoreFiles = new ArrayList<StoreFile>();
+            expiredStoreFiles = new ArrayList<>();
           }
           expiredStoreFiles.add(sf);
         }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
index 22c3ce7..85bae9d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
@@ -57,7 +57,7 @@ public class StripeStoreFlusher extends StoreFlusher {
   @Override
   public List<Path> flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushSeqNum,
       MonitoredTask status, ThroughputController throughputController) throws IOException {
-    List<Path> result = new ArrayList<Path>();
+    List<Path> result = new ArrayList<>();
     int cellsCount = snapshot.getCellsCount();
     if (cellsCount == 0) return result; // don't flush if there are no entries

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
index 6a3ff4a..3d4f9a1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
@@ -96,7 +96,7 @@ public class CompactionRequest implements Comparable<CompactionRequest> {
    * @return The result (may be "this" or "other").
    */
   public CompactionRequest combineWith(CompactionRequest other) {
-    this.filesToCompact = new ArrayList<StoreFile>(other.getFiles());
+    this.filesToCompact = new ArrayList<>(other.getFiles());
     this.isOffPeak = other.isOffPeak;
     this.isMajor = other.isMajor;
     this.priority = other.priority;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index 1fe5077..d72529a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
@@ -294,7 +294,7 @@ public abstract class Compactor<T extends CellSink> {
     if (this.conf.getBoolean("hbase.regionserver.compaction.private.readers", true)) {
       // clone all StoreFiles, so we'll do the compaction on a independent copy of StoreFiles,
       // HFiles, and their readers
-      readersToClose = new ArrayList<StoreFile>(request.getFiles().size());
+      readersToClose = new ArrayList<>(request.getFiles().size());
       for (StoreFile f : request.getFiles()) {
         StoreFile clonedStoreFile = f.cloneForReader();
         // create the reader after the store file is cloned in case
@@ -320,7 +320,7 @@ public abstract class Compactor<T extends CellSink> {
       scanner = postCreateCoprocScanner(request, scanType, scanner, user);
       if (scanner == null) {
         // NULL scanner returned from coprocessor hooks means skip normal processing.
-        return new ArrayList<Path>();
+        return new ArrayList<>();
       }
       boolean cleanSeqId = false;
       if (fd.minSeqIdToKeep > 0) {
@@ -413,7 +413,7 @@ public abstract class Compactor<T extends CellSink> {
     long bytesWrittenProgressForShippedCall = 0;
     // Since scanner.next() can return 'false' but still be delivering data,
     // we have to use a do/while loop.
-    List<Cell> cells = new ArrayList<Cell>();
+    List<Cell> cells = new ArrayList<>();
     long closeCheckSizeLimit = HStore.getCloseCheckInterval();
     long lastMillis = 0;
     if (LOG.isDebugEnabled()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
index e37a7fe..6413ee6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
@@ -99,7 +99,7 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy {
   @VisibleForTesting
   public boolean needsCompaction(final Collection<StoreFile> storeFiles,
       final List<StoreFile> filesCompacting) {
-    ArrayList<StoreFile> candidates = new ArrayList<StoreFile>(storeFiles);
+    ArrayList<StoreFile> candidates = new ArrayList<>(storeFiles);
     try {
       return !selectMinorCompaction(candidates, false, true).getFiles().isEmpty();
     } catch (Exception e) {
@@ -222,7 +222,7 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy {
       // we put them in the same window as the last file in increasing order
       maxTimestampSeen = Math.max(maxTimestampSeen,
         storeFile.getMaximumTimestamp() == null? Long.MIN_VALUE : storeFile.getMaximumTimestamp());
-      storefileMaxTimestampPairs.add(new Pair<StoreFile, Long>(storeFile, maxTimestampSeen));
+      storefileMaxTimestampPairs.add(new Pair<>(storeFile, maxTimestampSeen));
     }
     Collections.reverse(storefileMaxTimestampPairs);
@@ -299,7 +299,7 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy {
         file.getMinimumTimestamp() == null ? Long.MAX_VALUE : file.getMinimumTimestamp());
     }

-    List<Long> boundaries = new ArrayList<Long>();
+    List<Long> boundaries = new ArrayList<>();

     // Add startMillis of all windows between now and min timestamp
     for (CompactionWindow window = getIncomingWindow(now);
@@ -317,7 +317,7 @@
    */
   private static List<Long> getCompactionBoundariesForMinor(CompactionWindow window,
       boolean singleOutput) {
-    List<Long> boundaries = new ArrayList<Long>();
+    List<Long> boundaries = new ArrayList<>();
     boundaries.add(Long.MIN_VALUE);
     if (!singleOutput) {
       boundaries.add(window.startMillis());

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java
index 8b5aa31..0bd917a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java
@@ -53,7 +53,7 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy {
   @Override
   protected final ArrayList<StoreFile> applyCompactionPolicy(final ArrayList<StoreFile> candidates,
       final boolean mayUseOffPeak, final boolean mightBeStuck) throws IOException {
-    return new ArrayList<StoreFile>(applyCompactionPolicy(candidates, mightBeStuck,
+    return new ArrayList<>(applyCompactionPolicy(candidates, mightBeStuck,
         mayUseOffPeak, comConf.getMinFilesToCompact(), comConf.getMaxFilesToCompact()));
   }
@@ -64,8 +64,8 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy {
         ? comConf.getCompactionRatioOffPeak() : comConf.getCompactionRatio();

     // Start off choosing nothing.
-    List<StoreFile> bestSelection = new ArrayList<StoreFile>(0);
-    List<StoreFile> smallest = mightBeStuck ? new ArrayList<StoreFile>(0) : null;
+    List<StoreFile> bestSelection = new ArrayList<>(0);
+    List<StoreFile> smallest = mightBeStuck ? new ArrayList<>(0) : null;
     long bestSize = 0;
     long smallestSize = Long.MAX_VALUE;
@@ -117,12 +117,12 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy {
     if (bestSelection.isEmpty() && mightBeStuck) {
       LOG.debug("Exploring compaction algorithm has selected " + smallest.size()
           + " files of size "+ smallestSize + " because the store might be stuck");
-      return new ArrayList<StoreFile>(smallest);
+      return new ArrayList<>(smallest);
     }
     LOG.debug("Exploring compaction algorithm has selected " + bestSelection.size()
         + " files of size " + bestSize + " starting at candidate #" + bestStart +
         " after considering " + opts + " permutations with " + optsInRatio + " in ratio");
-    return new ArrayList<StoreFile>(bestSelection);
+    return new ArrayList<>(bestSelection);
   }

   private boolean isBetterSelection(List<StoreFile> bestSelection,

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java
index d339898..97b8387 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java
@@ -117,7 +117,7 @@ public class FIFOCompactionPolicy extends ExploringCompactionPolicy {
   private Collection<StoreFile> getExpiredStores(Collection<StoreFile> files,
       Collection<StoreFile> filesCompacting) {
     long currentTime = EnvironmentEdgeManager.currentTime();
-    Collection<StoreFile> expiredStores = new ArrayList<StoreFile>();
+    Collection<StoreFile> expiredStores = new ArrayList<>();
     for(StoreFile sf: files){
       // Check MIN_VERSIONS is in HStore removeUnneededFiles
       Long maxTs = sf.getReader().getMaxTimestamp();

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
index 77b0af8..42b57a4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
@@ -43,7 +43,7 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy {

   public List<StoreFile> preSelectCompactionForCoprocessor(final Collection<StoreFile> candidates,
       final List<StoreFile> filesCompacting) {
-    return getCurrentEligibleFiles(new ArrayList<StoreFile>(candidates), filesCompacting);
+    return getCurrentEligibleFiles(new ArrayList<>(candidates), filesCompacting);
   }

   /**
@@ -56,7 +56,7 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy {
       final List<StoreFile> filesCompacting, final boolean isUserCompaction,
       final boolean mayUseOffPeak, final boolean forceMajor) throws IOException {
     // Preliminary compaction subject to filters
-    ArrayList<StoreFile> candidateSelection = new ArrayList<StoreFile>(candidateFiles);
+    ArrayList<StoreFile> candidateSelection = new ArrayList<>(candidateFiles);
     // Stuck and not compacting enough (estimate). It is not guaranteed that we will be
     // able to compact more if stuck and compacting, because ratio policy excludes some
     // non-compacting files from consideration during compaction (see getCurrentEligibleFiles).

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java
index a553cf6..0b66d3d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java
@@ -68,7 +68,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
     // We sincerely hope nobody is messing with us with their coprocessors.
     // If they do, they are very likely to shoot themselves in the foot.
     // We'll just exclude all the filesCompacting from the list.
-    ArrayList<StoreFile> candidateFiles = new ArrayList<StoreFile>(si.getStorefiles());
+    ArrayList<StoreFile> candidateFiles = new ArrayList<>(si.getStorefiles());
     candidateFiles.removeAll(filesCompacting);
     return candidateFiles;
   }
@@ -217,7 +217,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
       LOG.debug("No good compaction is possible in any stripe");
       return null;
     }
-    List<StoreFile> filesToCompact = new ArrayList<StoreFile>(bqSelection);
+    List<StoreFile> filesToCompact = new ArrayList<>(bqSelection);
     // See if we can, and need to, split this stripe.
     int targetCount = 1;
     long targetKvs = Long.MAX_VALUE;
@@ -246,7 +246,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
       assert hasAllFiles;
       List<StoreFile> l0Files = si.getLevel0Files();
       LOG.debug("Adding " + l0Files.size() + " files to compaction to be able to drop deletes");
-      ConcatenatedLists<StoreFile> sfs = new ConcatenatedLists<StoreFile>();
+      ConcatenatedLists<StoreFile> sfs = new ConcatenatedLists<>();
       sfs.addSublist(filesToCompact);
       sfs.addSublist(l0Files);
       req = new BoundaryStripeCompactionRequest(sfs, si.getStripeBoundaries());
@@ -345,7 +345,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
     }
     LOG.debug("Merging " + bestLength + " stripes to delete expired store files");
     int endIndex = bestStart + bestLength - 1;
-    ConcatenatedLists<StoreFile> sfs = new ConcatenatedLists<StoreFile>();
+    ConcatenatedLists<StoreFile> sfs = new ConcatenatedLists<>();
     sfs.addAllSublists(stripes.subList(bestStart, endIndex + 1));
     SplitStripeCompactionRequest result = new SplitStripeCompactionRequest(
         sfs, si.getStartRow(bestStart), si.getEndRow(endIndex), 1, Long.MAX_VALUE);
@@ -388,7 +388,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
       splitCount += 1.0;
     }
     long kvCount = (long)(getTotalKvCount(files) / splitCount);
-    return new Pair<Long, Integer>(kvCount, (int)Math.ceil(splitCount));
+    return new Pair<>(kvCount, (int)Math.ceil(splitCount));
   }

   /** Stripe compaction request wrapper.
   */

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java
index eb6e503..c3976b5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java
@@ -51,6 +51,9 @@ public class ScanDeleteTracker implements DeleteTracker {
   protected long familyStamp = 0L;
   protected SortedSet<Long> familyVersionStamps = new TreeSet<Long>();
   protected Cell deleteCell = null;
+  protected byte[] deleteBuffer = null;
+  protected int deleteOffset = 0;
+  protected int deleteLength = 0;
   protected byte deleteType = 0;
   protected long deleteTimestamp = 0L;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
index aa1205a..7b43c3d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
@@ -271,7 +271,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager {
     private final ExecutorCompletionService<Void> taskPool;
     private final ThreadPoolExecutor executor;
     private volatile boolean stopped;
-    private final List<Future<Void>> futures = new ArrayList<Future<Void>>();
+    private final List<Future<Void>> futures = new ArrayList<>();
     private final String name;

     SnapshotSubprocedurePool(String name, Configuration conf, Abortable abortable) {
@@ -283,10 +283,10 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager {
       int threads = conf.getInt(CONCURENT_SNAPSHOT_TASKS_KEY, DEFAULT_CONCURRENT_SNAPSHOT_TASKS);
       this.name = name;
       executor = new ThreadPoolExecutor(threads, threads, keepAlive, TimeUnit.MILLISECONDS,
-          new LinkedBlockingQueue<Runnable>(), new DaemonThreadFactory("rs("
+          new LinkedBlockingQueue<>(), new DaemonThreadFactory("rs("
               + name + ")-snapshot-pool"));
       executor.allowCoreThreadTimeOut(true);
-      taskPool = new ExecutorCompletionService<Void>(executor);
+      taskPool = new ExecutorCompletionService<>(executor);
     }

     boolean hasTasks() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java
index 8867611..ca76ad5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java
@@ -76,8 +76,7 @@ public abstract class PressureAwareThroughputController extends Configured imple
   private volatile double maxThroughput;
   private volatile double maxThroughputPerOperation;

-  protected final ConcurrentMap<String, ActiveOperation> activeOperations =
-      new ConcurrentHashMap<String, ActiveOperation>();
+  protected final ConcurrentMap<String, ActiveOperation> activeOperations = new ConcurrentHashMap<>();

   @Override
   public abstract void setup(final RegionServerServices server);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index bf283f8..f32d0ed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -155,7 +155,7 @@ public abstract class AbstractFSWAL<W> implements WAL {
   protected final Configuration conf;

   /** Listeners that are called on WAL events. */
-  protected final List<WALActionsListener> listeners = new CopyOnWriteArrayList<WALActionsListener>();
+  protected final List<WALActionsListener> listeners = new CopyOnWriteArrayList<>();

   /**
    * Class that does accounting of sequenceids in WAL subsystem. Holds oldest outstanding sequence
@@ -413,7 +413,7 @@ public abstract class AbstractFSWAL<W> implements WAL {
         .toNanos(conf.getLong("hbase.regionserver.hlog.sync.timeout", DEFAULT_WAL_SYNC_TIMEOUT_MS));
     int maxHandlersCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, 200);
     // Presize our map of SyncFutures by handler objects.
-    this.syncFuturesByHandler = new ConcurrentHashMap<Thread, SyncFuture>(maxHandlersCount);
+    this.syncFuturesByHandler = new ConcurrentHashMap<>(maxHandlersCount);
     this.implClassName = getClass().getSimpleName();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index 83d93fe..c3e96cf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -180,7 +180,7 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {

   private final Deque<FSWALEntry> unackedAppends = new ArrayDeque<>();

-  private final SortedSet<SyncFuture> syncFutures = new TreeSet<SyncFuture>(SEQ_COMPARATOR);
+  private final SortedSet<SyncFuture> syncFutures = new TreeSet<>(SEQ_COMPARATOR);

   // the highest txid of WAL entries being processed
   private long highestProcessedAppendTxid;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java
index a0ac8a2..e1f7b8f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java
@@ -156,7 +156,7 @@ public class AsyncProtobufLogWriter extends AbstractProtobufLogWriter
   }

   private long write(Consumer<CompletableFuture<Long>> action) throws IOException {
-    CompletableFuture<Long> future = new CompletableFuture<Long>();
+    CompletableFuture<Long> future = new CompletableFuture<>();
     eventLoop.execute(() -> action.accept(future));
     try {
       return future.get().longValue();

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index f5a3382..f0e29c1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -226,7 +226,7 @@ public class FSHLog extends AbstractFSWAL<Writer> {
     String hostingThreadName = Thread.currentThread().getName();
     // Using BlockingWaitStrategy. Stuff that is going on here takes so long it makes no sense
     // spinning as other strategies do.
-    this.disruptor = new Disruptor<RingBufferTruck>(RingBufferTruck::new,
+    this.disruptor = new Disruptor<>(RingBufferTruck::new,
         getPreallocatedEventCount(), Threads.getNamedThreadFactory(hostingThreadName + ".append"),
         ProducerType.MULTI, new BlockingWaitStrategy());
     // Advance the ring buffer sequence so that it starts from 1 instead of 0,
@@ -489,7 +489,7 @@ public class FSHLog extends AbstractFSWAL<Writer> {
       // the meta table when succesful (i.e. sync), closing handlers -- etc. These are usually
      // much fewer in number than the user-space handlers so Q-size should be user handlers plus
      // some space for these other handlers. Lets multiply by 3 for good-measure.
-      this.syncFutures = new LinkedBlockingQueue<SyncFuture>(maxHandlersCount * 3);
+      this.syncFutures = new LinkedBlockingQueue<>(maxHandlersCount * 3);
     }

     void offer(final long sequence, final SyncFuture[] syncFutures, final int syncFutureCount) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
index d10220d..f445059 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
@@ -88,7 +88,7 @@ public class ProtobufLogReader extends ReaderBase {
   // maximum size of the wal Trailer in bytes. If a user writes/reads a trailer with size larger
   // than this size, it is written/read respectively, with a WARN message in the log.
protected int trailerWarnSize; - private static List<String> writerClsNames = new ArrayList<String>(); + private static List<String> writerClsNames = new ArrayList<>(); static { writerClsNames.add(ProtobufLogWriter.class.getSimpleName()); writerClsNames.add(AsyncProtobufLogWriter.class.getSimpleName()); http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java index 62bc96e..f9ebed7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java @@ -44,7 +44,7 @@ public class SecureProtobufLogReader extends ProtobufLogReader { private static final Log LOG = LogFactory.getLog(SecureProtobufLogReader.class); private Decryptor decryptor = null; - private static List<String> writerClsNames = new ArrayList<String>(); + private static List<String> writerClsNames = new ArrayList<>(); static { writerClsNames.add(ProtobufLogWriter.class.getSimpleName()); writerClsNames.add(SecureProtobufLogWriter.class.getSimpleName()); http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java index 8226b82..cd73eb3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java @@ -154,7 +154,7 @@ class SequenceIdAccounting { */ Map<byte[], Long> resetHighest() { Map<byte[], Long> old = this.highestSequenceIds; - this.highestSequenceIds = new HashMap<byte[], Long>(); + this.highestSequenceIds = new HashMap<>(); return old; } @@ -422,7 +422,7 @@ class SequenceIdAccounting { long lowest = getLowestSequenceId(m); if (lowest != HConstants.NO_SEQNUM && lowest <= e.getValue()) { if (toFlush == null) { - toFlush = new ArrayList<byte[]>(); + toFlush = new ArrayList<>(); } toFlush.add(e.getKey()); } http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java index f79fa01..7a8b3d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java @@ -130,7 +130,7 @@ public class WALEdit implements Writable, HeapSize { public WALEdit(int cellCount, boolean isReplay) { this.isReplay = isReplay; - cells = new ArrayList<Cell>(cellCount); + cells = new ArrayList<>(cellCount); } /** @@ -222,7 +222,7 @@ public class WALEdit implements Writable, HeapSize { int numFamilies = 
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
index f79fa01..7a8b3d5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
@@ -130,7 +130,7 @@ public class WALEdit implements Writable, HeapSize {

   public WALEdit(int cellCount, boolean isReplay) {
     this.isReplay = isReplay;
-    cells = new ArrayList<Cell>(cellCount);
+    cells = new ArrayList<>(cellCount);
   }

   /**
@@ -222,7 +222,7 @@ public class WALEdit implements Writable, HeapSize {
       int numFamilies = in.readInt();
       if (numFamilies > 0) {
         if (scopes == null) {
-          scopes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
+          scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
         }
         for (int i = 0; i < numFamilies; i++) {
           byte[] fam = Bytes.readByteArray(in);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
index 4dee9f1..f451207 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
@@ -111,7 +111,7 @@ public class WALEditsReplaySink {
       if (entriesByRegion.containsKey(loc.getRegionInfo())) {
         regionEntries = entriesByRegion.get(loc.getRegionInfo());
       } else {
-        regionEntries = new ArrayList<Entry>();
+        regionEntries = new ArrayList<>();
         entriesByRegion.put(loc.getRegionInfo(), regionEntries);
       }
       regionEntries.add(entry);
@@ -160,7 +160,7 @@ public class WALEditsReplaySink {
     try {
       RpcRetryingCallerFactory factory = RpcRetryingCallerFactory.instantiate(conf, null);
       ReplayServerCallable<ReplicateWALEntryResponse> callable =
-          new ReplayServerCallable<ReplicateWALEntryResponse>(this.conn, this.rpcControllerFactory,
+          new ReplayServerCallable<>(this.conn, this.rpcControllerFactory,
             this.tableName, regionLoc, entries);
       factory.<ReplicateWALEntryResponse> newCaller().callWithRetries(callable, this.replayTimeout);
     } catch (IOException ie) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
index 1045c1d..86fc1fa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
@@ -52,7 +52,7 @@ public class BulkLoadCellFilter {
     }
     List<StoreDescriptor> storesList = bld.getStoresList();
     // Copy the StoreDescriptor list and update it as storesList is a unmodifiableList
-    List<StoreDescriptor> copiedStoresList = new ArrayList<StoreDescriptor>(storesList);
+    List<StoreDescriptor> copiedStoresList = new ArrayList<>(storesList);
     Iterator<StoreDescriptor> copiedStoresListIterator = copiedStoresList.iterator();
     boolean anyStoreRemoved = false;
     while (copiedStoresListIterator.hasNext()) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java
index 1d67faa..f858e5d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java
@@ -43,7 +43,7 @@ public class ChainWALEntryFilter implements WALEntryFilter {
   }

   public ChainWALEntryFilter(List<WALEntryFilter> filters) {
-    ArrayList<WALEntryFilter> rawFilters = new ArrayList<WALEntryFilter>(filters.size());
+    ArrayList<WALEntryFilter> rawFilters = new ArrayList<>(filters.size());
     // flatten the chains
     for (WALEntryFilter filter : filters) {
       if (filter instanceof ChainWALEntryFilter) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
index 1a603e0..23df804 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
@@ -52,7 +52,7 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint
   private ZooKeeperWatcher zkw = null; // FindBugs: MT_CORRECTNESS

-  private List<ServerName> regionServers = new ArrayList<ServerName>(0);
+  private List<ServerName> regionServers = new ArrayList<>(0);
   private long lastRegionServerUpdate;

   protected void disconnect() {
@@ -151,7 +151,7 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint
     if (children == null) {
       return Collections.emptyList();
     }
-    List<ServerName> addresses = new ArrayList<ServerName>(children.size());
+    List<ServerName> addresses = new ArrayList<>(children.size());
     for (String child : children) {
       addresses.add(ServerName.parseServerName(child));
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index 9a1e2bc..2bedbfd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
@@ -80,8 +80,8 @@ public class DumpReplicationQueues extends Configured implements Tool {
   private long numWalsNotFound;

   public DumpReplicationQueues() {
-    deadRegionServers = new ArrayList<String>();
-    deletedQueues = new ArrayList<String>();
+    deadRegionServers = new ArrayList<>();
+    deletedQueues = new ArrayList<>();
     peersQueueSize = AtomicLongMap.create();
     totalSizeOfWALs = 0;
     numWalsNotFound = 0;
@@ -162,7 +162,7 @@ public class DumpReplicationQueues extends Configured implements Tool {
   public int run(String[] args) throws Exception {

     int errCode = -1;
-    LinkedList<String> argv = new LinkedList<String>();
+    LinkedList<String> argv = new LinkedList<>();
     argv.addAll(Arrays.asList(args));
     DumpOptions opts = parseOpts(argv);
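The ChainWALEntryFilter constructor above flattens nested chains at construction time, so evaluation later is one flat loop rather than a recursive descent through chains of chains. A sketch of the same idea using java.util.function.Predicate (this generic ChainFilter is illustrative only, not an HBase class):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Predicate;

    class ChainFilter<T> implements Predicate<T> {
      private final List<Predicate<T>> filters;

      // Flatten nested chains up front, the same trick the
      // ChainWALEntryFilter constructor uses above.
      ChainFilter(List<Predicate<T>> input) {
        List<Predicate<T>> raw = new ArrayList<>(input.size());
        for (Predicate<T> f : input) {
          if (f instanceof ChainFilter) {
            raw.addAll(((ChainFilter<T>) f).filters);
          } else {
            raw.add(f);
          }
        }
        this.filters = raw;
      }

      @Override
      public boolean test(T t) {
        // Short-circuit on the first rejecting filter.
        for (Predicate<T> f : filters) {
          if (!f.test(t)) {
            return false;
          }
        }
        return true;
      }
    }
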
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
index de3159f..ba12d53 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
@@ -127,7 +127,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
     this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY,
         HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT);
     this.exec = new ThreadPoolExecutor(maxThreads, maxThreads, 60, TimeUnit.SECONDS,
-        new LinkedBlockingQueue<Runnable>());
+        new LinkedBlockingQueue<>());
     this.exec.allowCoreThreadTimeOut(true);
     this.abortable = ctx.getAbortable();
@@ -190,7 +190,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
    */
   @Override
   public boolean replicate(ReplicateContext replicateContext) {
-    CompletionService<Integer> pool = new ExecutorCompletionService<Integer>(this.exec);
+    CompletionService<Integer> pool = new ExecutorCompletionService<>(this.exec);
     List<Entry> entries = replicateContext.getEntries();
     String walGroupId = replicateContext.getWalGroupId();
     int sleepMultiplier = 1;
@@ -212,12 +212,12 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
     // and number of current sinks
     int n = Math.min(Math.min(this.maxThreads, entries.size()/100+1), numSinks);

-    List<List<Entry>> entryLists = new ArrayList<List<Entry>>(n);
+    List<List<Entry>> entryLists = new ArrayList<>(n);
     if (n == 1) {
       entryLists.add(entries);
     } else {
       for (int i=0; i<n; i++) {
-        entryLists.add(new ArrayList<Entry>(entries.size()/n+1));
+        entryLists.add(new ArrayList<>(entries.size()/n+1));
       }
       // now group by region
       for (Entry e : entries) {
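The replicate() hunks above shard a batch into at most maxThreads sub-batches (roughly one per 100 entries, capped by the number of sinks) and drain the results through a CompletionService. A self-contained sketch of that partition-and-collect shape; note that HBase groups entries by region, whereas this sketch round-robins, and the pool size, batch sizes, and task body are all invented:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.CompletionService;
    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class PartitionedShipper {
      public static void main(String[] args) throws Exception {
        ExecutorService exec = Executors.newFixedThreadPool(4);
        CompletionService<Integer> pool = new ExecutorCompletionService<>(exec);

        List<String> entries = new ArrayList<>();
        for (int i = 0; i < 250; i++) entries.add("entry-" + i);

        // One sub-batch per ~100 entries, capped at the pool size.
        int n = Math.min(4, entries.size() / 100 + 1);
        List<List<String>> entryLists = new ArrayList<>(n);
        for (int i = 0; i < n; i++) entryLists.add(new ArrayList<>(entries.size() / n + 1));
        for (int i = 0; i < entries.size(); i++) entryLists.get(i % n).add(entries.get(i));

        // Submit one shipping task per sub-batch; here it just reports its size.
        for (List<String> batch : entryLists) pool.submit(() -> batch.size());

        // Collect results as they complete, in completion order.
        int shipped = 0;
        for (int i = 0; i < n; i++) shipped += pool.take().get();
        System.out.println("shipped " + shipped + " entries in " + n + " batches");
        exec.shutdown();
      }
    }
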
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java
index 35aa1fb..c091b44 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java
@@ -109,7 +109,7 @@ public class HFileReplicator {
     builder.setNameFormat("HFileReplicationCallable-%1$d");
     this.exec = new ThreadPoolExecutor(maxCopyThreads, maxCopyThreads, 60, TimeUnit.SECONDS,
-        new LinkedBlockingQueue<Runnable>(), builder.build());
+        new LinkedBlockingQueue<>(), builder.build());
     this.exec.allowCoreThreadTimeOut(true);

     this.copiesPerThread = conf.getInt(REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_KEY,
@@ -144,7 +144,7 @@ public class HFileReplicator {
       Table table = this.connection.getTable(tableName);

       // Prepare collection of queue of hfiles to be loaded(replicated)
-      Deque<LoadQueueItem> queue = new LinkedList<LoadQueueItem>();
+      Deque<LoadQueueItem> queue = new LinkedList<>();
       loadHFiles.prepareHFileQueue(stagingDir, table, queue, false);

       if (queue.isEmpty()) {
@@ -221,7 +221,7 @@ public class HFileReplicator {
   }

   private Map<String, Path> copyHFilesToStagingDir() throws IOException {
-    Map<String, Path> mapOfCopiedHFiles = new HashMap<String, Path>();
+    Map<String, Path> mapOfCopiedHFiles = new HashMap<>();
     Pair<byte[], List<String>> familyHFilePathsPair;
     List<String> hfilePaths;
     byte[] family;
@@ -270,7 +270,7 @@ public class HFileReplicator {
           totalNoOfHFiles = hfilePaths.size();

           // For each list of hfile paths for the family
-          List<Future<Void>> futures = new ArrayList<Future<Void>>();
+          List<Future<Void>> futures = new ArrayList<>();
           Callable<Void> c;
           Future<Void> future;
           int currentCopied = 0;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
index a647d03..7a9ef9f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
@@ -39,7 +39,7 @@ public class MetricsSource implements BaseSource {
   private static final Log LOG = LogFactory.getLog(MetricsSource.class);

   // tracks last shipped timestamp for each wal group
-  private Map<String, Long> lastTimeStamps = new HashMap<String, Long>();
+  private Map<String, Long> lastTimeStamps = new HashMap<>();
   private int lastQueueSize = 0;
   private long lastHFileRefsQueueSize = 0;
   private String id;
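HFileReplicator above builds a fixed-size copy pool with named threads, lets idle core threads time out so the pool shrinks to zero between bulk loads, and tracks each copy as a Future so failures surface on get(). A sketch under those assumptions (a plain ThreadFactory stands in for the Guava ThreadFactoryBuilder in the hunk, and the "copy" body is a placeholder):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.Future;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public class CopyPool {
      public static void main(String[] args) throws Exception {
        AtomicInteger seq = new AtomicInteger();
        ThreadPoolExecutor exec = new ThreadPoolExecutor(3, 3, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>(),
            r -> new Thread(r, "HFileReplicationCallable-" + seq.incrementAndGet()));
        // Let the fixed-size pool drop its core threads when idle.
        exec.allowCoreThreadTimeOut(true);

        List<Future<Void>> futures = new ArrayList<>();
        for (int i = 0; i < 5; i++) {
          final int id = i;
          Callable<Void> copyTask = () -> {
            // Placeholder for copying one hfile to the staging directory.
            System.out.println(Thread.currentThread().getName() + " copying hfile-" + id);
            return null;
          };
          futures.add(exec.submit(copyTask));
        }
        // Block until all copies finish; get() rethrows any copy failure.
        for (Future<Void> f : futures) f.get();
        exec.shutdown();
      }
    }
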
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
index dc4fad0..3e0de45 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
@@ -238,7 +238,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
     }
     long keepAliveTime = conf.getLong("hbase.region.replica.replication.threads.keepalivetime", 60);
     LinkedBlockingQueue<Runnable> workQueue =
-      new LinkedBlockingQueue<Runnable>(maxThreads *
+      new LinkedBlockingQueue<>(maxThreads *
           conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
             HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
     ThreadPoolExecutor tpe = new ThreadPoolExecutor(
@@ -527,8 +527,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
       return;
     }

-    ArrayList<Future<ReplicateWALEntryResponse>> tasks
-        = new ArrayList<Future<ReplicateWALEntryResponse>>(locations.size() - 1);
+    ArrayList<Future<ReplicateWALEntryResponse>> tasks = new ArrayList<>(locations.size() - 1);

     // All passed entries should belong to one region because it is coming from the EntryBuffers
     // split per region. But the regions might split and merge (unlike log recovery case).
@@ -543,8 +542,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
           rpcControllerFactory, tableName, location, regionInfo, row, entries,
           sink.getSkippedEditsCounter());
         Future<ReplicateWALEntryResponse> task = pool.submit(
-          new RetryingRpcCallable<ReplicateWALEntryResponse>(rpcRetryingCallerFactory,
-            callable, operationTimeout));
+          new RetryingRpcCallable<>(rpcRetryingCallerFactory, callable, operationTimeout));
         tasks.add(task);
       }
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
index d3f9ba2..9cc9c7c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
@@ -363,7 +363,7 @@ public class Replication extends WALActionsListener.Base implements
   }

   private void buildReplicationLoad() {
-    List<MetricsSource> sourceMetricsList = new ArrayList<MetricsSource>();
+    List<MetricsSource> sourceMetricsList = new ArrayList<>();

     // get source
     List<ReplicationSourceInterface> sources = this.replicationManager.getSources();

http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java
index b02b212..ef97687 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java
@@ -68,8 +68,7 @@ public class ReplicationLoad {
     this.replicationLoadSink = rLoadSinkBuild.build();

     // build the SourceLoad List
-    Map<String, ClusterStatusProtos.ReplicationLoadSource> replicationLoadSourceMap =
-        new HashMap<String, ClusterStatusProtos.ReplicationLoadSource>();
+    Map<String, ClusterStatusProtos.ReplicationLoadSource> replicationLoadSourceMap = new HashMap<>();
     for (MetricsSource sm : this.sourceMetricsList) {
       // Get the actual peer id
       String peerId = sm.getPeerID();
@@ -111,8 +110,7 @@ public class ReplicationLoad {
       replicationLoadSourceMap.put(peerId, rLoadSourceBuild.build());
     }

-    this.replicationLoadSourceList = new ArrayList<ClusterStatusProtos.ReplicationLoadSource>(
-        replicationLoadSourceMap.values());
+    this.replicationLoadSourceList = new ArrayList<>(replicationLoadSourceMap.values());
   }

   /**
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
index 71f9f3d..a3d6d13 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
@@ -152,8 +152,7 @@ public class ReplicationSink {
       long totalReplicated = 0;
       // Map of table => list of Rows, grouped by cluster id, we only want to flushCommits once per
       // invocation of this method per table and cluster id.
-      Map<TableName, Map<List<UUID>, List<Row>>> rowMap =
-          new TreeMap<TableName, Map<List<UUID>, List<Row>>>();
+      Map<TableName, Map<List<UUID>, List<Row>>> rowMap = new TreeMap<>();

       // Map of table name Vs list of pair of family and list of hfile paths from its namespace
       Map<String, List<Pair<byte[], List<String>>>> bulkLoadHFileMap = null;
@@ -173,7 +172,7 @@ public class ReplicationSink {
           // Handle bulk load hfiles replication
           if (CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) {
             if (bulkLoadHFileMap == null) {
-              bulkLoadHFileMap = new HashMap<String, List<Pair<byte[], List<String>>>>();
+              bulkLoadHFileMap = new HashMap<>();
             }
             buildBulkLoadHFileMap(bulkLoadHFileMap, table, cell);
           } else {
@@ -184,7 +183,7 @@ public class ReplicationSink {
               CellUtil.isDelete(cell) ?
                 new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) :
                 new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
-            List<UUID> clusterIds = new ArrayList<UUID>(entry.getKey().getClusterIdsList().size());
+            List<UUID> clusterIds = new ArrayList<>(entry.getKey().getClusterIdsList().size());
             for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) {
               clusterIds.add(toUUID(clusterId));
             }
@@ -275,20 +274,18 @@ public class ReplicationSink {

   private void addFamilyAndItsHFilePathToTableInMap(byte[] family, String pathToHfileFromNS,
       List<Pair<byte[], List<String>>> familyHFilePathsList) {
-    List<String> hfilePaths = new ArrayList<String>(1);
+    List<String> hfilePaths = new ArrayList<>(1);
     hfilePaths.add(pathToHfileFromNS);
-    familyHFilePathsList.add(new Pair<byte[], List<String>>(family, hfilePaths));
+    familyHFilePathsList.add(new Pair<>(family, hfilePaths));
   }

   private void addNewTableEntryInMap(
       final Map<String, List<Pair<byte[], List<String>>>> bulkLoadHFileMap, byte[] family,
       String pathToHfileFromNS, String tableName) {
-    List<String> hfilePaths = new ArrayList<String>(1);
+    List<String> hfilePaths = new ArrayList<>(1);
     hfilePaths.add(pathToHfileFromNS);
-    Pair<byte[], List<String>> newFamilyHFilePathsPair =
-        new Pair<byte[], List<String>>(family, hfilePaths);
-    List<Pair<byte[], List<String>>> newFamilyHFilePathsList =
-        new ArrayList<Pair<byte[], List<String>>>();
+    Pair<byte[], List<String>> newFamilyHFilePathsPair = new Pair<>(family, hfilePaths);
+    List<Pair<byte[], List<String>>> newFamilyHFilePathsList = new ArrayList<>();
     newFamilyHFilePathsList.add(newFamilyHFilePathsPair);
     bulkLoadHFileMap.put(tableName, newFamilyHFilePathsList);
   }
@@ -327,12 +324,12 @@ public class ReplicationSink {
   private <K1, K2, V> List<V> addToHashMultiMap(Map<K1, Map<K2,List<V>>> map, K1 key1, K2 key2, V value) {
     Map<K2,List<V>> innerMap = map.get(key1);
     if (innerMap == null) {
-      innerMap = new HashMap<K2, List<V>>();
+      innerMap = new HashMap<>();
       map.put(key1, innerMap);
     }
     List<V> values = innerMap.get(key2);
     if (values == null) {
-      values = new ArrayList<V>();
+      values = new ArrayList<>();
       innerMap.put(key2, values);
     }
     values.add(value);
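The addToHashMultiMap hunk above grows a two-level map of lists with the pre-Java-8 get-or-create idiom. On Java 8, the same contract collapses into two computeIfAbsent calls; a hedged equivalent (the K1/K2/V names mirror the hunk, the demo keys in main are invented):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class HashMultiMap {
      // Same contract as addToHashMultiMap above: append value under
      // (key1, key2), creating the inner map and list on first use.
      static <K1, K2, V> List<V> addToHashMultiMap(Map<K1, Map<K2, List<V>>> map,
          K1 key1, K2 key2, V value) {
        List<V> values = map.computeIfAbsent(key1, k -> new HashMap<>())
            .computeIfAbsent(key2, k -> new ArrayList<>());
        values.add(value);
        return values;
      }

      public static void main(String[] args) {
        Map<String, Map<String, List<Integer>>> m = new HashMap<>();
        addToHashMultiMap(m, "tableA", "cluster1", 1);
        addToHashMultiMap(m, "tableA", "cluster1", 2);
        addToHashMultiMap(m, "tableA", "cluster2", 3);
        System.out.println(m); // {tableA={cluster1=[1, 2], cluster2=[3]}}
      }
    }
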
http://git-wip-us.apache.org/repos/asf/hbase/blob/b53f3547/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index d3f6d35..72da9bd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -94,8 +94,7 @@ public class ReplicationSource extends Thread implements ReplicationSourceInterf
   private static final Log LOG = LogFactory.getLog(ReplicationSource.class);
   // Queues of logs to process, entry in format of walGroupId->queue,
   // each presents a queue for one wal group
-  private Map<String, PriorityBlockingQueue<Path>> queues =
-      new HashMap<String, PriorityBlockingQueue<Path>>();
+  private Map<String, PriorityBlockingQueue<Path>> queues = new HashMap<>();
   // per group queue size, keep no more than this number of logs in each wal group
   private int queueSizePerGroup;
   private ReplicationQueues replicationQueues;
@@ -140,8 +139,7 @@ public class ReplicationSource extends Thread implements ReplicationSourceInterf
   private ReplicationThrottler throttler;
   private long defaultBandwidth;
   private long currentBandwidth;
-  private ConcurrentHashMap<String, ReplicationSourceShipperThread> workerThreads =
-      new ConcurrentHashMap<String, ReplicationSourceShipperThread>();
+  private ConcurrentHashMap<String, ReplicationSourceShipperThread> workerThreads = new ConcurrentHashMap<>();

   private AtomicLong totalBufferUsed;
@@ -209,7 +207,7 @@ public class ReplicationSource extends Thread implements ReplicationSourceInterf
     String logPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(log.getName());
     PriorityBlockingQueue<Path> queue = queues.get(logPrefix);
     if (queue == null) {
-      queue = new PriorityBlockingQueue<Path>(queueSizePerGroup, new LogsComparator());
+      queue = new PriorityBlockingQueue<>(queueSizePerGroup, new LogsComparator());
       queues.put(logPrefix, queue);
       if (this.sourceRunning) {
         // new wal group observed after source startup, start a new worker thread to track it
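ReplicationSource above keeps one PriorityBlockingQueue per WAL group and creates a group's queue lazily when its first log appears (the real class also starts a shipper thread for the new group). A minimal sketch of that layout; the String-based WAL names, the timestamp-suffix comparator, and the capacity below are my simplifications of HBase's Path queues and LogsComparator:

    import java.util.Comparator;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.PriorityBlockingQueue;

    public class PerGroupWalQueues {
      // walGroupId -> queue of logs for that group, oldest first.
      private final Map<String, PriorityBlockingQueue<String>> queues = new HashMap<>();
      private final int queueSizePerGroup = 32;

      // Order WALs by their numeric timestamp suffix, e.g. "host%2C16020.1488375600000".
      private static final Comparator<String> LOGS_COMPARATOR =
          Comparator.comparingLong(wal -> Long.parseLong(wal.substring(wal.lastIndexOf('.') + 1)));

      void enqueueLog(String walGroupId, String wal) {
        PriorityBlockingQueue<String> queue = queues.get(walGroupId);
        if (queue == null) {
          // First log observed for this group: lazily create its queue
          // (ReplicationSource would also spin up a shipper thread here).
          queue = new PriorityBlockingQueue<>(queueSizePerGroup, LOGS_COMPARATOR);
          queues.put(walGroupId, queue);
        }
        queue.add(wal);
      }

      public static void main(String[] args) {
        PerGroupWalQueues q = new PerGroupWalQueues();
        q.enqueueLog("group-a", "host%2C16020.1488375600002");
        q.enqueueLog("group-a", "host%2C16020.1488375600001");
        System.out.println(q.queues.get("group-a").peek()); // oldest log first
      }
    }
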
