[accumulo] 01/01: Merge branch '1.8'
This is an automated email from the ASF dual-hosted git repository. mmiller pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/accumulo.git commit 0cb58c519cb058d6a1029f2fe39f6071ca6b5ab8 Merge: 19f819d d6d8a7d Author: Mike MillerAuthorDate: Tue Jan 30 19:02:46 2018 -0500 Merge branch '1.8' Conflicts: core/src/main/java/org/apache/accumulo/core/client/impl/MultiTableBatchWriterImpl.java core/src/main/java/org/apache/accumulo/core/client/impl/Tables.java .../client/impl/MultiTableBatchWriterImpl.java | 76 + .../apache/accumulo/core/client/impl/TableMap.java | 101 .../apache/accumulo/core/client/impl/Tables.java | 174 - .../server/util/VerifyTabletAssignments.java | 2 +- .../org/apache/accumulo/master/tableOps/Utils.java | 2 +- .../accumulo/test/MultiTableBatchWriterIT.java | 119 +- 6 files changed, 173 insertions(+), 301 deletions(-) diff --cc core/src/main/java/org/apache/accumulo/core/client/impl/MultiTableBatchWriterImpl.java index aa0d469,a4a5b2f..255aa01 --- a/core/src/main/java/org/apache/accumulo/core/client/impl/MultiTableBatchWriterImpl.java +++ b/core/src/main/java/org/apache/accumulo/core/client/impl/MultiTableBatchWriterImpl.java @@@ -82,37 -71,13 +71,13 @@@ public class MultiTableBatchWriterImpl } - /** -* CacheLoader which will look up the internal table ID for a given table name. 
-*/ - private class TableNameToIdLoader extends CacheLoader { - - @Override - public Table.ID load(String tableName) throws Exception { - Instance instance = context.getInstance(); - Table.ID tableId = Tables.getTableId(instance, tableName); - - if (Tables.getTableState(instance, tableId) == TableState.OFFLINE) - throw new TableOfflineException(instance, tableId.canonicalID()); - - return tableId; - } - - } - private TabletServerBatchWriter bw; - private ConcurrentHashMap tableWriters; + private ConcurrentHashMap tableWriters; private final ClientContext context; - private final LoadingCache nameToIdCache; public MultiTableBatchWriterImpl(ClientContext context, BatchWriterConfig config) { - this(context, config, DEFAULT_CACHE_TIME, DEFAULT_CACHE_TIME_UNIT); - } - - public MultiTableBatchWriterImpl(ClientContext context, BatchWriterConfig config, long cacheTime, TimeUnit cacheTimeUnit) { checkArgument(context != null, "context is null"); checkArgument(config != null, "config is null"); - checkArgument(cacheTimeUnit != null, "cacheTimeUnit is null"); this.context = context; this.bw = new TabletServerBatchWriter(context, config); tableWriters = new ConcurrentHashMap<>(); @@@ -156,9 -116,9 +116,9 @@@ * The name of the table which to find the ID for * @return The table ID, or null if the table name doesn't exist */ - private String getId(String tableName) throws TableNotFoundException { + private Table.ID getId(String tableName) throws TableNotFoundException { try { - return nameToIdCache.get(tableName); + return Tables.getTableId(context.inst, tableName); } catch (UncheckedExecutionException e) { Throwable cause = e.getCause(); @@@ -194,27 -140,7 +140,7 @@@ public BatchWriter getBatchWriter(String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException { checkArgument(tableName != null, "tableName is null"); - while (true) { - long cacheResetCount = Tables.getCacheResetCount(); - - // cacheResetCount could change after this point in time, 
but I think thats ok because just want to ensure this methods sees changes - // made before it was called. - - long internalResetCount = cacheLastState.get(); - - if (cacheResetCount > internalResetCount) { - if (!cacheLastState.compareAndSet(internalResetCount, cacheResetCount)) { - continue; // concurrent operation, lets not possibly move cacheLastState backwards in the case where a thread pauses for along time - } - - nameToIdCache.invalidateAll(); - break; - } - - break; - } - -String tableId = getId(tableName); +Table.ID tableId = getId(tableName); BatchWriter tbw = tableWriters.get(tableId); if (tbw == null) { diff --cc core/src/main/java/org/apache/accumulo/core/client/impl/TableMap.java index 000,3f3d90c..9f17fde mode 00,100644..100644 --- a/core/src/main/java/org/apache/accumulo/core/client/impl/TableMap.java +++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TableMap.java @@@ -1,0 -1,100 +1,101 @@@ + /* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed
[accumulo] branch master updated (19f819d -> 0cb58c5)
This is an automated email from the ASF dual-hosted git repository. mmiller pushed a change to branch master in repository https://gitbox.apache.org/repos/asf/accumulo.git. from 19f819d Merge branch '1.8' add 5adeb4b ACCUMULO-4778 Cache table name to id map (#364) add d6d8a7d Merge branch '1.7' into 1.8 new 0cb58c5 Merge branch '1.8' The 1 revision listed above as "new" is entirely new to this repository and will be described in a separate email. The revisions listed as "add" were already present in the repository and have only been added to this reference. Summary of changes: .../client/impl/MultiTableBatchWriterImpl.java | 76 + .../apache/accumulo/core/client/impl/TableMap.java | 101 .../apache/accumulo/core/client/impl/Tables.java | 174 - .../server/util/VerifyTabletAssignments.java | 2 +- .../org/apache/accumulo/master/tableOps/Utils.java | 2 +- .../accumulo/test/MultiTableBatchWriterIT.java | 119 +- 6 files changed, 173 insertions(+), 301 deletions(-) create mode 100644 core/src/main/java/org/apache/accumulo/core/client/impl/TableMap.java -- To stop receiving notification emails like this one, please contact mmil...@apache.org.
[accumulo] 01/01: Merge branch '1.7' into 1.8
This is an automated email from the ASF dual-hosted git repository. mmiller pushed a commit to branch 1.8 in repository https://gitbox.apache.org/repos/asf/accumulo.git commit d6d8a7dbad07f5b17e572eb79d9daa9ad7ece4fa Merge: a3b8930 5adeb4b Author: Mike MillerAuthorDate: Tue Jan 30 16:24:53 2018 -0500 Merge branch '1.7' into 1.8 .../client/impl/MultiTableBatchWriterImpl.java | 79 + .../apache/accumulo/core/client/impl/TableMap.java | 100 + .../apache/accumulo/core/client/impl/Tables.java | 123 ++--- .../accumulo/test/MultiTableBatchWriterIT.java | 119 +--- 4 files changed, 162 insertions(+), 259 deletions(-) diff --cc test/src/main/java/org/apache/accumulo/test/MultiTableBatchWriterIT.java index d33b12c,000..cb5c1b2 mode 100644,00..100644 --- a/test/src/main/java/org/apache/accumulo/test/MultiTableBatchWriterIT.java +++ b/test/src/main/java/org/apache/accumulo/test/MultiTableBatchWriterIT.java @@@ -1,518 -1,0 +1,407 @@@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.accumulo.test; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; - import java.util.concurrent.TimeUnit; + +import org.apache.accumulo.core.client.AccumuloException; +import org.apache.accumulo.core.client.AccumuloSecurityException; +import org.apache.accumulo.core.client.BatchWriter; +import org.apache.accumulo.core.client.BatchWriterConfig; +import org.apache.accumulo.core.client.Connector; +import org.apache.accumulo.core.client.MultiTableBatchWriter; +import org.apache.accumulo.core.client.MutationsRejectedException; +import org.apache.accumulo.core.client.Scanner; +import org.apache.accumulo.core.client.TableNotFoundException; - import org.apache.accumulo.core.client.TableOfflineException; +import org.apache.accumulo.core.client.admin.TableOperations; +import org.apache.accumulo.core.client.impl.ClientContext; +import org.apache.accumulo.core.client.impl.Credentials; +import org.apache.accumulo.core.client.impl.MultiTableBatchWriterImpl; +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.Mutation; +import org.apache.accumulo.core.data.Range; +import org.apache.accumulo.core.data.Value; +import org.apache.accumulo.core.security.Authorizations; +import org.apache.accumulo.harness.AccumuloClusterHarness; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import com.google.common.collect.Maps; + +public class MultiTableBatchWriterIT extends AccumuloClusterHarness { + + private Connector connector; + private MultiTableBatchWriter mtbw; + + @Override + public int defaultTimeoutSeconds() { +return 5 * 60; + } + + @Before + public void setUpArgs() throws AccumuloException, AccumuloSecurityException { +connector = getConnector(); - mtbw = getMultiTableBatchWriter(60); ++mtbw = getMultiTableBatchWriter(); + } + - public MultiTableBatchWriter getMultiTableBatchWriter(long cacheTimeoutInSeconds) { ++ public MultiTableBatchWriter 
getMultiTableBatchWriter() { +ClientContext context = new ClientContext(connector.getInstance(), new Credentials(getAdminPrincipal(), getAdminToken()), getCluster().getClientConfig()); - return new MultiTableBatchWriterImpl(context, new BatchWriterConfig(), cacheTimeoutInSeconds, TimeUnit.SECONDS); ++return new MultiTableBatchWriterImpl(context, new BatchWriterConfig()); + } + + @Test + public void testTableRenameDataValidation() throws Exception { + +try { + final String[] names = getUniqueNames(2); + final String table1 = names[0], table2 = names[1]; + + TableOperations tops = connector.tableOperations(); + tops.create(table1); + + BatchWriter bw1 = mtbw.getBatchWriter(table1); + + Mutation m1 = new Mutation("foo"); + m1.put("col1", "", "val1"); + + bw1.addMutation(m1); + + tops.rename(table1, table2); + tops.create(table1); + + BatchWriter bw2 = mtbw.getBatchWriter(table1); + + Mutation m2 = new Mutation("bar"); +
[accumulo] branch 1.8 updated (a3b8930 -> d6d8a7d)
This is an automated email from the ASF dual-hosted git repository. mmiller pushed a change to branch 1.8 in repository https://gitbox.apache.org/repos/asf/accumulo.git. from a3b8930 Merge branch '1.7' into 1.8 add 5adeb4b ACCUMULO-4778 Cache table name to id map (#364) new d6d8a7d Merge branch '1.7' into 1.8 The 1 revision listed above as "new" is entirely new to this repository and will be described in a separate email. The revisions listed as "add" were already present in the repository and have only been added to this reference. Summary of changes: .../client/impl/MultiTableBatchWriterImpl.java | 79 + .../apache/accumulo/core/client/impl/TableMap.java | 100 + .../apache/accumulo/core/client/impl/Tables.java | 123 ++--- .../accumulo/test/MultiTableBatchWriterIT.java | 119 +--- 4 files changed, 162 insertions(+), 259 deletions(-) create mode 100644 core/src/main/java/org/apache/accumulo/core/client/impl/TableMap.java -- To stop receiving notification emails like this one, please contact mmil...@apache.org.
[accumulo] branch 1.7 updated: ACCUMULO-4778 Cache table name to id map (#364)
This is an automated email from the ASF dual-hosted git repository. mmiller pushed a commit to branch 1.7 in repository https://gitbox.apache.org/repos/asf/accumulo.git The following commit(s) were added to refs/heads/1.7 by this push: new 5adeb4b ACCUMULO-4778 Cache table name to id map (#364) 5adeb4b is described below commit 5adeb4b7ed561a0bcea1a1def17835310831662f Author: Mike MillerAuthorDate: Tue Jan 30 16:21:13 2018 -0500 ACCUMULO-4778 Cache table name to id map (#364) * Improved performance anytime tableIdMap is accessed (by the API or internally) * New class TableMap is cached per instance using Guava Cache * Added watcher on Tables ZooCache that will refresh the TableMap on any ZK table updates * Removed now obsolete internal cache from MultiTableBatchWriter --- .../client/impl/MultiTableBatchWriterImpl.java | 79 + .../apache/accumulo/core/client/impl/TableMap.java | 100 + .../apache/accumulo/core/client/impl/Tables.java | 123 ++--- .../accumulo/test/MultiTableBatchWriterIT.java | 119 +--- 4 files changed, 162 insertions(+), 259 deletions(-) diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/MultiTableBatchWriterImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/MultiTableBatchWriterImpl.java index f5e1fa0..e7a6d73 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/impl/MultiTableBatchWriterImpl.java +++ b/core/src/main/java/org/apache/accumulo/core/client/impl/MultiTableBatchWriterImpl.java @@ -19,37 +19,26 @@ package org.apache.accumulo.core.client.impl; import static com.google.common.base.Preconditions.checkArgument; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; import org.apache.accumulo.core.client.AccumuloException; import org.apache.accumulo.core.client.AccumuloSecurityException; import 
org.apache.accumulo.core.client.BatchWriter; import org.apache.accumulo.core.client.BatchWriterConfig; -import org.apache.accumulo.core.client.Instance; import org.apache.accumulo.core.client.MultiTableBatchWriter; import org.apache.accumulo.core.client.MutationsRejectedException; import org.apache.accumulo.core.client.TableNotFoundException; import org.apache.accumulo.core.client.TableOfflineException; import org.apache.accumulo.core.data.Mutation; -import org.apache.accumulo.core.master.state.tables.TableState; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; import com.google.common.util.concurrent.UncheckedExecutionException; public class MultiTableBatchWriterImpl implements MultiTableBatchWriter { - public static final long DEFAULT_CACHE_TIME = 200; - public static final TimeUnit DEFAULT_CACHE_TIME_UNIT = TimeUnit.MILLISECONDS; private static final Logger log = LoggerFactory.getLogger(MultiTableBatchWriterImpl.class); private AtomicBoolean closed; - private AtomicLong cacheLastState; private class TableBatchWriter implements BatchWriter { @@ -82,49 +71,17 @@ public class MultiTableBatchWriterImpl implements MultiTableBatchWriter { } - /** - * CacheLoader which will look up the internal table ID for a given table name. 
- */ - private class TableNameToIdLoader extends CacheLoader { - -@Override -public String load(String tableName) throws Exception { - Instance instance = context.getInstance(); - String tableId = Tables.getNameToIdMap(instance).get(tableName); - - if (tableId == null) -throw new TableNotFoundException(null, tableName, null); - - if (Tables.getTableState(instance, tableId) == TableState.OFFLINE) -throw new TableOfflineException(instance, tableId); - - return tableId; -} - - } - private TabletServerBatchWriter bw; private ConcurrentHashMap tableWriters; private final ClientContext context; - private final LoadingCache nameToIdCache; public MultiTableBatchWriterImpl(ClientContext context, BatchWriterConfig config) { -this(context, config, DEFAULT_CACHE_TIME, DEFAULT_CACHE_TIME_UNIT); - } - - public MultiTableBatchWriterImpl(ClientContext context, BatchWriterConfig config, long cacheTime, TimeUnit cacheTimeUnit) { checkArgument(context != null, "context is null"); checkArgument(config != null, "config is null"); -checkArgument(cacheTimeUnit != null, "cacheTimeUnit is null"); this.context = context; this.bw = new TabletServerBatchWriter(context, config); tableWriters = new ConcurrentHashMap<>(); this.closed = new AtomicBoolean(false); -this.cacheLastState =