[accumulo] branch master updated: Remove unused methods in ServerConfigurationFactory

2020-03-12 Thread mmiller
This is an automated email from the ASF dual-hosted git repository.

mmiller pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/master by this push:
 new 8404d14  Remove unused methods in ServerConfigurationFactory
8404d14 is described below

commit 8404d144d63c4629edd57d9bcc42d4c5626762a3
Author: Mike Miller 
AuthorDate: Thu Mar 12 16:27:13 2020 -0400

Remove unused methods in ServerConfigurationFactory
---
 .../accumulo/server/conf/ServerConfigurationFactory.java | 12 ------------
 1 file changed, 12 deletions(-)

diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/ServerConfigurationFactory.java b/server/base/src/main/java/org/apache/accumulo/server/conf/ServerConfigurationFactory.java
index 3a0779d..f9b8743 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/ServerConfigurationFactory.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/ServerConfigurationFactory.java
@@ -55,18 +55,6 @@ public class ServerConfigurationFactory extends ServerConfiguration {
 }
   }
 
-  static boolean removeCachedTableConfiguration(String instanceId, TableId tableId) {
-    synchronized (tableConfigs) {
-      return tableConfigs.get(instanceId).remove(tableId) != null;
-    }
-  }
-
-  static boolean removeCachedNamespaceConfiguration(String instanceId, NamespaceId namespaceId) {
-    synchronized (namespaceConfigs) {
-      return namespaceConfigs.get(instanceId).remove(namespaceId) != null;
-    }
-  }
-
   static void clearCachedConfigurations() {
     synchronized (tableConfigs) {
       tableConfigs.clear();



[accumulo] branch master updated (77b515f -> 84d5b99)

2020-03-12 Thread kturner
This is an automated email from the ASF dual-hosted git repository.

kturner pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo.git.


from 77b515f  Fix and simplify TransportCachingIT (#1552)
 add 3b9dadc  Fix idempotency bug in importtable (#1555)
 new 84d5b99  Merge branch '1.9'

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../tableOps/tableImport/MoveExportedFiles.java | 46 +-
 1 file changed, 36 insertions(+), 10 deletions(-)



[accumulo] 01/01: Merge branch '1.9'

2020-03-12 Thread kturner
This is an automated email from the ASF dual-hosted git repository.

kturner pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo.git

commit 84d5b995302c8774c6e9024612aff7495dd73727
Merge: 77b515f 3b9dadc
Author: Keith Turner 
AuthorDate: Thu Mar 12 14:17:59 2020 -0400

Merge branch '1.9'

 .../tableOps/tableImport/MoveExportedFiles.java | 46 +-
 1 file changed, 36 insertions(+), 10 deletions(-)

diff --cc server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/MoveExportedFiles.java
index b5bb6be,0000000..b324f6b
mode 100644,000000..100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/MoveExportedFiles.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/tableImport/MoveExportedFiles.java
@@@ -1,79 -1,0 +1,105 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *   http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing,
 + * software distributed under the License is distributed on an
 + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 + * KIND, either express or implied.  See the License for the
 + * specific language governing permissions and limitations
 + * under the License.
 + */
 +package org.apache.accumulo.master.tableOps.tableImport;
 +
 +import java.io.IOException;
++import java.util.Arrays;
++import java.util.HashSet;
 +import java.util.Map;
++import java.util.Set;
++import java.util.function.Function;
++import java.util.stream.Collectors;
 +
 +import org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException;
 +import org.apache.accumulo.core.clientImpl.thrift.TableOperation;
 +import org.apache.accumulo.core.clientImpl.thrift.TableOperationExceptionType;
 +import org.apache.accumulo.fate.Repo;
 +import org.apache.accumulo.master.Master;
 +import org.apache.accumulo.master.tableOps.MasterRepo;
 +import org.apache.accumulo.server.fs.VolumeManager;
 +import org.apache.hadoop.fs.FileStatus;
 +import org.apache.hadoop.fs.Path;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
++import com.google.common.collect.Sets;
++
 +class MoveExportedFiles extends MasterRepo {
 +  private static final Logger log = LoggerFactory.getLogger(MoveExportedFiles.class);
 +
 +  private static final long serialVersionUID = 1L;
 +
 +  private ImportedTableInfo tableInfo;
 +
 +  MoveExportedFiles(ImportedTableInfo ti) {
 +    this.tableInfo = ti;
 +  }
 +
 +  @Override
 +  public Repo<Master> call(long tid, Master master) throws Exception {
 +    try {
 +      VolumeManager fs = master.getVolumeManager();
 +
 +      Map<String,String> fileNameMappings = PopulateMetadataTable.readMappingFile(fs, tableInfo);
 +
-       for (String oldFileName : fileNameMappings.keySet()) {
-         if (!fs.exists(new Path(tableInfo.exportDir, oldFileName))) {
-           throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonical(),
-               tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-               "File referenced by exported table does not exists " + oldFileName);
-         }
++      FileStatus[] exportedFiles = fs.listStatus(new Path(tableInfo.exportDir));
++      FileStatus[] importedFiles = fs.listStatus(new Path(tableInfo.importDir));
++
++      Function<FileStatus,String> fileStatusName = fstat -> fstat.getPath().getName();
++
++      Set<String> importing = Arrays.stream(exportedFiles).map(fileStatusName)
++          .map(fileNameMappings::get).collect(Collectors.toSet());
++
++      Set<String> imported =
++          Arrays.stream(importedFiles).map(fileStatusName).collect(Collectors.toSet());
++
++      if (log.isDebugEnabled()) {
++        log.debug("Files already present in imported (target) directory: {}",
++            imported.stream().collect(Collectors.joining(",")));
 +      }
 +
-       FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
++      Set<String> missingFiles = Sets.difference(new HashSet<>(fileNameMappings.values()),
++          new HashSet<>(Sets.union(importing, imported)));
++
++      if (!missingFiles.isEmpty()) {
++        throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonical(),
++            tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
++            "Missing source files corresponding to files "
++                + missingFiles.stream().collect(Collectors.joining(",")));
++      }
 +
-       for (FileStatus fileStatus : files) {
++      for (FileStatus fileStatus : exportedFiles) {
 +

[accumulo] branch 1.9 updated: Fix idempotency bug in importtable (#1555)

2020-03-12 Thread kturner
This is an automated email from the ASF dual-hosted git repository.

kturner pushed a commit to branch 1.9
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/1.9 by this push:
 new 3b9dadc  Fix idempotency bug in importtable (#1555)
3b9dadc is described below

commit 3b9dadce9d62d3432fabd995892568f63816420f
Author: Arvind Shyamsundar 
AuthorDate: Thu Mar 12 11:02:45 2020 -0700

Fix idempotency bug in importtable (#1555)

* Fix idempotency bug in importtable
The previous implementation would fail when an interrupted importtable
operation was retried by FATE, as it would not find (already moved)
files in the exported directory. Additionally, this commit removes a
chatty fs.exists() check.

* Use Set operations to streamline code
This code uses set operations to validate whether any files are missing
from the exported (source) directory. In addition, using sets improves
performance compared to a lambda scanning the file status arrays; a
standalone sketch of this check appears after this list.

* Fix formatting and use inbuilt Java joiner
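
A minimal, self-contained sketch of the set-based check described in the
second item above. The class and method names (MissingFileCheck,
findMissing) and the sample file names are illustrative only and do not
appear in the commit; the real code builds its sets from the FileStatus
arrays returned by VolumeManager.listStatus().

import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

import com.google.common.collect.Sets;

public class MissingFileCheck {

  // Returns the mapped destination names that are neither still waiting in
  // the export directory ("importing") nor already present in the import
  // directory ("imported"). A non-empty result means source files are missing.
  static Set<String> findMissing(Map<String,String> fileNameMappings,
      Set<String> exportedNames, Set<String> importedNames) {
    // Destination names for the files still sitting in the export directory.
    Set<String> importing = exportedNames.stream()
        .map(fileNameMappings::get).collect(Collectors.toSet());
    // Every mapped destination must be covered by one of the two directories.
    return Sets.difference(Set.copyOf(fileNameMappings.values()),
        Sets.union(importing, importedNames));
  }

  public static void main(String[] args) {
    Map<String,String> mappings = Map.of("A.rf", "I0000a.rf", "B.rf", "I0000b.rf");
    // A.rf was already renamed (I0000a.rf is in the import dir) and B.rf is
    // still in the export dir, so a retried operation sees nothing missing.
    System.out.println(findMissing(mappings, Set.of("B.rf"), Set.of("I0000a.rf")));
    // If B.rf had disappeared entirely, its destination name would be reported.
    System.out.println(findMissing(mappings, Set.of(), Set.of("I0000a.rf")));
  }
}

Treating already-renamed files as satisfied is what lets a FATE retry
succeed instead of failing on a per-file existence probe.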
---
 .../master/tableOps/MoveExportedFiles.java | 46 +-
 1 file changed, 36 insertions(+), 10 deletions(-)

diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/MoveExportedFiles.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/MoveExportedFiles.java
index 667ccfc..f13e1c5 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/MoveExportedFiles.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/MoveExportedFiles.java
@@ -17,7 +17,12 @@
 package org.apache.accumulo.master.tableOps;
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
 
 import org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
@@ -30,6 +35,8 @@ import org.apache.hadoop.fs.Path;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.collect.Sets;
+
 class MoveExportedFiles extends MasterRepo {
   private static final Logger log = LoggerFactory.getLogger(MoveExportedFiles.class);
 
@@ -48,21 +55,40 @@ class MoveExportedFiles extends MasterRepo {
 
       Map<String,String> fileNameMappings = PopulateMetadataTable.readMappingFile(fs, tableInfo);
 
-      for (String oldFileName : fileNameMappings.keySet()) {
-        if (!fs.exists(new Path(tableInfo.exportDir, oldFileName))) {
-          throw new AcceptableThriftTableOperationException(tableInfo.tableId, tableInfo.tableName,
-              TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-              "File referenced by exported table does not exists " + oldFileName);
-        }
+      FileStatus[] exportedFiles = fs.listStatus(new Path(tableInfo.exportDir));
+      FileStatus[] importedFiles = fs.listStatus(new Path(tableInfo.importDir));
+
+      Function<FileStatus,String> fileStatusName = fstat -> fstat.getPath().getName();
+
+      Set<String> importing = Arrays.stream(exportedFiles).map(fileStatusName)
+          .map(fileNameMappings::get).collect(Collectors.toSet());
+
+      Set<String> imported =
+          Arrays.stream(importedFiles).map(fileStatusName).collect(Collectors.toSet());
+
+      if (log.isDebugEnabled()) {
+        log.debug("Files already present in imported (target) directory: {}",
+            imported.stream().collect(Collectors.joining(",")));
       }
 
-      FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
+      Set<String> missingFiles = Sets.difference(new HashSet<>(fileNameMappings.values()),
+          new HashSet<>(Sets.union(importing, imported)));
+
+      if (!missingFiles.isEmpty()) {
+        throw new AcceptableThriftTableOperationException(tableInfo.tableId, tableInfo.tableName,
+            TableOperation.IMPORT, TableOperationExceptionType.OTHER,
+            "Missing source files corresponding to files "
+                + missingFiles.stream().collect(Collectors.joining(",")));
+      }
 
-      for (FileStatus fileStatus : files) {
+      for (FileStatus fileStatus : exportedFiles) {
         String newName = fileNameMappings.get(fileStatus.getPath().getName());
 
-        if (newName != null)
-          fs.rename(fileStatus.getPath(), new Path(tableInfo.importDir, newName));
+        if (newName != null) {
+          Path newPath = new Path(tableInfo.importDir, newName);
+          log.debug("Renaming file {} to {}", fileStatus.getPath(), newPath);
+          fs.rename(fileStatus.getPath(), newPath);
+        }
       }
 
       return new FinishImportTable(tableInfo);