This is an automated email from the ASF dual-hosted git repository.

kishoreg pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-pinot.git


The following commit(s) were added to refs/heads/master by this push:
     new 0826de5  Implement GcsPinotFS (#4911)
0826de5 is described below

commit 0826de532519f0076ad4c98ba865c22bf6a8b90d
Author: Elon Azoulay <[email protected]>
AuthorDate: Fri Dec 20 09:29:25 2019 -0800

    Implement GcsPinotFS (#4911)
---
 pinot-distribution/pom.xml                         |   8 +
 pinot-gcs-filesystem/pom.xml                       |  76 ++++
 .../org/apache/pinot/filesystem/GcsPinotFS.java    | 399 +++++++++++++++++++++
 .../pinot/spi/filesystem/PinotFSFactory.java       |   1 +
 pom.xml                                            |  94 ++++-
 5 files changed, 577 insertions(+), 1 deletion(-)

diff --git a/pinot-distribution/pom.xml b/pinot-distribution/pom.xml
index 0b6596d..e3cd623 100644
--- a/pinot-distribution/pom.xml
+++ b/pinot-distribution/pom.xml
@@ -74,6 +74,14 @@
     </dependency>
     <dependency>
       <groupId>org.apache.pinot</groupId>
+      <artifactId>pinot-gcs-filesystem</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.cloud</groupId>
+      <artifactId>google-cloud-storage</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.pinot</groupId>
       <artifactId>pinot-orc</artifactId>
     </dependency>
     <dependency>
diff --git a/pinot-gcs-filesystem/pom.xml b/pinot-gcs-filesystem/pom.xml
new file mode 100644
index 0000000..e96aef3
--- /dev/null
+++ b/pinot-gcs-filesystem/pom.xml
@@ -0,0 +1,76 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <artifactId>pinot</artifactId>
+      <groupId>org.apache.pinot</groupId>
+      <version>0.3.0-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>pinot-gcs-filesystem</artifactId>
+  <name>Pinot GCS Filesystem</name>
+  <url>https://pinot.apache.org</url>
+  <properties>
+    <pinot.root>${basedir}/..</pinot.root>
+  </properties>
+  <dependencies>
+    <dependency>
+      <groupId>commons-configuration</groupId>
+      <artifactId>commons-configuration</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.pinot</groupId>
+      <artifactId>pinot-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.cloud</groupId>
+      <artifactId>google-cloud-storage</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.testng</groupId>
+      <artifactId>testng</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-shade-plugin</artifactId>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <goals>
+              <goal>shade</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+          <shadedArtifactAttached>false</shadedArtifactAttached>
+          <createDependencyReducedPom>false</createDependencyReducedPom>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+</project>
\ No newline at end of file
diff --git 
a/pinot-gcs-filesystem/src/main/java/org/apache/pinot/filesystem/GcsPinotFS.java
 
b/pinot-gcs-filesystem/src/main/java/org/apache/pinot/filesystem/GcsPinotFS.java
new file mode 100644
index 0000000..6d237ab
--- /dev/null
+++ 
b/pinot-gcs-filesystem/src/main/java/org/apache/pinot/filesystem/GcsPinotFS.java
@@ -0,0 +1,399 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.filesystem;
+
+import com.google.api.gax.paging.Page;
+import com.google.auth.oauth2.GoogleCredentials;
+import com.google.cloud.WriteChannel;
+import com.google.cloud.storage.Blob;
+import com.google.cloud.storage.Bucket;
+import com.google.cloud.storage.CopyWriter;
+import com.google.cloud.storage.Storage;
+import com.google.cloud.storage.StorageException;
+import com.google.cloud.storage.StorageOptions;
+import com.google.common.collect.ImmutableList;
+import org.apache.commons.configuration.Configuration;
+import org.apache.pinot.spi.filesystem.PinotFS;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.UncheckedIOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.ByteBuffer;
+import java.nio.channels.Channels;
+import java.nio.channels.SeekableByteChannel;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+import static joptsimple.internal.Strings.isNullOrEmpty;
+import static org.glassfish.jersey.internal.guava.Preconditions.checkArgument;
+
+public class GcsPinotFS  extends PinotFS {
+  public static final String PROJECT_ID = "projectId";
+  public static final String GCP_KEY = "gcpKey";
+
+  private static final Logger LOGGER = 
LoggerFactory.getLogger(GcsPinotFS.class);
+  private static final String DELIMITER = "/";
+  private static final int BUFFER_SIZE = 128 * 1024;
+  private Storage storage;
+
+  @Override
+  public void init(Configuration config) {
+    LOGGER.info("Configs are: {}, {}",
+            PROJECT_ID,
+            config.getString(PROJECT_ID));
+
+    checkArgument(!isNullOrEmpty(config.getString(PROJECT_ID)));
+    checkArgument(!isNullOrEmpty(config.getString(GCP_KEY)));
+    String projectId = config.getString(PROJECT_ID);
+    String gcpKey = config.getString(GCP_KEY);
+    try {
+      storage = StorageOptions.newBuilder()
+          .setProjectId(projectId)
+          
.setCredentials(GoogleCredentials.fromStream(Files.newInputStream(Paths.get(gcpKey))))
+          .build()
+          .getService();
+    } catch (IOException e) {
+      throw new UncheckedIOException(e);
+    }
+  }
+
+  private Bucket getBucket(URI uri) {
+    return storage.get(uri.getHost());
+  }
+
+  private Blob getBlob(URI uri) throws IOException {
+    try {
+      URI base = getBase(uri);
+      String path = sanitizePath(base.relativize(uri).getPath());
+      return getBucket(uri).get(path);
+    } catch (StorageException e) {
+      throw new IOException(e);
+    }
+  }
+  private boolean isPathTerminatedByDelimiter(URI uri) {
+    return uri.getPath().endsWith(DELIMITER);
+  }
+
+  private String normalizeToDirectoryPrefix(URI uri) throws IOException {
+    requireNonNull(uri, "uri is null");
+    URI strippedUri = getBase(uri).relativize(uri);
+    if (isPathTerminatedByDelimiter(strippedUri)) {
+      return sanitizePath(strippedUri.getPath());
+    }
+    return sanitizePath(strippedUri.getPath() + DELIMITER);
+  }
+
+  private URI normalizeToDirectoryUri(URI uri) throws IOException {
+    if (isPathTerminatedByDelimiter(uri)) {
+      return uri;
+    }
+    try {
+      return new URI(uri.getScheme(), uri.getHost(), 
sanitizePath(uri.getPath() + DELIMITER), null);
+    } catch (URISyntaxException e) {
+      throw new IOException(e);
+    }
+  }
+
+  private String sanitizePath(String path) {
+    path = path.replaceAll(DELIMITER + "+", DELIMITER);
+    if (path.startsWith(DELIMITER) && !path.equals(DELIMITER)) {
+      path = path.substring(1);
+    }
+    return path;
+  }
+
+  private URI getBase(URI uri) throws IOException {
+    try {
+      return new URI(uri.getScheme(), uri.getHost(), null, null);
+    } catch (URISyntaxException e) {
+      throw new IOException(e);
+    }
+  }
+
+  private boolean existsBlob(Blob blob) {
+    return blob != null && blob.exists();
+  }
+
+  private boolean existsFile(URI uri) throws IOException {
+      Blob blob = getBlob(uri);
+      return existsBlob(blob);
+  }
+
+  /**
+   * Determines if a path is a directory that is not empty
+   * @param uri The path under the gcs bucket
+   * @return {@code true} if the path is a non-empty directory,
+   *         {@code false} otherwise
+   */
+  private boolean isEmptyDirectory(URI uri) throws IOException {
+    if (!isDirectory(uri)) {
+      return false;
+    }
+    String prefix = normalizeToDirectoryPrefix(uri);
+    boolean isEmpty = true;
+    Page<Blob> page;
+    if (prefix.equals(DELIMITER)) {
+      page = getBucket(uri).list();
+    } else {
+      page = getBucket(uri).list(Storage.BlobListOption.prefix(prefix));
+    }
+    for (Blob blob : page.iterateAll()) {
+      if (blob.getName().equals(prefix)) {
+        continue;
+      } else {
+        isEmpty = false;
+        break;
+      }
+    }
+    return isEmpty;
+  }
+
+  private boolean copyFile(URI srcUri, URI dstUri) throws IOException {
+    Blob blob = getBlob(srcUri);
+    Blob newBlob = 
getBucket(dstUri).create(sanitizePath(getBase(dstUri).relativize(dstUri).getPath()),
 new byte[0]);
+    CopyWriter copyWriter = blob.copyTo(newBlob.getBlobId());
+    copyWriter.getResult();
+    return copyWriter.isDone();
+  }
+
+  @Override
+  public boolean mkdir(URI uri) throws IOException {
+    LOGGER.info("mkdir {}", uri);
+    try {
+      requireNonNull(uri, "uri is null");
+      String path = normalizeToDirectoryPrefix(uri);
+      // Bucket root directory already exists and cannot be created
+      if (path.equals(DELIMITER)) {
+        return true;
+      }
+      Blob blob = getBucket(uri).create(normalizeToDirectoryPrefix(uri), new 
byte[0]);
+      return blob.exists();
+    } catch (Throwable t) {
+      throw new IOException(t);
+    }
+  }
+
+  @Override
+  public boolean delete(URI segmentUri, boolean forceDelete) throws 
IOException {
+    LOGGER.info("Deleting uri {} force {}", segmentUri, forceDelete);
+    try {
+      if (!exists(segmentUri)) {
+        return false;
+      }
+      if (isDirectory(segmentUri)) {
+        if (!forceDelete) {
+          checkState(isEmptyDirectory(segmentUri), "ForceDelete flag is not 
set and directory '%s' is not empty", segmentUri);
+        }
+        String prefix = normalizeToDirectoryPrefix(segmentUri);
+        Page<Blob> page;
+        if (prefix.equals(DELIMITER)) {
+          page = getBucket(segmentUri).list();
+        } else {
+          page = 
getBucket(segmentUri).list(Storage.BlobListOption.prefix(prefix));
+        }
+        boolean deleteSucceeded = true;
+        for (Blob blob : page.iterateAll()) {
+          deleteSucceeded &= blob.delete();
+        }
+        return deleteSucceeded;
+      } else {
+        Blob blob = getBlob(segmentUri);
+        return blob != null && blob.delete();
+      }
+    } catch (IOException e) {
+      throw e;
+    } catch(Throwable t) {
+      throw new IOException(t);
+    }
+  }
+
+  @Override
+  public boolean doMove(URI srcUri, URI dstUri) throws IOException {
+    if (copy(srcUri, dstUri)) {
+      return  delete(srcUri, true);
+    }
+    return false;
+  }
+
+  /**
+   * Copy srcUri to dstUri. If copy fails attempt to delete dstUri.
+   * @param srcUri URI of the original file
+   * @param dstUri URI of the final file location
+   * @return {@code true} if copy succeeded otherwise return {@code false}
+   * @throws IOException
+   */
+  @Override
+  public boolean copy(URI srcUri, URI dstUri) throws IOException {
+    LOGGER.info("Copying uri {} to uri {}", srcUri, dstUri);
+    checkState(exists(srcUri), "Source URI '%s' does not exist", srcUri);
+    if (srcUri.equals(dstUri)) {
+      return true;
+    }
+    if (!isDirectory(srcUri)) {
+      delete(dstUri, true);
+      return copyFile(srcUri, dstUri);
+    }
+    dstUri = normalizeToDirectoryUri(dstUri);
+    ImmutableList.Builder<URI> builder = ImmutableList.builder();
+    Path srcPath = Paths.get(srcUri.getPath());
+    try {
+      boolean copySucceeded = false;
+      for (String directoryEntry : listFiles(srcUri, true)) {
+        URI src = new URI(srcUri.getScheme(), srcUri.getHost(), 
directoryEntry, null);
+        String relativeSrcPath = 
srcPath.relativize(Paths.get(directoryEntry)).toString();
+        String dstPath = dstUri.resolve(relativeSrcPath).getPath();
+        URI dst = new URI(dstUri.getScheme(), dstUri.getHost(), dstPath, null);
+        copySucceeded |= copyFile(src, dst);
+      }
+      return copySucceeded;
+    } catch (URISyntaxException e) {
+      throw new IOException(e);
+    }
+  }
+
+  @Override
+  public boolean exists(URI fileUri) throws IOException {
+    if (isDirectory(fileUri)) {
+      return true;
+    }
+    if (isPathTerminatedByDelimiter(fileUri)) {
+      return false;
+    }
+    return existsFile(fileUri);
+  }
+
+  @Override
+  public long length(URI fileUri) throws IOException {
+    try {
+      checkState(!isPathTerminatedByDelimiter(fileUri), "URI is a directory");
+      Blob blob = getBucket(fileUri).get(fileUri.getPath());
+      checkState(existsBlob(blob), "File '%s' does not exist", fileUri);
+      return blob.getSize();
+    } catch (Throwable t) {
+      throw new IOException(t);
+    }
+  }
+
+  @Override
+  public String[] listFiles(URI fileUri, boolean recursive) throws IOException 
{
+    try {
+      ImmutableList.Builder<String> builder = ImmutableList.builder();
+      String prefix = normalizeToDirectoryPrefix(fileUri);
+      Page<Blob> page;
+      if (recursive) {
+        page = storage.list(fileUri.getHost(), 
Storage.BlobListOption.prefix(prefix));
+      } else {
+        page = storage.list(fileUri.getHost(), 
Storage.BlobListOption.prefix(prefix), 
Storage.BlobListOption.currentDirectory());
+      }
+      page.iterateAll()
+          .forEach(blob -> {
+            if (!blob.getName().equals(fileUri.getPath())) {
+              builder.add(blob.getName());
+            }
+          });
+      return builder.build().toArray(new String[0]);
+    } catch (Throwable t) {
+      throw new IOException(t);
+    }
+  }
+
+  @Override
+  public void copyToLocalFile(URI srcUri, File dstFile) throws Exception {
+    LOGGER.info("Copy {} to local {}", srcUri, dstFile.getAbsolutePath());
+    Blob blob = getBlob(srcUri);
+    checkState(existsBlob(blob), "File '%s' does not exists", srcUri);
+    blob.downloadTo(dstFile.toPath());
+  }
+
+  @Override
+  public void copyFromLocalFile(File srcFile, URI dstUri) throws Exception {
+    LOGGER.info("Copying file {} to uri {}", srcFile.getAbsolutePath(), 
dstUri);
+    Bucket bucket = getBucket(dstUri);
+    Blob blob = bucket.create(sanitizePath(dstUri.getPath()), new byte[0]);
+    WriteChannel writeChannel = blob.writer();
+    writeChannel.setChunkSize(BUFFER_SIZE);
+    ByteBuffer buffer = ByteBuffer.allocate(BUFFER_SIZE);
+    SeekableByteChannel channel = Files.newByteChannel(srcFile.toPath());
+    for (int bytesRead = channel.read(buffer); bytesRead != -1; bytesRead = 
channel.read(buffer)) {
+      buffer.flip();
+      writeChannel.write(buffer);
+      buffer.clear();
+    }
+    writeChannel.close();
+  }
+
+  @Override
+  public boolean isDirectory(URI uri) throws IOException {
+    String prefix = normalizeToDirectoryPrefix(uri);
+    if (prefix.equals(DELIMITER)) {
+      return true;
+    }
+    Blob blob = getBucket(uri).get(prefix);
+    if (blob != null) {
+      return blob.exists();
+    }
+
+    try {
+      // Return true if folder was not explicitly created but is a prefix of 
one or more files.
+      // Use lazy iterable iterateAll() and verify that the iterator has 
elements.
+      return getBucket(uri).list(Storage.BlobListOption.prefix(prefix))
+              .iterateAll()
+              .iterator()
+              .hasNext();
+    } catch (Throwable t) {
+      throw new IOException(t);
+    }
+  }
+
+  @Override
+  public long lastModified(URI uri) throws IOException {
+    return getBlob(uri).getUpdateTime();
+  }
+
+  @Override
+  public boolean touch(URI uri) throws IOException {
+    try {
+      Blob blob = getBlob(uri);
+      long updateTime = blob.getUpdateTime();
+      storage.update(blob.toBuilder().setMetadata(blob.getMetadata()).build());
+      long newUpdateTime = getBlob(uri).getUpdateTime();
+      return newUpdateTime > updateTime;
+    } catch (StorageException e) {
+      throw new IOException(e);
+    }
+  }
+
+  @Override
+  public InputStream open(URI uri) throws IOException {
+    try {
+      Blob blob = getBlob(uri);
+      return Channels.newInputStream(blob.reader());
+    } catch (StorageException e) {
+      throw new IOException(e);
+    }
+  }
+}
diff --git 
a/pinot-spi/src/main/java/org/apache/pinot/spi/filesystem/PinotFSFactory.java 
b/pinot-spi/src/main/java/org/apache/pinot/spi/filesystem/PinotFSFactory.java
index 8900929..0610f74 100644
--- 
a/pinot-spi/src/main/java/org/apache/pinot/spi/filesystem/PinotFSFactory.java
+++ 
b/pinot-spi/src/main/java/org/apache/pinot/spi/filesystem/PinotFSFactory.java
@@ -80,6 +80,7 @@ public class PinotFSFactory {
     if (pinotFS == null) {
       throw new RuntimeException("Pinot file system not configured for scheme: 
" + scheme);
     }
+    LOGGER.info("PinotFS for schema {} initialized: {}", scheme, 
pinotFS.getClass());
     return pinotFS;
   }
 
diff --git a/pom.xml b/pom.xml
index 7067db9..cc3a238 100644
--- a/pom.xml
+++ b/pom.xml
@@ -55,6 +55,7 @@
     <module>pinot-azure-filesystem</module>
     <module>pinot-record-readers</module>
     <module>pinot-connectors</module>
+    <module>pinot-gcs-filesystem</module>
   </modules>
 
   <licenses>
@@ -302,6 +303,11 @@
         <version>${project.version}</version>
       </dependency>
       <dependency>
+        <groupId>org.apache.pinot</groupId>
+        <artifactId>pinot-gcs-filesystem</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
         <groupId>nl.jqno.equalsverifier</groupId>
         <artifactId>equalsverifier</artifactId>
         <version>1.7.2</version>
@@ -945,6 +951,12 @@
         <artifactId>commons-math3</artifactId>
         <version>3.2</version>
       </dependency>
+      <!-- hadoop-common & google-cloud-storage use gson -->
+      <dependency>
+        <groupId>com.google.code.gson</groupId>
+        <artifactId>gson</artifactId>
+        <version>2.2.4</version>
+      </dependency>
       <!-- kafka_2.10 & jmh-core use jopt-simple -->
       <dependency>
         <groupId>net.sf.jopt-simple</groupId>
@@ -961,8 +973,88 @@
         <artifactId>commons-logging</artifactId>
         <version>${commons-logging.version}</version>
       </dependency>
+      <!-- google cloud storage -->
+      <dependency>
+        <groupId>com.google.cloud</groupId>
+        <artifactId>google-cloud-storage</artifactId>
+        <version>1.101.0</version>
+        <exclusions>
+          <exclusion>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+          </exclusion>
+        </exclusions>
+      </dependency>
+      <dependency>
+        <groupId>com.google.protobuf</groupId>
+        <artifactId>protobuf-java</artifactId>
+        <version>3.10.0</version>
+      </dependency>
+      <dependency>
+        <groupId>com.google.api-client</groupId>
+        <artifactId>google-api-client</artifactId>
+        <version>1.30.4</version>
+      </dependency>
+      <dependency>
+        <groupId>com.google.api.grpc</groupId>
+        <artifactId>proto-google-common-protos</artifactId>
+        <version>1.17.0</version>
+      </dependency>
+      <dependency>
+        <groupId>com.google.http-client</groupId>
+        <artifactId>google-http-client</artifactId>
+        <version>1.32.1</version>
+      </dependency>
+      <!-- Due to dependency convergence issues with google cloud storage: 
https://github.com/googleapis/google-cloud-java/issues/4175 -->
+      <dependency>
+        <groupId>com.google.api.grpc</groupId>
+        <artifactId>proto-google-common-protos</artifactId>
+        <version>1.17.0</version>
+        <exclusions>
+          <exclusion>
+            <groupId>com.google.protobuf</groupId>
+            <artifactId>protobuf-java</artifactId>
+          </exclusion>
+        </exclusions>
+      </dependency>
+      <dependency>
+        <groupId>com.google.api.grpc</groupId>
+        <artifactId>proto-google-iam-v1</artifactId>
+        <version>0.13.0</version>
+        <exclusions>
+          <exclusion>
+            <groupId>com.google.protobuf</groupId>
+            <artifactId>protobuf-java</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>com.google.api.grpc</groupId>
+            <artifactId>proto-google-common-protos</artifactId>
+          </exclusion>
+        </exclusions>
+      </dependency>
+      <dependency>
+        <groupId>com.google.apis</groupId>
+        <artifactId>google-api-services-storage</artifactId>
+        <version>v1-rev20190910-1.30.3</version>
+        <exclusions>
+          <exclusion>
+            <groupId>com.google.api-client</groupId>
+            <artifactId>google-api-client</artifactId>
+          </exclusion>
+        </exclusions>
+      </dependency>
+      <dependency>
+        <groupId>com.google.api</groupId>
+        <artifactId>gax-httpjson</artifactId>
+        <version>0.66.1</version>
+        <exclusions>
+          <exclusion>
+            <groupId>com.google.http-client</groupId>
+            <artifactId>google-http-client</artifactId>
+          </exclusion>
+        </exclusions>
+      </dependency>
     </dependencies>
-
   </dependencyManagement>
   <build>
     <defaultGoal>clean install</defaultGoal>


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to