http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java
new file mode 100644
index 0000000..5425daa
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java
@@ -0,0 +1,229 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import javax.security.auth.callback.CallbackHandler;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.sentry.hdfs.service.thrift.SentryHDFSService;
+import org.apache.sentry.hdfs.service.thrift.SentryHDFSService.Client;
+import org.apache.sentry.hdfs.service.thrift.TAuthzUpdateResponse;
+import org.apache.sentry.hdfs.service.thrift.TPathsUpdate;
+import org.apache.sentry.hdfs.service.thrift.TPermissionsUpdate;
+import org.apache.sentry.hdfs.ServiceConstants.ClientConfig;
+import org.apache.sentry.hdfs.ServiceConstants.ServerConfig;
+import org.apache.thrift.protocol.TBinaryProtocol;
+//import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.protocol.TMultiplexedProtocol;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.transport.TSaslClientTransport;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+public class SentryHDFSServiceClient {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(SentryHDFSServiceClient.class);
+
+  public static final String SENTRY_HDFS_SERVICE_NAME = "SentryHDFSService";
+
+  public static class SentryAuthzUpdate {
+
+    private final List<PermissionsUpdate> permUpdates;
+    private final List<PathsUpdate> pathUpdates;
+
+    public SentryAuthzUpdate(List<PermissionsUpdate> permUpdates, List<PathsUpdate> pathUpdates) {
+      this.permUpdates = permUpdates;
+      this.pathUpdates = pathUpdates;
+    }
+
+    public List<PermissionsUpdate> getPermUpdates() {
+      return permUpdates;
+    }
+
+    public List<PathsUpdate> getPathUpdates() {
+      return pathUpdates;
+    }
+  }
+  
+  /**
+   * This transport wraps the Sasl transports to set up the right UGI context for open().
+   */
+  public static class UgiSaslClientTransport extends TSaslClientTransport {
+    protected UserGroupInformation ugi = null;
+
+    public UgiSaslClientTransport(String mechanism, String authorizationId,
+        String protocol, String serverName, Map<String, String> props,
+        CallbackHandler cbh, TTransport transport, boolean wrapUgi)
+        throws IOException {
+      super(mechanism, authorizationId, protocol, serverName, props, cbh,
+          transport);
+      if (wrapUgi) {
+        ugi = UserGroupInformation.getLoginUser();
+      }
+    }
+
+    // Open the SASL transport using the current UserGroupInformation.
+    // This is needed to pick up the stored login context.
+    @Override
+    public void open() throws TTransportException {
+      if (ugi == null) {
+        baseOpen();
+      } else {
+        try {
+          ugi.doAs(new PrivilegedExceptionAction<Void>() {
+            public Void run() throws TTransportException {
+              baseOpen();
+              return null;
+            }
+          });
+        } catch (IOException e) {
+          throw new TTransportException("Failed to open SASL transport", e);
+        } catch (InterruptedException e) {
+          throw new TTransportException(
+              "Interrupted while opening underlying transport", e);
+        }
+      }
+    }
+
+    private void baseOpen() throws TTransportException {
+      super.open();
+    }
+  }
+
+  private final Configuration conf;
+  private final InetSocketAddress serverAddress;
+  private final int connectionTimeout;
+  private boolean kerberos;
+  private TTransport transport;
+
+  private String[] serverPrincipalParts;
+  private Client client;
+  
+  public SentryHDFSServiceClient(Configuration conf) throws IOException {
+    this.conf = conf;
+    Preconditions.checkNotNull(this.conf, "Configuration object cannot be null");
+    this.serverAddress = NetUtils.createSocketAddr(Preconditions.checkNotNull(
+                           conf.get(ClientConfig.SERVER_RPC_ADDRESS), "Config key "
+                           + ClientConfig.SERVER_RPC_ADDRESS + " is required"), conf.getInt(
+                           ClientConfig.SERVER_RPC_PORT, ClientConfig.SERVER_RPC_PORT_DEFAULT));
+    this.connectionTimeout = conf.getInt(ClientConfig.SERVER_RPC_CONN_TIMEOUT,
+                                         ClientConfig.SERVER_RPC_CONN_TIMEOUT_DEFAULT);
+    kerberos = ClientConfig.SECURITY_MODE_KERBEROS.equalsIgnoreCase(
+        conf.get(ClientConfig.SECURITY_MODE, ClientConfig.SECURITY_MODE_KERBEROS).trim());
+    transport = new TSocket(serverAddress.getHostName(),
+        serverAddress.getPort(), connectionTimeout);
+    if (kerberos) {
+      String serverPrincipal = Preconditions.checkNotNull(
+          conf.get(ClientConfig.PRINCIPAL), ClientConfig.PRINCIPAL + " is required");
+
+      // Resolve server host in the same way as we are doing on server side
+      serverPrincipal = SecurityUtil.getServerPrincipal(serverPrincipal, serverAddress.getAddress());
+      LOGGER.info("Using server kerberos principal: " + serverPrincipal);
+
+      serverPrincipalParts = SaslRpcServer.splitKerberosName(serverPrincipal);
+      Preconditions.checkArgument(serverPrincipalParts.length == 3,
+           "Kerberos principal should have 3 parts: " + serverPrincipal);
+      boolean wrapUgi = "true".equalsIgnoreCase(conf
+          .get(ClientConfig.SECURITY_USE_UGI_TRANSPORT, "true"));
+      transport = new UgiSaslClientTransport(AuthMethod.KERBEROS.getMechanismName(),
+          null, serverPrincipalParts[0], serverPrincipalParts[1],
+          ClientConfig.SASL_PROPERTIES, null, transport, wrapUgi);
+    } else {
+      serverPrincipalParts = null;
+    }
+    try {
+      transport.open();
+    } catch (TTransportException e) {
+      throw new IOException("Transport exception while opening transport: " + e.getMessage(), e);
+    }
+    LOGGER.info("Successfully opened transport: " + transport + " to " + serverAddress);
+    TProtocol tProtocol = new TBinaryProtocol(transport);
+//    if (conf.getBoolean(ClientConfig.USE_COMPACT_TRANSPORT,
+//        ClientConfig.USE_COMPACT_TRANSPORT_DEFAULT)) {
+//      tProtocol = new TCompactProtocol(transport);
+//    } else {
+//      tProtocol = new TBinaryProtocol(transport);
+//    }
+    TMultiplexedProtocol protocol = new TMultiplexedProtocol(
+      tProtocol, SentryHDFSServiceClient.SENTRY_HDFS_SERVICE_NAME);
+    client = new SentryHDFSService.Client(protocol);
+    LOGGER.info("Successfully created client");
+  }
+
+  public synchronized void notifyHMSUpdate(PathsUpdate update)
+      throws IOException {
+    try {
+      client.handle_hms_notification(update.toThrift());
+    } catch (Exception e) {
+      throw new IOException("Thrift Exception occurred !!", e);
+    }
+  }
+
+  public synchronized long getLastSeenHMSPathSeqNum()
+      throws IOException {
+    try {
+      return client.check_hms_seq_num(-1);
+    } catch (Exception e) {
+      throw new IOException("Thrift Exception occurred !!", e);
+    }
+  }
+
+  public synchronized SentryAuthzUpdate getAllUpdatesFrom(long permSeqNum, long pathSeqNum)
+      throws IOException {
+    SentryAuthzUpdate retVal = new SentryAuthzUpdate(new LinkedList<PermissionsUpdate>(), new LinkedList<PathsUpdate>());
+    try {
+      TAuthzUpdateResponse sentryUpdates = client.get_all_authz_updates_from(permSeqNum, pathSeqNum);
+      if (sentryUpdates.getAuthzPathUpdate() != null) {
+        for (TPathsUpdate pathsUpdate : sentryUpdates.getAuthzPathUpdate()) {
+          retVal.getPathUpdates().add(new PathsUpdate(pathsUpdate));
+        }
+      }
+      if (sentryUpdates.getAuthzPermUpdate() != null) {
+        for (TPermissionsUpdate permsUpdate : sentryUpdates.getAuthzPermUpdate()) {
+          retVal.getPermUpdates().add(new PermissionsUpdate(permsUpdate));
+        }
+      }
+    } catch (Exception e) {
+      throw new IOException("Thrift Exception occurred !!", e);
+    }
+    return retVal;
+  }
+
+  public void close() {
+    if (transport != null) {
+      transport.close();
+    }
+  }
+}
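
For context, a minimal sketch of how the client above can be driven (not part of this commit; the host name and the non-Kerberos security mode below are illustrative assumptions):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.sentry.hdfs.SentryHDFSServiceClient;
import org.apache.sentry.hdfs.SentryHDFSServiceClient.SentryAuthzUpdate;
import org.apache.sentry.hdfs.ServiceConstants.ClientConfig;

public class SentryHDFSClientSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Point the client at a Sentry service; the host is an assumption.
    conf.set(ClientConfig.SERVER_RPC_ADDRESS, "sentry-host.example.com");
    conf.setInt(ClientConfig.SERVER_RPC_PORT, ClientConfig.SERVER_RPC_PORT_DEFAULT);
    conf.set(ClientConfig.SECURITY_MODE, ClientConfig.SECURITY_MODE_NONE);

    SentryHDFSServiceClient client = new SentryHDFSServiceClient(conf);
    try {
      // Fetch every update newer than the sequence numbers we already hold.
      SentryAuthzUpdate updates = client.getAllUpdatesFrom(0, 0);
      System.out.println("perm updates: " + updates.getPermUpdates().size()
          + ", path updates: " + updates.getPathUpdates().size());
    } finally {
      client.close();
    }
  }
}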

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ServiceConstants.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ServiceConstants.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ServiceConstants.java
new file mode 100644
index 0000000..64cb943
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ServiceConstants.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.security.sasl.Sasl;
+
+import com.google.common.collect.ImmutableMap;
+
+public class ServiceConstants {
+
+  private static final ImmutableMap<String, String> SASL_PROPERTIES;
+
+  static {
+    Map<String, String> saslProps = new HashMap<String, String>();
+    saslProps.put(Sasl.SERVER_AUTH, "true");
+    saslProps.put(Sasl.QOP, "auth-conf");
+    SASL_PROPERTIES = ImmutableMap.copyOf(saslProps);
+  }
+
+  public static class ServerConfig {
+    public static final ImmutableMap<String, String> SASL_PROPERTIES = ServiceConstants.SASL_PROPERTIES;
+    /**
+     * This configuration parameter is only meant to be used for testing purposes.
+     */
+    public static final String SENTRY_HDFS_INTEGRATION_PATH_PREFIXES = "sentry.hdfs.integration.path.prefixes";
+    public static final String[] SENTRY_HDFS_INTEGRATION_PATH_PREFIXES_DEFAULT =
+        new String[]{"/user/hive/warehouse"};
+    public static final String SENTRY_HDFS_INIT_UPDATE_RETRY_DELAY_MS = "sentry.hdfs.init.update.retry.delay.ms";
+    public static final int SENTRY_HDFS_INIT_UPDATE_RETRY_DELAY_DEFAULT = 10000;
+
+  }
+  public static class ClientConfig {
+    public static final ImmutableMap<String, String> SASL_PROPERTIES = ServiceConstants.SASL_PROPERTIES;
+
+    public static final String SECURITY_MODE = "sentry.hdfs.service.security.mode";
+    public static final String SECURITY_MODE_KERBEROS = "kerberos";
+    public static final String SECURITY_MODE_NONE = "none";
+    public static final String SECURITY_USE_UGI_TRANSPORT = "sentry.hdfs.service.security.use.ugi";
+    public static final String PRINCIPAL = "sentry.hdfs.service.server.principal";
+
+    public static final String SERVER_RPC_PORT = "sentry.hdfs.service.client.server.rpc-port";
+    public static final int SERVER_RPC_PORT_DEFAULT = 8038;
+
+    public static final String SERVER_RPC_ADDRESS = "sentry.hdfs.service.client.server.rpc-address";
+
+    public static final String SERVER_RPC_CONN_TIMEOUT = "sentry.hdfs.service.client.server.rpc-connection-timeout";
+    public static final int SERVER_RPC_CONN_TIMEOUT_DEFAULT = 200000;
+    public static final String USE_COMPACT_TRANSPORT = "sentry.hdfs.service.client.compact.transport";
+    public static final boolean USE_COMPACT_TRANSPORT_DEFAULT = false;
+  }
+
+}
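
A note on the SASL settings above: Sasl.SERVER_AUTH set to "true" requires mutual authentication (the server must authenticate itself to the client), and a QOP of "auth-conf" requests authentication plus confidentiality, so the Kerberos-secured Thrift channel is also encrypted on the wire.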

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/Updateable.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/Updateable.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/Updateable.java
new file mode 100644
index 0000000..ba932ac
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/Updateable.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.concurrent.locks.ReadWriteLock;
+
+public interface Updateable<K extends Updateable.Update> {
+
+  /**
+   * Thrift currently does not support class inheritance. We need all update
+   * objects to expose a unified API, so as a workaround a wrapper class
+   * implementing this interface and containing the generated Thrift class
+   * needs to be created.
+   */
+  public interface Update {
+
+    boolean hasFullImage();
+    
+    long getSeqNum();
+
+    void setSeqNum(long seqNum);
+
+  }
+
+  /**
+   * Apply multiple partial updates in order.
+   * @param update the partial updates to apply, in order
+   * @param lock external lock guarding the underlying data structure
+   */
+  public void updatePartial(Iterable<K> update, ReadWriteLock lock);
+
+  /**
+   * Returns a new object with the full update applied.
+   * @param update the full image update
+   * @return a new instance built from the full image
+   */
+  public Updateable<K> updateFull(K update);
+
+  /**
+   * Return the sequence number of the last update.
+   */
+  public long getLastUpdatedSeqNum();
+
+  /**
+   * Create a full image update of the local data structure.
+   * @param currSeqNum the sequence number to stamp on the update
+   * @return the full image update
+   */
+  public K createFullImageUpdate(long currSeqNum);
+
+}
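
A minimal sketch of the workaround the Javadoc above describes (hypothetical: TFooUpdate stands in for a Thrift-generated struct such as TPathsUpdate, and the accessor names mirror Thrift's usual getter/setter conventions):

// Hypothetical wrapper; TFooUpdate stands in for a generated Thrift struct.
public class FooUpdate implements Updateable.Update {
  private final TFooUpdate tUpdate; // the generated Thrift object being wrapped

  public FooUpdate(TFooUpdate tUpdate) {
    this.tUpdate = tUpdate;
  }

  @Override
  public boolean hasFullImage() {
    return tUpdate.isHasFullImage(); // assumed Thrift-generated accessor
  }

  @Override
  public long getSeqNum() {
    return tUpdate.getSeqNum();
  }

  @Override
  public void setSeqNum(long seqNum) {
    tUpdate.setSeqNum(seqNum);
  }

  public TFooUpdate toThrift() {
    return tUpdate;
  }
}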

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/UpdateableAuthzPaths.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/UpdateableAuthzPaths.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/UpdateableAuthzPaths.java
new file mode 100644
index 0000000..03b288b
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/UpdateableAuthzPaths.java
@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReadWriteLock;
+
+import org.apache.sentry.hdfs.service.thrift.TPathChanges;
+import org.apache.sentry.hdfs.service.thrift.TPathsDump;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class UpdateableAuthzPaths implements AuthzPaths, Updateable<PathsUpdate> {
+  private static final int MAX_UPDATES_PER_LOCK_USE = 99;
+  private volatile HMSPaths paths;
+  private final AtomicLong seqNum = new AtomicLong(0);
+
+  private static Logger LOG = LoggerFactory.getLogger(UpdateableAuthzPaths.class);
+  
+  public UpdateableAuthzPaths(String[] pathPrefixes) {
+    this.paths = new HMSPaths(pathPrefixes);
+  }
+
+  UpdateableAuthzPaths(HMSPaths paths) {
+    this.paths = paths;
+  }
+
+  @Override
+  public boolean isUnderPrefix(String[] pathElements) {
+    return paths.isUnderPrefix(pathElements);
+  }
+
+  @Override
+  public String findAuthzObject(String[] pathElements) {
+    return paths.findAuthzObject(pathElements);
+  }
+
+  @Override
+  public String findAuthzObjectExactMatch(String[] pathElements) {
+    return paths.findAuthzObjectExactMatch(pathElements);
+  }
+
+  @Override
+  public UpdateableAuthzPaths updateFull(PathsUpdate update) {
+    UpdateableAuthzPaths other = getPathsDump().initializeFromDump(
+        update.toThrift().getPathsDump());
+    other.seqNum.set(update.getSeqNum());
+    return other;
+  }
+
+  @Override
+  public void updatePartial(Iterable<PathsUpdate> updates, ReadWriteLock lock) {
+    lock.writeLock().lock();
+    try {
+      int counter = 0;
+      for (PathsUpdate update : updates) {
+        applyPartialUpdate(update);
+        if (++counter > MAX_UPDATES_PER_LOCK_USE) {
+          counter = 0;
+          lock.writeLock().unlock();
+          lock.writeLock().lock();
+        }
+        seqNum.set(update.getSeqNum());
+        LOG.debug("##### Updated paths seq Num [" + seqNum.get() + "]");
+      }
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  private void applyPartialUpdate(PathsUpdate update) {
+    // Handle alter table rename: it will have exactly 2 path changes,
+    // one an add path and the other a del path
+    if (update.getPathChanges().size() == 2) {
+      List<TPathChanges> pathChanges = update.getPathChanges();
+      TPathChanges newPathInfo = null;
+      TPathChanges oldPathInfo = null;
+      if ((pathChanges.get(0).getAddPathsSize() == 1)
+        && (pathChanges.get(1).getDelPathsSize() == 1)) {
+        newPathInfo = pathChanges.get(0);
+        oldPathInfo = pathChanges.get(1);
+      } else if ((pathChanges.get(1).getAddPathsSize() == 1)
+          && (pathChanges.get(0).getDelPathsSize() == 1)) {
+        newPathInfo = pathChanges.get(1);
+        oldPathInfo = pathChanges.get(0);
+      }
+      if ((newPathInfo != null) && (oldPathInfo != null)) {
+        paths.renameAuthzObject(
+            oldPathInfo.getAuthzObj(), oldPathInfo.getDelPaths().get(0),
+            newPathInfo.getAuthzObj(), newPathInfo.getAddPaths().get(0));
+        return;
+      }
+    }
+    for (TPathChanges pathChanges : update.getPathChanges()) {
+      paths.addPathsToAuthzObject(pathChanges.getAuthzObj(), pathChanges
+          .getAddPaths(), true);
+      List<List<String>> delPaths = pathChanges.getDelPaths();
+      if ((delPaths.size() == 1) && (delPaths.get(0).size() == 1)
+          && (delPaths.get(0).get(0).equals(PathsUpdate.ALL_PATHS))) {
+        // Remove all paths.. eg. drop table
+        paths.deleteAuthzObject(pathChanges.getAuthzObj());
+      } else {
+        paths.deletePathsFromAuthzObject(pathChanges.getAuthzObj(), pathChanges
+            .getDelPaths());
+      }
+    }
+  }
+
+  @Override
+  public long getLastUpdatedSeqNum() {
+    return seqNum.get();
+  }
+
+  @Override
+  public PathsUpdate createFullImageUpdate(long currSeqNum) {
+    PathsUpdate pathsUpdate = new PathsUpdate(currSeqNum, true);
+    pathsUpdate.toThrift().setPathsDump(getPathsDump().createPathsDump());
+    return pathsUpdate;
+  }
+
+  @Override
+  public AuthzPathsDumper<UpdateableAuthzPaths> getPathsDump() {
+    return new AuthzPathsDumper<UpdateableAuthzPaths>() {
+
+      @Override
+      public TPathsDump createPathsDump() {
+        return UpdateableAuthzPaths.this.paths.getPathsDump().createPathsDump();
+      }
+
+      @Override
+      public UpdateableAuthzPaths initializeFromDump(TPathsDump pathsDump) {
+        return new UpdateableAuthzPaths(UpdateableAuthzPaths.this.paths
+            .getPathsDump().initializeFromDump(pathsDump));
+      }
+    };
+  }
+}
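
To illustrate the class above, a hedged sketch of applying one partial update, mirroring the usage in TestUpdateableAuthzPaths later in this commit (PathsUpdate.newPathChange and PathsUpdate.cleanPath are taken from there):

import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.apache.sentry.hdfs.service.thrift.TPathChanges;

import com.google.common.collect.Lists;

public class UpdateableAuthzPathsSketch {
  public static void main(String[] args) {
    UpdateableAuthzPaths authzPaths = new UpdateableAuthzPaths(new String[]{"/"});
    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

    // A partial update (seqNum 2, no full image) that adds one table path.
    PathsUpdate update = new PathsUpdate(2, false);
    TPathChanges pathChange = update.newPathChange("db1.tbl12");
    pathChange.addToAddPaths(PathsUpdate.cleanPath("file:///db1/tbl12"));

    // updatePartial takes the write lock, and briefly releases it every
    // MAX_UPDATES_PER_LOCK_USE updates so that readers are not starved.
    authzPaths.updatePartial(Lists.newArrayList(update), lock);

    System.out.println(authzPaths.findAuthzObject(new String[]{"db1", "tbl12"}));
  }
}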

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/main/resources/sentry_hdfs_service.thrift
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/resources/sentry_hdfs_service.thrift b/sentry-hdfs/sentry-hdfs-common/src/main/resources/sentry_hdfs_service.thrift
new file mode 100644
index 0000000..fb60855
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/resources/sentry_hdfs_service.thrift
@@ -0,0 +1,87 @@
+#!/usr/local/bin/thrift -java
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#
+# Thrift service definition for the Sentry HDFS service
+#
+
+include "share/fb303/if/fb303.thrift"
+
+namespace java org.apache.sentry.hdfs.service.thrift
+namespace php sentry.hdfs.thrift
+namespace cpp Apache.Sentry.HDFS.Thrift
+
+struct TPathChanges {
+1: required string authzObj;
+2: required list<list<string>> addPaths;
+3: required list<list<string>> delPaths;
+}
+
+struct TPathEntry {
+1: required byte type;
+2: required string pathElement;
+3: optional string authzObj;
+4: required set<i32> children;
+}
+
+struct TPathsDump {
+1: required i32 rootId;
+2: required map<i32,TPathEntry> nodeMap;
+}
+
+struct TPathsUpdate {
+1: required bool hasFullImage;
+2: optional TPathsDump pathsDump;
+3: required i64 seqNum;
+4: required list<TPathChanges> pathChanges;
+}
+
+struct TPrivilegeChanges {
+1: required string authzObj;
+2: required map<string, string> addPrivileges;
+3: required map<string, string> delPrivileges;
+}
+
+struct TRoleChanges {
+1: required string role;
+2: required list<string> addGroups;
+3: required list<string> delGroups;
+}
+
+struct TPermissionsUpdate {
+1: required bool hasfullImage;
+2: required i64 seqNum;
+3: required map<string, TPrivilegeChanges> privilegeChanges;
+4: required map<string, TRoleChanges> roleChanges; 
+}
+
+struct TAuthzUpdateResponse {
+1: optional list<TPathsUpdate> authzPathUpdate,
+2: optional list<TPermissionsUpdate> authzPermUpdate,
+}
+
+service SentryHDFSService
+{
+  # HMS Path cache
+  void handle_hms_notification(1:TPathsUpdate pathsUpdate);
+  i64 check_hms_seq_num(1:i64 pathSeqNum);
+  TAuthzUpdateResponse get_all_authz_updates_from(1:i64 permSeqNum, 2:i64 pathSeqNum);
+  map<string, list<string>> get_all_related_paths(1:string path, 2:bool exactMatch);
+}
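
As an aside (not part of this commit), the generated classes referenced by the Java code above, such as TPathsUpdate and SentryHDFSService.Client, come out of this IDL via the Thrift compiler, e.g. "thrift --gen java sentry_hdfs_service.thrift", and land in the org.apache.sentry.hdfs.service.thrift package per the namespace declaration.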

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPaths.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPaths.java b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPaths.java
new file mode 100644
index 0000000..29868ae
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPaths.java
@@ -0,0 +1,357 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.List;
+
+import org.apache.hadoop.fs.Path;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public class TestHMSPaths {
+
+  @Test
+  public void testGetPathElements() {
+    List<String> as2 = HMSPaths.getPathElements("/a/b");
+    List<String> as1 = HMSPaths.getPathElements("/a/b");
+    Assert.assertEquals(as1, as2);
+
+    List<String> as = HMSPaths.getPathElements("/a/b");
+    Assert.assertEquals(Lists.newArrayList("a", "b"), as);
+
+    as = HMSPaths.getPathElements("//a/b");
+    Assert.assertEquals(Lists.newArrayList("a", "b"), as);
+
+    as = HMSPaths.getPathElements("/a//b");
+    Assert.assertEquals(Lists.newArrayList("a", "b"), as);
+
+    as = HMSPaths.getPathElements("/a/b/");
+    Assert.assertEquals(Lists.newArrayList("a", "b"), as);
+
+    as = HMSPaths.getPathElements("//a//b//");
+    Assert.assertEquals(Lists.newArrayList("a", "b"), as);
+  }
+
+  @Test
+  public void testEntryType() {
+    Assert.assertTrue(HMSPaths.EntryType.DIR.isRemoveIfDangling());
+    Assert.assertFalse(HMSPaths.EntryType.PREFIX.isRemoveIfDangling());
+    Assert.assertFalse(
+        HMSPaths.EntryType.AUTHZ_OBJECT.isRemoveIfDangling());
+  }
+  
+  @Test
+  public void testRootEntry() {
+    HMSPaths.Entry root = HMSPaths.Entry.createRoot(false);
+    root.toString();
+    Assert.assertNull(root.getParent());
+    Assert.assertEquals(HMSPaths.EntryType.DIR, root.getType());
+    Assert.assertNull(root.getAuthzObj());
+    Assert.assertEquals(Path.SEPARATOR, root.getFullPath());
+    Assert.assertTrue(root.getChildren().isEmpty());
+    root.delete();
+    try {
+      root.find(null, true);
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+    try {
+      root.find(new String[0], true);
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+    try {
+      root.find(null, false);
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+    try {
+      root.find(new String[0], false);
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+    Assert.assertNull(root.find(new String[]{"a"}, true));
+    Assert.assertNull(root.find(new String[]{"a"}, false));
+    Assert.assertNull(root.findPrefixEntry(Lists.newArrayList("a")));
+
+    root.delete();
+  }
+
+  @Test
+  public void testRootPrefixEntry() {
+    HMSPaths.Entry root = HMSPaths.Entry.createRoot(true);
+    root.toString();
+
+    Assert.assertNull(root.find(new String[]{"a"}, true));
+    Assert.assertNull(root.find(new String[]{"a"}, false));
+    Assert.assertEquals(root, root.findPrefixEntry(Lists.newArrayList("a")));
+    Assert.assertEquals(root, root.findPrefixEntry(Lists.newArrayList("a", "b")));
+
+    try {
+      root.createPrefix(Lists.newArrayList("a"));
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+  }
+
+  @Test
+  public void testImmediatePrefixEntry() {
+    HMSPaths.Entry root = HMSPaths.Entry.createRoot(false);
+    HMSPaths.Entry entry = root.createPrefix(Lists.newArrayList("a"));
+    entry.toString();
+    
+    Assert.assertEquals(1, root.getChildren().size());
+
+    Assert.assertEquals(root, entry.getParent());
+    Assert.assertEquals(HMSPaths.EntryType.PREFIX, entry.getType());
+    Assert.assertEquals("a", entry.getPathElement());
+    Assert.assertNull(entry.getAuthzObj());
+    Assert.assertEquals(Path.SEPARATOR + "a", entry.getFullPath());
+    Assert.assertTrue(entry.getChildren().isEmpty());
+
+    Assert.assertEquals(entry, root.findPrefixEntry(Lists.newArrayList("a")));
+    Assert.assertEquals(entry, root.findPrefixEntry(Lists.newArrayList("a", "b")));
+
+    Assert.assertNull(root.find(new String[]{"a", "b"}, false));
+
+    Assert.assertNull(root.find(new String[]{"b"}, false));
+    Assert.assertNull(root.findPrefixEntry(Lists.newArrayList("b")));
+
+    try {
+      root.createPrefix(Lists.newArrayList("a", "b"));
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+
+    try {
+      root.createPrefix(Lists.newArrayList("a", "b", "c"));
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+
+    entry.delete();
+    Assert.assertTrue(root.getChildren().isEmpty());
+  }
+
+  @Test
+  public void testFurtherPrefixEntry() {
+    HMSPaths.Entry root = HMSPaths.Entry.createRoot(false);
+    HMSPaths.Entry entry = root.createPrefix(Lists.newArrayList("a", "b"));
+    entry.toString();
+
+    Assert.assertEquals(1, root.getChildren().size());
+
+    Assert.assertEquals(root, entry.getParent().getParent());
+    Assert.assertEquals(HMSPaths.EntryType.PREFIX, entry.getType());
+    Assert.assertEquals(HMSPaths.EntryType.DIR, 
+        entry.getParent().getType());
+    Assert.assertEquals("b", entry.getPathElement());
+    Assert.assertEquals("a", entry.getParent().getPathElement());
+    Assert.assertNull(entry.getAuthzObj());
+    Assert.assertNull(entry.getParent().getAuthzObj());
+    Assert.assertEquals(Path.SEPARATOR + "a" + Path.SEPARATOR + "b", 
+        entry.getFullPath());
+    Assert.assertEquals(Path.SEPARATOR + "a", entry.getParent().getFullPath());
+    Assert.assertTrue(entry.getChildren().isEmpty());
+    Assert.assertEquals(1, entry.getParent().getChildren().size());
+
+    Assert.assertEquals(entry, root.findPrefixEntry(Lists.newArrayList("a", "b")));
+    Assert.assertNull(root.findPrefixEntry(Lists.newArrayList("a")));
+
+    Assert.assertNull(root.find(new String[]{"a", "b", "c"}, false));
+
+    try {
+      root.createPrefix(Lists.newArrayList("a", "b"));
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+
+    try {
+      root.createPrefix(Lists.newArrayList("a", "b", "c"));
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+
+    entry.delete();
+    Assert.assertTrue(root.getChildren().isEmpty());
+  }
+
+  @Test
+  public void testImmediateAuthzEntry() {
+    HMSPaths.Entry root = HMSPaths.Entry.createRoot(false);
+    HMSPaths.Entry prefix = root.createPrefix(Lists.newArrayList("a", "b"));
+
+    HMSPaths.Entry entry = root.createAuthzObjPath(
+        Lists.newArrayList("a", "b", "p1"), "A");
+    Assert.assertEquals(prefix, entry.getParent());
+    Assert.assertEquals(HMSPaths.EntryType.AUTHZ_OBJECT, entry.getType());
+    Assert.assertEquals("p1", entry.getPathElement());
+    Assert.assertEquals("A", entry.getAuthzObj());
+    Assert.assertEquals(Path.SEPARATOR + "a" + Path.SEPARATOR + "b" +
+        Path.SEPARATOR + "p1", entry.getFullPath());
+
+    try {
+      root.createPrefix(Lists.newArrayList("a", "b", "p1", "c"));
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+
+    Assert.assertEquals(entry, root.find(new String[]{"a", "b", "p1"}, true));
+    Assert.assertEquals(entry, root.find(new String[]{"a", "b", "p1"}, false));
+    Assert.assertEquals(entry, root.find(new String[]{"a", "b", "p1", "c"}, 
+        true));
+    Assert.assertNull(root.find(new String[]{"a", "b", "p1", "c"}, false));
+    Assert.assertEquals(prefix, root.findPrefixEntry(
+        Lists.newArrayList("a", "b", "p1")));
+
+    root.find(new String[]{"a", "b", "p1"}, true).delete();
+    Assert.assertNull(root.find(new String[]{"a", "b", "p1"}, false));
+    Assert.assertNull(root.find(new String[]{"a", "b"}, false));
+    Assert.assertEquals(prefix, root.findPrefixEntry(
+        Lists.newArrayList("a", "b", "p1")));
+
+  }
+
+  @Test
+  public void testFurtherAuthzEntry() {
+    HMSPaths.Entry root = HMSPaths.Entry.createRoot(false);
+    HMSPaths.Entry prefix = root.createPrefix(Lists.newArrayList("a", "b"));
+
+    HMSPaths.Entry entry = root.createAuthzObjPath(
+        Lists.newArrayList("a", "b", "t", "p1"), "A");
+    Assert.assertEquals(prefix, entry.getParent().getParent());
+    Assert.assertEquals(HMSPaths.EntryType.AUTHZ_OBJECT, entry.getType());
+    Assert.assertEquals("p1", entry.getPathElement());
+    Assert.assertEquals("A", entry.getAuthzObj());
+    Assert.assertEquals(Path.SEPARATOR + "a" + Path.SEPARATOR + "b" +
+        Path.SEPARATOR + "t" + Path.SEPARATOR + "p1", entry.getFullPath());
+
+    try {
+      root.createPrefix(Lists.newArrayList("a", "b", "p1", "t", "c"));
+      Assert.fail();
+    } catch (IllegalArgumentException ex) {
+      //NOP
+    }
+
+    HMSPaths.Entry ep2 = root.createAuthzObjPath(
+        Lists.newArrayList("a", "b", "t", "p1", "p2"), "A");
+
+    Assert.assertEquals(HMSPaths.EntryType.AUTHZ_OBJECT, entry.getType());
+    Assert.assertEquals("p1", entry.getPathElement());
+    Assert.assertEquals("A", entry.getAuthzObj());
+
+    Assert.assertEquals(HMSPaths.EntryType.AUTHZ_OBJECT, ep2.getType());
+    Assert.assertEquals("p2", ep2.getPathElement());
+    Assert.assertEquals("A", entry.getAuthzObj());
+
+    Assert.assertEquals(entry, root.find(new String[]{"a", "b", "t", "p1"},
+        true));
+    Assert.assertEquals(entry, root.find(new String[]{"a", "b", "t", "p1"},
+        false));
+    Assert.assertEquals(entry, root.find(new String[]{"a", "b", "t", "p1", "c"},
+        true));
+    Assert.assertNull(root.find(new String[]{"a", "b", "t", "p1", "c"}, false));
+    Assert.assertEquals(prefix, root.findPrefixEntry(
+        Lists.newArrayList("a", "b", "t", "p1")));
+
+    Assert.assertEquals(ep2, root.find(new String[]{"a", "b", "t", "p1", "p2"},
+        true));
+    Assert.assertEquals(ep2, root.find(new String[]{"a", "b", "t", "p1", "p2"},
+        false));
+    Assert.assertEquals(ep2, root.find(new String[]{"a", "b", "t", "p1", "p2", "c"},
+        true));
+    Assert.assertNull(root.find(new String[]{"a", "b", "t", "p1", "p2", "c"}, false));
+    Assert.assertEquals(prefix, root.findPrefixEntry(
+        Lists.newArrayList("a", "b", "t", "p1", "p2")));
+
+    root.find(new String[]{"a", "b", "t", "p1"}, false).delete();
+
+    Assert.assertNull(root.find(new String[]{"a", "b", "t", "p1"},
+        true));
+    Assert.assertEquals(HMSPaths.EntryType.DIR, entry.getType());
+    Assert.assertNull(entry.getAuthzObj());
+
+    Assert.assertNull(root.find(new String[]{"a", "b", "t", "p1"}, false));
+    Assert.assertNull(root.find(new String[]{"a", "b", "t"}, false));
+    Assert.assertNull(root.find(new String[]{"a", "b"}, false));
+    Assert.assertEquals(prefix, root.findPrefixEntry(
+        Lists.newArrayList("a", "b", "t", "p1")));
+
+    Assert.assertNotNull(root.find(new String[]{"a", "b", "t", "p1", "p2"}, false));
+    root.find(new String[]{"a", "b", "t", "p1", "p2"}, false).delete();
+    Assert.assertNull(root.find(new String[]{"a", "b", "t", "p1"}, false));
+    Assert.assertNull(root.find(new String[]{"a", "b", "t"}, false));
+    Assert.assertNull(root.find(new String[]{"a", "b"}, false));
+    Assert.assertEquals(prefix, root.findPrefixEntry(
+        Lists.newArrayList("a", "b", "t", "p1")));
+
+  }
+
+  @Test
+  public void testMultipleAuthzEntry() {
+    HMSPaths.Entry root = HMSPaths.Entry.createRoot(false);
+    HMSPaths.Entry prefix = root.createPrefix(Lists.newArrayList("a", "b"));
+
+    HMSPaths.Entry e1 = root.createAuthzObjPath(
+        Lists.newArrayList("a", "b", "t", "p1"), "A");
+    HMSPaths.Entry e2 = root.createAuthzObjPath(
+        Lists.newArrayList("a", "b", "t", "p2"), "A");
+
+
+    Assert.assertEquals(e1, root.find(new String[]{"a", "b", "t", "p1"}, true));
+    Assert.assertEquals(e1, root.find(new String[]{"a", "b", "t", "p1"}, 
+        false));
+    Assert.assertEquals(e1, root.find(new String[]{"a", "b", "t", "p1", "c"},
+        true));
+    Assert.assertNull(root.find(new String[]{"a", "b", "t", "p1", "c"}, false));
+    Assert.assertEquals(prefix, root.findPrefixEntry(
+        Lists.newArrayList("a", "b", "t", "p1")));
+
+    Assert.assertEquals(e2, root.find(new String[]{"a", "b", "t", "p2"}, true));
+    Assert.assertEquals(e2, root.find(new String[]{"a", "b", "t", "p2"}, 
+        false));
+    Assert.assertEquals(e2, root.find(new String[]{"a", "b", "t", "p2", "c"},
+        true));
+    Assert.assertNull(root.find(new String[]{"a", "b", "t", "p2", "c"}, false));
+    Assert.assertEquals(prefix, root.findPrefixEntry(
+        Lists.newArrayList("a", "b", "t", "p2")));
+
+    root.find(new String[]{"a", "b", "t", "p1"}, true).delete();
+    Assert.assertNull(root.find(new String[]{"a", "b", "t", "p1"}, false));
+
+    root.find(new String[]{"a", "b", "t", "p2"}, true).delete();
+    Assert.assertNull(root.find(new String[]{"a", "b", "t", "p2"}, false));
+    Assert.assertNull(root.find(new String[]{"a", "b", "t"}, false));
+
+    Assert.assertEquals(prefix, root.findPrefixEntry(
+        Lists.newArrayList("a", "b", "t", "p3")));
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPathsFullDump.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPathsFullDump.java b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPathsFullDump.java
new file mode 100644
index 0000000..2dfe73c
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestHMSPathsFullDump.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import org.junit.Assert;
+
+import org.apache.sentry.hdfs.service.thrift.TPathsDump;
+import org.apache.thrift.TDeserializer;
+import org.apache.thrift.TException;
+import org.apache.thrift.TSerializer;
+import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.protocol.TProtocolFactory;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public class TestHMSPathsFullDump {
+  
+  private static boolean useCompact = true;
+
+  @Test
+  public void testDumpAndInitialize() {
+    HMSPaths hmsPaths = new HMSPaths(new String[] {"/user/hive/warehouse", "/user/hive/w2"});
+    hmsPaths._addAuthzObject("db1", Lists.newArrayList("/user/hive/warehouse/db1"));
+    hmsPaths._addAuthzObject("db1.tbl11", Lists.newArrayList("/user/hive/warehouse/db1/tbl11"));
+    hmsPaths._addPathsToAuthzObject("db1.tbl11", Lists.newArrayList(
+        "/user/hive/warehouse/db1/tbl11/part111",
+        "/user/hive/warehouse/db1/tbl11/part112",
+        "/user/hive/warehouse/db1/tbl11/p1=1/p2=x"));
+
+    // Not in prefix paths
+    hmsPaths._addAuthzObject("db2", Lists.newArrayList("/user/hive/w2/db2"));
+    hmsPaths._addAuthzObject("db2.tbl21", 
Lists.newArrayList("/user/hive/w2/db2/tbl21"));
+    hmsPaths._addPathsToAuthzObject("db2.tbl21", 
Lists.newArrayList("/user/hive/w2/db2/tbl21/p1=1/p2=x"));
+
+    Assert.assertEquals("db1", hmsPaths.findAuthzObject(new String[]{"user", 
"hive", "warehouse", "db1"}, false));
+    Assert.assertEquals("db1.tbl11", hmsPaths.findAuthzObject(new 
String[]{"user", "hive", "warehouse", "db1", "tbl11"}, false));
+    Assert.assertEquals("db1.tbl11", hmsPaths.findAuthzObject(new 
String[]{"user", "hive", "warehouse", "db1", "tbl11", "part111"}, false));
+    Assert.assertEquals("db1.tbl11", hmsPaths.findAuthzObject(new 
String[]{"user", "hive", "warehouse", "db1", "tbl11", "part112"}, false));
+
+    Assert.assertEquals("db1.tbl11", hmsPaths.findAuthzObject(new 
String[]{"user", "hive", "warehouse", "db1", "tbl11", "p1=1", "p2=x"}, false));
+    Assert.assertEquals("db1.tbl11", hmsPaths.findAuthzObject(new 
String[]{"user", "hive", "warehouse", "db1", "tbl11", "p1=1"}, true));
+    Assert.assertEquals("db2.tbl21", hmsPaths.findAuthzObject(new 
String[]{"user", "hive", "w2", "db2", "tbl21", "p1=1"}, true));
+
+    HMSPathsDumper serDe = hmsPaths.getPathsDump();
+    TPathsDump pathsDump = serDe.createPathsDump();
+    HMSPaths hmsPaths2 = new HMSPaths(new String[] {"/user/hive/warehouse"}).getPathsDump().initializeFromDump(pathsDump);
+
+    Assert.assertEquals("db1", hmsPaths2.findAuthzObject(new String[]{"user", 
"hive", "warehouse", "db1"}, false));
+    Assert.assertEquals("db1.tbl11", hmsPaths2.findAuthzObject(new 
String[]{"user", "hive", "warehouse", "db1", "tbl11"}, false));
+    Assert.assertEquals("db1.tbl11", hmsPaths2.findAuthzObject(new 
String[]{"user", "hive", "warehouse", "db1", "tbl11", "part111"}, false));
+    Assert.assertEquals("db1.tbl11", hmsPaths2.findAuthzObject(new 
String[]{"user", "hive", "warehouse", "db1", "tbl11", "part112"}, false));
+
+    // This path is not under a prefix, so it should not be deserialized.
+    Assert.assertNull(hmsPaths2.findAuthzObject(new String[]{"user", "hive", "w2", "db2", "tbl21", "p1=1"}, true));
+  }
+
+  @Test
+  public void testThriftSerialization() throws TException {
+    HMSPaths hmsPaths = new HMSPaths(new String[] {"/"});
+    String prefix = "/user/hive/warehouse/";
+    for (int dbNum = 0; dbNum < 10; dbNum++) {
+      String dbName = "db" + dbNum;
+      hmsPaths._addAuthzObject(dbName, Lists.newArrayList(prefix + dbName));
+      for (int tblNum = 0; tblNum < 1000; tblNum++) {
+        String tblName = "tbl" + tblNum;
+        hmsPaths._addAuthzObject(dbName + "." + tblName, Lists.newArrayList(prefix + dbName + "/" + tblName));
+        for (int partNum = 0; partNum < 100; partNum++) {
+          String partName = "part" + partNum;
+          hmsPaths
+              ._addPathsToAuthzObject(
+                  dbName + "." + tblName,
+                  Lists.newArrayList(prefix + dbName + "/" + tblName + "/"
+                      + partName));
+        }
+      }
+    }
+    HMSPathsDumper serDe = hmsPaths.getPathsDump();
+    long t1 = System.currentTimeMillis();
+    TPathsDump pathsDump = serDe.createPathsDump();
+    
+    TProtocolFactory protoFactory = useCompact ? new TCompactProtocol.Factory() : new TBinaryProtocol.Factory();
+    byte[] ser = new TSerializer(protoFactory).serialize(pathsDump);
+    long serTime = System.currentTimeMillis() - t1;
+    System.out.println("Serialization Time: " + serTime + ", " + ser.length);
+
+    t1 = System.currentTimeMillis();
+    TPathsDump tPathsDump = new TPathsDump();
+    new TDeserializer(protoFactory).deserialize(tPathsDump, ser);
+    HMSPaths fromDump = serDe.initializeFromDump(tPathsDump);
+    System.out.println("Deserialization Time: " + (System.currentTimeMillis() 
- t1));
+    Assert.assertEquals("db9.tbl999", fromDump.findAuthzObject(new 
String[]{"user", "hive", "warehouse", "db9", "tbl999"}, false));
+    Assert.assertEquals("db9.tbl999", fromDump.findAuthzObject(new 
String[]{"user", "hive", "warehouse", "db9", "tbl999", "part99"}, false));
+  }
+
+}
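
The useCompact flag above switches the benchmark between TCompactProtocol and TBinaryProtocol; the compact protocol trades some CPU for a noticeably smaller serialized dump, which matters for large path images like the 10 db x 1000 table x 100 partition tree built here.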

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestUpdateableAuthzPaths.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestUpdateableAuthzPaths.java b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestUpdateableAuthzPaths.java
new file mode 100644
index 0000000..80b765a
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestUpdateableAuthzPaths.java
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.sentry.hdfs.service.thrift.TPathChanges;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public class TestUpdateableAuthzPaths {
+
+  @Test
+  public void testFullUpdate() {
+    HMSPaths hmsPaths = createBaseHMSPaths(1, 1);
+    assertEquals("db1", hmsPaths.findAuthzObjectExactMatch(new 
String[]{"db1"}));
+    assertEquals("db1.tbl11", hmsPaths.findAuthzObjectExactMatch(new 
String[]{"db1", "tbl11"}));
+    assertEquals("db1.tbl11", hmsPaths.findAuthzObjectExactMatch(new 
String[]{"db1", "tbl11", "part111"}));
+    assertEquals("db1.tbl11", hmsPaths.findAuthzObjectExactMatch(new 
String[]{"db1", "tbl11", "part112"}));
+
+    UpdateableAuthzPaths authzPaths = new UpdateableAuthzPaths(hmsPaths);
+    PathsUpdate update = new PathsUpdate(1, true);
+    update.toThrift().setPathsDump(authzPaths.getPathsDump().createPathsDump());
+
+    UpdateableAuthzPaths authzPaths2 = new UpdateableAuthzPaths(new String[] {"/"});
+    UpdateableAuthzPaths pre = authzPaths2.updateFull(update);
+    assertFalse(pre == authzPaths2);
+    authzPaths2 = pre;
+
+    assertEquals("db1", authzPaths2.findAuthzObjectExactMatch(new 
String[]{"db1"}));
+    assertEquals("db1.tbl11", authzPaths2.findAuthzObjectExactMatch(new 
String[]{"db1", "tbl11"}));
+    assertEquals("db1.tbl11", authzPaths2.findAuthzObjectExactMatch(new 
String[]{"db1", "tbl11", "part111"}));
+    assertEquals("db1.tbl11", authzPaths2.findAuthzObjectExactMatch(new 
String[]{"db1", "tbl11", "part112"}));
+
+    // Ensure Full Update wipes old stuff
+    UpdateableAuthzPaths authzPaths3 = new UpdateableAuthzPaths(createBaseHMSPaths(2, 1));
+    update = new PathsUpdate(2, true);
+    update.toThrift().setPathsDump(authzPaths3.getPathsDump().createPathsDump());
+    pre = authzPaths2.updateFull(update);
+    assertFalse(pre == authzPaths2);
+    authzPaths2 = pre;
+
+    assertNull(authzPaths2.findAuthzObjectExactMatch(new String[]{"db1"}));
+    assertNull(authzPaths2.findAuthzObjectExactMatch(new String[]{"db1", "tbl11"}));
+
+    assertEquals("db2", authzPaths2.findAuthzObjectExactMatch(new 
String[]{"db2"}));
+    assertEquals("db2.tbl21", authzPaths2.findAuthzObjectExactMatch(new 
String[]{"db2", "tbl21"}));
+    assertEquals("db2.tbl21", authzPaths2.findAuthzObjectExactMatch(new 
String[]{"db2", "tbl21", "part211"}));
+    assertEquals("db2.tbl21", authzPaths2.findAuthzObjectExactMatch(new 
String[]{"db2", "tbl21", "part212"}));
+  }
+
+  @Test
+  public void testPartialUpdateAddPath() {
+    HMSPaths hmsPaths = createBaseHMSPaths(1, 1);
+    UpdateableAuthzPaths authzPaths = new UpdateableAuthzPaths(hmsPaths);
+    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+    // Create table
+    PathsUpdate update = new PathsUpdate(2, false);
+    TPathChanges pathChange = update.newPathChange("db1.tbl12");
+    pathChange.addToAddPaths(PathsUpdate.cleanPath("file:///db1/tbl12"));
+    authzPaths.updatePartial(Lists.newArrayList(update), lock);
+    
+    // Add partition
+    update = new PathsUpdate(3, false);
+    pathChange = update.newPathChange("db1.tbl12");
+    pathChange.addToAddPaths(PathsUpdate.cleanPath("file:///db1/tbl12/part121"));
+    authzPaths.updatePartial(Lists.newArrayList(update), lock);
+
+    // Ensure no change in existing Paths
+    assertEquals("db1", authzPaths.findAuthzObjectExactMatch(new 
String[]{"db1"}));
+    assertEquals("db1.tbl11", authzPaths.findAuthzObjectExactMatch(new 
String[]{"db1", "tbl11"}));
+    assertEquals("db1.tbl11", authzPaths.findAuthzObjectExactMatch(new 
String[]{"db1", "tbl11", "part111"}));
+    assertEquals("db1.tbl11", authzPaths.findAuthzObjectExactMatch(new 
String[]{"db1", "tbl11", "part112"}));
+
+    // Verify new Paths
+    assertEquals("db1.tbl12", authzPaths.findAuthzObjectExactMatch(new 
String[]{"db1", "tbl12"}));
+    assertEquals("db1.tbl12", authzPaths.findAuthzObjectExactMatch(new 
String[]{"db1", "tbl12", "part121"}));
+
+    // Rename table
+    update = new PathsUpdate(4, false);
+    update.newPathChange("db1.xtbl11").addToAddPaths(PathsUpdate.cleanPath("file:///db1/xtbl11"));
+    update.newPathChange("db1.tbl11").addToDelPaths(PathsUpdate.cleanPath("file:///db1/tbl11"));
+    authzPaths.updatePartial(Lists.newArrayList(update), lock);
+
+    // Verify name change
+    assertEquals("db1", authzPaths.findAuthzObjectExactMatch(new 
String[]{"db1"}));
+    assertEquals("db1.xtbl11", authzPaths.findAuthzObjectExactMatch(new 
String[]{"db1", "xtbl11"}));
+    // An explicit set-location has to be done on the partition, else it will
+    // stay associated with the old location
+    assertEquals("db1.xtbl11", authzPaths.findAuthzObjectExactMatch(new 
String[]{"db1", "tbl11", "part111"}));
+    assertEquals("db1.xtbl11", authzPaths.findAuthzObjectExactMatch(new 
String[]{"db1", "tbl11", "part112"}));
+    // Verify other tables are not touched
+    assertNull(authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "xtbl12"}));
+    assertNull(authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "xtbl12", "part121"}));
+    assertEquals("db1.tbl12", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl12"}));
+    assertEquals("db1.tbl12", authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl12", "part121"}));
+
+  }
+
+  @Test
+  public void testPartialUpdateDelPath() {
+    HMSPaths hmsPaths = createBaseHMSPaths(1, 1);
+    UpdateableAuthzPaths authzPaths = new UpdateableAuthzPaths(hmsPaths);
+    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+    assertEquals("db1.tbl11", authzPaths.findAuthzObjectExactMatch(new 
String[]{"db1", "tbl11"}));
+    assertEquals("db1.tbl11", authzPaths.findAuthzObjectExactMatch(new 
String[]{"db1", "tbl11", "part111"}));
+    
+    // Drop partition
+    PathsUpdate update = new PathsUpdate(2, false);
+    TPathChanges pathChange = update.newPathChange("db1.tbl11");
+    pathChange.addToDelPaths(PathsUpdate.cleanPath("file:///db1/tbl11/part111"));
+    authzPaths.updatePartial(Lists.newArrayList(update), lock);
+
+    // Verify Paths deleted
+    assertNull(authzPaths.findAuthzObjectExactMatch(new String[]{"db1", "tbl11", "part111"}));
+
+    // Verify rest ok
+    assertEquals("db1.tbl11", authzPaths.findAuthzObjectExactMatch(new 
String[]{"db1", "tbl11", "part112"}));
+  }
+
+  private HMSPaths createBaseHMSPaths(int dbNum, int tblNum) {
+    String db = "db" + dbNum;
+    String tbl = "tbl" + dbNum + "" + tblNum;
+    String fullTbl = db + "." + tbl;
+    String dbPath = "/" + db;
+    String tblPath = "/" + db + "/" + tbl;
+    String partPath = tblPath + "/part" + dbNum + "" + tblNum;
+    HMSPaths hmsPaths = new HMSPaths(new String[] {"/"});
+    hmsPaths._addAuthzObject(db, Lists.newArrayList(dbPath));
+    hmsPaths._addAuthzObject(fullTbl, Lists.newArrayList(tblPath));
+    hmsPaths._addPathsToAuthzObject(fullTbl, Lists.newArrayList(
+        partPath + "1", partPath + "2" ));
+    return hmsPaths;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-common/src/test/resources/hdfs-sentry.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/test/resources/hdfs-sentry.xml b/sentry-hdfs/sentry-hdfs-common/src/test/resources/hdfs-sentry.xml
new file mode 100644
index 0000000..c23a431
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/test/resources/hdfs-sentry.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+  <!-- Dummy file that is rewritten by test cases in the target test classpath. -->
+</configuration>

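The comment above notes that test cases rewrite this dummy file on the test classpath. A hedged sketch of how a test might do that with Hadoop's Configuration.writeXml; the output path and property below are hypothetical examples, not part of this patch.

import java.io.File;
import java.io.FileOutputStream;
import java.io.OutputStream;

import org.apache.hadoop.conf.Configuration;

public class RewriteTestConfig {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(false);
    // Hypothetical override a test might want visible via the classpath.
    conf.set("sentry.authorization-provider.hdfs-user", "hive");
    File out = new File("target/test-classes/hdfs-sentry.xml");
    OutputStream os = new FileOutputStream(out);
    try {
      conf.writeXml(os);  // serializes the conf as a <configuration> document
    } finally {
      os.close();
    }
  }
}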
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-dist/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-dist/pom.xml 
b/sentry-hdfs/sentry-hdfs-dist/pom.xml
new file mode 100644
index 0000000..4bbb212
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-dist/pom.xml
@@ -0,0 +1,79 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"; 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"; 
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xsd/maven-4.0.0.xsd";>
+
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>org.apache.sentry</groupId>
+    <artifactId>sentry-hdfs</artifactId>
+    <version>1.5.0-incubating-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>sentry-hdfs-dist</artifactId>
+  <name>Sentry HDFS Dist</name>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-provider-db</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-hdfs-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-hdfs-service</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-hdfs-namenode-plugin</artifactId>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-shade-plugin</artifactId>
+        <version>2.1</version>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <goals>
+              <goal>shade</goal>
+            </goals>
+            <configuration>
+              <finalName>sentry-hdfs-${project.version}</finalName>
+              <artifactSet>
+                <includes>
+                  <include>org.apache.sentry:sentry-hdfs-common</include>
+                  <include>org.apache.sentry:sentry-hdfs-namenode-plugin</include>
+                  <include>org.apache.sentry:sentry-provider-db</include>
+                </includes>
+              </artifactSet>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-namenode-plugin/.gitignore
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/.gitignore 
b/sentry-hdfs/sentry-hdfs-namenode-plugin/.gitignore
new file mode 100644
index 0000000..91ad75b
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/.gitignore
@@ -0,0 +1,18 @@
+*.class
+target/
+.classpath
+.project
+.settings
+.metadata
+.idea/
+*.iml
+derby.log
+datanucleus.log
+sentry-core/sentry-core-common/src/gen
+**/TempStatsStore/
+# Package Files #
+*.jar
+*.war
+*.ear
+test-output/
+maven-repo/

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-namenode-plugin/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-namenode-plugin/pom.xml 
b/sentry-hdfs/sentry-hdfs-namenode-plugin/pom.xml
new file mode 100644
index 0000000..813c2e4
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-namenode-plugin/pom.xml
@@ -0,0 +1,63 @@
+<?xml version="1.0"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xsd/maven-4.0.0.xsd"; 
xmlns="http://maven.apache.org/POM/4.0.0";
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance";>
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.sentry</groupId>
+    <artifactId>sentry-hdfs</artifactId>
+    <version>1.5.0-incubating-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>sentry-hdfs-namenode-plugin</artifactId>
+  <name>Sentry HDFS Namenode Plugin</name>
+
+  <dependencies>
+
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-hdfs-common</artifactId>
+      <version>1.5.0-incubating-SNAPSHOT</version>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minicluster</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationConstants.java
----------------------------------------------------------------------
diff --git 
a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationConstants.java
 
b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationConstants.java
new file mode 100644
index 0000000..cf33b8b
--- /dev/null
+++ 
b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationConstants.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+public class SentryAuthorizationConstants {
+
+  public static final String CONFIG_FILE = "hdfs-sentry.xml";
+
+  public static final String CONFIG_PREFIX = "sentry.authorization-provider.";
+
+  public static final String HDFS_USER_KEY = CONFIG_PREFIX + "hdfs-user";
+  public static final String HDFS_USER_DEFAULT = "hive";
+
+  public static final String HDFS_GROUP_KEY = CONFIG_PREFIX + "hdfs-group";
+  public static final String HDFS_GROUP_DEFAULT = "hive";
+
+  public static final String HDFS_PERMISSION_KEY = CONFIG_PREFIX + 
+      "hdfs-permission";
+  public static final long HDFS_PERMISSION_DEFAULT = 0770;
+
+  public static final String HDFS_PATH_PREFIXES_KEY = CONFIG_PREFIX + 
+      "hdfs-path-prefixes";
+  public static final String[] HDFS_PATH_PREFIXES_DEFAULT = new String[0];
+
+  public static final String CACHE_REFRESH_INTERVAL_KEY = CONFIG_PREFIX + 
+      "cache-refresh-interval.ms";
+  public static final int CACHE_REFRESH_INTERVAL_DEFAULT = 500;
+
+  public static final String CACHE_STALE_THRESHOLD_KEY = CONFIG_PREFIX + 
+      "cache-stale-threshold.ms";
+  public static final int CACHE_STALE_THRESHOLD_DEFAULT = 60 * 1000;
+
+  public static final String CACHE_REFRESH_RETRY_WAIT_KEY = CONFIG_PREFIX +
+      "cache-refresh-retry-wait.ms";
+  public static final int CACHE_REFRESH_RETRY_WAIT_DEFAULT = 30 * 1000;
+
+  public static final String INCLUDE_HDFS_AUTHZ_AS_ACL_KEY = CONFIG_PREFIX + 
+      "include-hdfs-authz-as-acl";
+  public static final boolean INCLUDE_HDFS_AUTHZ_AS_ACL_DEFAULT = false;
+}

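Resolved against CONFIG_PREFIX, these keys become properties such as sentry.authorization-provider.hdfs-user. A minimal sketch of how a consumer reads them, assuming only Hadoop's Configuration API and this class on the classpath; the class name ConfigExample is hypothetical.

import org.apache.hadoop.conf.Configuration;

public class ConfigExample {
  public static void main(String[] args) {
    // Layer hdfs-sentry.xml (if present on the classpath) over an empty conf.
    Configuration conf = new Configuration(false);
    conf.addResource(SentryAuthorizationConstants.CONFIG_FILE);

    // "sentry.authorization-provider.hdfs-user", default "hive"
    String user = conf.get(SentryAuthorizationConstants.HDFS_USER_KEY,
        SentryAuthorizationConstants.HDFS_USER_DEFAULT);

    // "sentry.authorization-provider.cache-refresh-interval.ms", default 500
    int refreshMs = conf.getInt(
        SentryAuthorizationConstants.CACHE_REFRESH_INTERVAL_KEY,
        SentryAuthorizationConstants.CACHE_REFRESH_INTERVAL_DEFAULT);

    System.out.println("hdfs-user=" + user + ", refresh=" + refreshMs + "ms");
  }
}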
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationInfo.java
----------------------------------------------------------------------
diff --git 
a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationInfo.java
 
b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationInfo.java
new file mode 100644
index 0000000..3081ae1
--- /dev/null
+++ 
b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationInfo.java
@@ -0,0 +1,237 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.sentry.hdfs.SentryHDFSServiceClient.SentryAuthzUpdate;
+import org.apache.sentry.hdfs.Updateable.Update;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+public class SentryAuthorizationInfo implements Runnable {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SentryAuthorizationInfo.class);
+
+  private SentryUpdater updater;
+  private volatile UpdateableAuthzPaths authzPaths;
+  private volatile UpdateableAuthzPermissions authzPermissions;
+
+  private int refreshIntervalMillisec;
+  private int staleThresholdMillisec;
+  private int retryWaitMillisec;
+  private ScheduledExecutorService executor;
+  private volatile long lastUpdate;
+  private volatile long waitUntil;
+  private volatile long lastStaleReport;
+  // We don't need a re-entrant lock, but we do need a ReadWriteLock;
+  // unfortunately, ReentrantReadWriteLock is the only available
+  // concrete implementation of a ReadWriteLock.
+  private final ReadWriteLock lock = new ReentrantReadWriteLock();
+
+  @VisibleForTesting
+  SentryAuthorizationInfo() {}
+
+  public SentryAuthorizationInfo(Configuration conf) throws Exception {
+    String[] pathPrefixes = conf.getTrimmedStrings(
+        SentryAuthorizationConstants.HDFS_PATH_PREFIXES_KEY, 
+        SentryAuthorizationConstants.HDFS_PATH_PREFIXES_DEFAULT);
+    if (pathPrefixes.length == 0) {
+      LOG.warn("There are not HDFS path prefixes configured in [{}], "
+          + "Sentry authorization won't be enforced on any HDFS location",
+          SentryAuthorizationConstants.HDFS_PATH_PREFIXES_KEY);
+    } else {
+      refreshIntervalMillisec = conf.getInt(
+          SentryAuthorizationConstants.CACHE_REFRESH_INTERVAL_KEY,
+          SentryAuthorizationConstants.CACHE_REFRESH_INTERVAL_DEFAULT);
+      staleThresholdMillisec = conf.getInt(
+          SentryAuthorizationConstants.CACHE_STALE_THRESHOLD_KEY,
+          SentryAuthorizationConstants.CACHE_STALE_THRESHOLD_DEFAULT);
+      retryWaitMillisec = conf.getInt(
+          SentryAuthorizationConstants.CACHE_REFRESH_RETRY_WAIT_KEY,
+          SentryAuthorizationConstants.CACHE_REFRESH_RETRY_WAIT_DEFAULT);
+
+      LOG.debug("Sentry authorization will enforced in the following HDFS " +
+          "locations: [{}]", StringUtils.arrayToString(pathPrefixes));
+      LOG.debug("Refresh interval [{}]ms, retry wait [{}], stale threshold " +
+              "[{}]ms", new Object[] 
+          {refreshIntervalMillisec, retryWaitMillisec, 
staleThresholdMillisec});
+
+      authzPaths = new UpdateableAuthzPaths(pathPrefixes);
+      authzPermissions = new UpdateableAuthzPermissions();
+      waitUntil = System.currentTimeMillis();
+      lastStaleReport = 0;
+      updater = new SentryUpdater(conf, this);
+    }
+  }
+
+  UpdateableAuthzPaths getAuthzPaths() {
+    return authzPaths;
+  }
+
+  UpdateableAuthzPermissions getAuthzPermissions() {
+    return authzPermissions;
+  }
+
+  private void update() {
+    SentryAuthzUpdate updates = updater.getUpdates();
+    UpdateableAuthzPaths newAuthzPaths = processUpdates(
+        updates.getPathUpdates(), authzPaths);
+    UpdateableAuthzPermissions newAuthzPerms = processUpdates(
+        updates.getPermUpdates(), authzPermissions);
+    // If there were any FULL updates the returned instance would be
+    // different
+    if ((newAuthzPaths != authzPaths) || (newAuthzPerms != authzPermissions)) {
+      lock.writeLock().lock();
+      try {
+        authzPaths = newAuthzPaths;
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("FULL Updated paths seq Num [" + 
authzPaths.getLastUpdatedSeqNum() + "]");
+        }
+        authzPermissions = newAuthzPerms;
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("FULL Updated perms seq Num [" + 
authzPermissions.getLastUpdatedSeqNum() + "]");
+        }
+      } finally {
+        lock.writeLock().unlock();
+      }
+    }
+
+  }
+
+  @SuppressWarnings("unchecked")
+  private <K extends Update, V extends Updateable<K>> V processUpdates(
+      List<K> updates, V updateable) {
+    // In a list of updates, if there is a full update it will be the first
+    // one in the list; all the remaining ones will be partial updates.
+    if (updates.size() > 0) {
+      if (updates.get(0).hasFullImage()) {
+        updateable = (V) updateable.updateFull(updates.remove(0));
+      }
+      // Any more elements ?
+      if (!updates.isEmpty()) {
+        updateable.updatePartial(updates, lock);
+      }
+    }
+    return updateable;
+  }
+
+  public void run() {
+    try {
+      // In case of a previous update failure, we sleep for a retry wait
+      // interval; we can do this because we are using a single-threaded
+      // executor and scheduling the runs with a fixed delay.
+      long currTime = System.currentTimeMillis();
+      if (waitUntil > currTime) {
+        Thread.sleep(waitUntil - currTime);
+      }
+      update();
+      // we reset lastUpdate only on successful pulling
+      lastUpdate = System.currentTimeMillis();
+      waitUntil = lastUpdate;
+    } catch (Exception ex) {
+      LOG.warn("Failed to update, will retry in [{}]ms, error: ", 
+          new Object[]{ retryWaitMillisec, ex.getMessage(), ex});
+      waitUntil = System.currentTimeMillis() + retryWaitMillisec;
+    }
+  }
+
+  public void start() {
+    if (authzPaths != null) {
+      try {
+        update();
+      } catch (Exception ex) {
+        LOG.warn("Failed to do initial update, will retry in [{}]ms, error: ",
+            new Object[]{retryWaitMillisec, ex.getMessage(), ex});
+        waitUntil = System.currentTimeMillis() + retryWaitMillisec;
+      }
+      executor = Executors.newSingleThreadScheduledExecutor(
+          new ThreadFactory() {
+            @Override
+            public Thread newThread(Runnable r) {
+              Thread t = new Thread(r,
+                  SentryAuthorizationInfo.class.getName() + "-refresher");
+              t.setDaemon(true);
+              return t;
+            }
+          }
+      );
+      executor.scheduleWithFixedDelay(this, refreshIntervalMillisec, 
+          refreshIntervalMillisec, TimeUnit.MILLISECONDS);
+    }
+  }
+
+  public void stop() {
+    if (authzPaths != null) {
+      executor.shutdownNow();
+    }
+  }
+
+  public boolean isStale() {
+    long now = System.currentTimeMillis();
+    boolean stale = now - lastUpdate > staleThresholdMillisec;
+    if (stale && now - lastStaleReport > retryWaitMillisec) {
+      LOG.warn("Authorization information has been stale for [{}]s", 
+          (now - lastUpdate) / 1000);
+      lastStaleReport = now;
+    }
+    return stale;
+  }
+
+  public boolean isManaged(String[] pathElements) {
+    lock.readLock().lock();
+    try {
+      return authzPaths.isUnderPrefix(pathElements);
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  public boolean doesBelongToAuthzObject(String[] pathElements) {
+    lock.readLock().lock();
+    try {
+      return authzPaths.findAuthzObject(pathElements) != null;
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  public List<AclEntry> getAclEntries(String[] pathElements) {
+    lock.readLock().lock();
+    try {
+      String authzObj = authzPaths.findAuthzObject(pathElements);
+      return (authzObj != null) ? authzPermissions.getAcls(authzObj)
+          : Collections.<AclEntry>emptyList();
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+}

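The central contract in processUpdates() above: when a full image is present it is always first in the list and yields a fresh instance via updateFull(), while the remaining partial updates are applied in place under the shared write lock. Below is a condensed restatement for the paths case, assuming updateFull() returns the concrete type as the generic cast implies; the class and method names here are hypothetical.

import java.util.List;
import java.util.concurrent.locks.ReadWriteLock;

public class UpdateContractSketch {
  static UpdateableAuthzPaths applyPathUpdates(List<PathsUpdate> updates,
      UpdateableAuthzPaths current, ReadWriteLock lock) {
    if (!updates.isEmpty() && updates.get(0).hasFullImage()) {
      // A full image is always first and replaces the whole instance.
      current = (UpdateableAuthzPaths) current.updateFull(updates.remove(0));
    }
    if (!updates.isEmpty()) {
      // Remaining partial updates mutate the (possibly new) instance in
      // place, serialized against readers by the shared write lock.
      current.updatePartial(updates, lock);
    }
    return current;
  }
}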
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/2e509e4b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationProvider.java
----------------------------------------------------------------------
diff --git 
a/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationProvider.java
 
b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationProvider.java
new file mode 100644
index 0000000..7d2940c
--- /dev/null
+++ 
b/sentry-hdfs/sentry-hdfs-namenode-plugin/src/main/java/org/apache/sentry/hdfs/SentryAuthorizationProvider.java
@@ -0,0 +1,372 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.namenode.AclFeature;
+import org.apache.hadoop.hdfs.server.namenode.AuthorizationProvider;
+import org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider;
+import org.apache.hadoop.security.AccessControlException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableList;
+
+public class SentryAuthorizationProvider 
+    extends AuthorizationProvider implements Configurable {
+  
+  static class SentryAclFeature extends AclFeature {
+    public SentryAclFeature(ImmutableList<AclEntry> entries) {
+      super(entries);
+    }
+  }
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SentryAuthorizationProvider.class);
+
+  private boolean started;
+  private Configuration conf;
+  private AuthorizationProvider defaultAuthzProvider;
+  private String user;
+  private String group;
+  private FsPermission permission;
+  private boolean originalAuthzAsAcl;
+  private SentryAuthorizationInfo authzInfo;
+
+  public SentryAuthorizationProvider() {
+    this(null);
+  }
+
+  @VisibleForTesting
+  SentryAuthorizationProvider(SentryAuthorizationInfo authzInfo) {
+    this.authzInfo = authzInfo;
+  }
+  
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  @Override
+  public synchronized void start() {
+    if (started) {
+      throw new IllegalStateException("Provider already started");
+    }
+    started = true;
+    try {
+      if (!conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, 
false)) {
+        throw new RuntimeException("HDFS ACLs must be enabled");
+      }
+
+      defaultAuthzProvider = new DefaultAuthorizationProvider();
+      defaultAuthzProvider.start();
+      // Configuration is read from hdfs-sentry.xml and NN configuration, in
+      // that order of precedence.
+      Configuration conf = new Configuration(this.conf);
+      conf.addResource(SentryAuthorizationConstants.CONFIG_FILE);
+      user = conf.get(SentryAuthorizationConstants.HDFS_USER_KEY,
+          SentryAuthorizationConstants.HDFS_USER_DEFAULT);
+      group = conf.get(SentryAuthorizationConstants.HDFS_GROUP_KEY,
+          SentryAuthorizationConstants.HDFS_GROUP_DEFAULT);
+      permission = FsPermission.createImmutable(
+          (short) 
conf.getLong(SentryAuthorizationConstants.HDFS_PERMISSION_KEY,
+              SentryAuthorizationConstants.HDFS_PERMISSION_DEFAULT)
+      );
+      originalAuthzAsAcl = conf.getBoolean(
+          SentryAuthorizationConstants.INCLUDE_HDFS_AUTHZ_AS_ACL_KEY,
+          SentryAuthorizationConstants.INCLUDE_HDFS_AUTHZ_AS_ACL_DEFAULT);
+
+      LOG.info("Starting");
+      LOG.info("Config: hdfs-user[{}] hdfs-group[{}] hdfs-permission[{}] " +
+          "include-hdfs-authz-as-acl[{}]", new Object[]
+          {user, group, permission, originalAuthzAsAcl});
+
+      if (authzInfo == null) {
+        authzInfo = new SentryAuthorizationInfo(conf);
+      }
+      authzInfo.start();
+    } catch (Exception ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+
+  @Override
+  public synchronized void stop() {
+    LOG.debug("Stopping");
+    authzInfo.stop();
+    defaultAuthzProvider.stop();
+    defaultAuthzProvider = null;
+  }
+
+  @Override
+  public void setSnaphottableDirs(Map<INodeAuthorizationInfo, Integer>
+      snapshotableDirs) {
+    defaultAuthzProvider.setSnaphottableDirs(snapshotableDirs);
+  }
+
+  @Override
+  public void addSnapshottable(INodeAuthorizationInfo dir) {
+    defaultAuthzProvider.addSnapshottable(dir);
+  }
+
+  @Override
+  public void removeSnapshottable(INodeAuthorizationInfo dir) {
+    defaultAuthzProvider.removeSnapshottable(dir);
+  }
+
+  @Override
+  public void createSnapshot(INodeAuthorizationInfo dir, int snapshotId)
+      throws IOException {
+    defaultAuthzProvider.createSnapshot(dir, snapshotId);
+  }
+
+  @Override
+  public void removeSnapshot(INodeAuthorizationInfo dir, int snapshotId)
+      throws IOException {
+    defaultAuthzProvider.removeSnapshot(dir, snapshotId);
+  }
+
+  @Override
+  public void checkPermission(String user, Set<String> groups,
+      INodeAuthorizationInfo[] inodes, int snapshotId,
+      boolean doCheckOwner, FsAction ancestorAccess, FsAction parentAccess,
+      FsAction access, FsAction subAccess, boolean ignoreEmptyDir)
+      throws AccessControlException, UnresolvedLinkException {
+    defaultAuthzProvider.checkPermission(user, groups, inodes, snapshotId,
+        doCheckOwner, ancestorAccess, parentAccess, access, subAccess,
+        ignoreEmptyDir);
+  }
+
+  private static final String[] EMPTY_STRING_ARRAY = new String[0];
+  
+  private String[] getPathElements(INodeAuthorizationInfo node) {
+    return getPathElements(node, 0);
+  }
+
+  private String[] getPathElements(INodeAuthorizationInfo node, int idx) {
+    String[] paths;
+    INodeAuthorizationInfo parent = node.getParent();
+    if (parent == null) {
+      paths = (idx > 0) ? new String[idx] : EMPTY_STRING_ARRAY;
+    } else {
+      paths = getPathElements(parent, idx + 1);
+      paths[paths.length - 1 - idx] = node.getLocalName();
+    }
+    return paths;
+  }
+
+  @Override
+  public void setUser(INodeAuthorizationInfo node, String user) {
+    defaultAuthzProvider.setUser(node, user);
+  }
+
+  @Override
+  public String getUser(INodeAuthorizationInfo node, int snapshotId) {
+    String user;
+    String[] pathElements = getPathElements(node);
+    if (!authzInfo.isManaged(pathElements)) {
+      user = defaultAuthzProvider.getUser(node, snapshotId);
+    } else {
+      if (!authzInfo.isStale()) {
+        if (authzInfo.doesBelongToAuthzObject(pathElements)) {
+          user = this.user;
+        } else {
+          user = defaultAuthzProvider.getUser(node, snapshotId);
+        }
+      } else {
+        user = this.user;
+      }
+    }
+    return user;
+  }
+
+  @Override
+  public void setGroup(INodeAuthorizationInfo node, String group) {
+    defaultAuthzProvider.setGroup(node, group);
+  }
+
+  @Override
+  public String getGroup(INodeAuthorizationInfo node, int snapshotId) {
+    String group;
+    String[] pathElements = getPathElements(node);
+    if (!authzInfo.isManaged(pathElements)) {
+      group = defaultAuthzProvider.getGroup(node, snapshotId);
+    } else {
+      if (!authzInfo.isStale()) {
+        if (authzInfo.doesBelongToAuthzObject(pathElements)) {
+          group = this.group;
+        } else {
+          group = defaultAuthzProvider.getGroup(node, snapshotId);
+        }
+      } else {
+        group = this.group;
+      }
+    }
+    return group;
+  }
+
+  @Override
+  public void setPermission(INodeAuthorizationInfo node,
+      FsPermission permission) {
+    defaultAuthzProvider.setPermission(node, permission);
+  }
+
+  @Override
+  public FsPermission getFsPermission(
+      INodeAuthorizationInfo node, int snapshotId) {
+    FsPermission permission;
+    String[] pathElements = getPathElements(node);
+    if (!authzInfo.isManaged(pathElements)) {
+      permission = defaultAuthzProvider.getFsPermission(node, snapshotId);
+    } else {
+      if (!authzInfo.isStale()) {
+        if (authzInfo.doesBelongToAuthzObject(pathElements)) {
+          permission = this.permission;
+        } else {
+          permission = defaultAuthzProvider.getFsPermission(node, snapshotId);
+        }
+      } else {
+        permission = this.permission;
+      }
+    }
+    return permission;
+  }
+
+  private List<AclEntry> createAclEntries(String user, String group,
+      FsPermission permission) {
+    List<AclEntry> list = new ArrayList<AclEntry>();
+    AclEntry.Builder builder = new AclEntry.Builder();
+    FsPermission fsPerm = new FsPermission(permission);
+    builder.setName(user);
+    builder.setType(AclEntryType.USER);
+    builder.setScope(AclEntryScope.ACCESS);
+    builder.setPermission(fsPerm.getUserAction());
+    list.add(builder.build());
+    builder.setName(group);
+    builder.setType(AclEntryType.GROUP);
+    builder.setScope(AclEntryScope.ACCESS);
+    builder.setPermission(fsPerm.getGroupAction());
+    list.add(builder.build());
+    builder.setName(null);
+    builder.setType(AclEntryType.OTHER);
+    builder.setScope(AclEntryScope.ACCESS);
+    builder.setPermission(fsPerm.getOtherAction());
+    list.add(builder.build());
+    return list;
+  }
+
+  @Override
+  public AclFeature getAclFeature(INodeAuthorizationInfo node, int snapshotId) 
{
+    AclFeature f = null;
+    String[] pathElements = getPathElements(node);
+    String p = Arrays.toString(pathElements);
+    boolean isManaged = false;
+    boolean isStale = false;
+    boolean hasAuthzObj = false;
+    if (!authzInfo.isManaged(pathElements)) {
+      isManaged = false;
+      f = defaultAuthzProvider.getAclFeature(node, snapshotId);
+    } else {
+      isManaged = true;
+      List<AclEntry> list = new ArrayList<AclEntry>();
+      if (originalAuthzAsAcl) {
+        String user = defaultAuthzProvider.getUser(node, snapshotId);
+        String group = defaultAuthzProvider.getGroup(node, snapshotId);
+        INodeAuthorizationInfo pNode = node.getParent();
+        while  (group == null && pNode != null) {
+          group = defaultAuthzProvider.getGroup(pNode, snapshotId);
+          pNode = pNode.getParent();
+        }
+        FsPermission perm = defaultAuthzProvider.getFsPermission(node, 
snapshotId);
+        list.addAll(createAclEntries(user, group, perm));
+      } else {
+        list.addAll(createAclEntries(this.user, this.group, this.permission));
+      }
+      if (!authzInfo.isStale()) { 
+        isStale = false;
+        if (authzInfo.doesBelongToAuthzObject(pathElements)) {
+          hasAuthzObj = true;
+          list.addAll(authzInfo.getAclEntries(pathElements));
+          f = new SentryAclFeature(ImmutableList.copyOf(list));
+        } else {
+          hasAuthzObj = false;
+          f = defaultAuthzProvider.getAclFeature(node, snapshotId);
+        }
+      } else {
+        isStale = true;
+        f = new SentryAclFeature(ImmutableList.copyOf(list));
+      }
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("### getAclEntry [" + (p == null ? "null" : p) + "] : ["
+          + "isManaged=" + isManaged
+          + ", isStale=" + isStale
+          + ", hasAuthzObj=" + hasAuthzObj
+          + ", origAuthzAsAcl=" + originalAuthzAsAcl + "]"
+          + "[" + (f == null ? "null" : f.getEntries()) + "]");
+    }
+    return f;
+  }
+
+  @Override
+  public void removeAclFeature(INodeAuthorizationInfo node) {
+    AclFeature aclFeature = node.getAclFeature(CURRENT_STATE_ID);
+    if (aclFeature.getClass() != SentryAclFeature.class) {
+      defaultAuthzProvider.removeAclFeature(node);
+    }
+  }
+
+  @Override
+  public void addAclFeature(INodeAuthorizationInfo node, AclFeature f) {
+    String[] pathElements = getPathElements(node);
+    if (!authzInfo.isManaged(pathElements)) {
+      defaultAuthzProvider.addAclFeature(node, f);
+    }
+  }
+
+//  @Override 
+//  public boolean doesAllowChanges(INodeAuthorizationInfo node) {
+//    String[] pathElements = getPathElements(node);
+//    if (!authzInfo.isManaged(pathElements)) {
+//      return defaultAuthzProvider.doesAllowChanges(node);
+//    }
+//    return !authzInfo.doesBelongToAuthzObject(getPathElements(node));
+//  }
+
+}

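For the default hive/hive/0770 configuration, createAclEntries() synthesizes three ACCESS-scope entries from the FsPermission bits. A standalone sketch using Hadoop's AclEntry builder, mirroring those defaults; the class name AclSketch is hypothetical.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsPermission;

public class AclSketch {
  public static void main(String[] args) {
    FsPermission perm = FsPermission.createImmutable((short) 0770);
    List<AclEntry> acl = new ArrayList<AclEntry>();
    acl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
        .setType(AclEntryType.USER).setName("hive")
        .setPermission(perm.getUserAction()).build());   // user:hive:rwx
    acl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
        .setType(AclEntryType.GROUP).setName("hive")
        .setPermission(perm.getGroupAction()).build());  // group:hive:rwx
    acl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
        .setType(AclEntryType.OTHER)
        .setPermission(perm.getOtherAction()).build());  // other::---
    System.out.println(acl);
  }
}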