SENTRY-2062: Support the new Hive 2.3.2 DbNotificationListener (Sergio Pena, reviewed by kalyan kumar kalvagadda)
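This change tracks the Hive 2.3.2 move of the metastore notification API from org.apache.hive.hcatalog.messaging to org.apache.hadoop.hive.metastore.messaging: the Sentry JSON message classes now extend the new base classes, the message factory implements the new iterator-based signatures, and the old SentryMetastorePostEventListener variants are removed. To pick up the new listener, the metastore is typically configured to register Hive's transactional DbNotificationListener and to emit Sentry's JSON messages; the hive-site.xml sketch below uses the standard Hive 2.x property names and is illustrative only, not part of this commit.

  <!-- Write DDL events to the NOTIFICATION_LOG table as part of the metastore transaction -->
  <property>
    <name>hive.metastore.transactional.event.listeners</name>
    <value>org.apache.hive.hcatalog.listener.DbNotificationListener</value>
  </property>
  <!-- Serialize events with Sentry's factory, which adds location information to the JSON payloads -->
  <property>
    <name>hive.metastore.event.message.factory</name>
    <value>org.apache.sentry.binding.metastore.messaging.json.SentryJSONMessageFactory</value>
  </property>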
Project: http://git-wip-us.apache.org/repos/asf/sentry/repo
Commit: http://git-wip-us.apache.org/repos/asf/sentry/commit/2de4adff
Tree: http://git-wip-us.apache.org/repos/asf/sentry/tree/2de4adff
Diff: http://git-wip-us.apache.org/repos/asf/sentry/diff/2de4adff

Branch: refs/heads/master
Commit: 2de4adff96110ed3fa4ed13e0a2f3172448a8cc8
Parents: 782d132
Author: Sergio Pena <[email protected]>
Authored: Wed Nov 22 18:00:03 2017 -0600
Committer: Sergio Pena <[email protected]>
Committed: Wed Nov 22 18:00:03 2017 -0600

----------------------------------------------------------------------
 .../json/SentryJSONAddPartitionMessage.java      |  18 +-
 .../json/SentryJSONAlterPartitionMessage.java    |  38 +-
 .../json/SentryJSONAlterTableMessage.java        |  16 +-
 .../json/SentryJSONCreateDatabaseMessage.java    |   2 +-
 .../json/SentryJSONCreateTableMessage.java       |   9 +-
 .../json/SentryJSONDropDatabaseMessage.java      |   2 +-
 .../json/SentryJSONDropPartitionMessage.java     |  20 +-
 .../json/SentryJSONDropTableMessage.java         |   2 +-
 .../json/SentryJSONMessageDeserializer.java      |  20 +-
 .../json/SentryJSONMessageFactory.java           |  92 ++--
 .../SentryMetastorePostEventListener.java        | 431 -------------------
 .../SentryMetastorePostEventListenerBase.java    | 417 ------------------
 ...tastorePostEventListenerNotificationLog.java  | 396 -----------------
 .../service/thrift/FullUpdateModifier.java       |   8 +-
 .../service/thrift/NotificationProcessor.java    |   2 +-
 .../sentry/service/thrift/SentryHMSClient.java   |   3 +-
 .../TestHMSFollowerSentryStoreIntegration.java   |   6 +-
 .../service/thrift/TestFullUpdateModifier.java   |  63 ++-
 .../sentry/service/thrift/TestHMSFollower.java   |  52 +--
 .../thrift/TestNotificationProcessor.java        |  37 +-
 .../tests/e2e/hdfs/TestHDFSIntegrationBase.java  |   4 +-
 .../AbstractTestWithStaticConfiguration.java     |  17 +-
 ...NotificationListenerInBuiltDeserializer.java  |  52 +--
 ...bNotificationListenerSentryDeserializer.java  |  39 --
 .../TestSentryListenerInBuiltDeserializer.java   |  37 --
 .../TestSentryListenerSentryDeserializer.java    | 401 -----------------
 26 files changed, 267 insertions(+), 1917 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/sentry/blob/2de4adff/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONAddPartitionMessage.java
----------------------------------------------------------------------
diff --git a/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONAddPartitionMessage.java b/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONAddPartitionMessage.java
index 10d7a93..baa8787 100644
--- a/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONAddPartitionMessage.java
+++ b/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONAddPartitionMessage.java
@@ -18,11 +18,14 @@
 package org.apache.sentry.binding.metastore.messaging.json;
-import org.apache.hive.hcatalog.messaging.json.JSONAddPartitionMessage;
+import java.util.Iterator;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.messaging.PartitionFiles;
+import org.apache.hadoop.hive.metastore.messaging.json.JSONAddPartitionMessage;
 import org.codehaus.jackson.annotate.JsonProperty;
import java.util.List; -import java.util.Map; public class SentryJSONAddPartitionMessage extends JSONAddPartitionMessage { @JsonProperty @@ -31,9 +34,14 @@ public class SentryJSONAddPartitionMessage extends JSONAddPartitionMessage { public SentryJSONAddPartitionMessage() { } - public SentryJSONAddPartitionMessage(String server, String servicePrincipal, String db, String table, - List<Map<String, String>> partitions, Long timestamp, List<String> locations) { - super(server, servicePrincipal, db, table, partitions, timestamp); + public SentryJSONAddPartitionMessage(String server, String servicePrincipal, Table tableObj, + Iterator<Partition> partitionsIterator, Iterator<PartitionFiles> partitionFileIter, Long timestamp) { + super(server, servicePrincipal, tableObj, partitionsIterator, partitionFileIter, timestamp); + } + + public SentryJSONAddPartitionMessage(String server, String servicePrincipal, Table tableObj, + Iterator<Partition> partitionsIterator, Iterator<PartitionFiles> partitionFileIter, Long timestamp, List<String> locations) { + super(server, servicePrincipal, tableObj, partitionsIterator, partitionFileIter, timestamp); this.locations = locations; } http://git-wip-us.apache.org/repos/asf/sentry/blob/2de4adff/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONAlterPartitionMessage.java ---------------------------------------------------------------------- diff --git a/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONAlterPartitionMessage.java b/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONAlterPartitionMessage.java index 25de808..6ef0cd9 100644 --- a/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONAlterPartitionMessage.java +++ b/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONAlterPartitionMessage.java @@ -18,14 +18,12 @@ package org.apache.sentry.binding.metastore.messaging.json; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableMap; -import java.util.Collections; -import org.apache.hive.hcatalog.messaging.json.JSONAlterPartitionMessage; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.messaging.json.JSONAlterPartitionMessage; import org.codehaus.jackson.annotate.JsonProperty; import java.util.List; -import java.util.Map; public class SentryJSONAlterPartitionMessage extends JSONAlterPartitionMessage { @JsonProperty @@ -36,28 +34,22 @@ public class SentryJSONAlterPartitionMessage extends JSONAlterPartitionMessage { private List<String> newValues; public SentryJSONAlterPartitionMessage() { - super("", "", "", "", ImmutableMap.<String, String>of(), null); } - public SentryJSONAlterPartitionMessage(String server, String servicePrincipal, - String db, String table, - Map<String, String> values, List<String> newValues, - Long timestamp, String oldlocation, - String newLocation) { - super(server, servicePrincipal, db, table, values, timestamp); - this.newLocation = newLocation; - this.oldLocation = oldlocation; - this.newValues = newValues; + public SentryJSONAlterPartitionMessage(String server, String servicePrincipal, Table tableObj, + Partition partitionObjBefore, Partition 
partitionObjAfter, Long timestamp) { + this(server, servicePrincipal, tableObj, partitionObjBefore, partitionObjAfter, timestamp, + partitionObjBefore.getSd().getLocation(), partitionObjAfter.getSd().getLocation(), + partitionObjAfter.getValues()); } - @VisibleForTesting - public SentryJSONAlterPartitionMessage(String server, String servicePrincipal, - String db, String table, - Long timestamp, String oldlocation, - String newLocation) { - this(server, servicePrincipal, db, table, - Collections.<String, String>emptyMap(), Collections.<String>emptyList(), - timestamp, oldlocation, newLocation); + public SentryJSONAlterPartitionMessage(String server, String servicePrincipal, Table tableObj, + Partition partitionObjBefore, Partition partitionObjAfter, Long timestamp, String oldLocation, + String newLocation, List<String> newValues) { + super(server, servicePrincipal, tableObj, partitionObjBefore, partitionObjAfter, timestamp); + this.newLocation = newLocation; + this.oldLocation = oldLocation; + this.newValues = newValues; } public String getNewLocation() { http://git-wip-us.apache.org/repos/asf/sentry/blob/2de4adff/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONAlterTableMessage.java ---------------------------------------------------------------------- diff --git a/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONAlterTableMessage.java b/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONAlterTableMessage.java index 4670494..1e22bb7 100644 --- a/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONAlterTableMessage.java +++ b/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONAlterTableMessage.java @@ -18,7 +18,8 @@ package org.apache.sentry.binding.metastore.messaging.json; -import org.apache.hive.hcatalog.messaging.json.JSONAlterTableMessage; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.messaging.json.JSONAlterTableMessage; import org.codehaus.jackson.annotate.JsonProperty; public class SentryJSONAlterTableMessage extends JSONAlterTableMessage { @@ -28,13 +29,18 @@ public class SentryJSONAlterTableMessage extends JSONAlterTableMessage { private String oldLocation; public SentryJSONAlterTableMessage() { - super("", "", "", "", null); + } + + public SentryJSONAlterTableMessage(String server, String servicePrincipal, Table tableObjBefore, + Table tableObjAfter, Long timestamp) { + this(server, servicePrincipal, tableObjBefore, tableObjAfter, timestamp, + tableObjBefore.getSd().getLocation(), tableObjAfter.getSd().getLocation()); } public SentryJSONAlterTableMessage(String server, String servicePrincipal, - String db, String table, Long timestamp, - String oldLocation, String newLocation) { - super(server, servicePrincipal, db, table, timestamp); + Table tableObjBefore, Table tableObjAfter, Long timestamp, String oldLocation, + String newLocation) { + super(server, servicePrincipal, tableObjBefore, tableObjAfter, timestamp); this.newLocation = newLocation; this.oldLocation = oldLocation; } http://git-wip-us.apache.org/repos/asf/sentry/blob/2de4adff/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONCreateDatabaseMessage.java 
---------------------------------------------------------------------- diff --git a/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONCreateDatabaseMessage.java b/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONCreateDatabaseMessage.java index 8c62758..a519fd7 100644 --- a/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONCreateDatabaseMessage.java +++ b/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONCreateDatabaseMessage.java @@ -18,7 +18,7 @@ package org.apache.sentry.binding.metastore.messaging.json; -import org.apache.hive.hcatalog.messaging.json.JSONCreateDatabaseMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONCreateDatabaseMessage; import org.codehaus.jackson.annotate.JsonProperty; public class SentryJSONCreateDatabaseMessage extends JSONCreateDatabaseMessage { http://git-wip-us.apache.org/repos/asf/sentry/blob/2de4adff/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONCreateTableMessage.java ---------------------------------------------------------------------- diff --git a/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONCreateTableMessage.java b/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONCreateTableMessage.java index d15bc48..61d9481 100644 --- a/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONCreateTableMessage.java +++ b/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONCreateTableMessage.java @@ -18,7 +18,9 @@ package org.apache.sentry.binding.metastore.messaging.json; -import org.apache.hive.hcatalog.messaging.json.JSONCreateTableMessage; +import java.util.Iterator; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.messaging.json.JSONCreateTableMessage; import org.codehaus.jackson.annotate.JsonProperty; public class SentryJSONCreateTableMessage extends JSONCreateTableMessage { @@ -33,6 +35,11 @@ public class SentryJSONCreateTableMessage extends JSONCreateTableMessage { this.location = location; } + public SentryJSONCreateTableMessage(String server, String servicePrincipal, Table tableObj, Iterator<String> fileIter, Long timestamp) { + super(server, servicePrincipal, tableObj, fileIter, timestamp); + this.location = tableObj.getSd().getLocation(); + } + public String getLocation() { return location; } http://git-wip-us.apache.org/repos/asf/sentry/blob/2de4adff/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONDropDatabaseMessage.java ---------------------------------------------------------------------- diff --git a/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONDropDatabaseMessage.java b/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONDropDatabaseMessage.java index c8e7c75..d1d4b8c 100644 --- 
a/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONDropDatabaseMessage.java +++ b/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONDropDatabaseMessage.java @@ -18,7 +18,7 @@ package org.apache.sentry.binding.metastore.messaging.json; -import org.apache.hive.hcatalog.messaging.json.JSONDropDatabaseMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONDropDatabaseMessage; import org.codehaus.jackson.annotate.JsonProperty; public class SentryJSONDropDatabaseMessage extends JSONDropDatabaseMessage { http://git-wip-us.apache.org/repos/asf/sentry/blob/2de4adff/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONDropPartitionMessage.java ---------------------------------------------------------------------- diff --git a/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONDropPartitionMessage.java b/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONDropPartitionMessage.java index d5f899c..25c1123 100644 --- a/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONDropPartitionMessage.java +++ b/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONDropPartitionMessage.java @@ -18,7 +18,8 @@ package org.apache.sentry.binding.metastore.messaging.json; -import org.apache.hive.hcatalog.messaging.json.JSONDropPartitionMessage; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.messaging.json.JSONDropPartitionMessage; import org.codehaus.jackson.annotate.JsonProperty; import java.util.List; @@ -29,13 +30,22 @@ public class SentryJSONDropPartitionMessage extends JSONDropPartitionMessage { private List<String> locations; public SentryJSONDropPartitionMessage() { + super(); } - public SentryJSONDropPartitionMessage(String server, String servicePrincipal, - String db, String table, - List<Map<String, String>> partitions, - Long timestamp, List<String> locations) { + public SentryJSONDropPartitionMessage(String server, String servicePrincipal, String db, String table, + List<Map<String, String>> partitions, Long timestamp) { super(server, servicePrincipal, db, table, partitions, timestamp); + } + + public SentryJSONDropPartitionMessage(String server, String servicePrincipal, Table tableObj, + List<Map<String, String>> partitionKeyValues, long timestamp) { + super(server, servicePrincipal, tableObj, partitionKeyValues, timestamp); + } + + public SentryJSONDropPartitionMessage(String server, String servicePrincipal, Table tableObj, + List<Map<String, String>> partitionKeyValues, long timestamp, List<String> locations) { + super(server, servicePrincipal, tableObj, partitionKeyValues, timestamp); this.locations = locations; } http://git-wip-us.apache.org/repos/asf/sentry/blob/2de4adff/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONDropTableMessage.java ---------------------------------------------------------------------- diff --git a/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONDropTableMessage.java 
b/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONDropTableMessage.java index e67f562..905aa28 100644 --- a/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONDropTableMessage.java +++ b/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONDropTableMessage.java @@ -18,7 +18,7 @@ package org.apache.sentry.binding.metastore.messaging.json; -import org.apache.hive.hcatalog.messaging.json.JSONDropTableMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONDropTableMessage; import org.codehaus.jackson.annotate.JsonProperty; http://git-wip-us.apache.org/repos/asf/sentry/blob/2de4adff/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONMessageDeserializer.java ---------------------------------------------------------------------- diff --git a/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONMessageDeserializer.java b/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONMessageDeserializer.java index 929ac8c..09f1634 100644 --- a/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONMessageDeserializer.java +++ b/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONMessageDeserializer.java @@ -18,13 +18,19 @@ package org.apache.sentry.binding.metastore.messaging.json; -import org.apache.hive.hcatalog.messaging.*; -import org.apache.hive.hcatalog.messaging.json.JSONAlterIndexMessage; -import org.apache.hive.hcatalog.messaging.json.JSONCreateFunctionMessage; -import org.apache.hive.hcatalog.messaging.json.JSONCreateIndexMessage; -import org.apache.hive.hcatalog.messaging.json.JSONDropFunctionMessage; -import org.apache.hive.hcatalog.messaging.json.JSONDropIndexMessage; -import org.apache.hive.hcatalog.messaging.json.JSONInsertMessage; +import org.apache.hadoop.hive.metastore.messaging.AlterIndexMessage; +import org.apache.hadoop.hive.metastore.messaging.CreateFunctionMessage; +import org.apache.hadoop.hive.metastore.messaging.CreateIndexMessage; +import org.apache.hadoop.hive.metastore.messaging.DropFunctionMessage; +import org.apache.hadoop.hive.metastore.messaging.DropIndexMessage; +import org.apache.hadoop.hive.metastore.messaging.InsertMessage; +import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer; +import org.apache.hadoop.hive.metastore.messaging.json.JSONAlterIndexMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONCreateFunctionMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONCreateIndexMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONDropFunctionMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONDropIndexMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONInsertMessage; import org.codehaus.jackson.map.DeserializationConfig; import org.codehaus.jackson.map.ObjectMapper; http://git-wip-us.apache.org/repos/asf/sentry/blob/2de4adff/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONMessageFactory.java ---------------------------------------------------------------------- diff 
--git a/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONMessageFactory.java b/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONMessageFactory.java index 0af02d1..0d0c73f 100644 --- a/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONMessageFactory.java +++ b/sentry-binding/sentry-binding-hive-follower/src/main/java/org/apache/sentry/binding/metastore/messaging/json/SentryJSONMessageFactory.java @@ -26,15 +26,26 @@ import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hive.hcatalog.messaging.*; -import org.apache.hive.hcatalog.messaging.json.JSONAlterIndexMessage; -import org.apache.hive.hcatalog.messaging.json.JSONCreateFunctionMessage; -import org.apache.hive.hcatalog.messaging.json.JSONCreateIndexMessage; -import org.apache.hive.hcatalog.messaging.json.JSONDropFunctionMessage; -import org.apache.hive.hcatalog.messaging.json.JSONDropIndexMessage; -import org.apache.hive.hcatalog.messaging.json.JSONInsertMessage; import java.util.*; +import org.apache.hadoop.hive.metastore.messaging.AddPartitionMessage; +import org.apache.hadoop.hive.metastore.messaging.AlterIndexMessage; +import org.apache.hadoop.hive.metastore.messaging.CreateFunctionMessage; +import org.apache.hadoop.hive.metastore.messaging.CreateIndexMessage; +import org.apache.hadoop.hive.metastore.messaging.DropFunctionMessage; +import org.apache.hadoop.hive.metastore.messaging.DropIndexMessage; +import org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage; +import org.apache.hadoop.hive.metastore.messaging.DropTableMessage; +import org.apache.hadoop.hive.metastore.messaging.InsertMessage; +import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer; +import org.apache.hadoop.hive.metastore.messaging.MessageFactory; +import org.apache.hadoop.hive.metastore.messaging.PartitionFiles; +import org.apache.hadoop.hive.metastore.messaging.json.JSONAlterIndexMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONCreateFunctionMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONCreateIndexMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONDropFunctionMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONDropIndexMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONInsertMessage; public class SentryJSONMessageFactory extends MessageFactory { private static final Log LOG = LogFactory.getLog(SentryJSONMessageFactory.class.getName()); @@ -72,46 +83,50 @@ public class SentryJSONMessageFactory extends MessageFactory { return "json"; } + @Override public SentryJSONCreateDatabaseMessage buildCreateDatabaseMessage(Database db) { - return new SentryJSONCreateDatabaseMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, db.getName(), + return new SentryJSONCreateDatabaseMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, db.getName(), now(), db.getLocationUri()); } + @Override public SentryJSONDropDatabaseMessage buildDropDatabaseMessage(Database db) { - return new SentryJSONDropDatabaseMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, db.getName(), + return new SentryJSONDropDatabaseMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, db.getName(), now(), db.getLocationUri()); } - public 
SentryJSONCreateTableMessage buildCreateTableMessage(Table table) { - return new SentryJSONCreateTableMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, table.getDbName(), - table.getTableName(), now(), table.getSd().getLocation()); + @Override + public SentryJSONCreateTableMessage buildCreateTableMessage(Table table, Iterator<String> fileIter) { + // fileIter is used to iterate through a full list of files that partition have. This + // may be too verbose and it is overloading the Sentry store. Sentry does not use these files + // so it is safe to ignore them. + return new SentryJSONCreateTableMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, table, Collections.emptyIterator(), now()); } + @Override public SentryJSONAlterTableMessage buildAlterTableMessage(Table before, Table after) { - return new SentryJSONAlterTableMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, before.getDbName(), - before.getTableName(), now(), before.getSd().getLocation(), after.getSd().getLocation()); + return new SentryJSONAlterTableMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, before, after, now()); } - public SentryJSONDropTableMessage buildDropTableMessage(Table table) { - return new SentryJSONDropTableMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, table.getDbName(), + @Override + public DropTableMessage buildDropTableMessage(Table table) { + return new SentryJSONDropTableMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, table.getDbName(), table.getTableName(), now(), table.getSd().getLocation()); } @Override public SentryJSONAlterPartitionMessage buildAlterPartitionMessage(Table table, Partition before, Partition after) { - return new SentryJSONAlterPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, - before.getDbName(), before.getTableName(), getPartitionKeyValues(table, before), - after.getValues(), now(), before.getSd().getLocation(), after.getSd().getLocation()); + return new SentryJSONAlterPartitionMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, + table, before, after, now()); } @Override public DropPartitionMessage buildDropPartitionMessage(Table table, Iterator<Partition> partitions) { PartitionBasicInfo partitionBasicInfo = getPartitionBasicInfo(table, partitions); - return new SentryJSONDropPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, - table.getDbName(), table.getTableName(), partitionBasicInfo.getPartitionList(), - now(), partitionBasicInfo.getLocations()); + return new SentryJSONDropPartitionMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, + table, partitionBasicInfo.getPartitionList(), now(), partitionBasicInfo.getLocations()); } @Override @@ -119,7 +134,7 @@ public class SentryJSONMessageFactory extends MessageFactory { // Sentry would be not be interested in CreateFunctionMessage as these are generated when is data is // added inserted. This method is implemented for completeness. This is reason why, new sentry // JSON class is not defined for CreateFunctionMessage - return new JSONCreateFunctionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, function, now()); + return new JSONCreateFunctionMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, function, now()); } @Override @@ -127,7 +142,7 @@ public class SentryJSONMessageFactory extends MessageFactory { // Sentry would be not be interested in DropFunctionMessage as these are generated when is data is // added inserted. This method is implemented for completeness. 
This is reason why, new sentry // JSON class is not defined for DropFunctionMessage - return new JSONDropFunctionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, function, now()); + return new JSONDropFunctionMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, function, now()); } @@ -136,7 +151,7 @@ public class SentryJSONMessageFactory extends MessageFactory { // Sentry would be not be interested in CreateIndexMessage as these are generated when is data is // added inserted. This method is implemented for completeness. This is reason why, new sentry // JSON class is not defined for CreateIndexMessage - return new JSONCreateIndexMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, index, now()); + return new JSONCreateIndexMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, index, now()); } @Override @@ -144,7 +159,7 @@ public class SentryJSONMessageFactory extends MessageFactory { // Sentry would be not be interested in DropIndexMessage as these are generated when is data is // added inserted. This method is implemented for completeness. This is reason why, new sentry // JSON class is not defined for DropIndexMessage - return new JSONDropIndexMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, index, now()); + return new JSONDropIndexMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, index, now()); } @Override @@ -152,32 +167,29 @@ public class SentryJSONMessageFactory extends MessageFactory { // Sentry would be not be interested in AlterIndexMessage as these are generated when is data is // added inserted. This method is implemented for completeness. This is reason why, new sentry // JSON class is not defined for AlterIndexMessage - return new JSONAlterIndexMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, before, after, now()); + return new JSONAlterIndexMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, before, after, now()); } @Override - public InsertMessage buildInsertMessage(String db, String table, Map<String,String> partKeyVals, - List<String> files) { + public InsertMessage buildInsertMessage(String db, String table, Map<String, String> partKeyVals, + Iterator<String> fileIter) { // Sentry would be not be interested in InsertMessage as these are generated when is data is // added inserted. This method is implemented for completeness. This is reason why, new sentry // JSON class is not defined for InsertMessage. - return new JSONInsertMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, db, table, partKeyVals, - files, now()); + return new JSONInsertMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, db, table, partKeyVals, + fileIter, now()); } @Override public AddPartitionMessage buildAddPartitionMessage(Table table, - Iterator<Partition> partitionsIterator) { + Iterator<Partition> partitionsIterator, Iterator<PartitionFiles> partitionFileIter) { PartitionBasicInfo partitionBasicInfo = getPartitionBasicInfo(table, partitionsIterator); - return new SentryJSONAddPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, table.getDbName(), - table.getTableName(), partitionBasicInfo.getPartitionList(), now(), - partitionBasicInfo.getLocations()); - } - - public AddPartitionMessage buildAddPartitionMessage(Table table, - List<Partition> partitions) { - return buildAddPartitionMessage (table, partitions.iterator()); + // partitionFileIter is used to iterate through a full list of files that partition have. This + // may be too verbose and it is overloading the Sentry store. Sentry does not use these files + // so it is safe to ignore them. 
+ return new SentryJSONAddPartitionMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, table, + partitionsIterator, Collections.emptyIterator(), now(), partitionBasicInfo.getLocations()); } private PartitionBasicInfo getPartitionBasicInfo(Table table, Iterator<Partition> iterator) { http://git-wip-us.apache.org/repos/asf/sentry/blob/2de4adff/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListener.java ---------------------------------------------------------------------- diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListener.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListener.java deleted file mode 100644 index 11b6b4a..0000000 --- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListener.java +++ /dev/null @@ -1,431 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.sentry.binding.metastore; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreEventListener; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.events.AddPartitionEvent; -import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent; -import org.apache.hadoop.hive.metastore.events.AlterTableEvent; -import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent; -import org.apache.hadoop.hive.metastore.events.CreateTableEvent; -import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent; -import org.apache.hadoop.hive.metastore.events.DropPartitionEvent; -import org.apache.hadoop.hive.metastore.events.DropTableEvent; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.sentry.core.common.exception.SentryUserException; -import org.apache.sentry.binding.hive.conf.HiveAuthzConf; -import org.apache.sentry.binding.hive.conf.HiveAuthzConf.AuthzConfVars; -import org.apache.sentry.core.common.Authorizable; -import org.apache.sentry.core.model.db.Database; -import org.apache.sentry.core.model.db.Server; -import org.apache.sentry.core.model.db.Table; -import org.apache.sentry.provider.db.SentryMetastoreListenerPlugin; -import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient; -import org.apache.sentry.service.thrift.SentryServiceClientFactory; -import org.apache.sentry.service.thrift.ServiceConstants.ConfUtilties; -import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - - -/** - * SentryMetastorePostEventListener class is HMS plugin for listening to - * all DDL events and deliver those events to Sentry server. This class - * sends all DDL events to the Sentry server through thrift API. - * - * In case any actual event fails, skipping deliver the event to Sentry server. 
- */ -public class SentryMetastorePostEventListener extends MetaStoreEventListener { - - private static final Logger LOGGER = LoggerFactory.getLogger(SentryMetastoreListenerPlugin.class); - private final HiveAuthzConf authzConf; - private final Server server; - - private List<SentryMetastoreListenerPlugin> sentryPlugins = new ArrayList<SentryMetastoreListenerPlugin>(); - - public SentryMetastorePostEventListener(Configuration config) { - super(config); - - if (!(config instanceof HiveConf)) { - String error = "Could not initialize Plugin - Configuration is not an instanceof HiveConf"; - LOGGER.error(error); - throw new RuntimeException(error); - } - - authzConf = HiveAuthzConf.getAuthzConf((HiveConf)config); - server = new Server(authzConf.get(AuthzConfVars.AUTHZ_SERVER_NAME.getVar())); - Iterable<String> pluginClasses = ConfUtilties.CLASS_SPLITTER - .split(config.get(ServerConfig.SENTRY_METASTORE_PLUGINS, ServerConfig.SENTRY_METASTORE_PLUGINS_DEFAULT).trim()); - - try { - for (String pluginClassStr : pluginClasses) { - Class<?> clazz = config.getClassByName(pluginClassStr); - if (!SentryMetastoreListenerPlugin.class.isAssignableFrom(clazz)) { - throw new IllegalArgumentException("Class \\" - + pluginClassStr + "\\ is not a " - + SentryMetastoreListenerPlugin.class.getName()); - } - SentryMetastoreListenerPlugin plugin = (SentryMetastoreListenerPlugin) clazz - .getConstructor(Configuration.class, Configuration.class) - .newInstance(config, authzConf); - sentryPlugins.add(plugin); - } - } catch (Exception e) { - LOGGER.error("Could not initialize HMS Plugin: SentryMetastorePostEventListener !!", e); - throw new RuntimeException(e); - } - } - - @Override - public void onCreateTable (CreateTableEvent tableEvent) throws MetaException { - - // don't sync paths/privileges if the operation has failed - if (!tableEvent.getStatus()) { - LOGGER.debug("Skip sync paths/privileges with Sentry server for onCreateTable event," + - " since the operation failed. \n"); - return; - } - - if (tableEvent.getTable().getSd().getLocation() != null) { - String authzObj = tableEvent.getTable().getDbName() + "." - + tableEvent.getTable().getTableName(); - String path = tableEvent.getTable().getSd().getLocation(); - for (SentryMetastoreListenerPlugin plugin : sentryPlugins) { - plugin.addPath(authzObj, path); - } - } - - // drop the privileges on the given table, in case if anything was left - // behind during the drop - if (!syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_CREATE_WITH_POLICY_STORE)) { - return; - } - - dropSentryTablePrivilege(tableEvent.getTable().getDbName(), - tableEvent.getTable().getTableName()); - } - - @Override - public void onDropTable(DropTableEvent tableEvent) throws MetaException { - - // don't sync paths/privileges if the operation has failed - if (!tableEvent.getStatus()) { - LOGGER.debug("Skip syncing paths/privileges with Sentry server for onDropTable event," + - " since the operation failed. \n"); - return; - } - - if (tableEvent.getTable().getSd().getLocation() != null) { - String authzObj = tableEvent.getTable().getDbName() + "." 
- + tableEvent.getTable().getTableName(); - for (SentryMetastoreListenerPlugin plugin : sentryPlugins) { - plugin.removeAllPaths(authzObj, null); - } - } - // drop the privileges on the given table - if (!syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_DROP_WITH_POLICY_STORE)) { - return; - } - - if (!tableEvent.getStatus()) { - return; - } - - dropSentryTablePrivilege(tableEvent.getTable().getDbName(), - tableEvent.getTable().getTableName()); - } - - @Override - public void onCreateDatabase(CreateDatabaseEvent dbEvent) - throws MetaException { - - // don't sync paths/privileges if the operation has failed - if (!dbEvent.getStatus()) { - LOGGER.debug("Skip syncing paths/privileges with Sentry server for onCreateDatabase event," + - " since the operation failed. \n"); - return; - } - - if (dbEvent.getDatabase().getLocationUri() != null) { - String authzObj = dbEvent.getDatabase().getName(); - String path = dbEvent.getDatabase().getLocationUri(); - for (SentryMetastoreListenerPlugin plugin : sentryPlugins) { - plugin.addPath(authzObj, path); - } - } - // drop the privileges on the database, in case anything left behind during - // last drop db - if (!syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_CREATE_WITH_POLICY_STORE)) { - return; - } - - dropSentryDbPrivileges(dbEvent.getDatabase().getName()); - } - - /** - * Drop the privileges on the database. Note that child tables will be - * dropped individually by client, so we just need to handle the removing - * the db privileges. The table drop should cleanup the table privileges. - */ - @Override - public void onDropDatabase(DropDatabaseEvent dbEvent) throws MetaException { - - // don't sync paths/privileges if the operation has failed - if (!dbEvent.getStatus()) { - LOGGER.debug("Skip syncing paths/privileges with Sentry server for onDropDatabase event," + " since the operation failed. \n"); - return; - } - - String authzObj = dbEvent.getDatabase().getName(); - for (SentryMetastoreListenerPlugin plugin : sentryPlugins) { - List<String> tNames = dbEvent.getHandler().get_all_tables(authzObj); - plugin.removeAllPaths(authzObj, tNames); - } - if (!syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_DROP_WITH_POLICY_STORE)) { - return; - } - - dropSentryDbPrivileges(dbEvent.getDatabase().getName()); - } - - /** - * Adjust the privileges when table is renamed - */ - @Override - public void onAlterTable (AlterTableEvent tableEvent) throws MetaException { - - // don't sync privileges if the operation has failed - if (!tableEvent.getStatus()) { - LOGGER.debug("Skip syncing privileges with Sentry server for onAlterTable event," + - " since the operation failed. 
\n"); - return; - } - - String oldLoc = null, newLoc = null; - - org.apache.hadoop.hive.metastore.api.Table oldTal = tableEvent.getOldTable(); - org.apache.hadoop.hive.metastore.api.Table newTal = tableEvent.getNewTable(); - - if(oldTal != null && oldTal.getSd() !=null) { - oldLoc = oldTal.getSd().getLocation(); - } - if (newTal != null && newTal.getSd() != null) { - newLoc = newTal.getSd().getLocation(); - } - if(oldLoc != null && newLoc != null && !oldLoc.equals(newLoc)) { - String oldDbName = tableEvent.getOldTable().getDbName(); - String oldTbName = tableEvent.getOldTable().getTableName(); - String newTbName = tableEvent.getNewTable().getTableName(); - String newDbName = tableEvent.getNewTable().getDbName(); - renameSentryTablePrivilege(oldDbName, oldTbName, oldLoc, newDbName, newTbName, newLoc); - } - } - - @Override - public void onAlterPartition(AlterPartitionEvent partitionEvent) - throws MetaException { - - // don't sync privileges if the operation has failed - if (!partitionEvent.getStatus()) { - LOGGER.debug("Skip syncing privileges with Sentry server for onAlterPartition event," + - " since the operation failed. \n"); - return; - } - - String oldLoc = null, newLoc = null; - if (partitionEvent.getOldPartition() != null) { - oldLoc = partitionEvent.getOldPartition().getSd().getLocation(); - } - if (partitionEvent.getNewPartition() != null) { - newLoc = partitionEvent.getNewPartition().getSd().getLocation(); - } - - if (oldLoc != null && newLoc != null && !oldLoc.equals(newLoc)) { - String authzObj = - partitionEvent.getOldPartition().getDbName() + "." - + partitionEvent.getOldPartition().getTableName(); - for (SentryMetastoreListenerPlugin plugin : sentryPlugins) { - plugin.renameAuthzObject(authzObj, oldLoc, - authzObj, newLoc); - } - } - } - - @Override - public void onAddPartition(AddPartitionEvent partitionEvent) - throws MetaException { - - // don't sync path if the operation has failed - if (!partitionEvent.getStatus()) { - LOGGER.debug("Skip syncing path with Sentry server for onAddPartition event," + " since the operation failed. \n"); - return; - } - - Iterator<Partition> partitionIterator = partitionEvent.getPartitionIterator(); - while (partitionIterator.hasNext()) { - Partition part = partitionIterator.next(); - if (part.getSd() != null && part.getSd().getLocation() != null) { - String authzObj = part.getDbName() + "." + part.getTableName(); - String path = part.getSd().getLocation(); - for (SentryMetastoreListenerPlugin plugin : sentryPlugins) { - plugin.addPath(authzObj, path); - } - } - } - super.onAddPartition(partitionEvent); - } - - @Override - public void onDropPartition(DropPartitionEvent partitionEvent) - throws MetaException { - - // don't sync path if the operation has failed - if (!partitionEvent.getStatus()) { - LOGGER.debug("Skip syncing path with Sentry server for onDropPartition event," + - " since the operation failed. \n"); - return; - } - - String authzObj = partitionEvent.getTable().getDbName() + "." 
- + partitionEvent.getTable().getTableName(); - Iterator<Partition> partitionIterator = partitionEvent.getPartitionIterator(); - while (partitionIterator.hasNext()) { - Partition part = partitionIterator.next(); - String path = part.getSd().getLocation(); - for (SentryMetastoreListenerPlugin plugin : sentryPlugins) { - plugin.removePath(authzObj, path); - } - super.onDropPartition(partitionEvent); - } - } - - private SentryPolicyServiceClient getSentryServiceClient() - throws MetaException { - try { - return SentryServiceClientFactory.create(authzConf); - } catch (Exception e) { - throw new MetaException("Failed to connect to Sentry service " - + e.getMessage()); - } - } - - private void dropSentryDbPrivileges(String dbName) throws MetaException { - List<Authorizable> authorizableTable = new ArrayList<Authorizable>(); - authorizableTable.add(server); - authorizableTable.add(new Database(dbName)); - try { - dropSentryPrivileges(authorizableTable); - } catch (SentryUserException e) { - throw new MetaException("Failed to remove Sentry policies for drop DB " - + dbName + " Error: " + e.getMessage()); - } catch (IOException e) { - throw new MetaException("Failed to find local user " + e.getMessage()); - } - - } - - private void dropSentryTablePrivilege(String dbName, String tabName) - throws MetaException { - List<Authorizable> authorizableTable = new ArrayList<Authorizable>(); - authorizableTable.add(server); - authorizableTable.add(new Database(dbName)); - authorizableTable.add(new Table(tabName)); - - try { - dropSentryPrivileges(authorizableTable); - } catch (SentryUserException e) { - throw new MetaException( - "Failed to remove Sentry policies for drop table " + dbName + "." - + tabName + " Error: " + e.getMessage()); - } catch (IOException e) { - throw new MetaException("Failed to find local user " + e.getMessage()); - } - - } - private void dropSentryPrivileges( - List<? extends Authorizable> authorizableTable) - throws SentryUserException, IOException, MetaException { - String requestorUserName = UserGroupInformation.getCurrentUser() - .getShortUserName(); - try (SentryPolicyServiceClient sentryClient = SentryServiceClientFactory.create(authzConf)) { - sentryClient.dropPrivileges(requestorUserName, authorizableTable); - } catch (Exception e) { - throw new MetaException("Failed to connect to Sentry service " - + e.getMessage()); - } - } - - private void renameSentryTablePrivilege(String oldDbName, String oldTabName, - String oldPath, String newDbName, String newTabName, String newPath) - throws MetaException { - List<Authorizable> oldAuthorizableTable = new ArrayList<Authorizable>(); - oldAuthorizableTable.add(server); - oldAuthorizableTable.add(new Database(oldDbName)); - oldAuthorizableTable.add(new Table(oldTabName)); - - List<Authorizable> newAuthorizableTable = new ArrayList<Authorizable>(); - newAuthorizableTable.add(server); - newAuthorizableTable.add(new Database(newDbName)); - newAuthorizableTable.add(new Table(newTabName)); - - if (!oldTabName.equalsIgnoreCase(newTabName) - && syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_ALTER_WITH_POLICY_STORE)) { - - SentryPolicyServiceClient sentryClient = getSentryServiceClient(); - - try { - String requestorUserName = UserGroupInformation.getCurrentUser() - .getShortUserName(); - sentryClient.renamePrivileges(requestorUserName, oldAuthorizableTable, newAuthorizableTable); - } catch (SentryUserException e) { - throw new MetaException( - "Failed to remove Sentry policies for rename table " + oldDbName - + "." 
+ oldTabName + "to " + newDbName + "." + newTabName - + " Error: " + e.getMessage()); - } catch (IOException e) { - throw new MetaException("Failed to find local user " + e.getMessage()); - } finally { - - // Close the connection after renaming privileges is done. - try { - sentryClient.close(); - } catch (Exception e) { - e.printStackTrace(); - } - } - } - // The HDFS plugin needs to know if it's a path change (set location) - for (SentryMetastoreListenerPlugin plugin : sentryPlugins) { - plugin.renameAuthzObject(oldDbName + "." + oldTabName, oldPath, - newDbName + "." + newTabName, newPath); - } - } - - private boolean syncWithPolicyStore(AuthzConfVars syncConfVar) { - return Boolean.parseBoolean(authzConf.get(syncConfVar.getVar(), "true")); - } -} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/sentry/blob/2de4adff/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListenerBase.java ---------------------------------------------------------------------- diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListenerBase.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListenerBase.java deleted file mode 100644 index 40cf17a..0000000 --- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListenerBase.java +++ /dev/null @@ -1,417 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-package org.apache.sentry.binding.metastore;
-
-import java.util.Iterator;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
-import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
-import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.DropTableEvent;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.sentry.binding.hive.conf.HiveAuthzConf;
-import org.apache.sentry.binding.hive.conf.HiveAuthzConf.AuthzConfVars;
-import org.apache.sentry.core.common.Authorizable;
-import org.apache.sentry.core.common.exception.SentryUserException;
-import org.apache.sentry.core.model.db.Database;
-import org.apache.sentry.core.model.db.Server;
-import org.apache.sentry.core.model.db.Table;
-import org.apache.sentry.provider.db.SentryMetastoreListenerPlugin;
-import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient;
-import org.apache.sentry.service.thrift.SentryServiceClientFactory;
-import org.apache.sentry.service.thrift.ServiceConstants.ConfUtilties;
-import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-public class SentryMetastorePostEventListenerBase extends MetaStoreEventListener {
-
-  private static final Logger LOGGER = LoggerFactory.getLogger(SentryMetastoreListenerPlugin.class);
-  private final HiveAuthzConf authzConf;
-  private final Server server;
-
-  protected List<SentryMetastoreListenerPlugin> sentryPlugins = new ArrayList<SentryMetastoreListenerPlugin>();
-
-  public SentryMetastorePostEventListenerBase(Configuration config) {
-    super(config);
-
-    if (!(config instanceof HiveConf)) {
-      String error = "Could not initialize Plugin - Configuration is not an instanceof HiveConf";
-      LOGGER.error(error);
-      throw new RuntimeException(error);
-    }
-
-    authzConf = HiveAuthzConf.getAuthzConf((HiveConf)config);
-    server = new Server(authzConf.get(AuthzConfVars.AUTHZ_SERVER_NAME.getVar()));
-    Iterable<String> pluginClasses = ConfUtilties.CLASS_SPLITTER
-        .split(config.get(ServerConfig.SENTRY_METASTORE_PLUGINS,
-            ServerConfig.SENTRY_METASTORE_PLUGINS_DEFAULT).trim());
-
-    try {
-      for (String pluginClassStr : pluginClasses) {
-        Class<?> clazz = config.getClassByName(pluginClassStr);
-        if (!SentryMetastoreListenerPlugin.class.isAssignableFrom(clazz)) {
-          throw new IllegalArgumentException("Class ["
-              + pluginClassStr + "] is not a "
-              + SentryMetastoreListenerPlugin.class.getName());
-        }
-        SentryMetastoreListenerPlugin plugin = (SentryMetastoreListenerPlugin) clazz
-            .getConstructor(Configuration.class, Configuration.class)
-            .newInstance(config, authzConf);
-        sentryPlugins.add(plugin);
-      }
-    } catch (Exception e) {
-      LOGGER.error("Could not initialize Plugin !!", e);
-      throw new RuntimeException(e);
-    }
-  }
-
-  @Override
-  public void onCreateTable (CreateTableEvent tableEvent) throws MetaException {
-
-    // don't sync paths/privileges if the operation has failed
-    if (!tableEvent.getStatus()) {
-      LOGGER.debug("Skip sync paths/privileges with Sentry server for onCreateTable event," +
-          " since the operation failed. \n");
-      return;
-    }
-
-    if (tableEvent.getTable().getSd().getLocation() != null) {
-      String authzObj = tableEvent.getTable().getDbName() + "."
-          + tableEvent.getTable().getTableName();
-      String path = tableEvent.getTable().getSd().getLocation();
-      for (SentryMetastoreListenerPlugin plugin : sentryPlugins) {
-        plugin.addPath(authzObj, path);
-      }
-    }
-
-    // drop the privileges on the given table, in case if anything was left
-    // behind during the drop
-    if (!syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_CREATE_WITH_POLICY_STORE)) {
-      return;
-    }
-
-    dropSentryTablePrivilege(tableEvent.getTable().getDbName(),
-        tableEvent.getTable().getTableName());
-  }
-
-  @Override
-  public void onDropTable(DropTableEvent tableEvent) throws MetaException {
-
-    // don't sync paths/privileges if the operation has failed
-    if (!tableEvent.getStatus()) {
-      LOGGER.debug("Skip syncing paths/privileges with Sentry server for onDropTable event," +
-          " since the operation failed. \n");
-      return;
-    }
-
-    if (tableEvent.getTable().getSd().getLocation() != null) {
-      String authzObj = tableEvent.getTable().getDbName() + "."
-          + tableEvent.getTable().getTableName();
-      for (SentryMetastoreListenerPlugin plugin : sentryPlugins) {
-        plugin.removeAllPaths(authzObj, null);
-      }
-    }
-    // drop the privileges on the given table
-    if (!syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_DROP_WITH_POLICY_STORE)) {
-      return;
-    }
-
-    if (!tableEvent.getStatus()) {
-      return;
-    }
-
-    dropSentryTablePrivilege(tableEvent.getTable().getDbName(),
-        tableEvent.getTable().getTableName());
-  }
-
-  @Override
-  public void onCreateDatabase(CreateDatabaseEvent dbEvent)
-      throws MetaException {
-
-    // don't sync paths/privileges if the operation has failed
-    if (!dbEvent.getStatus()) {
-      LOGGER.debug("Skip syncing paths/privileges with Sentry server for onCreateDatabase event," +
-          " since the operation failed. \n");
-      return;
-    }
-
-    if (dbEvent.getDatabase().getLocationUri() != null) {
-      String authzObj = dbEvent.getDatabase().getName();
-      String path = dbEvent.getDatabase().getLocationUri();
-      for (SentryMetastoreListenerPlugin plugin : sentryPlugins) {
-        plugin.addPath(authzObj, path);
-      }
-    }
-    // drop the privileges on the database, in case anything left behind during
-    // last drop db
-    if (!syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_CREATE_WITH_POLICY_STORE)) {
-      return;
-    }
-
-    dropSentryDbPrivileges(dbEvent.getDatabase().getName());
-  }
-
-  /**
-   * Drop the privileges on the database. Note that child tables will be
-   * dropped individually by client, so we just need to handle the removing
-   * the db privileges. The table drop should cleanup the table privileges.
-   */
-  @Override
-  public void onDropDatabase(DropDatabaseEvent dbEvent) throws MetaException {
-
-    // don't sync paths/privileges if the operation has failed
-    if (!dbEvent.getStatus()) {
-      LOGGER.debug("Skip syncing paths/privileges with Sentry server for onDropDatabase event," +
-          " since the operation failed. \n");
-      return;
-    }
-
-    String authzObj = dbEvent.getDatabase().getName();
-    for (SentryMetastoreListenerPlugin plugin : sentryPlugins) {
-      List<String> tNames = dbEvent.getHandler().get_all_tables(authzObj);
-      plugin.removeAllPaths(authzObj, tNames);
-    }
-    if (!syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_DROP_WITH_POLICY_STORE)) {
-      return;
-    }
-
-    dropSentryDbPrivileges(dbEvent.getDatabase().getName());
-  }
-
-  /**
-   * Adjust the privileges when table is renamed
-   */
-  @Override
-  public void onAlterTable (AlterTableEvent tableEvent) throws MetaException {
-
-    // don't sync privileges if the operation has failed
-    if (!tableEvent.getStatus()) {
-      LOGGER.debug("Skip syncing privileges with Sentry server for onAlterTable event," +
-          " since the operation failed. \n");
-      return;
-    }
-    String oldLoc = null, newLoc = null;
-    org.apache.hadoop.hive.metastore.api.Table oldTal = tableEvent.getOldTable();
-    org.apache.hadoop.hive.metastore.api.Table newTal = tableEvent.getNewTable();
-    if (oldTal != null && oldTal.getSd() != null) {
-      oldLoc = oldTal.getSd().getLocation();
-    }
-    if (newTal != null && newTal.getSd() != null) {
-      newLoc = newTal.getSd().getLocation();
-    }
-    if (oldLoc != null && newLoc != null && !oldLoc.equals(newLoc)) {
-      String oldDbName = tableEvent.getOldTable().getDbName();
-      String oldTbName = tableEvent.getOldTable().getTableName();
-      String newTbName = tableEvent.getNewTable().getTableName();
-      String newDbName = tableEvent.getNewTable().getDbName();
-      renameSentryTablePrivilege(oldDbName, oldTbName, oldLoc, newDbName, newTbName, newLoc);
-    }
-  }
-
-  @Override
-  public void onAlterPartition(AlterPartitionEvent partitionEvent)
-      throws MetaException {
-
-    // don't sync privileges if the operation has failed
-    if (!partitionEvent.getStatus()) {
-      LOGGER.debug("Skip syncing privileges with Sentry server for onAlterPartition event," +
-          " since the operation failed. \n");
-      return;
-    }
-
-    String oldLoc = null, newLoc = null;
-    if (partitionEvent.getOldPartition() != null) {
-      oldLoc = partitionEvent.getOldPartition().getSd().getLocation();
-    }
-    if (partitionEvent.getNewPartition() != null) {
-      newLoc = partitionEvent.getNewPartition().getSd().getLocation();
-    }
-
-    if (oldLoc != null && newLoc != null && !oldLoc.equals(newLoc)) {
-      String authzObj =
-          partitionEvent.getOldPartition().getDbName() + "."
-              + partitionEvent.getOldPartition().getTableName();
-      for (SentryMetastoreListenerPlugin plugin : sentryPlugins) {
-        plugin.renameAuthzObject(authzObj, oldLoc,
-            authzObj, newLoc);
-      }
-    }
-  }
-
-  @Override
-  public void onAddPartition(AddPartitionEvent partitionEvent)
-      throws MetaException {
-
-    // don't sync path if the operation has failed
-    if (!partitionEvent.getStatus()) {
-      LOGGER.debug("Skip syncing path with Sentry server for onAddPartition event," +
-          " since the operation failed. \n");
-      return;
-    }
-
-    Iterator<Partition> partitionIterator = partitionEvent.getPartitionIterator();
-    while (partitionIterator.hasNext()) {
-      Partition part = partitionIterator.next();
-      if (part.getSd() != null && part.getSd().getLocation() != null) {
-        String authzObj = part.getDbName() + "."
-            + part.getTableName();
-        String path = part.getSd().getLocation();
-        for (SentryMetastoreListenerPlugin plugin : sentryPlugins) {
-          plugin.addPath(authzObj, path);
-        }
-      }
-    }
-    super.onAddPartition(partitionEvent);
-  }
-
-  @Override
-  public void onDropPartition(DropPartitionEvent partitionEvent)
-      throws MetaException {
-
-    // don't sync path if the operation has failed
-    if (!partitionEvent.getStatus()) {
-      LOGGER.debug("Skip syncing path with Sentry server for onDropPartition event," +
-          " since the operation failed. \n");
-      return;
-    }
-
-    String authzObj = partitionEvent.getTable().getDbName() + "."
-        + partitionEvent.getTable().getTableName();
-
-    Iterator<Partition> partitionIterator = partitionEvent.getPartitionIterator();
-    while (partitionIterator.hasNext()) {
-      Partition part = partitionIterator.next();
-      String path = part.getSd().getLocation();
-      for (SentryMetastoreListenerPlugin plugin : sentryPlugins) {
-        plugin.removePath(authzObj, path);
-      }
-      super.onDropPartition(partitionEvent);
-    }
-  }
-
-  private SentryPolicyServiceClient getSentryServiceClient()
-      throws MetaException {
-    try {
-      return SentryServiceClientFactory.create(authzConf);
-    } catch (Exception e) {
-      throw new MetaException("Failed to connect to Sentry service "
-          + e.getMessage());
-    }
-  }
-
-  private void dropSentryDbPrivileges(String dbName) throws MetaException {
-    List<Authorizable> authorizableTable = new ArrayList<Authorizable>();
-    authorizableTable.add(server);
-    authorizableTable.add(new Database(dbName));
-    try {
-      dropSentryPrivileges(authorizableTable);
-    } catch (SentryUserException e) {
-      throw new MetaException("Failed to remove Sentry policies for drop DB "
-          + dbName + " Error: " + e.getMessage());
-    } catch (IOException e) {
-      throw new MetaException("Failed to find local user " + e.getMessage());
-    }
-
-  }
-
-  private void dropSentryTablePrivilege(String dbName, String tabName)
-      throws MetaException {
-    List<Authorizable> authorizableTable = new ArrayList<Authorizable>();
-    authorizableTable.add(server);
-    authorizableTable.add(new Database(dbName));
-    authorizableTable.add(new Table(tabName));
-
-    try {
-      dropSentryPrivileges(authorizableTable);
-    } catch (SentryUserException e) {
-      throw new MetaException(
-          "Failed to remove Sentry policies for drop table " + dbName + "."
-              + tabName + " Error: " + e.getMessage());
-    } catch (IOException e) {
-      throw new MetaException("Failed to find local user " + e.getMessage());
-    }
-
-  }
-  private void dropSentryPrivileges(
-      List<? extends Authorizable> authorizableTable)
-      throws SentryUserException, IOException, MetaException {
-    String requestorUserName = UserGroupInformation.getCurrentUser()
-        .getShortUserName();
-    try(SentryPolicyServiceClient sentryClient = getSentryServiceClient()) {
-      sentryClient.dropPrivileges(requestorUserName, authorizableTable);
-    } catch (Exception e) {
-      e.printStackTrace();
-    }
-  }
-
-  private void renameSentryTablePrivilege(String oldDbName, String oldTabName,
-      String oldPath, String newDbName, String newTabName, String newPath)
-      throws MetaException {
-    List<Authorizable> oldAuthorizableTable = new ArrayList<Authorizable>();
-    oldAuthorizableTable.add(server);
-    oldAuthorizableTable.add(new Database(oldDbName));
-    oldAuthorizableTable.add(new Table(oldTabName));
-
-    List<Authorizable> newAuthorizableTable = new ArrayList<Authorizable>();
-    newAuthorizableTable.add(server);
-    newAuthorizableTable.add(new Database(newDbName));
-    newAuthorizableTable.add(new Table(newTabName));
-
-    if (!oldTabName.equalsIgnoreCase(newTabName)
-        && syncWithPolicyStore(AuthzConfVars.AUTHZ_SYNC_ALTER_WITH_POLICY_STORE)) {
-
-      try (SentryPolicyServiceClient sentryClient = getSentryServiceClient()){
-        String requestorUserName = UserGroupInformation.getCurrentUser()
-            .getShortUserName();
-        sentryClient.renamePrivileges(requestorUserName, oldAuthorizableTable, newAuthorizableTable);
-      } catch (SentryUserException e) {
-        throw new MetaException(
-            "Failed to remove Sentry policies for rename table " + oldDbName
-                + "." + oldTabName + "to " + newDbName + "." + newTabName
-                + " Error: " + e.getMessage());
-      } catch (IOException e) {
-        throw new MetaException("Failed to find local user " + e.getMessage());
-      } catch (Exception e) {
-        e.printStackTrace();
-      }
-    }
-    // The HDFS plugin needs to know if it's a path change (set location)
-    for (SentryMetastoreListenerPlugin plugin : sentryPlugins) {
-      plugin.renameAuthzObject(oldDbName + "." + oldTabName, oldPath,
-          newDbName + "." + newTabName, newPath);
-    }
-  }
-
-  private boolean syncWithPolicyStore(AuthzConfVars syncConfVar) {
-    return "true"
-        .equalsIgnoreCase(authzConf.get(syncConfVar.getVar(), "true"));
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/sentry/blob/2de4adff/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListenerNotificationLog.java
----------------------------------------------------------------------
diff --git a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListenerNotificationLog.java b/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListenerNotificationLog.java
deleted file mode 100644
index 9050231..0000000
--- a/sentry-binding/sentry-binding-hive/src/main/java/org/apache/sentry/binding/metastore/SentryMetastorePostEventListenerNotificationLog.java
+++ /dev/null
@@ -1,396 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.sentry.binding.metastore;
-
-import java.util.concurrent.TimeUnit;
-
-import com.google.common.base.Strings;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
-import org.apache.hadoop.hive.metastore.RawStore;
-import org.apache.hadoop.hive.metastore.RawStoreProxy;
-import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
-import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
-import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.DropTableEvent;
-import org.apache.hive.hcatalog.common.HCatConstants;
-import org.apache.sentry.binding.metastore.messaging.json.SentryJSONMessageFactory;
-import org.apache.sentry.provider.db.SentryMetastoreListenerPlugin;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.commons.lang3.builder.ToStringBuilder;
-/*
-A HMS listener class which should ideally go into the transaction which persists the Hive metadata.
-This class writes all DDL events to the NotificationLog through rawstore.addNotificationEvent(event)
-This class is very similar to DbNotificationListener, except:
-1. It uses a custom SentryJSONMessageFactory which adds additional information to the message part of the event
-   to avoid another round trip from the clients
-2. It handles the cases where actual operation has failed, and hence skips writing to the notification log.
-3. Has additional validations to make sure event has the required information.
-
-This can be replaced with DbNotificationListener in future and sentry's message factory can be plugged in if:
-- HIVE-14011 is fixed: Make MessageFactory truly pluggable
-- 2 and 3 above are handled in DbNotificationListener
-*/
-
-public class SentryMetastorePostEventListenerNotificationLog extends MetaStoreEventListener {
-
-  private static final Logger LOGGER = LoggerFactory.getLogger(SentryMetastoreListenerPlugin.class);
-  private RawStore rs;
-  private HiveConf hiveConf;
-  SentryJSONMessageFactory messageFactory;
-
-  private static SentryMetastorePostEventListenerNotificationLog.CleanerThread cleaner = null;
-
-  //Same as DbNotificationListener to make the transition back easy
-  private synchronized void init(HiveConf conf) {
-    try {
-      this.rs = RawStoreProxy.getProxy(conf, conf, conf.getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL), 999999);
-    } catch (MetaException var3) {
-      LOGGER.error("Unable to connect to raw store, notifications will not be tracked", var3);
-      this.rs = null;
-    }
-
-    if(cleaner == null && this.rs != null) {
-      cleaner = new SentryMetastorePostEventListenerNotificationLog.CleanerThread(conf, this.rs);
-      cleaner.start();
-    }
-  }
-
-  public SentryMetastorePostEventListenerNotificationLog(Configuration config) {
-    super(config);
-    // The code in MetastoreUtils.getMetaStoreListeners() that calls this looks for a constructor
-    // with a Configuration parameter, so we have to declare config as Configuration. But it
-    // actually passes a HiveConf, which we need. So we'll do this ugly down cast.
-    if (!(config instanceof HiveConf)) {
-      String error = "Could not initialize Plugin - Configuration is not an instanceof HiveConf";
-      LOGGER.error(error);
-      throw new RuntimeException(error);
-    }
-    hiveConf = (HiveConf)config;
-    messageFactory = new SentryJSONMessageFactory();
-    init(hiveConf);
-  }
-
-  @Override
-  public void onCreateDatabase(CreateDatabaseEvent dbEvent)
-      throws MetaException {
-
-    // do not write to Notification log if the operation has failed
-    if (!dbEvent.getStatus()) {
-      LOGGER.info("Skipping writing to NotificationLog as the Create database event failed");
-      return;
-    }
-
-    String location = dbEvent.getDatabase().getLocationUri();
-    if (Strings.isNullOrEmpty(location)) {
-      throw new SentryMalformedEventException("CreateDatabaseEvent has invalid location", dbEvent);
-    }
-    String dbName = dbEvent.getDatabase().getName();
-    if (Strings.isNullOrEmpty(dbName)) {
-      throw new SentryMalformedEventException("CreateDatabaseEvent has invalid dbName", dbEvent);
-    }
-
-    NotificationEvent event = new NotificationEvent(0L, now(), HCatConstants.HCAT_CREATE_DATABASE_EVENT,
-        messageFactory.buildCreateDatabaseMessage(dbEvent.getDatabase()).toString());
-    event.setDbName(dbName);
-    this.enqueue(event);
-  }
-
-  @Override
-  public void onDropDatabase(DropDatabaseEvent dbEvent) throws MetaException {
-
-    // do not write to Notification log if the operation has failed
-    if (!dbEvent.getStatus()) {
-      LOGGER.info("Skipping writing to NotificationLog as the Drop database event failed");
-      return;
-    }
-
-    String dbName = dbEvent.getDatabase().getName();
-    if (dbName == null || dbName.isEmpty()) {
-      throw new SentryMalformedEventException("DropDatabaseEvent has invalid dbName", dbEvent);
-    }
-
-    NotificationEvent event = new NotificationEvent(0L, now(), HCatConstants.HCAT_DROP_DATABASE_EVENT,
-        messageFactory.buildDropDatabaseMessage(dbEvent.getDatabase()).toString());
-    event.setDbName(dbName);
-    this.enqueue(event);
-  }
-
-  @Override
-  public void onCreateTable (CreateTableEvent tableEvent) throws MetaException {
-
-    // do not write to Notification log if the operation has failed
-    if (!tableEvent.getStatus()) {
-      LOGGER.info("Skipping writing to NotificationLog as the Create table event failed");
-      return;
-    }
-
-    String dbName = tableEvent.getTable().getDbName();
-    if (dbName == null || dbName.isEmpty()) {
-      throw new SentryMalformedEventException("CreateTableEvent has invalid dbName", tableEvent);
-    }
-    String tableName = tableEvent.getTable().getTableName();
-    if (tableName == null || tableName.isEmpty()) {
-      throw new SentryMalformedEventException("CreateTableEvent has invalid tableName", tableEvent);
-    }
-    // Create table event should also contain a location.
-    // But, Create view also generates a Create table event, but it does not have a location.
-    // Create view is identified by the tableType. But turns out tableType is not set in some cases.
-    // We assume that tableType is set for all create views.
-    //TODO: Location can be null/empty, handle that in HMSFollower
-    String tableType = tableEvent.getTable().getTableType();
-    if(!(tableType != null && tableType.equals(TableType.VIRTUAL_VIEW.name()))) {
-      if (tableType == null) {
-        LOGGER.warn("TableType is null, assuming it is not TableType.VIRTUAL_VIEW: tableEvent", tableEvent);
-      }
-      String location = tableEvent.getTable().getSd().getLocation();
-      if (location == null || location.isEmpty()) {
-        throw new SentryMalformedEventException("CreateTableEvent has invalid location", tableEvent);
-      }
-    }
-    NotificationEvent event = new NotificationEvent(0L, now(), HCatConstants.HCAT_CREATE_TABLE_EVENT,
-        messageFactory.buildCreateTableMessage(tableEvent.getTable()).toString());
-    event.setDbName(dbName);
-    event.setTableName(tableName);
-    this.enqueue(event);
-  }
-
-  @Override
-  public void onDropTable(DropTableEvent tableEvent) throws MetaException {
-
-    // do not write to Notification log if the operation has failed
-    if (!tableEvent.getStatus()) {
-      LOGGER.info("Skipping writing to NotificationLog as the Drop table event failed");
-      return;
-    }
-
-    String dbName = tableEvent.getTable().getDbName();
-    if (dbName == null || dbName.isEmpty()) {
-      throw new SentryMalformedEventException("DropTableEvent has invalid dbName", tableEvent);
-    }
-    String tableName = tableEvent.getTable().getTableName();
-    if (tableName == null || tableName.isEmpty()) {
-      throw new SentryMalformedEventException("DropTableEvent has invalid tableName", tableEvent);
-    }
-
-    NotificationEvent event = new NotificationEvent(0L, now(), HCatConstants.HCAT_DROP_TABLE_EVENT,
-        messageFactory.buildDropTableMessage(tableEvent.getTable()).toString());
-    event.setDbName(dbName);
-    event.setTableName(tableName);
-    this.enqueue(event);
-  }
-
-  @Override
-  public void onAlterTable (AlterTableEvent tableEvent) throws MetaException {
-
-    // do not write to Notification log if the operation has failed
-    if (!tableEvent.getStatus()) {
-      LOGGER.info("Skipping writing to NotificationLog as the Alter table event failed");
-      return;
-    }
-
-    String dbName = tableEvent.getNewTable().getDbName();
-    if (dbName == null || dbName.isEmpty()) {
-      throw new SentryMalformedEventException("AlterTableEvent's newTable has invalid dbName", tableEvent);
-    }
-    String tableName = tableEvent.getNewTable().getTableName();
-    if (tableName == null || tableName.isEmpty()) {
-      throw new SentryMalformedEventException("AlterTableEvent's newTable has invalid tableName", tableEvent);
-    }
-    dbName = tableEvent.getOldTable().getDbName();
-    if (dbName == null || dbName.isEmpty()) {
-      throw new SentryMalformedEventException("AlterTableEvent's oldTable has invalid dbName", tableEvent);
-    }
-    tableName = tableEvent.getOldTable().getTableName();
-    if (tableName == null || tableName.isEmpty()) {
-      throw new SentryMalformedEventException("AlterTableEvent's oldTable has invalid tableName", tableEvent);
-    }
-    //Alter view also generates an alter table event, but it does not have a location
-    //TODO: Handle this case in Sentry
-    if(!tableEvent.getOldTable().getTableType().equals(TableType.VIRTUAL_VIEW.name())) {
-      String location = tableEvent.getNewTable().getSd().getLocation();
-      if (location == null || location.isEmpty()) {
-        throw new SentryMalformedEventException("AlterTableEvent's newTable has invalid location", tableEvent);
-      }
-      location = tableEvent.getOldTable().getSd().getLocation();
-      if (location == null || location.isEmpty()) {
-        throw new SentryMalformedEventException("AlterTableEvent's oldTable has invalid location", tableEvent);
-      }
-    }
-
-    NotificationEvent event = new NotificationEvent(0L, now(), HCatConstants.HCAT_ALTER_TABLE_EVENT,
-        messageFactory.buildAlterTableMessage(tableEvent.getOldTable(), tableEvent.getNewTable()).toString());
-    event.setDbName(tableEvent.getNewTable().getDbName());
-    event.setTableName(tableEvent.getNewTable().getTableName());
-    this.enqueue(event);
-  }
-
-  @Override
-  public void onAlterPartition(AlterPartitionEvent partitionEvent)
-      throws MetaException {
-
-    // do not write to Notification log if the operation has failed
-    if (!partitionEvent.getStatus()) {
-      LOGGER.info("Skipping writing to NotificationLog as the Alter partition event failed");
-      return;
-    }
-
-    String dbName = partitionEvent.getNewPartition().getDbName();
-    if (dbName == null || dbName.isEmpty()) {
-      throw new SentryMalformedEventException("AlterPartitionEvent's newPartition has invalid dbName", partitionEvent);
-    }
-    String tableName = partitionEvent.getNewPartition().getTableName();
-    if (tableName == null || tableName.isEmpty()) {
-      throw new SentryMalformedEventException("AlterPartitionEvent's newPartition has invalid tableName", partitionEvent);
-    }
-
-    //TODO: Need more validations, but it is tricky as there are many variations and validations change for each one
-    // Alter partition Location
-    // Alter partition property
-    // Any more?
-
-    NotificationEvent event = new NotificationEvent(0L, now(), HCatConstants.HCAT_ALTER_PARTITION_EVENT,
-        messageFactory.buildAlterPartitionMessage(partitionEvent.getTable(),
-            partitionEvent.getOldPartition(), partitionEvent.getNewPartition()).toString());
-
-    event.setDbName(partitionEvent.getNewPartition().getDbName());
-    event.setTableName(partitionEvent.getNewPartition().getTableName());
-    this.enqueue(event);
-  }
-
-  @Override
-  public void onAddPartition(AddPartitionEvent partitionEvent)
-      throws MetaException {
-
-    // do not write to Notification log if the operation has failed
-    if (!partitionEvent.getStatus()) {
-      LOGGER.info("Skipping writing to NotificationLog as the Add partition event failed");
-      return;
-    }
-
-    String dbName = partitionEvent.getTable().getDbName();
-    if (dbName == null || dbName.isEmpty()) {
-      throw new SentryMalformedEventException("AddPartitionEvent has invalid dbName", partitionEvent);
-    }
-    String tableName = partitionEvent.getTable().getTableName();
-    if (tableName == null || tableName.isEmpty()) {
-      throw new SentryMalformedEventException("AddPartitionEvent's newPartition has invalid tableName", partitionEvent);
-    }
-
-    //TODO: Need more validations?
-
-    NotificationEvent event = new NotificationEvent(0L, now(), HCatConstants.HCAT_ADD_PARTITION_EVENT,
-        messageFactory.buildAddPartitionMessage(partitionEvent.getTable(), partitionEvent.getPartitionIterator()).toString());
-
-    event.setDbName(partitionEvent.getTable().getDbName());
-    event.setTableName(partitionEvent.getTable().getTableName());
-    this.enqueue(event);
-  }
-
-  @Override
-  public void onDropPartition(DropPartitionEvent partitionEvent)
-      throws MetaException {
-
-    // do not write to Notification log if the operation has failed
-    if (!partitionEvent.getStatus()) {
-      LOGGER.info("Skipping writing to NotificationLog as the Drop partition event failed");
-      return;
-    }
-
-    NotificationEvent event = new NotificationEvent(0L, now(), HCatConstants.HCAT_DROP_PARTITION_EVENT,
-        messageFactory.buildDropPartitionMessage(partitionEvent.getTable(), partitionEvent.getPartitionIterator()).toString());
-    //TODO: Why is this asymmetric with add partitions(s)?
-    // Seems like adding multiple partitions generate a single event
-    // where as single partition drop generated an event?
-
-    event.setDbName(partitionEvent.getTable().getDbName());
-    event.setTableName(partitionEvent.getTable().getTableName());
-    this.enqueue(event);
-  }
-
-  private int now() {
-    long millis = System.currentTimeMillis();
-    millis /= 1000;
-    if (millis > Integer.MAX_VALUE) {
-      LOGGER.warn("We've passed max int value in seconds since the epoch, " +
-          "all notification times will be the same!");
-      return Integer.MAX_VALUE;
-    }
-    return (int)millis;
-  }
-
-  //Same as DbNotificationListener to make the transition back easy
-  private void enqueue(NotificationEvent event) {
-    if(this.rs != null) {
-      this.rs.addNotificationEvent(event);
-    } else {
-      LOGGER.warn("Dropping event " + event + " since notification is not running.");
-    }
-  }
-
-  //Same as DbNotificationListener to make the transition back easy
-  private static class CleanerThread extends Thread {
-    private RawStore rs;
-    private int ttl;
-
-    CleanerThread(HiveConf conf, RawStore rs) {
-      super("CleanerThread");
-      this.rs = rs;
-      this.setTimeToLive(conf.getTimeVar(HiveConf.ConfVars.METASTORE_EVENT_DB_LISTENER_TTL, TimeUnit.SECONDS));
-      this.setDaemon(true);
-    }
-
-    public void run() {
-      while(true) {
-        this.rs.cleanNotificationEvents(this.ttl);
-
-        try {
-          Thread.sleep(60000L);
-        } catch (InterruptedException var2) {
-          LOGGER.info("Cleaner thread sleep interupted", var2);
-        }
-      }
-    }
-
-    public void setTimeToLive(long configTtl) {
-      if(configTtl > 2147483647L) {
-        this.ttl = 2147483647;
-      } else {
-        this.ttl = (int)configTtl;
-      }
-
-    }
-  }
-  private class SentryMalformedEventException extends MetaException {
-    SentryMalformedEventException(String msg, Object event) {
-      //toString is not implemented in Event classes,
-      // hence using reflection to print the details of the Event object.
-      super(msg + "Event: " + ToStringBuilder.reflectionToString(event));
-    }
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/sentry/blob/2de4adff/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/FullUpdateModifier.java
----------------------------------------------------------------------
diff --git a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/FullUpdateModifier.java b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/FullUpdateModifier.java
index 2501970..c30d982 100644
--- a/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/FullUpdateModifier.java
+++ b/sentry-provider/sentry-provider-db/src/main/java/org/apache/sentry/service/thrift/FullUpdateModifier.java
@@ -20,8 +20,8 @@ package org.apache.sentry.service.thrift;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hive.hcatalog.messaging.HCatEventMessage;
-import org.apache.hive.hcatalog.messaging.MessageDeserializer;
+import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer;
+import org.apache.hadoop.hive.metastore.messaging.EventMessage;
 import org.apache.sentry.binding.metastore.messaging.json.SentryJSONAddPartitionMessage;
 import org.apache.sentry.binding.metastore.messaging.json.SentryJSONAlterPartitionMessage;
 import org.apache.sentry.binding.metastore.messaging.json.SentryJSONAlterTableMessage;
@@ -71,8 +71,8 @@ final class FullUpdateModifier {
   // Tests use mock serializers and thus we do not have to construct proper events.
   static void applyEvent(Map<String, Collection<String>> image, NotificationEvent event,
       MessageDeserializer deserializer) {
-    HCatEventMessage.EventType eventType =
-        HCatEventMessage.EventType.valueOf(event.getEventType());
+    EventMessage.EventType eventType =
+        EventMessage.EventType.valueOf(event.getEventType());
     switch (eventType) {
       case CREATE_DATABASE:

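[Editor's note, illustration only; not part of this commit.] The FullUpdateModifier hunk above replaces the HCatalog HCatEventMessage API with the Hive 2.3 metastore messaging classes (org.apache.hadoop.hive.metastore.messaging.EventMessage and MessageDeserializer). The minimal sketch below, assuming a Hive 2.3.2 client on the classpath, shows the same pattern applyEvent uses: resolve the event type from the NotificationEvent and deserialize its JSON payload with whatever MessageDeserializer is in play (in Sentry's case typically the Sentry JSON deserializer). The class and helper name describeEvent are hypothetical and exist only for this example.

import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.hadoop.hive.metastore.messaging.CreateTableMessage;
import org.apache.hadoop.hive.metastore.messaging.DropTableMessage;
import org.apache.hadoop.hive.metastore.messaging.EventMessage;
import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer;

public final class NotificationEventSketch {

  // Hypothetical helper: summarizes a notification-log entry using the
  // relocated Hive 2.3 messaging API, mirroring the switch in applyEvent above.
  static String describeEvent(NotificationEvent event, MessageDeserializer deserializer) {
    // The event type string stored in the notification log maps onto the EventType enum.
    EventMessage.EventType eventType = EventMessage.EventType.valueOf(event.getEventType());
    switch (eventType) {
      case CREATE_TABLE: {
        CreateTableMessage msg = deserializer.getCreateTableMessage(event.getMessage());
        return "created table " + msg.getDB() + "." + msg.getTable();
      }
      case DROP_TABLE: {
        DropTableMessage msg = deserializer.getDropTableMessage(event.getMessage());
        return "dropped table " + msg.getDB() + "." + msg.getTable();
      }
      default:
        // Other event types (databases, partitions, alters) follow the same pattern.
        return "unhandled event type " + eventType;
    }
  }
}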