bowenliang123 commented on code in PR #3197:
URL: https://github.com/apache/incubator-kyuubi/pull/3197#discussion_r939907558
##########
extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RuleApplyRowFilterAndDataMasking.scala:
##########
@@ -30,29 +30,40 @@ import
org.apache.kyuubi.plugin.spark.authz.util.RowFilterAndDataMaskingMarker
class RuleApplyRowFilterAndDataMasking(spark: SparkSession) extends
Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = {
- // Apply FilterAndMasking and wrap HiveTableRelation/LogicalRelation with
+ // Apply FilterAndMasking and wrap
HiveTableRelation/LogicalRelation/DataSourceV2Relation with
// RowFilterAndDataMaskingMarker if it is not wrapped yet.
plan mapChildren {
case p: RowFilterAndDataMaskingMarker => p
case hiveTableRelation if hasResolvedHiveTable(hiveTableRelation) =>
val table = getHiveTable(hiveTableRelation)
- applyFilterAndMasking(hiveTableRelation, table, spark)
+ applyFilterAndMasking(hiveTableRelation, table.identifier, spark)
case logicalRelation if hasResolvedDatasourceTable(logicalRelation) =>
val table = getDatasourceTable(logicalRelation)
if (table.isEmpty) {
logicalRelation
} else {
- applyFilterAndMasking(logicalRelation, table.get, spark)
+ applyFilterAndMasking(logicalRelation, table.get.identifier, spark)
+ }
+ case datasourceV2Relation if
hasResolvedDatasourceV2Table(datasourceV2Relation) =>
+ val table = getDatasourceV2Table(datasourceV2Relation)
+ if (table == null) {
+ datasourceV2Relation
+ } else {
+ val catalogDbTable = table.name.split("\\.")
Review Comment:
I have also noticed the shortcomings. The current implementation is not
as elegant as expected.
In `org.apache.spark.sql.connector.catalog.Table`, the `name()` method does
not provide enough information to identify all these situations.
Suggestion 0: (current implementation)
forcibly splitting the table name to identify the catalog, db, and table. In my
case, I am using Iceberg via spark_catalog, so the missing catalog name works fine.
Suggestion 1:
only apply to `org.apache.iceberg.spark.source.SparkTable` for Iceberg
tables instead of all DataSourceV2 tables.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]