rdblue commented on a change in pull request #1783:
URL: https://github.com/apache/iceberg/pull/1783#discussion_r533621395



##########
File path: 
spark3/src/main/java/org/apache/iceberg/spark/source/IcebergSource.java
##########
@@ -56,48 +61,62 @@ public boolean supportsExternalMetadata() {
   }
 
   @Override
-  public SparkTable getTable(StructType schema, Transform[] partitioning, 
Map<String, String> options) {
-    // Get Iceberg table from options
-    Configuration conf = SparkSession.active().sessionState().newHadoopConf();
-    Table icebergTable = getTableAndResolveHadoopConfiguration(options, conf);
-
-    // Build Spark table based on Iceberg table, and return it
-    // Eagerly refresh the table before reading to ensure views containing 
this table show up-to-date data
-    return new SparkTable(icebergTable, schema, true);
+  public Table getTable(StructType schema, Transform[] partitioning, 
Map<String, String> options) {
+    String catalogName = extractCatalog(new CaseInsensitiveStringMap(options));
+    Identifier ident = extractIdentifier(new 
CaseInsensitiveStringMap(options));
+    CatalogManager catalogManager = 
SparkSession.active().sessionState().catalogManager();
+    CatalogPlugin catalog = catalogManager.catalog(catalogName);
+    try {
+      if (catalog instanceof TableCatalog) {
+        return ((TableCatalog) catalog).loadTable(ident);
+      }
+    } catch (NoSuchTableException e) {
+      throw new org.apache.iceberg.exceptions.NoSuchTableException(e, "Cannot 
find table for %s.", ident);
+    }
+    throw new org.apache.iceberg.exceptions.NoSuchTableException("Cannot find 
table for %s.", ident);
   }
 
-  protected Table findTable(Map<String, String> options, Configuration conf) {
+  private Pair<String, TableIdentifier> 
tableIdentifier(CaseInsensitiveStringMap options) {
+    CatalogManager catalogManager = 
SparkSession.active().sessionState().catalogManager();
+    Namespace defaultNamespace = 
Namespace.of(catalogManager.currentNamespace());
     Preconditions.checkArgument(options.containsKey("path"), "Cannot open 
table: path is not set");
     String path = options.get("path");
-
-    if (path.contains("/")) {
-      HadoopTables tables = new HadoopTables(conf);
-      return tables.load(path);
+    List<String> ident;
+    try {
+      ident = 
scala.collection.JavaConverters.seqAsJavaList(SparkSession.active().sessionState().sqlParser().parseMultipartIdentifier(path));
+    } catch (ParseException e) {
+      try {
+        ident = 
scala.collection.JavaConverters.seqAsJavaList(SparkSession.active().sessionState().sqlParser().parseMultipartIdentifier(String.format("`%s`",
 path)));
+      } catch (ParseException ignored) {
+        throw new RuntimeException(e);
+      }
+    }
+    if (ident.size() == 1) {
+      return Pair.of(null, TableIdentifier.of(defaultNamespace, ident.get(0)));
+    } else if (ident.size() == 2) {
+      if (catalogManager.isCatalogRegistered(ident.get(0))) {
+        return Pair.of(ident.get(0), TableIdentifier.of(defaultNamespace, 
ident.get(1))); //todo what if path?

Review comment:
     This shouldn't fill in the default namespace. If the identifier has two 
parts, like `prod.items`, then it is not correct to modify it to be 
`prod.default.items`.
   
   I think this should use the same logic as the `else` branch.

##########
File path: 
spark3/src/main/java/org/apache/iceberg/spark/source/IcebergSource.java
##########
@@ -56,48 +61,62 @@ public boolean supportsExternalMetadata() {
   }
 
   @Override
-  public SparkTable getTable(StructType schema, Transform[] partitioning, 
Map<String, String> options) {
-    // Get Iceberg table from options
-    Configuration conf = SparkSession.active().sessionState().newHadoopConf();
-    Table icebergTable = getTableAndResolveHadoopConfiguration(options, conf);
-
-    // Build Spark table based on Iceberg table, and return it
-    // Eagerly refresh the table before reading to ensure views containing 
this table show up-to-date data
-    return new SparkTable(icebergTable, schema, true);
+  public Table getTable(StructType schema, Transform[] partitioning, 
Map<String, String> options) {
+    String catalogName = extractCatalog(new CaseInsensitiveStringMap(options));
+    Identifier ident = extractIdentifier(new 
CaseInsensitiveStringMap(options));
+    CatalogManager catalogManager = 
SparkSession.active().sessionState().catalogManager();
+    CatalogPlugin catalog = catalogManager.catalog(catalogName);
+    try {
+      if (catalog instanceof TableCatalog) {
+        return ((TableCatalog) catalog).loadTable(ident);
+      }
+    } catch (NoSuchTableException e) {
+      throw new org.apache.iceberg.exceptions.NoSuchTableException(e, "Cannot 
find table for %s.", ident);
+    }
+    throw new org.apache.iceberg.exceptions.NoSuchTableException("Cannot find 
table for %s.", ident);
   }
 
-  protected Table findTable(Map<String, String> options, Configuration conf) {
+  private Pair<String, TableIdentifier> 
tableIdentifier(CaseInsensitiveStringMap options) {
+    CatalogManager catalogManager = 
SparkSession.active().sessionState().catalogManager();
+    Namespace defaultNamespace = 
Namespace.of(catalogManager.currentNamespace());
     Preconditions.checkArgument(options.containsKey("path"), "Cannot open 
table: path is not set");
     String path = options.get("path");
-
-    if (path.contains("/")) {
-      HadoopTables tables = new HadoopTables(conf);
-      return tables.load(path);
+    List<String> ident;
+    try {
+      ident = 
scala.collection.JavaConverters.seqAsJavaList(SparkSession.active().sessionState().sqlParser().parseMultipartIdentifier(path));
+    } catch (ParseException e) {
+      try {
+        ident = 
scala.collection.JavaConverters.seqAsJavaList(SparkSession.active().sessionState().sqlParser().parseMultipartIdentifier(String.format("`%s`",
 path)));
+      } catch (ParseException ignored) {
+        throw new RuntimeException(e);
+      }
+    }
+    if (ident.size() == 1) {

Review comment:
       Nit: could you separate the control flow statements with a newline?




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to