wangyum commented on code in PR #26754:
URL: https://github.com/apache/spark/pull/26754#discussion_r949808294


##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala:
##########
@@ -220,27 +220,47 @@ case class DataSourceAnalysis(conf: SQLConf) extends Rule[LogicalPlan] with Cast
  * data source.
  */
 class FindDataSourceTable(sparkSession: SparkSession) extends Rule[LogicalPlan] {
-  private def readDataSourceTable(table: CatalogTable): LogicalPlan = {
+  private def readDataSourceTable(
+      table: CatalogTable, partialListing: Boolean = false): LogicalPlan = {
     val qualifiedTableName = QualifiedTableName(table.database, table.identifier.table)
     val catalog = sparkSession.sessionState.catalog
-    catalog.getCachedPlan(qualifiedTableName, () => {
-      val pathOption = table.storage.locationUri.map("path" -> CatalogUtils.URIToString(_))
-      val dataSource =
-        DataSource(
-          sparkSession,
-          // In older version(prior to 2.1) of Spark, the table schema can be empty and should be
-          // inferred at runtime. We should still support it.
-          userSpecifiedSchema = if (table.schema.isEmpty) None else Some(table.schema),
-          partitionColumns = table.partitionColumnNames,
-          bucketSpec = table.bucketSpec,
-          className = table.provider.get,
-          options = table.storage.properties ++ pathOption,
-          catalogTable = Some(table))
-      LogicalRelation(dataSource.resolveRelation(checkFilesExist = false), table)
-    })
+    val pathOption = table.storage.locationUri.map("path" -> CatalogUtils.URIToString(_))
+    val dataSource =
+      DataSource(
+        sparkSession,
+        // In older version(prior to 2.1) of Spark, the table schema can be empty and should be
+        // inferred at runtime. We should still support it.
+        userSpecifiedSchema = if (table.schema.isEmpty) None else Some(table.schema),
+        partitionColumns = table.partitionColumnNames,
+        bucketSpec = table.bucketSpec,
+        className = table.provider.get,
+        options = table.storage.properties ++ pathOption,
+        catalogTable = Some(table))
+    if (partialListing) {
+      LogicalRelation(dataSource.resolveRelation(checkFilesExist = false, partialListing), table)
+    } else {
+      catalog.getCachedPlan(qualifiedTableName, () => {
+        LogicalRelation(dataSource.resolveRelation(checkFilesExist = false, partialListing), table)
+      })
+    }
   }
 
   override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
+    case l @ GlobalLimit(_, LocalLimit(limitExpr,
+        Project(projectList, UnresolvedCatalogRelation(tableMeta))))

Review Comment:
   `projectList` should not contain aggregate functions, because in that case the limit applies to the aggregated result rather than to the rows scanned from the table. For example:
   ```sql
   select min(id) from tbl limit 10;
   ```
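
   A minimal sketch of the kind of guard this suggests, assuming Catalyst's `NamedExpression` and `AggregateExpression` types; the helper name is illustrative and not part of this PR:
   ```scala
   import org.apache.spark.sql.catalyst.expressions.NamedExpression
   import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression

   // Hypothetical helper for FindDataSourceTable (not in the PR): true if any
   // project expression contains an aggregate function anywhere in its subtree.
   private def containsAggregate(projectList: Seq[NamedExpression]): Boolean =
     projectList.exists(_.collectFirst { case _: AggregateExpression => () }.isDefined)
   ```
   The new `case` could then carry a guard such as `if !containsAggregate(projectList)`, so queries like the one above fall through to the normal code path.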



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
