This is an automated email from the ASF dual-hosted git repository.
chengpan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kyuubi.git
The following commit(s) were added to refs/heads/master by this push:
new f22c73f9d [KYUUBI #4937] [FOLLOWUP] Remove redundant quoteIfNeeded
method
f22c73f9d is described below
commit f22c73f9dbf3fad3693e7339d1c890f61dfbbb9b
Author: liangbowen <[email protected]>
AuthorDate: Sun Jun 18 14:17:15 2023 +0800
[KYUUBI #4937] [FOLLOWUP] Remove redundant quoteIfNeeded method
### _Why are the changes needed?_
- Remove redundant quoteIfNeeded method
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including
negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4973 from bowenliang123/redundant-quoteifneeded.
Closes #4937
acec0fb09 [liangbowen] Remove redundant quoteIfNeeded method
Authored-by: liangbowen <[email protected]>
Signed-off-by: Cheng Pan <[email protected]>
---
.../org/apache/spark/sql/kyuubi/SparkDatasetHelper.scala | 14 +-------------
1 file changed, 1 insertion(+), 13 deletions(-)
diff --git
a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/kyuubi/SparkDatasetHelper.scala
b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/kyuubi/SparkDatasetHelper.scala
index 170f108b2..c0f9d61c2 100644
---
a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/kyuubi/SparkDatasetHelper.scala
+++
b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/kyuubi/SparkDatasetHelper.scala
@@ -26,7 +26,6 @@ import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.{CollectLimitExec, LocalTableScanExec, SparkPlan, SQLExecution}
-import org.apache.spark.sql.execution.{CollectLimitExec, SparkPlan, SQLExecution}
import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec
import org.apache.spark.sql.execution.arrow.KyuubiArrowConverters
import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics}
@@ -35,6 +34,7 @@ import org.apache.spark.sql.types._
import org.apache.kyuubi.engine.spark.KyuubiSparkUtil
import org.apache.kyuubi.engine.spark.schema.RowSet
+import org.apache.kyuubi.engine.spark.util.SparkCatalogUtils.quoteIfNeeded
import org.apache.kyuubi.util.reflect.DynMethods
import org.apache.kyuubi.util.reflect.ReflectUtils._
@@ -133,18 +133,6 @@ object SparkDatasetHelper extends Logging {
df.select(cols: _*)
}
- /**
- * Fork from Apache Spark-3.3.1 org.apache.spark.sql.catalyst.util.quoteIfNeeded to adapt to
- * Spark-3.1.x
- */
- private def quoteIfNeeded(part: String): String = {
- if (part.matches("[a-zA-Z0-9_]+") && !part.matches("\\d+")) {
- part
- } else {
- s"`${part.replace("`", "``")}`"
- }
- }
-
private lazy val maxBatchSize: Long = {
// respect spark connect config
KyuubiSparkUtil.globalSparkContext