This is an automated email from the ASF dual-hosted git repository.

wenchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new db84869d184 [SPARK-41078][CONNECT] Implement DataFrame `withColumnsRenamed` in Connect proto
db84869d184 is described below

commit db84869d184680a06e8b030fd750f4aeca6ca29a
Author: Rui Wang <rui.w...@databricks.com>
AuthorDate: Thu Nov 10 13:05:02 2022 +0800

    [SPARK-41078][CONNECT] Implement DataFrame `withColumnsRenamed` in Connect proto
    
    ### What changes were proposed in this pull request?
    
    The `RenameColumns` proto was added by https://github.com/apache/spark/pull/38475, but DataFrame `withColumnsRenamed` should use a different proto. This PR:
    1. Adds a proto, `RenameColumnsByNameToNameMap`, for `withColumnsRenamed`.
    2. Renames `RenameColumns` to `RenameColumnsBySameLengthNames` (see the sketch below).
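
    As a rough, hedged sketch (not part of this patch; `spark`, `df`, and the
    column names are illustrative), the two protos mirror the existing
    DataFrame API:

        import org.apache.spark.sql.SparkSession

        val spark = SparkSession.builder().master("local").getOrCreate()
        val df = spark.range(2).selectExpr("id", "id AS name")

        // Positional rename of every column -> RenameColumnsBySameLengthNames.
        val renamedAll = df.toDF("col1", "col2")

        // Map-based rename of selected columns -> RenameColumnsByNameToNameMap.
        val renamedSome = df.withColumnsRenamed(Map("id" -> "id1"))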
    
    ### Why are the changes needed?
    
    Improve API coverage.
    
    ### Does this PR introduce _any_ user-facing change?
    
    NO
    
    ### How was this patch tested?
    
    UT
    
    Closes #38587 from amaliujia/withColumnsRenamed.
    
    Authored-by: Rui Wang <rui.w...@databricks.com>
    Signed-off-by: Wenchen Fan <wenc...@databricks.com>
---
 .../main/protobuf/spark/connect/relations.proto    |  23 +++-
 .../org/apache/spark/sql/connect/dsl/package.scala |  15 ++-
 .../sql/connect/planner/SparkConnectPlanner.scala  |  17 ++-
 .../connect/planner/SparkConnectProtoSuite.scala   |  22 +++-
 python/pyspark/sql/connect/proto/relations_pb2.py  | 134 +++++++++++----------
 python/pyspark/sql/connect/proto/relations_pb2.pyi |  87 +++++++++++--
 6 files changed, 214 insertions(+), 84 deletions(-)

diff --git a/connector/connect/src/main/protobuf/spark/connect/relations.proto b/connector/connect/src/main/protobuf/spark/connect/relations.proto
index cce9f3b939e..8f6ebf1984a 100644
--- a/connector/connect/src/main/protobuf/spark/connect/relations.proto
+++ b/connector/connect/src/main/protobuf/spark/connect/relations.proto
@@ -47,7 +47,8 @@ message Relation {
     Range range = 15;
     SubqueryAlias subquery_alias = 16;
     Repartition repartition = 17;
-    RenameColumns rename_columns = 18;
+    RenameColumnsBySameLengthNames rename_columns_by_same_length_names = 18;
+    RenameColumnsByNameToNameMap rename_columns_by_name_to_name_map = 19;
 
     StatFunction stat_function = 100;
 
@@ -275,8 +276,8 @@ message StatFunction {
   }
 }
 
-// Rename columns on the input relation.
-message RenameColumns {
+// Rename columns on the input relation with a list of new names of the same length as the input schema.
+message RenameColumnsBySameLengthNames {
   // Required. The input relation.
   Relation input = 1;
 
@@ -286,3 +287,19 @@ message RenameColumns {
   // of this field. If this is not true, an exception will be returned.
   repeated string column_names = 2;
 }
+
+
+// Rename columns on the input relation with a name-to-name map (old name -> new name).
+message RenameColumnsByNameToNameMap {
+  // Required. The input relation.
+  Relation input = 1;
+
+
+  // Required.
+  //
+  // Renames columns of the input relation from A to B, where A is the map key
+  // and B is the map value. This is a no-op for any key A that is not present
+  // in the schema; not every input column name needs to appear as a key.
+  // Duplicate values (B) are not allowed.
+  map<string, string> rename_columns_map = 2;
+}
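
A minimal usage sketch of the map semantics documented above (illustrative and
not part of the patch; `df` is any DataFrame with columns "id" and "name"):

    // Renames "id" to "id1"; "name" is untouched.
    df.withColumnsRenamed(Map("id" -> "id1"))
    // No-op: "absent" is not a column of df.
    df.withColumnsRenamed(Map("absent" -> "x"))
    // Fails with "Found duplicate column(s)": both columns would be named "c".
    df.withColumnsRenamed(Map("id" -> "c", "name" -> "c"))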
diff --git a/connector/connect/src/main/scala/org/apache/spark/sql/connect/dsl/package.scala b/connector/connect/src/main/scala/org/apache/spark/sql/connect/dsl/package.scala
index ec14333fdc3..7405d43e86b 100644
--- a/connector/connect/src/main/scala/org/apache/spark/sql/connect/dsl/package.scala
+++ b/connector/connect/src/main/scala/org/apache/spark/sql/connect/dsl/package.scala
@@ -482,13 +482,24 @@ package object dsl {
       def toDF(columnNames: String*): Relation =
         Relation
           .newBuilder()
-          .setRenameColumns(
-            RenameColumns
+          .setRenameColumnsBySameLengthNames(
+            RenameColumnsBySameLengthNames
               .newBuilder()
               .setInput(logicalPlan)
               .addAllColumnNames(columnNames.asJava))
           .build()
 
+      def withColumnsRenamed(renameColumnsMap: Map[String, String]): Relation = {
+        Relation
+          .newBuilder()
+          .setRenameColumnsByNameToNameMap(
+            RenameColumnsByNameToNameMap
+              .newBuilder()
+              .setInput(logicalPlan)
+              .putAllRenameColumnsMap(renameColumnsMap.asJava))
+          .build()
+      }
+
       private def createSetOperation(
           left: Relation,
           right: Relation,
diff --git a/connector/connect/src/main/scala/org/apache/spark/sql/connect/planner/SparkConnectPlanner.scala b/connector/connect/src/main/scala/org/apache/spark/sql/connect/planner/SparkConnectPlanner.scala
index cb0af99e18b..ac25e978582 100644
--- a/connector/connect/src/main/scala/org/apache/spark/sql/connect/planner/SparkConnectPlanner.scala
+++ b/connector/connect/src/main/scala/org/apache/spark/sql/connect/planner/SparkConnectPlanner.scala
@@ -69,8 +69,10 @@ class SparkConnectPlanner(plan: proto.Relation, session: SparkSession) {
       case proto.Relation.RelTypeCase.REPARTITION => transformRepartition(rel.getRepartition)
       case proto.Relation.RelTypeCase.STAT_FUNCTION =>
         transformStatFunction(rel.getStatFunction)
-      case proto.Relation.RelTypeCase.RENAME_COLUMNS =>
-        transformRenameColumns(rel.getRenameColumns)
+      case proto.Relation.RelTypeCase.RENAME_COLUMNS_BY_SAME_LENGTH_NAMES =>
+        transformRenameColumnsBySameLengthNames(rel.getRenameColumnsBySameLengthNames)
+      case proto.Relation.RelTypeCase.RENAME_COLUMNS_BY_NAME_TO_NAME_MAP =>
+        transformRenameColumnsByNameToNameMap(rel.getRenameColumnsByNameToNameMap)
       case proto.Relation.RelTypeCase.RELTYPE_NOT_SET =>
         throw new IndexOutOfBoundsException("Expected Relation to be set, but is empty.")
       case _ => throw InvalidPlanInput(s"${rel.getUnknown} not supported.")
@@ -135,13 +137,22 @@ class SparkConnectPlanner(plan: proto.Relation, session: SparkSession) {
     }
   }
 
-  private def transformRenameColumns(rel: proto.RenameColumns): LogicalPlan = {
+  private def transformRenameColumnsBySameLengthNames(
+      rel: proto.RenameColumnsBySameLengthNames): LogicalPlan = {
     Dataset
       .ofRows(session, transformRelation(rel.getInput))
       .toDF(rel.getColumnNamesList.asScala.toSeq: _*)
       .logicalPlan
   }
 
+  private def transformRenameColumnsByNameToNameMap(
+      rel: proto.RenameColumnsByNameToNameMap): LogicalPlan = {
+    Dataset
+      .ofRows(session, transformRelation(rel.getInput))
+      .withColumnsRenamed(rel.getRenameColumnsMap)
+      .logicalPlan
+  }
+
   private def transformDeduplicate(rel: proto.Deduplicate): LogicalPlan = {
     if (!rel.hasInput) {
       throw InvalidPlanInput("Deduplicate needs a plan input")
diff --git a/connector/connect/src/test/scala/org/apache/spark/sql/connect/planner/SparkConnectProtoSuite.scala b/connector/connect/src/test/scala/org/apache/spark/sql/connect/planner/SparkConnectProtoSuite.scala
index cb22cf1e7e7..3612c5e0d0a 100644
--- a/connector/connect/src/test/scala/org/apache/spark/sql/connect/planner/SparkConnectProtoSuite.scala
+++ b/connector/connect/src/test/scala/org/apache/spark/sql/connect/planner/SparkConnectProtoSuite.scala
@@ -18,7 +18,7 @@ package org.apache.spark.sql.connect.planner
 
 import org.apache.spark.connect.proto
 import org.apache.spark.connect.proto.Join.JoinType
-import org.apache.spark.sql.{Column, DataFrame, Row}
+import org.apache.spark.sql.{AnalysisException, Column, DataFrame, Row}
 import org.apache.spark.sql.catalyst.analysis
 import org.apache.spark.sql.catalyst.expressions.AttributeReference
import org.apache.spark.sql.catalyst.plans.{FullOuter, Inner, LeftAnti, LeftOuter, LeftSemi, PlanTest, RightOuter}
@@ -277,6 +277,26 @@ class SparkConnectProtoSuite extends PlanTest with SparkConnectPlanTest {
     comparePlans(connectTestRelation.toDF("col1", "col2"), sparkTestRelation.toDF("col1", "col2"))
   }
 
+  test("Test withColumnsRenamed") {
+    comparePlans(
+      connectTestRelation.withColumnsRenamed(Map("id" -> "id1")),
+      sparkTestRelation.withColumnsRenamed(Map("id" -> "id1")))
+    comparePlans(
+      connectTestRelation.withColumnsRenamed(Map("id" -> "id1", "name" -> "name1")),
+      sparkTestRelation.withColumnsRenamed(Map("id" -> "id1", "name" -> "name1")))
+    comparePlans(
+      connectTestRelation.withColumnsRenamed(Map("id" -> "id1", "col1" -> "col2")),
+      sparkTestRelation.withColumnsRenamed(Map("id" -> "id1", "col1" -> "col2")))
+    comparePlans(
+      connectTestRelation.withColumnsRenamed(Map("id" -> "id1", "id" -> "id2")),
+      sparkTestRelation.withColumnsRenamed(Map("id" -> "id1", "id" -> "id2")))
+
+    val e = intercept[AnalysisException](
+      transform(connectTestRelation.withColumnsRenamed(
+        Map("id" -> "duplicatedCol", "name" -> "duplicatedCol"))))
+    assert(e.getMessage.contains("Found duplicate column(s)"))
+  }
+
   private def createLocalRelationProtoByQualifiedAttributes(
       attrs: Seq[proto.Expression.QualifiedAttribute]): proto.Relation = {
     val localRelationBuilder = proto.LocalRelation.newBuilder()
diff --git a/python/pyspark/sql/connect/proto/relations_pb2.py b/python/pyspark/sql/connect/proto/relations_pb2.py
index 06b59ea5f45..029f98acb91 100644
--- a/python/pyspark/sql/connect/proto/relations_pb2.py
+++ b/python/pyspark/sql/connect/proto/relations_pb2.py
@@ -32,7 +32,7 @@ from pyspark.sql.connect.proto import expressions_pb2 as spark_dot_connect_dot_e
 
 
 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
-    b'\n\x1dspark/connect/relations.proto\x12\rspark.connect\x1a\x1fspark/connect/expressions.proto"\xd7\x08\n\x08Relation\x12\x35\n\x06\x63ommon\x18\x01 \x01(\x0b\x32\x1d.spark.connect.RelationCommonR\x06\x63ommon\x12)\n\x04read\x18\x02 \x01(\x0b\x32\x13.spark.connect.ReadH\x00R\x04read\x12\x32\n\x07project\x18\x03 \x01(\x0b\x32\x16.spark.connect.ProjectH\x00R\x07project\x12/\n\x06\x66ilter\x18\x04 \x01(\x0b\x32\x15.spark.connect.FilterH\x00R\x06\x66ilter\x12)\n\x04join\x18\x05 \x01(\x0 [...]
+    b'\n\x1dspark/connect/relations.proto\x12\rspark.connect\x1a\x1fspark/connect/expressions.proto"\x87\n\n\x08Relation\x12\x35\n\x06\x63ommon\x18\x01 \x01(\x0b\x32\x1d.spark.connect.RelationCommonR\x06\x63ommon\x12)\n\x04read\x18\x02 \x01(\x0b\x32\x13.spark.connect.ReadH\x00R\x04read\x12\x32\n\x07project\x18\x03 \x01(\x0b\x32\x16.spark.connect.ProjectH\x00R\x07project\x12/\n\x06\x66ilter\x18\x04 \x01(\x0b\x32\x15.spark.connect.FilterH\x00R\x06\x66ilter\x12)\n\x04join\x18\x05 \x01(\x0b\ [...]
 )
 
 _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
@@ -43,68 +43,74 @@ if _descriptor._USE_C_DESCRIPTORS == False:
    DESCRIPTOR._serialized_options = b"\n\036org.apache.spark.connect.protoP\001"
     _READ_DATASOURCE_OPTIONSENTRY._options = None
     _READ_DATASOURCE_OPTIONSENTRY._serialized_options = b"8\001"
+    _RENAMECOLUMNSBYNAMETONAMEMAP_RENAMECOLUMNSMAPENTRY._options = None
+    _RENAMECOLUMNSBYNAMETONAMEMAP_RENAMECOLUMNSMAPENTRY._serialized_options = b"8\001"
     _RELATION._serialized_start = 82
-    _RELATION._serialized_end = 1193
-    _UNKNOWN._serialized_start = 1195
-    _UNKNOWN._serialized_end = 1204
-    _RELATIONCOMMON._serialized_start = 1206
-    _RELATIONCOMMON._serialized_end = 1255
-    _SQL._serialized_start = 1257
-    _SQL._serialized_end = 1284
-    _READ._serialized_start = 1287
-    _READ._serialized_end = 1697
-    _READ_NAMEDTABLE._serialized_start = 1429
-    _READ_NAMEDTABLE._serialized_end = 1490
-    _READ_DATASOURCE._serialized_start = 1493
-    _READ_DATASOURCE._serialized_end = 1684
-    _READ_DATASOURCE_OPTIONSENTRY._serialized_start = 1626
-    _READ_DATASOURCE_OPTIONSENTRY._serialized_end = 1684
-    _PROJECT._serialized_start = 1699
-    _PROJECT._serialized_end = 1816
-    _FILTER._serialized_start = 1818
-    _FILTER._serialized_end = 1930
-    _JOIN._serialized_start = 1933
-    _JOIN._serialized_end = 2383
-    _JOIN_JOINTYPE._serialized_start = 2196
-    _JOIN_JOINTYPE._serialized_end = 2383
-    _SETOPERATION._serialized_start = 2386
-    _SETOPERATION._serialized_end = 2749
-    _SETOPERATION_SETOPTYPE._serialized_start = 2635
-    _SETOPERATION_SETOPTYPE._serialized_end = 2749
-    _LIMIT._serialized_start = 2751
-    _LIMIT._serialized_end = 2827
-    _OFFSET._serialized_start = 2829
-    _OFFSET._serialized_end = 2908
-    _AGGREGATE._serialized_start = 2911
-    _AGGREGATE._serialized_end = 3121
-    _SORT._serialized_start = 3124
-    _SORT._serialized_end = 3655
-    _SORT_SORTFIELD._serialized_start = 3273
-    _SORT_SORTFIELD._serialized_end = 3461
-    _SORT_SORTDIRECTION._serialized_start = 3463
-    _SORT_SORTDIRECTION._serialized_end = 3571
-    _SORT_SORTNULLS._serialized_start = 3573
-    _SORT_SORTNULLS._serialized_end = 3655
-    _DEDUPLICATE._serialized_start = 3658
-    _DEDUPLICATE._serialized_end = 3800
-    _LOCALRELATION._serialized_start = 3802
-    _LOCALRELATION._serialized_end = 3895
-    _SAMPLE._serialized_start = 3898
-    _SAMPLE._serialized_end = 4138
-    _SAMPLE_SEED._serialized_start = 4112
-    _SAMPLE_SEED._serialized_end = 4138
-    _RANGE._serialized_start = 4141
-    _RANGE._serialized_end = 4339
-    _RANGE_NUMPARTITIONS._serialized_start = 4285
-    _RANGE_NUMPARTITIONS._serialized_end = 4339
-    _SUBQUERYALIAS._serialized_start = 4341
-    _SUBQUERYALIAS._serialized_end = 4455
-    _REPARTITION._serialized_start = 4457
-    _REPARTITION._serialized_end = 4582
-    _STATFUNCTION._serialized_start = 4585
-    _STATFUNCTION._serialized_end = 4819
-    _STATFUNCTION_SUMMARY._serialized_start = 4766
-    _STATFUNCTION_SUMMARY._serialized_end = 4807
-    _RENAMECOLUMNS._serialized_start = 4821
-    _RENAMECOLUMNS._serialized_end = 4918
+    _RELATION._serialized_end = 1369
+    _UNKNOWN._serialized_start = 1371
+    _UNKNOWN._serialized_end = 1380
+    _RELATIONCOMMON._serialized_start = 1382
+    _RELATIONCOMMON._serialized_end = 1431
+    _SQL._serialized_start = 1433
+    _SQL._serialized_end = 1460
+    _READ._serialized_start = 1463
+    _READ._serialized_end = 1873
+    _READ_NAMEDTABLE._serialized_start = 1605
+    _READ_NAMEDTABLE._serialized_end = 1666
+    _READ_DATASOURCE._serialized_start = 1669
+    _READ_DATASOURCE._serialized_end = 1860
+    _READ_DATASOURCE_OPTIONSENTRY._serialized_start = 1802
+    _READ_DATASOURCE_OPTIONSENTRY._serialized_end = 1860
+    _PROJECT._serialized_start = 1875
+    _PROJECT._serialized_end = 1992
+    _FILTER._serialized_start = 1994
+    _FILTER._serialized_end = 2106
+    _JOIN._serialized_start = 2109
+    _JOIN._serialized_end = 2559
+    _JOIN_JOINTYPE._serialized_start = 2372
+    _JOIN_JOINTYPE._serialized_end = 2559
+    _SETOPERATION._serialized_start = 2562
+    _SETOPERATION._serialized_end = 2925
+    _SETOPERATION_SETOPTYPE._serialized_start = 2811
+    _SETOPERATION_SETOPTYPE._serialized_end = 2925
+    _LIMIT._serialized_start = 2927
+    _LIMIT._serialized_end = 3003
+    _OFFSET._serialized_start = 3005
+    _OFFSET._serialized_end = 3084
+    _AGGREGATE._serialized_start = 3087
+    _AGGREGATE._serialized_end = 3297
+    _SORT._serialized_start = 3300
+    _SORT._serialized_end = 3831
+    _SORT_SORTFIELD._serialized_start = 3449
+    _SORT_SORTFIELD._serialized_end = 3637
+    _SORT_SORTDIRECTION._serialized_start = 3639
+    _SORT_SORTDIRECTION._serialized_end = 3747
+    _SORT_SORTNULLS._serialized_start = 3749
+    _SORT_SORTNULLS._serialized_end = 3831
+    _DEDUPLICATE._serialized_start = 3834
+    _DEDUPLICATE._serialized_end = 3976
+    _LOCALRELATION._serialized_start = 3978
+    _LOCALRELATION._serialized_end = 4071
+    _SAMPLE._serialized_start = 4074
+    _SAMPLE._serialized_end = 4314
+    _SAMPLE_SEED._serialized_start = 4288
+    _SAMPLE_SEED._serialized_end = 4314
+    _RANGE._serialized_start = 4317
+    _RANGE._serialized_end = 4515
+    _RANGE_NUMPARTITIONS._serialized_start = 4461
+    _RANGE_NUMPARTITIONS._serialized_end = 4515
+    _SUBQUERYALIAS._serialized_start = 4517
+    _SUBQUERYALIAS._serialized_end = 4631
+    _REPARTITION._serialized_start = 4633
+    _REPARTITION._serialized_end = 4758
+    _STATFUNCTION._serialized_start = 4761
+    _STATFUNCTION._serialized_end = 4995
+    _STATFUNCTION_SUMMARY._serialized_start = 4942
+    _STATFUNCTION_SUMMARY._serialized_end = 4983
+    _RENAMECOLUMNSBYSAMELENGTHNAMES._serialized_start = 4997
+    _RENAMECOLUMNSBYSAMELENGTHNAMES._serialized_end = 5111
+    _RENAMECOLUMNSBYNAMETONAMEMAP._serialized_start = 5114
+    _RENAMECOLUMNSBYNAMETONAMEMAP._serialized_end = 5373
+    _RENAMECOLUMNSBYNAMETONAMEMAP_RENAMECOLUMNSMAPENTRY._serialized_start = 5306
+    _RENAMECOLUMNSBYNAMETONAMEMAP_RENAMECOLUMNSMAPENTRY._serialized_end = 5373
 # @@protoc_insertion_point(module_scope)
diff --git a/python/pyspark/sql/connect/proto/relations_pb2.pyi b/python/pyspark/sql/connect/proto/relations_pb2.pyi
index bef74b03659..09de13a8c29 100644
--- a/python/pyspark/sql/connect/proto/relations_pb2.pyi
+++ b/python/pyspark/sql/connect/proto/relations_pb2.pyi
@@ -76,7 +76,8 @@ class Relation(google.protobuf.message.Message):
     RANGE_FIELD_NUMBER: builtins.int
     SUBQUERY_ALIAS_FIELD_NUMBER: builtins.int
     REPARTITION_FIELD_NUMBER: builtins.int
-    RENAME_COLUMNS_FIELD_NUMBER: builtins.int
+    RENAME_COLUMNS_BY_SAME_LENGTH_NAMES_FIELD_NUMBER: builtins.int
+    RENAME_COLUMNS_BY_NAME_TO_NAME_MAP_FIELD_NUMBER: builtins.int
     STAT_FUNCTION_FIELD_NUMBER: builtins.int
     UNKNOWN_FIELD_NUMBER: builtins.int
     @property
@@ -114,7 +115,9 @@ class Relation(google.protobuf.message.Message):
     @property
     def repartition(self) -> global___Repartition: ...
     @property
-    def rename_columns(self) -> global___RenameColumns: ...
+    def rename_columns_by_same_length_names(self) -> global___RenameColumnsBySameLengthNames: ...
+    @property
+    def rename_columns_by_name_to_name_map(self) -> global___RenameColumnsByNameToNameMap: ...
     @property
     def stat_function(self) -> global___StatFunction: ...
     @property
@@ -139,7 +142,8 @@ class Relation(google.protobuf.message.Message):
         range: global___Range | None = ...,
         subquery_alias: global___SubqueryAlias | None = ...,
         repartition: global___Repartition | None = ...,
-        rename_columns: global___RenameColumns | None = ...,
+        rename_columns_by_same_length_names: global___RenameColumnsBySameLengthNames | None = ...,
+        rename_columns_by_name_to_name_map: global___RenameColumnsByNameToNameMap | None = ...,
         stat_function: global___StatFunction | None = ...,
         unknown: global___Unknown | None = ...,
     ) -> None: ...
@@ -170,8 +174,10 @@ class Relation(google.protobuf.message.Message):
             b"read",
             "rel_type",
             b"rel_type",
-            "rename_columns",
-            b"rename_columns",
+            "rename_columns_by_name_to_name_map",
+            b"rename_columns_by_name_to_name_map",
+            "rename_columns_by_same_length_names",
+            b"rename_columns_by_same_length_names",
             "repartition",
             b"repartition",
             "sample",
@@ -217,8 +223,10 @@ class Relation(google.protobuf.message.Message):
             b"read",
             "rel_type",
             b"rel_type",
-            "rename_columns",
-            b"rename_columns",
+            "rename_columns_by_name_to_name_map",
+            b"rename_columns_by_name_to_name_map",
+            "rename_columns_by_same_length_names",
+            b"rename_columns_by_same_length_names",
             "repartition",
             b"repartition",
             "sample",
@@ -256,7 +264,8 @@ class Relation(google.protobuf.message.Message):
         "range",
         "subquery_alias",
         "repartition",
-        "rename_columns",
+        "rename_columns_by_same_length_names",
+        "rename_columns_by_name_to_name_map",
         "stat_function",
         "unknown",
     ] | None: ...
@@ -1143,8 +1152,8 @@ class StatFunction(google.protobuf.message.Message):
 
 global___StatFunction = StatFunction
 
-class RenameColumns(google.protobuf.message.Message):
-    """Rename columns on the input relation."""
+class RenameColumnsBySameLengthNames(google.protobuf.message.Message):
+    """Rename columns on the input relation by the same length of names."""
 
     DESCRIPTOR: google.protobuf.descriptor.Descriptor
 
@@ -1176,4 +1185,60 @@ class RenameColumns(google.protobuf.message.Message):
         field_name: typing_extensions.Literal["column_names", b"column_names", 
"input", b"input"],
     ) -> None: ...
 
-global___RenameColumns = RenameColumns
+global___RenameColumnsBySameLengthNames = RenameColumnsBySameLengthNames
+
+class RenameColumnsByNameToNameMap(google.protobuf.message.Message):
+    """Rename columns on the input relation by a map with name to name 
mapping."""
+
+    DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+    class RenameColumnsMapEntry(google.protobuf.message.Message):
+        DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+        KEY_FIELD_NUMBER: builtins.int
+        VALUE_FIELD_NUMBER: builtins.int
+        key: builtins.str
+        value: builtins.str
+        def __init__(
+            self,
+            *,
+            key: builtins.str = ...,
+            value: builtins.str = ...,
+        ) -> None: ...
+        def ClearField(
+            self, field_name: typing_extensions.Literal["key", b"key", 
"value", b"value"]
+        ) -> None: ...
+
+    INPUT_FIELD_NUMBER: builtins.int
+    RENAME_COLUMNS_MAP_FIELD_NUMBER: builtins.int
+    @property
+    def input(self) -> global___Relation:
+        """Required. The input relation."""
+    @property
+    def rename_columns_map(
+        self,
+    ) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.str]:
+        """Required.
+
+        Renaming column names of input relation from A to B where A is the map 
key
+        and B is the map value. This is a no-op if schema doesn't contain any 
A. It
+        does not require that all input relation column names to present as 
keys.
+        duplicated B are not allowed.
+        """
+    def __init__(
+        self,
+        *,
+        input: global___Relation | None = ...,
+        rename_columns_map: collections.abc.Mapping[builtins.str, builtins.str] | None = ...,
+    ) -> None: ...
+    def HasField(
+        self, field_name: typing_extensions.Literal["input", b"input"]
+    ) -> builtins.bool: ...
+    def ClearField(
+        self,
+        field_name: typing_extensions.Literal[
+            "input", b"input", "rename_columns_map", b"rename_columns_map"
+        ],
+    ) -> None: ...
+
+global___RenameColumnsByNameToNameMap = RenameColumnsByNameToNameMap
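
A hedged sketch of building the new relation with the generated proto builders
directly, mirroring the DSL helper above (`input` stands for any existing
proto.Relation and is an assumption of this sketch):

    import scala.collection.JavaConverters._

    import org.apache.spark.connect.proto.{Relation, RenameColumnsByNameToNameMap}

    // Wrap the input relation in a RenameColumnsByNameToNameMap node that
    // renames "id" to "id1"; the planner resolves this via withColumnsRenamed.
    val rel = Relation
      .newBuilder()
      .setRenameColumnsByNameToNameMap(
        RenameColumnsByNameToNameMap
          .newBuilder()
          .setInput(input)
          .putAllRenameColumnsMap(Map("id" -> "id1").asJava))
      .build()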


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
