This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new a4ca577445d [SPARK-41305][CONNECT] Improve Documentation for Command proto
a4ca577445d is described below

commit a4ca577445d7867db777cbb668f36f44c99ac1ce
Author: Rui Wang <rui.w...@databricks.com>
AuthorDate: Mon Dec 5 09:59:56 2022 +0900

    [SPARK-41305][CONNECT] Improve Documentation for Command proto
    
    ### What changes were proposed in this pull request?
    
    Improve Command.proto documentation by following https://github.com/apache/spark/blob/master/connector/connect/docs/adding-proto-messages.md.
    
    ### Why are the changes needed?
    
    Documentation quality.
    
    ### Does this PR introduce _any_ user-facing change?
    
    NO
    
    ### How was this patch tested?
    
    N/A
    
    Closes #38890 from amaliujia/improve_command_proto_documentation.
    
    Authored-by: Rui Wang <rui.w...@databricks.com>
    Signed-off-by: Hyukjin Kwon <gurwls...@apache.org>
---
 .../src/main/protobuf/spark/connect/commands.proto | 38 +++++++++++++++-------
 python/pyspark/sql/connect/proto/commands_pb2.pyi  | 31 ++++++++++--------
 2 files changed, 45 insertions(+), 24 deletions(-)

diff --git a/connector/connect/src/main/protobuf/spark/connect/commands.proto b/connector/connect/src/main/protobuf/spark/connect/commands.proto
index 086d4d0cc92..650f4fb7fa1 100644
--- a/connector/connect/src/main/protobuf/spark/connect/commands.proto
+++ b/connector/connect/src/main/protobuf/spark/connect/commands.proto
@@ -43,11 +43,19 @@ message Command {
 // TODO(SPARK-40532) It is required to add the interpreter / language version to the command
 //   parameters.
 message CreateScalarFunction {
-  // Fully qualified name of the function including the catalog / schema names.
+  // (Required) Fully qualified name of the function including the catalog / schema names.
   repeated string parts = 1;
+
+  // (Required) the function language.
   FunctionLanguage language = 2;
+
+  // (Required) if this is a temporary function.
   bool temporary = 3;
+
+  // (Optional) A list of argument types. Can be empty when the function does not take an argument.
   repeated DataType argument_types = 4;
+
+  // (Required) the return type of the function.
   DataType return_type = 5;
 
   // How the function body is defined:
@@ -68,16 +76,16 @@ message CreateScalarFunction {
 
 // A command that can create DataFrame global temp view or local temp view.
 message CreateDataFrameViewCommand {
-  // Required. The relation that this view will be built on.
+  // (Required) The relation that this view will be built on.
   Relation input = 1;
 
-  // Required. View name.
+  // (Required) View name.
   string name = 2;
 
-  // Required. Whether this is global temp view or local temp view.
+  // (Required) Whether this is global temp view or local temp view.
   bool is_global = 3;
 
-  // Required.
+  // (Required)
   //
   // If true, and if the view already exists, updates it; if false, and if the view
   // already exists, throws exception.
@@ -86,24 +94,32 @@ message CreateDataFrameViewCommand {
 
 // As writes are not directly handled during analysis and planning, they are modeled as commands.
 message WriteOperation {
-  // The output of the `input` relation will be persisted according to the options.
+  // (Required) The output of the `input` relation will be persisted according to the options.
   Relation input = 1;
-  // Format value according to the Spark documentation. Examples are: text, parquet, delta.
+
+  // (Required) Format value according to the Spark documentation. Examples are: text, parquet, delta.
   string source = 2;
+
   // The destination of the write operation must be either a path or a table.
   oneof save_type {
     string path = 3;
     string table_name = 4;
   }
+
+  // (Required) the save mode.
   SaveMode mode = 5;
-  // List of columns to sort the output by.
+
+  // (Optional) List of columns to sort the output by.
   repeated string sort_column_names = 6;
-  // List of columns for partitioning.
+
+  // (Optional) List of columns for partitioning.
   repeated string partitioning_columns = 7;
-  // Optional bucketing specification. Bucketing must set the number of buckets and the columns
+
+  // (Optional) Bucketing specification. Bucketing must set the number of buckets and the columns
   // to bucket by.
   BucketBy bucket_by = 8;
-  // Optional list of configuration options.
+
+  // (Optional) A list of configuration options.
   map<string, string> options = 9;
 
   message BucketBy {
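
For illustration, here is a minimal sketch of how a client might populate the WriteOperation message documented above through the generated Python bindings under pyspark.sql.connect.proto. The field names are taken from the definitions in this diff; the concrete format, path, and column values are hypothetical:

    from pyspark.sql.connect.proto import commands_pb2, relations_pb2

    write_op = commands_pb2.WriteOperation()
    write_op.input.CopyFrom(relations_pb2.Relation())  # (Required) relation whose output is persisted
    write_op.source = "parquet"                        # (Required) format value, e.g. text, parquet, delta
    write_op.path = "/tmp/example_output"              # sets one arm of the save_type oneof (path vs. table_name)
    write_op.sort_column_names.extend(["id"])          # (Optional) may be left empty
    write_op.partitioning_columns.extend(["date"])     # (Optional) may be left empty
    write_op.options["compression"] = "snappy"         # (Optional) configuration options
    # mode is also (Required); its SaveMode enum value names are not shown in this diff.
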
diff --git a/python/pyspark/sql/connect/proto/commands_pb2.pyi b/python/pyspark/sql/connect/proto/commands_pb2.pyi
index 9b9880e0b93..c3c7d31222b 100644
--- a/python/pyspark/sql/connect/proto/commands_pb2.pyi
+++ b/python/pyspark/sql/connect/proto/commands_pb2.pyi
@@ -153,17 +153,21 @@ class CreateScalarFunction(google.protobuf.message.Message):
     def parts(
         self,
     ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:
-        """Fully qualified name of the function including the catalog / schema 
names."""
+        """(Required) Fully qualified name of the function including the 
catalog / schema names."""
     language: global___CreateScalarFunction.FunctionLanguage.ValueType
+    """(Required) the function language."""
     temporary: builtins.bool
+    """(Required) if this is a temporary function."""
     @property
     def argument_types(
         self,
     ) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[
         pyspark.sql.connect.proto.types_pb2.DataType
-    ]: ...
+    ]:
+        """(Optional) A list of argument types. Can be empty when the function 
does not take an argument."""
     @property
-    def return_type(self) -> pyspark.sql.connect.proto.types_pb2.DataType: ...
+    def return_type(self) -> pyspark.sql.connect.proto.types_pb2.DataType:
+        """(Required) the return type of the function."""
     serialized_function: builtins.bytes
     """As a raw string serialized:"""
     literal_string: builtins.str
@@ -231,13 +235,13 @@ class CreateDataFrameViewCommand(google.protobuf.message.Message):
     REPLACE_FIELD_NUMBER: builtins.int
     @property
     def input(self) -> pyspark.sql.connect.proto.relations_pb2.Relation:
-        """Required. The relation that this view will be built on."""
+        """(Required) The relation that this view will be built on."""
     name: builtins.str
-    """Required. View name."""
+    """(Required) View name."""
     is_global: builtins.bool
-    """Required. Whether this is global temp view or local temp view."""
+    """(Required) Whether this is global temp view or local temp view."""
     replace: builtins.bool
-    """Required.
+    """(Required)
 
     If true, and if the view already exists, updates it; if false, and if the view
     already exists, throws exception.
@@ -342,30 +346,31 @@ class WriteOperation(google.protobuf.message.Message):
     OPTIONS_FIELD_NUMBER: builtins.int
     @property
     def input(self) -> pyspark.sql.connect.proto.relations_pb2.Relation:
-        """The output of the `input` relation will be persisted according to 
the options."""
+        """(Required) The output of the `input` relation will be persisted 
according to the options."""
     source: builtins.str
-    """Format value according to the Spark documentation. Examples are: text, 
parquet, delta."""
+    """(Required) Format value according to the Spark documentation. Examples 
are: text, parquet, delta."""
     path: builtins.str
     table_name: builtins.str
     mode: global___WriteOperation.SaveMode.ValueType
+    """(Required) the save mode."""
     @property
     def sort_column_names(
         self,
     ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:
-        """List of columns to sort the output by."""
+        """(Optional) List of columns to sort the output by."""
     @property
     def partitioning_columns(
         self,
     ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:
-        """List of columns for partitioning."""
+        """(Optional) List of columns for partitioning."""
     @property
     def bucket_by(self) -> global___WriteOperation.BucketBy:
-        """Optional bucketing specification. Bucketing must set the number of 
buckets and the columns
+        """(Optional) Bucketing specification. Bucketing must set the number 
of buckets and the columns
         to bucket by.
         """
     @property
     def options(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.str]:
-        """Optional list of configuration options."""
+        """(Optional) A list of configuration options."""
     def __init__(
         self,
         *,

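As a companion sketch, constructing the CreateDataFrameViewCommand documented above, again assuming the same generated bindings; the view name here is hypothetical:

    from pyspark.sql.connect.proto import commands_pb2, relations_pb2

    view_cmd = commands_pb2.CreateDataFrameViewCommand(
        input=relations_pb2.Relation(),  # (Required) relation the view is built on
        name="example_view",             # (Required) view name (hypothetical)
        is_global=False,                 # (Required) False selects a local temp view
        replace=True,                    # (Required) update the view if it already exists
    )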
