HyukjinKwon commented on code in PR #39212:
URL: https://github.com/apache/spark/pull/39212#discussion_r1057455800


##########
python/pyspark/sql/connect/client.py:
##########
@@ -60,7 +108,18 @@ class ChannelBuilder:
 
     DEFAULT_PORT = 15002
 
-    def __init__(self, url: str) -> None:
+    def __init__(self, url: str, channelOptions: Optional[List[Tuple[str, Any]]] = None) -> None:
+        """
+        Constructs a new channel builder. This is used to create the proper GRPC channel from
+        the connection string.
+
+        Parameters
+        ----------
+        url : str
+            Spark Connect connection string
+        channelOptions: Optional[List[tuple]]

Review Comment:
   ```suggestion
           channelOptions : list of tuple, optional
   ```
   
   I referred to https://numpydoc.readthedocs.io/en/latest/format.html#parameters

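   For reference, a minimal sketch of the whole docstring under this numpydoc convention (the `channelOptions` description text here is illustrative, not taken from the PR):

   ```python
   from typing import Any, List, Optional, Tuple

   class ChannelBuilder:
       DEFAULT_PORT = 15002

       def __init__(self, url: str, channelOptions: Optional[List[Tuple[str, Any]]] = None) -> None:
           """
           Constructs a new channel builder. This is used to create the proper GRPC channel from
           the connection string.

           Parameters
           ----------
           url : str
               Spark Connect connection string
           channelOptions : list of tuple, optional
               Options forwarded to the underlying gRPC channel (illustrative description).
           """
   ```

   With this signature a caller could pass standard gRPC channel arguments, for example `channelOptions=[("grpc.max_receive_message_length", 64 * 1024 * 1024)]`.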


##########
python/pyspark/sql/connect/client.py:
##########
@@ -377,6 +469,19 @@ def _analyze_plan_request_with_metadata(self) -> pb2.AnalyzePlanRequest:
         return req
 
     def _analyze(self, plan: pb2.Plan, explain_mode: str = "extended") -> AnalyzeResult:
+        """
+        Call the analyze RPC of Spark Connect.

Review Comment:
   ```suggestion
           Call the analyze RPC of Spark Connect.
   
   ```
   
   Otherwise, the documentation rendering is broken.
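
   numpydoc requires a blank line between the one-line summary and the first section header; without it, Sphinx can fold the `Parameters` heading into the summary text instead of rendering a parameter list. A minimal sketch of the two forms:

   ```python
   def _analyze_broken(plan, explain_mode="extended"):
       """
       Call the analyze RPC of Spark Connect.
       Parameters
       ----------
       plan : object
           Proto representation of the plan.
       """

   def _analyze_fixed(plan, explain_mode="extended"):
       """
       Call the analyze RPC of Spark Connect.

       Parameters
       ----------
       plan : object
           Proto representation of the plan.
       """
   ```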



##########
python/pyspark/sql/connect/client.py:
##########
@@ -377,6 +469,19 @@ def _analyze_plan_request_with_metadata(self) -> pb2.AnalyzePlanRequest:
         return req
 
     def _analyze(self, plan: pb2.Plan, explain_mode: str = "extended") -> AnalyzeResult:
+        """
+        Call the analyze RPC of Spark Connect.
+        Parameters
+        ----------
+        plan: pb2.Plan
+           Proto representation of the plan.
+        explain_mode: str
+           Explain mode

Review Comment:
   ```suggestion
           plan : :class:`pyspark.sql.connect.proto.Plan`
              Proto representation of the plan.
           explain_mode : str
              Explain mode
   ```
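
   Combining this with the blank-line fix above, a sketch of the fully corrected docstring (wording taken from the diff; `pb2` and `AnalyzeResult` are the module's existing imports). In rendered HTML the `:class:` role becomes a hyperlink to the Plan API page, whereas a bare `pb2.Plan` stays plain text:

   ```python
   def _analyze(self, plan: pb2.Plan, explain_mode: str = "extended") -> AnalyzeResult:
       """
       Call the analyze RPC of Spark Connect.

       Parameters
       ----------
       plan : :class:`pyspark.sql.connect.proto.Plan`
          Proto representation of the plan.
       explain_mode : str
          Explain mode
       """
   ```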



##########
python/pyspark/sql/connect/client.py:
##########
@@ -397,30 +502,57 @@ def _analyze(self, plan: pb2.Plan, explain_mode: str = "extended") -> AnalyzeRes
         else:  # formatted
             req.explain.explain_mode = pb2.Explain.ExplainMode.FORMATTED
 
-        resp = self._stub.AnalyzePlan(req, metadata=self._builder.metadata())
-        return AnalyzeResult.fromProto(resp)
+        try:
+            resp = self._stub.AnalyzePlan(req, metadata=self._builder.metadata())
+            return AnalyzeResult.fromProto(resp)
+        except grpc.RpcError as rpc_error:
+            self._handle_error(rpc_error)
 
     def _process_batch(self, arrow_batch: pb2.ExecutePlanResponse.ArrowBatch) -> "pandas.DataFrame":
         with pa.ipc.open_stream(arrow_batch.data) as rd:
             return rd.read_pandas()
 
     def _execute(self, req: pb2.ExecutePlanRequest) -> None:
-        for b in self._stub.ExecutePlan(req, metadata=self._builder.metadata()):
-            continue
-        return
+        """
+        Execute the passed request `req` and drop all results.
+
+        Parameters
+        ----------
+        req : pb2.ExecutePlanRequest
+            Proto representation of the plan.
+
+        Returns
+        -------
+

Review Comment:
   ```suggestion
   ```
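
   Beyond the docstring fix, note that this hunk wraps both RPCs in `try`/`except grpc.RpcError` and delegates to `self._handle_error`, whose body is not visible in this diff. A rough sketch of that pattern only (the status-code mapping and exception types below are assumptions, not the PR's actual implementation):

   ```python
   import grpc

   def _handle_error(self, rpc_error: grpc.RpcError) -> None:
       # An RpcError raised by a stub call also implements grpc.Call,
       # so the status code and server-supplied message are available.
       status = rpc_error.code()
       details = rpc_error.details()
       if status == grpc.StatusCode.UNAVAILABLE:
           # Hypothetical mapping: surface connectivity problems distinctly.
           raise ConnectionError(f"Spark Connect server unreachable: {details}") from rpc_error
       # Hypothetical fallback: re-raise with the status name for context.
       raise RuntimeError(f"RPC failed ({status.name}): {details}") from rpc_error
   ```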



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
