Yikun commented on code in PR #36464:
URL: https://github.com/apache/spark/pull/36464#discussion_r873426625


##########
python/pyspark/pandas/groupby.py:
##########
@@ -2110,22 +2110,79 @@ def _limit(self, n: int, asc: bool) -> FrameLike:
         groupkey_scols = [psdf._internal.spark_column_for(label) for label in groupkey_labels]
 
         sdf = psdf._internal.spark_frame
-        tmp_col = verify_temp_column_name(sdf, "__row_number__")
+        tmp_row_num_col = verify_temp_column_name(sdf, "__row_number__")
 
+        window = Window.partitionBy(*groupkey_scols)
         # This part is handled differently depending on whether it is a tail or a head.
-        window = (
-            Window.partitionBy(*groupkey_scols).orderBy(F.col(NATURAL_ORDER_COLUMN_NAME).asc())
+        ordered_window = (
+            window.orderBy(F.col(NATURAL_ORDER_COLUMN_NAME).asc())
             if asc
-            else Window.partitionBy(*groupkey_scols).orderBy(
-                F.col(NATURAL_ORDER_COLUMN_NAME).desc()
-            )
+            else window.orderBy(F.col(NATURAL_ORDER_COLUMN_NAME).desc())
         )
 
-        sdf = (
-            sdf.withColumn(tmp_col, F.row_number().over(window))
-            .filter(F.col(tmp_col) <= n)
-            .drop(tmp_col)
-        )
+        if n >= 0 or LooseVersion(pd.__version__) < LooseVersion("1.4.0"):
+
+            sdf = (
+                sdf.withColumn(tmp_row_num_col, F.row_number().over(ordered_window))
+                .filter(F.col(tmp_row_num_col) <= n)
+                .drop(tmp_row_num_col)
+            )
+        else:
+            # Pandas supports Groupby positional indexing since v1.4.0
+            # https://pandas.pydata.org/docs/whatsnew/v1.4.0.html#groupby-positional-indexing
+            #
+            # To support groupby positional indexing, we need to add two columns to help us
+            # filter the target rows:
+            # - Add `__row_number__` and `__group_count__` columns.
+            # - Use `F.col(tmp_row_num_col) - F.col(tmp_cnt_col) <= positional_index_number` to
+            #   filter the target rows.
+            # - Then drop the `__row_number__` and `__group_count__` columns.
+            #
+            # For example, for the DataFrame:
+            # >>> df = ps.DataFrame([["g", "g0"],
+            # ...                   ["g", "g1"],
+            # ...                   ["g", "g2"],
+            # ...                   ["g", "g3"],
+            # ...                   ["h", "h0"],
+            # ...                   ["h", "h1"]], columns=["A", "B"])
+            # >>> df.groupby("A").head(-1)
+            #
+            # Below is an example showing the `__row_number__` and `__group_count__` columns
+            # for the above df:
+            # >>> sdf.withColumn(tmp_row_num_col, F.row_number().over(ordered_window))
+            #        .withColumn(tmp_cnt_col, F.count("*").over(window)).show()
+            # +---------------+------------+---+---+------------+--------------+---------------+
+            # |__index_level..|__groupkey..|  A|  B|__natural_..|__row_number__|__group_count__|
+            # +---------------+------------+---+---+------------+--------------+---------------+
+            # |              0|           g|  g| g0| 17179869184|             1|              4|
+            # |              1|           g|  g| g1| 42949672960|             2|              4|
+            # |              2|           g|  g| g2| 60129542144|             3|              4|
+            # |              3|           g|  g| g3| 85899345920|             4|              4|
+            # |              4|           h|  h| h0|111669149696|             1|              2|
+            # |              5|           h|  h| h1|128849018880|             2|              2|
+            # +---------------+------------+---+---+------------+--------------+---------------+
+            #
+            # The limit n is `-1`, so we need to keep rows[:-1] in each group:
+            #
+            # >>> sdf.withColumn(tmp_row_num_col, F.row_number().over(ordered_window))
+            #        .withColumn(tmp_cnt_col, F.count("*").over(window))
+            #        .filter(F.col(tmp_row_num_col) - F.col(tmp_cnt_col) <= -1).show()
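
A minimal standalone sketch of the `row_number`/`group_count` filtering described in the diff comments above, in plain PySpark (the column names and the `orderBy` key here are illustrative, not the internal pandas-on-Spark ones):

```python
from pyspark.sql import SparkSession, Window
from pyspark.sql import functions as F

spark = SparkSession.builder.getOrCreate()

df = spark.createDataFrame(
    [("g", "g0"), ("g", "g1"), ("g", "g2"), ("g", "g3"), ("h", "h0"), ("h", "h1")],
    ["A", "B"],
)

n = -1  # emulate df.groupby("A").head(-1)
window = Window.partitionBy("A")      # unordered: whole-group count
ordered_window = window.orderBy("B")  # ordered: row_number within the group

result = (
    df.withColumn("__row_number__", F.row_number().over(ordered_window))
    .withColumn("__group_count__", F.count("*").over(window))
    # row_number - group_count <= n  <=>  row_number <= group_count + n,
    # i.e. keep everything but the last |n| rows of each group.
    .filter(F.col("__row_number__") - F.col("__group_count__") <= n)
    .drop("__row_number__", "__group_count__")
)
result.show()  # keeps g0, g1, g2 and h0
```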

Review Comment:
   @zhengruifeng I think `lag` is better here: 1 `WindowExec` + 2 `sort` + 1 `shuffle`, the same cost as the original implementation.
   
   <details><summary>== Physical Plan ==</summary>
   
   ```
   == Physical Plan ==
   AdaptiveSparkPlan isFinalPlan=false
   +- Project [__index_level_0__#0, __groupkey_0__#19L, a#1L, b#2L, c#3L, __natural_order__#8L]
      +- Filter isnull(__tmp_lag__#447)
         +- Window [lag(0, -2, null) windowspecdefinition(__groupkey_0__#19L, __natural_order__#8L ASC NULLS FIRST, specifiedwindowframe(RowFrame, -2, -2)) AS __tmp_lag__#447], [__groupkey_0__#19L], [__natural_order__#8L ASC NULLS FIRST]
            +- Sort [__groupkey_0__#19L ASC NULLS FIRST, __natural_order__#8L ASC NULLS FIRST], false, 0
               +- Exchange hashpartitioning(__groupkey_0__#19L, 200), ENSURE_REQUIREMENTS, [id=#1049]
                  +- Project [__index_level_0__#0, a#1L AS __groupkey_0__#19L, a#1L, b#2L, c#3L, __natural_order__#8L]
                     +- Project [__index_level_0__#0, a#1L, b#2L, c#3L, monotonically_increasing_id() AS __natural_order__#8L]
                        +- Scan ExistingRDD arrow[__index_level_0__#0,a#1L,b#2L,c#3L]
   ```
   </details>
   
   So, I updated it in my PR.
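   
   For reference, a rough sketch of what the lag-based variant could look like inside `_limit` (the `__tmp_lag__` name matches the plan above; the filter direction is my reading of the `head(n < 0)` case, not necessarily the final diff):
   
   ```python
   # Sketch only: assumes the surrounding _limit() context, i.e. sdf,
   # ordered_window, F, and verify_temp_column_name as in the diff above.
   tmp_lag_col = verify_temp_column_name(sdf, "__tmp_lag__")
   sdf = (
       # With n < 0, F.lag(..., n) looks |n| rows *ahead* in the window order,
       # so the last |n| rows of each group get null.
       sdf.withColumn(tmp_lag_col, F.lag(F.lit(0), n).over(ordered_window))
       # Keep only rows that still have a row |n| positions ahead,
       # i.e. rows[:n] of each group for head(n) with n < 0.
       .where(~F.isnull(F.col(tmp_lag_col)))
       .drop(tmp_lag_col)
   )
   ```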


