Github user felixcheung commented on a diff in the pull request:

    https://github.com/apache/spark/pull/22107#discussion_r210488890
  
    --- Diff: R/pkg/R/DataFrame.R ---
    @@ -2876,6 +2905,37 @@ setMethod("except",
                 dataFrame(excepted)
               })
     
    +#' exceptAll
    +#'
    +#' Return a new SparkDataFrame containing rows in this SparkDataFrame
    +#' but not in another SparkDataFrame while preserving the duplicates.
    +#' This is equivalent to \code{EXCEPT ALL} in SQL. Also as standard in
    +#' SQL, this function resolves columns by position (not by name).
    +#'
    +#' @param x a SparkDataFrame.
    +#' @param y a SparkDataFrame.
    +#' @return A SparkDataFrame containing the result of the except all 
operation.
    +#' @family SparkDataFrame functions
    +#' @aliases exceptAll,SparkDataFrame,SparkDataFrame-method
    +#' @rdname exceptAll
    +#' @name exceptAll
    +#' @examples
    +#'\dontrun{
    +#' sparkR.session()
    +#' df1 <- read.json(path)
    +#' df2 <- read.json(path2)
    +#' exceptAllDF <- exceptAll(df1, df2)
    +#' }
    +#' @rdname exceptAll
    +#' @note exceptAll since 2.4.0
    +setMethod("exceptAll",
    +          signature(x = "SparkDataFrame", y = "SparkDataFrame"),
    +          function(x, y) {
    +            excepted <- callJMethod(x@sdf, "exceptAll", y@sdf)
    +            dataFrame(excepted)
    +          })
    +
    --- End diff ---
    
    nit: remove one of the two empty lines


---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to