Github user maropu commented on a diff in the pull request: https://github.com/apache/spark/pull/18300#discussion_r123416779 --- Diff: sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala --- @@ -1764,6 +1765,70 @@ class Dataset[T] private[sql]( } /** + * Returns a new Dataset containing union of rows in this Dataset and another Dataset. + * + * This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set + * union (that does deduplication of elements), use this function followed by a [[distinct]]. + * + * The difference between this function and [[union]] is that this function + * resolves columns by name (not by position): + * + * {{{ + * val df1 = Seq((1, 2, 3)).toDF("col0", "col1", "col2") + * val df2 = Seq((4, 5, 6)).toDF("col1", "col2", "col0") + * df1.unionByName(df2).show + * + * // output: + * // +----+----+----+ + * // |col0|col1|col2| + * // +----+----+----+ + * // | 1| 2| 3| + * // | 6| 4| 5| + * // +----+----+----+ + * }}} + * + * @group typedrel + * @since 2.3.0 + */ + def unionByName(other: Dataset[T]): Dataset[T] = withSetOperator { + // Resolves children first to reorder output attributes in `other` by name + val leftPlan = sparkSession.sessionState.executePlan(logicalPlan) + val rightPlan = sparkSession.sessionState.executePlan(other.logicalPlan) + leftPlan.assertAnalyzed() + rightPlan.assertAnalyzed() + + // Check column name duplication + val resolver = sparkSession.sessionState.analyzer.resolver + val leftOutputAttrs = leftPlan.analyzed.output + val rightOutputAttrs = rightPlan.analyzed.output + // SchemaUtils.checkColumnNameDuplication( + // leftOutputAttrs.map(_.name), + // "in the left attributes", + // sparkSession.sessionState.conf.caseSensitiveAnalysis) + // SchemaUtils.checkColumnNameDuplication( + // rightOutputAttrs.map(_.name), + // "in the right attributes", + // sparkSession.sessionState.conf.caseSensitiveAnalysis) --- End diff -- The function to check name duplication is discussed in #17758. 
I'm planning to use that function to check for column-name duplication and then perform the union-by-name operation. See the discussion: https://github.com/apache/spark/pull/18300#discussion_r122116812
--- If your project is set up for it, you can reply to this email and have your reply appear on GitHub as well. If your project does not have this feature enabled and wishes so, or if the feature is enabled but not working, please contact infrastructure at infrastruct...@apache.org or file a JIRA ticket with INFRA. --- --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org For additional commands, e-mail: reviews-h...@spark.apache.org