sryza commented on PR #51003: URL: https://github.com/apache/spark/pull/51003#issuecomment-2922923993
> We definitely import Dataframe into this file. Hmm you are right. Not sure what's going on here. This is in auto-generated Java code that doesn't appear to have `DataFrame` imported. Can we just take out the square brackets for now to get past this? <details> <summary>Java code – Click to expand</summary> ```java package org.apache.spark.sql.pipelines.graph; /** * Holds the {@link DataFrame} returned by a {@link FlowFunction} along with the inputs used to * construct it. * param: usedBatchInputs the identifiers of the complete inputs read by the flow * param: usedStreamingInputs the identifiers of the incremental inputs read by the flow * param: usedExternalInputs the identifiers of the external inputs read by the flow * param: dataFrame the {@link DataFrame} expression executed by the flow if the flow can be resolved */ public class FlowFunctionResult implements scala.Product, java.io.Serializable { static public abstract R apply (T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7) ; static public java.lang.String toString () { throw new RuntimeException(); } public scala.collection.immutable.Set<org.apache.spark.sql.catalyst.TableIdentifier> requestedInputs () { throw new RuntimeException(); } public scala.collection.immutable.Set<org.apache.spark.sql.pipelines.graph.ResolvedInput> usedBatchInputs () { throw new RuntimeException(); } public scala.collection.immutable.Set<org.apache.spark.sql.pipelines.graph.ResolvedInput> usedStreamingInputs () { throw new RuntimeException(); } public scala.collection.immutable.Set<java.lang.String> usedExternalInputs () { throw new RuntimeException(); } public scala.util.Try<org.apache.spark.sql.classic.Dataset<org.apache.spark.sql.Row>> dataFrame () { throw new RuntimeException(); } public scala.collection.immutable.Map<java.lang.String, java.lang.String> sqlConf () { throw new RuntimeException(); } public scala.collection.immutable.Seq<org.apache.spark.sql.pipelines.AnalysisWarning> analysisWarnings () { throw new 
RuntimeException(); } // not preceding public FlowFunctionResult (scala.collection.immutable.Set<org.apache.spark.sql.catalyst.TableIdentifier> requestedInputs, scala.collection.immutable.Set<org.apache.spark.sql.pipelines.graph.ResolvedInput> usedBatchInputs, scala.collection.immutable.Set<org.apache.spark.sql.pipelines.graph.ResolvedInput> usedStreamingInputs, scala.collection.immutable.Set<java.lang.String> usedExternalInputs, scala.util.Try<org.apache.spark.sql.classic.Dataset<org.apache.spark.sql.Row>> dataFrame, scala.collection.immutable.Map<java.lang.String, java.lang.String> sqlConf, scala.collection.immutable.Seq<org.apache.spark.sql.pipelines.AnalysisWarning> analysisWarnings) { throw new RuntimeException(); } /** * Returns the names of all of the {@link Input}s used when resolving this {@link Flow}. If the * flow failed to resolve, we return all the datasets that were requested when evaluating the * flow. * @return (undocumented) */ public scala.collection.immutable.Set<org.apache.spark.sql.catalyst.TableIdentifier> inputs () { throw new RuntimeException(); } /** Names of {@link Input}s read completely by this {@link Flow}. */ public scala.collection.immutable.Set<org.apache.spark.sql.pipelines.graph.ResolvedInput> batchInputs () { throw new RuntimeException(); } /** Names of {@link Input}s read incrementally by this {@link Flow}. */ public scala.collection.immutable.Set<org.apache.spark.sql.pipelines.graph.ResolvedInput> streamingInputs () { throw new RuntimeException(); } /** Returns errors that occurred when attempting to analyze this {@link Flow}. */ public scala.collection.immutable.Seq<java.lang.Throwable> failure () { throw new RuntimeException(); } /** Whether this {@link Flow} is successfully analyzed. */ public final boolean resolved () { throw new RuntimeException(); } } ``` </details> -- This is an automated message from the Apache Git Service. 
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org For additional commands, e-mail: reviews-h...@spark.apache.org