This is an automated email from the ASF dual-hosted git repository.

milenkovicm pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/datafusion-ballista.git


The following commit(s) were added to refs/heads/main by this push:
     new b5ce80e4 fix failing documentation (#1339)
b5ce80e4 is described below

commit b5ce80e46053aae761563c0fc97e5e102d8577b5
Author: Marko Milenković <[email protected]>
AuthorDate: Fri Oct 31 10:28:45 2025 +0000

    fix failing documentation (#1339)
---
 ballista/scheduler/src/display.rs               | 2 +-
 ballista/scheduler/src/state/execution_graph.rs | 7 ++++---
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/ballista/scheduler/src/display.rs b/ballista/scheduler/src/display.rs
index ea6d488e..5cdd8802 100644
--- a/ballista/scheduler/src/display.rs
+++ b/ballista/scheduler/src/display.rs
@@ -66,7 +66,7 @@ pub struct DisplayableBallistaExecutionPlan<'a> {
 }
 
 impl<'a> DisplayableBallistaExecutionPlan<'a> {
-    /// Create a wrapper around an [`'ExecutionPlan'] which can be
+    /// Create a wrapper around an ['ExecutionPlan'] which can be
     /// pretty printed with aggregated metrics.
     pub fn new(inner: &'a dyn ExecutionPlan, metrics: &'a Vec<MetricsSet>) -> Self {
         Self { inner, metrics }
diff --git a/ballista/scheduler/src/state/execution_graph.rs b/ballista/scheduler/src/state/execution_graph.rs
index 38339a3d..16744141 100644
--- a/ballista/scheduler/src/state/execution_graph.rs
+++ b/ballista/scheduler/src/state/execution_graph.rs
@@ -63,17 +63,18 @@ use crate::state::task_manager::UpdatedStages;
 ///
 /// This will produce a DataFusion execution plan that looks something like
 ///
-///
+/// ```text
 ///   CoalesceBatchesExec: target_batch_size=4096
 ///     RepartitionExec: partitioning=Hash([Column { name: "id", index: 0 }], 4)
 ///       AggregateExec: mode=Partial, gby=[id\@0 as id], aggr=[SUM(some_table.gmv)]
 ///         TableScan: some_table
+/// ```
 ///
 /// The Ballista `DistributedPlanner` will turn this into a distributed plan by creating a shuffle
 /// boundary (called a "Stage") whenever the underlying plan needs to perform a repartition.
 /// In this case we end up with a distributed plan with two stages:
 ///
-///
+/// ```text
 /// ExecutionGraph[job_id=job, session_id=session, available_tasks=1, complete=false]
 /// =========UnResolvedStage[id=2, children=1]=========
 /// Inputs{1: StageOutput { partition_locations: {}, complete: false }}
@@ -85,7 +86,7 @@ use crate::state::task_manager::UpdatedStages;
 /// ShuffleWriterExec: Some(Hash([Column { name: "id", index: 0 }], 4))
 ///   AggregateExec: mode=Partial, gby=[id\@0 as id], aggr=[SUM(?table?.gmv)]
 ///     TableScan: some_table
-///
+/// ```
 ///
 /// The DAG structure of this `ExecutionGraph` is encoded in the stages. Each stage's `input` field
 /// will indicate which stages it depends on, and each stage's `output_links` will indicate which


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to