jaylmiller commented on code in PR #5658:
URL: https://github.com/apache/arrow-datafusion/pull/5658#discussion_r1143906801


##########
benchmarks/src/lib.rs:
##########
@@ -15,4 +15,116 @@
 // specific language governing permissions and limitations
 // under the License.
 
+use datafusion::DATAFUSION_VERSION;
+use serde::Serialize;
+use serde_json::Value;
+use std::{collections::HashMap, time::SystemTime};
+
 pub mod tpch;
+
+/// Static information about the environment a benchmark was run in,
+/// captured once per run and serialized alongside the results so that
+/// runs can be compared across versions and machines.
+#[derive(Debug, Serialize)]
+pub struct RunContext {
+    /// Benchmark crate version
+    pub benchmark_version: String,
+    /// DataFusion crate version
+    pub datafusion_version: String,
+    /// Number of CPU cores
+    pub num_cpus: usize,
+    /// Start time, in seconds since the UNIX epoch
+    pub start_time: u64,
+    /// CLI arguments, excluding the binary name (argv[0])
+    pub arguments: Vec<String>,
+}
+
+/// The default context is simply a freshly captured one; see [`RunContext::new`].
+impl Default for RunContext {
+    fn default() -> Self {
+        RunContext::new()
+    }
+}
+
+impl RunContext {
+    /// Capture the benchmark environment at the moment of the call:
+    /// crate versions, CPU count, wall-clock start time, and CLI arguments.
+    pub fn new() -> Self {
+        // Seconds since the UNIX epoch; a system clock set before 1970
+        // is considered a configuration bug, hence the `expect`.
+        let start_time = SystemTime::now()
+            .duration_since(SystemTime::UNIX_EPOCH)
+            .expect("current time is later than UNIX_EPOCH")
+            .as_secs();
+        // Drop argv[0] (the binary path) and keep only the arguments proper.
+        let arguments: Vec<String> = std::env::args().skip(1).collect();
+        Self {
+            benchmark_version: env!("CARGO_PKG_VERSION").to_owned(),
+            datafusion_version: DATAFUSION_VERSION.to_owned(),
+            num_cpus: num_cpus::get(),
+            start_time,
+            arguments,
+        }
+    }
+}
+
+/// A single iteration of a benchmark query
+#[derive(Debug, Serialize)]
+struct QueryIter {
+    /// Wall-clock time the iteration took
+    // NOTE(review): unit (seconds vs. milliseconds) is not visible here —
+    // confirm against the code that populates this field.
+    elapsed: f64,
+    /// Number of rows the query produced in this iteration
+    row_count: usize,
+}
+/// A single benchmark case
+#[derive(Debug, Serialize)]
+pub struct BenchQuery {
+    query: String,
+    iterations: Vec<QueryIter>,
+    start_time: u64,

Review Comment:
   sure that sounds good to me



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to