This is an automated email from the ASF dual-hosted git repository.

alamb pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/arrow-datafusion.git


The following commit(s) were added to refs/heads/main by this push:
     new f16bc8bd93 Rename `SessionContext::with_config_rt` to 
`SessionContext::new_with_config_from_rt`, etc (#7631)
f16bc8bd93 is described below

commit f16bc8bd93b9ea29c8f84f6e0ddf2f3500503c19
Author: Andrew Lamb <[email protected]>
AuthorDate: Wed Oct 4 14:32:40 2023 -0400

    Rename `SessionContext::with_config_rt` to 
`SessionContext::new_with_config_from_rt`, etc (#7631)
    
    * Rename `SessionContext::with_config_rt` to 
`SessionContext::new_with_config_from_rt`, etc
    
    * update other references
    
    * Apply suggestions from code review
    
    Co-authored-by: Liang-Chi Hsieh <[email protected]>
    
    ---------
    
    Co-authored-by: Liang-Chi Hsieh <[email protected]>
---
 benchmarks/src/bin/h2o.rs                          |   2 +-
 benchmarks/src/clickbench.rs                       |   2 +-
 benchmarks/src/parquet_filter.rs                   |   2 +-
 benchmarks/src/sort.rs                             |   2 +-
 benchmarks/src/tpch/convert.rs                     |   2 +-
 benchmarks/src/tpch/run.rs                         |   2 +-
 datafusion-cli/src/main.rs                         |   2 +-
 datafusion-examples/examples/flight_sql_server.rs  |   2 +-
 datafusion/core/benches/parquet_query_sql.rs       |   2 +-
 datafusion/core/benches/sort_limit_query_sql.rs    |   5 +-
 datafusion/core/benches/sql_query_with_io.rs       |   2 +-
 datafusion/core/benches/topk_aggregate.rs          |   2 +-
 datafusion/core/src/dataframe.rs                   |   4 +-
 datafusion/core/src/datasource/file_format/avro.rs |   2 +-
 datafusion/core/src/datasource/file_format/csv.rs  |  18 ++--
 datafusion/core/src/datasource/file_format/json.rs |   2 +-
 .../core/src/datasource/file_format/parquet.rs     |   4 +-
 datafusion/core/src/datasource/listing/table.rs    |   6 +-
 .../core/src/datasource/physical_plan/csv.rs       |   5 +-
 .../core/src/datasource/physical_plan/json.rs      |   5 +-
 .../core/src/datasource/physical_plan/parquet.rs   |   5 +-
 datafusion/core/src/datasource/view.rs             |  18 ++--
 datafusion/core/src/execution/context.rs           | 100 ++++++++++++++-------
 .../core/src/physical_optimizer/enforce_sorting.rs |   2 +-
 datafusion/core/src/physical_planner.rs            |   2 +-
 datafusion/core/tests/dataframe/mod.rs             |   2 +-
 datafusion/core/tests/fifo.rs                      |   6 +-
 datafusion/core/tests/fuzz_cases/aggregate_fuzz.rs |   2 +-
 datafusion/core/tests/fuzz_cases/join_fuzz.rs      |   2 +-
 datafusion/core/tests/fuzz_cases/merge_fuzz.rs     |   2 +-
 datafusion/core/tests/fuzz_cases/sort_fuzz.rs      |   4 +-
 .../fuzz_cases/sort_preserving_repartition_fuzz.rs |   2 +-
 datafusion/core/tests/fuzz_cases/window_fuzz.rs    |   2 +-
 datafusion/core/tests/memory_limit.rs              |   4 +-
 datafusion/core/tests/parquet/file_statistics.rs   |   2 +-
 datafusion/core/tests/parquet/filter_pushdown.rs   |   2 +-
 datafusion/core/tests/parquet/mod.rs               |   2 +-
 datafusion/core/tests/sql/aggregates.rs            |   2 +-
 datafusion/core/tests/sql/create_drop.rs           |   8 +-
 datafusion/core/tests/sql/displayable.rs           |   2 +-
 datafusion/core/tests/sql/explain_analyze.rs       |  12 +--
 datafusion/core/tests/sql/group_by.rs              |   2 +-
 datafusion/core/tests/sql/joins.rs                 |   6 +-
 datafusion/core/tests/sql/mod.rs                   |   7 +-
 datafusion/core/tests/sql/order.rs                 |   2 +-
 datafusion/core/tests/sql/partitioned_csv.rs       |   3 +-
 datafusion/core/tests/sql/repartition.rs           |   2 +-
 datafusion/core/tests/sql/select.rs                |   5 +-
 datafusion/core/tests/tpcds_planning.rs            |   2 +-
 .../core/tests/user_defined/user_defined_plan.rs   |   4 +-
 .../proto/tests/cases/roundtrip_logical_plan.rs    |   4 +-
 datafusion/sqllogictest/src/test_context.rs        |   2 +-
 .../tests/cases/roundtrip_logical_plan.rs          |   4 +-
 53 files changed, 169 insertions(+), 130 deletions(-)

diff --git a/benchmarks/src/bin/h2o.rs b/benchmarks/src/bin/h2o.rs
index d75f9a30b4..1bb8cb9d43 100644
--- a/benchmarks/src/bin/h2o.rs
+++ b/benchmarks/src/bin/h2o.rs
@@ -72,7 +72,7 @@ async fn group_by(opt: &GroupBy) -> Result<()> {
     let mut config = ConfigOptions::from_env()?;
     config.execution.batch_size = 65535;
 
-    let ctx = SessionContext::with_config(config.into());
+    let ctx = SessionContext::new_with_config(config.into());
 
     let schema = Schema::new(vec![
         Field::new("id1", DataType::Utf8, false),
diff --git a/benchmarks/src/clickbench.rs b/benchmarks/src/clickbench.rs
index 98ef6dd805..a6d32eb39f 100644
--- a/benchmarks/src/clickbench.rs
+++ b/benchmarks/src/clickbench.rs
@@ -81,7 +81,7 @@ impl RunOpt {
         };
 
         let config = self.common.config();
-        let ctx = SessionContext::with_config(config);
+        let ctx = SessionContext::new_with_config(config);
         self.register_hits(&ctx).await?;
 
         let iterations = self.common.iterations;
diff --git a/benchmarks/src/parquet_filter.rs b/benchmarks/src/parquet_filter.rs
index ceea12de92..e19596b80f 100644
--- a/benchmarks/src/parquet_filter.rs
+++ b/benchmarks/src/parquet_filter.rs
@@ -144,7 +144,7 @@ impl RunOpt {
                 ));
                 for i in 0..self.common.iterations {
                     let config = 
self.common.update_config(scan_options.config());
-                    let ctx = SessionContext::with_config(config);
+                    let ctx = SessionContext::new_with_config(config);
 
                     let (rows, elapsed) = exec_scan(
                         &ctx,
diff --git a/benchmarks/src/sort.rs b/benchmarks/src/sort.rs
index d1baae8687..5643c85619 100644
--- a/benchmarks/src/sort.rs
+++ b/benchmarks/src/sort.rs
@@ -150,7 +150,7 @@ impl RunOpt {
             for i in 0..self.common.iterations {
                 let config =
                     
SessionConfig::new().with_target_partitions(self.common.partitions);
-                let ctx = SessionContext::with_config(config);
+                let ctx = SessionContext::new_with_config(config);
                 let (rows, elapsed) =
                     exec_sort(&ctx, &expr, &test_file, 
self.common.debug).await?;
                 let ms = elapsed.as_secs_f64() * 1000.0;
diff --git a/benchmarks/src/tpch/convert.rs b/benchmarks/src/tpch/convert.rs
index f1ed081c43..2fc74ce388 100644
--- a/benchmarks/src/tpch/convert.rs
+++ b/benchmarks/src/tpch/convert.rs
@@ -78,7 +78,7 @@ impl ConvertOpt {
                 .file_extension(".tbl");
 
             let config = SessionConfig::new().with_batch_size(self.batch_size);
-            let ctx = SessionContext::with_config(config);
+            let ctx = SessionContext::new_with_config(config);
 
             // build plan to read the TBL file
             let mut csv = ctx.read_csv(&input_path, options).await?;
diff --git a/benchmarks/src/tpch/run.rs b/benchmarks/src/tpch/run.rs
index cf5c7b9f67..171b074d2a 100644
--- a/benchmarks/src/tpch/run.rs
+++ b/benchmarks/src/tpch/run.rs
@@ -110,7 +110,7 @@ impl RunOpt {
             .common
             .config()
             .with_collect_statistics(!self.disable_statistics);
-        let ctx = SessionContext::with_config(config);
+        let ctx = SessionContext::new_with_config(config);
 
         // register tables
         self.register_tables(&ctx).await?;
diff --git a/datafusion-cli/src/main.rs b/datafusion-cli/src/main.rs
index 33a1caeb1b..c069f458f1 100644
--- a/datafusion-cli/src/main.rs
+++ b/datafusion-cli/src/main.rs
@@ -178,7 +178,7 @@ pub async fn main() -> Result<()> {
     let runtime_env = create_runtime_env(rn_config.clone())?;
 
     let mut ctx =
-        SessionContext::with_config_rt(session_config.clone(), 
Arc::new(runtime_env));
+        SessionContext::new_with_config_rt(session_config.clone(), 
Arc::new(runtime_env));
     ctx.refresh_catalogs().await?;
     // install dynamic catalog provider that knows how to open files
     ctx.register_catalog_list(Arc::new(DynamicFileCatalog::new(
diff --git a/datafusion-examples/examples/flight_sql_server.rs 
b/datafusion-examples/examples/flight_sql_server.rs
index aad63fc2bc..ed5b86d0b6 100644
--- a/datafusion-examples/examples/flight_sql_server.rs
+++ b/datafusion-examples/examples/flight_sql_server.rs
@@ -105,7 +105,7 @@ impl FlightSqlServiceImpl {
         let session_config = SessionConfig::from_env()
             .map_err(|e| Status::internal(format!("Error building plan: 
{e}")))?
             .with_information_schema(true);
-        let ctx = Arc::new(SessionContext::with_config(session_config));
+        let ctx = Arc::new(SessionContext::new_with_config(session_config));
 
         let testdata = datafusion::test_util::parquet_test_data();
 
diff --git a/datafusion/core/benches/parquet_query_sql.rs 
b/datafusion/core/benches/parquet_query_sql.rs
index 876b1fe7e1..6c9ab31576 100644
--- a/datafusion/core/benches/parquet_query_sql.rs
+++ b/datafusion/core/benches/parquet_query_sql.rs
@@ -193,7 +193,7 @@ fn criterion_benchmark(c: &mut Criterion) {
 
     let partitions = 4;
     let config = SessionConfig::new().with_target_partitions(partitions);
-    let context = SessionContext::with_config(config);
+    let context = SessionContext::new_with_config(config);
 
     let local_rt = tokio::runtime::Builder::new_current_thread()
         .build()
diff --git a/datafusion/core/benches/sort_limit_query_sql.rs 
b/datafusion/core/benches/sort_limit_query_sql.rs
index 6216006714..efed5a04e7 100644
--- a/datafusion/core/benches/sort_limit_query_sql.rs
+++ b/datafusion/core/benches/sort_limit_query_sql.rs
@@ -86,8 +86,9 @@ fn create_context() -> Arc<Mutex<SessionContext>> {
 
     rt.block_on(async {
         // create local session context
-        let ctx =
-            
SessionContext::with_config(SessionConfig::new().with_target_partitions(1));
+        let ctx = SessionContext::new_with_config(
+            SessionConfig::new().with_target_partitions(1),
+        );
 
         let table_provider = Arc::new(csv.await);
         let mem_table = MemTable::load(table_provider, Some(partitions), 
&ctx.state())
diff --git a/datafusion/core/benches/sql_query_with_io.rs 
b/datafusion/core/benches/sql_query_with_io.rs
index 1d96df0cec..1f9b4dc6cc 100644
--- a/datafusion/core/benches/sql_query_with_io.rs
+++ b/datafusion/core/benches/sql_query_with_io.rs
@@ -120,7 +120,7 @@ async fn setup_context(object_store: Arc<dyn ObjectStore>) 
-> SessionContext {
     let config = SessionConfig::new().with_target_partitions(THREADS);
     let rt = Arc::new(RuntimeEnv::default());
     rt.register_object_store(&Url::parse("data://my_store").unwrap(), 
object_store);
-    let context = SessionContext::with_config_rt(config, rt);
+    let context = SessionContext::new_with_config_rt(config, rt);
 
     for table_id in 0..TABLES {
         let table_name = table_name(table_id);
diff --git a/datafusion/core/benches/topk_aggregate.rs 
b/datafusion/core/benches/topk_aggregate.rs
index f50a8ec047..eaafea2488 100644
--- a/datafusion/core/benches/topk_aggregate.rs
+++ b/datafusion/core/benches/topk_aggregate.rs
@@ -45,7 +45,7 @@ async fn create_context(
     let mut cfg = SessionConfig::new();
     let opts = cfg.options_mut();
     opts.optimizer.enable_topk_aggregation = use_topk;
-    let ctx = SessionContext::with_config(cfg);
+    let ctx = SessionContext::new_with_config(cfg);
     let _ = ctx.register_table("traces", mem_table)?;
     let sql = format!("select trace_id, max(timestamp_ms) from traces group by 
trace_id order by max(timestamp_ms) desc limit {limit};");
     let df = ctx.sql(sql.as_str()).await?;
diff --git a/datafusion/core/src/dataframe.rs b/datafusion/core/src/dataframe.rs
index 640f57f3d5..f649080112 100644
--- a/datafusion/core/src/dataframe.rs
+++ b/datafusion/core/src/dataframe.rs
@@ -1238,7 +1238,7 @@ impl DataFrame {
     /// # }
     /// ```
     pub async fn cache(self) -> Result<DataFrame> {
-        let context = SessionContext::with_state(self.session_state.clone());
+        let context = 
SessionContext::new_with_state(self.session_state.clone());
         let mem_table = MemTable::try_new(
             SchemaRef::from(self.schema().clone()),
             self.collect_partitioned().await?,
@@ -2011,7 +2011,7 @@ mod tests {
                 "datafusion.sql_parser.enable_ident_normalization".to_owned(),
                 "false".to_owned(),
             )]))?;
-        let mut ctx = SessionContext::with_config(config);
+        let mut ctx = SessionContext::new_with_config(config);
         let name = "aggregate_test_100";
         register_aggregate_csv(&mut ctx, name).await?;
         let df = ctx.table(name);
diff --git a/datafusion/core/src/datasource/file_format/avro.rs 
b/datafusion/core/src/datasource/file_format/avro.rs
index e68a4cad22..cc6a03ba3f 100644
--- a/datafusion/core/src/datasource/file_format/avro.rs
+++ b/datafusion/core/src/datasource/file_format/avro.rs
@@ -112,7 +112,7 @@ mod tests {
     #[tokio::test]
     async fn read_small_batches() -> Result<()> {
         let config = SessionConfig::new().with_batch_size(2);
-        let session_ctx = SessionContext::with_config(config);
+        let session_ctx = SessionContext::new_with_config(config);
         let state = session_ctx.state();
         let task_ctx = state.task_ctx();
         let projection = None;
diff --git a/datafusion/core/src/datasource/file_format/csv.rs 
b/datafusion/core/src/datasource/file_format/csv.rs
index 897174659e..c3295042b5 100644
--- a/datafusion/core/src/datasource/file_format/csv.rs
+++ b/datafusion/core/src/datasource/file_format/csv.rs
@@ -622,7 +622,7 @@ mod tests {
     #[tokio::test]
     async fn read_small_batches() -> Result<()> {
         let config = SessionConfig::new().with_batch_size(2);
-        let session_ctx = SessionContext::with_config(config);
+        let session_ctx = SessionContext::new_with_config(config);
         let state = session_ctx.state();
         let task_ctx = state.task_ctx();
         // skip column 9 that overflows the automaticly discovered column type 
of i64 (u64 would work)
@@ -960,7 +960,7 @@ mod tests {
             .with_repartition_file_scans(true)
             .with_repartition_file_min_size(0)
             .with_target_partitions(n_partitions);
-        let ctx = SessionContext::with_config(config);
+        let ctx = SessionContext::new_with_config(config);
         let testdata = arrow_test_data();
         ctx.register_csv(
             "aggr",
@@ -997,7 +997,7 @@ mod tests {
             .has_header(true)
             .file_compression_type(FileCompressionType::GZIP)
             .file_extension("csv.gz");
-        let ctx = SessionContext::with_config(config);
+        let ctx = SessionContext::new_with_config(config);
         let testdata = arrow_test_data();
         ctx.register_csv(
             "aggr",
@@ -1033,7 +1033,7 @@ mod tests {
             .with_repartition_file_scans(true)
             .with_repartition_file_min_size(0)
             .with_target_partitions(n_partitions);
-        let ctx = SessionContext::with_config(config);
+        let ctx = SessionContext::new_with_config(config);
         ctx.register_csv(
             "empty",
             "tests/data/empty_0_byte.csv",
@@ -1066,7 +1066,7 @@ mod tests {
             .with_repartition_file_scans(true)
             .with_repartition_file_min_size(0)
             .with_target_partitions(n_partitions);
-        let ctx = SessionContext::with_config(config);
+        let ctx = SessionContext::new_with_config(config);
         ctx.register_csv(
             "empty",
             "tests/data/empty.csv",
@@ -1104,7 +1104,7 @@ mod tests {
             .with_repartition_file_scans(true)
             .with_repartition_file_min_size(0)
             .with_target_partitions(n_partitions);
-        let ctx = SessionContext::with_config(config);
+        let ctx = SessionContext::new_with_config(config);
         let file_format = CsvFormat::default().with_has_header(false);
         let listing_options = ListingOptions::new(Arc::new(file_format))
             .with_file_extension(FileType::CSV.get_ext());
@@ -1157,7 +1157,7 @@ mod tests {
             .with_repartition_file_scans(true)
             .with_repartition_file_min_size(0)
             .with_target_partitions(n_partitions);
-        let ctx = SessionContext::with_config(config);
+        let ctx = SessionContext::new_with_config(config);
         let file_format = CsvFormat::default().with_has_header(false);
         let listing_options = ListingOptions::new(Arc::new(file_format))
             .with_file_extension(FileType::CSV.get_ext());
@@ -1202,7 +1202,7 @@ mod tests {
             .with_repartition_file_scans(true)
             .with_repartition_file_min_size(0)
             .with_target_partitions(n_partitions);
-        let ctx = SessionContext::with_config(config);
+        let ctx = SessionContext::new_with_config(config);
 
         ctx.register_csv(
             "one_col",
@@ -1251,7 +1251,7 @@ mod tests {
             .with_repartition_file_scans(true)
             .with_repartition_file_min_size(0)
             .with_target_partitions(n_partitions);
-        let ctx = SessionContext::with_config(config);
+        let ctx = SessionContext::new_with_config(config);
         ctx.register_csv(
             "wide_rows",
             "tests/data/wide_rows.csv",
diff --git a/datafusion/core/src/datasource/file_format/json.rs 
b/datafusion/core/src/datasource/file_format/json.rs
index c715317a95..96fd4daa2d 100644
--- a/datafusion/core/src/datasource/file_format/json.rs
+++ b/datafusion/core/src/datasource/file_format/json.rs
@@ -389,7 +389,7 @@ mod tests {
     #[tokio::test]
     async fn read_small_batches() -> Result<()> {
         let config = SessionConfig::new().with_batch_size(2);
-        let session_ctx = SessionContext::with_config(config);
+        let session_ctx = SessionContext::new_with_config(config);
         let state = session_ctx.state();
         let task_ctx = state.task_ctx();
         let projection = None;
diff --git a/datafusion/core/src/datasource/file_format/parquet.rs 
b/datafusion/core/src/datasource/file_format/parquet.rs
index ebdf3ea444..16050d66db 100644
--- a/datafusion/core/src/datasource/file_format/parquet.rs
+++ b/datafusion/core/src/datasource/file_format/parquet.rs
@@ -1378,7 +1378,7 @@ mod tests {
     #[tokio::test]
     async fn read_small_batches() -> Result<()> {
         let config = SessionConfig::new().with_batch_size(2);
-        let session_ctx = SessionContext::with_config(config);
+        let session_ctx = SessionContext::new_with_config(config);
         let state = session_ctx.state();
         let task_ctx = state.task_ctx();
         let projection = None;
@@ -1406,7 +1406,7 @@ mod tests {
     #[tokio::test]
     async fn capture_bytes_scanned_metric() -> Result<()> {
         let config = SessionConfig::new().with_batch_size(2);
-        let session = SessionContext::with_config(config);
+        let session = SessionContext::new_with_config(config);
         let ctx = session.state();
 
         // Read the full file
diff --git a/datafusion/core/src/datasource/listing/table.rs 
b/datafusion/core/src/datasource/listing/table.rs
index 797562e92d..5b1710d344 100644
--- a/datafusion/core/src/datasource/listing/table.rs
+++ b/datafusion/core/src/datasource/listing/table.rs
@@ -1877,7 +1877,7 @@ mod tests {
         let session_ctx = match session_config_map {
             Some(cfg) => {
                 let config = SessionConfig::from_string_hash_map(cfg)?;
-                SessionContext::with_config(config)
+                SessionContext::new_with_config(config)
             }
             None => SessionContext::new(),
         };
@@ -2046,7 +2046,7 @@ mod tests {
         let session_ctx = match session_config_map {
             Some(cfg) => {
                 let config = SessionConfig::from_string_hash_map(cfg)?;
-                SessionContext::with_config(config)
+                SessionContext::new_with_config(config)
             }
             None => SessionContext::new(),
         };
@@ -2252,7 +2252,7 @@ mod tests {
         let session_ctx = match session_config_map {
             Some(cfg) => {
                 let config = SessionConfig::from_string_hash_map(cfg)?;
-                SessionContext::with_config(config)
+                SessionContext::new_with_config(config)
             }
             None => SessionContext::new(),
         };
diff --git a/datafusion/core/src/datasource/physical_plan/csv.rs 
b/datafusion/core/src/datasource/physical_plan/csv.rs
index dfc6acdde0..8277836a24 100644
--- a/datafusion/core/src/datasource/physical_plan/csv.rs
+++ b/datafusion/core/src/datasource/physical_plan/csv.rs
@@ -1079,8 +1079,9 @@ mod tests {
     async fn write_csv_results() -> Result<()> {
         // create partitioned input file and context
         let tmp_dir = TempDir::new()?;
-        let ctx =
-            
SessionContext::with_config(SessionConfig::new().with_target_partitions(8));
+        let ctx = SessionContext::new_with_config(
+            SessionConfig::new().with_target_partitions(8),
+        );
 
         let schema = populate_csv_partitions(&tmp_dir, 8, ".csv")?;
 
diff --git a/datafusion/core/src/datasource/physical_plan/json.rs 
b/datafusion/core/src/datasource/physical_plan/json.rs
index 537855704a..4931104041 100644
--- a/datafusion/core/src/datasource/physical_plan/json.rs
+++ b/datafusion/core/src/datasource/physical_plan/json.rs
@@ -675,8 +675,9 @@ mod tests {
     #[tokio::test]
     async fn write_json_results() -> Result<()> {
         // create partitioned input file and context
-        let ctx =
-            
SessionContext::with_config(SessionConfig::new().with_target_partitions(8));
+        let ctx = SessionContext::new_with_config(
+            SessionConfig::new().with_target_partitions(8),
+        );
 
         let path = format!("{TEST_DATA_BASE}/1.json");
 
diff --git a/datafusion/core/src/datasource/physical_plan/parquet.rs 
b/datafusion/core/src/datasource/physical_plan/parquet.rs
index d16c79a969..6f27acfe7f 100644
--- a/datafusion/core/src/datasource/physical_plan/parquet.rs
+++ b/datafusion/core/src/datasource/physical_plan/parquet.rs
@@ -1928,8 +1928,9 @@ mod tests {
         // create partitioned input file and context
         let tmp_dir = TempDir::new()?;
         // let mut ctx = create_ctx(&tmp_dir, 4).await?;
-        let ctx =
-            
SessionContext::with_config(SessionConfig::new().with_target_partitions(8));
+        let ctx = SessionContext::new_with_config(
+            SessionConfig::new().with_target_partitions(8),
+        );
         let schema = populate_csv_partitions(&tmp_dir, 4, ".csv")?;
         // register csv file with the execution context
         ctx.register_csv(
diff --git a/datafusion/core/src/datasource/view.rs 
b/datafusion/core/src/datasource/view.rs
index d58284d1ba..85fb893988 100644
--- a/datafusion/core/src/datasource/view.rs
+++ b/datafusion/core/src/datasource/view.rs
@@ -159,7 +159,7 @@ mod tests {
     #[tokio::test]
     async fn issue_3242() -> Result<()> {
         // regression test for 
https://github.com/apache/arrow-datafusion/pull/3242
-        let session_ctx = SessionContext::with_config(
+        let session_ctx = SessionContext::new_with_config(
             SessionConfig::new().with_information_schema(true),
         );
 
@@ -199,7 +199,7 @@ mod tests {
 
     #[tokio::test]
     async fn query_view() -> Result<()> {
-        let session_ctx = SessionContext::with_config(
+        let session_ctx = SessionContext::new_with_config(
             SessionConfig::new().with_information_schema(true),
         );
 
@@ -237,7 +237,7 @@ mod tests {
 
     #[tokio::test]
     async fn query_view_with_alias() -> Result<()> {
-        let session_ctx = SessionContext::with_config(SessionConfig::new());
+        let session_ctx = 
SessionContext::new_with_config(SessionConfig::new());
 
         session_ctx
             .sql("CREATE TABLE abc AS VALUES (1,2,3), (4,5,6)")
@@ -270,7 +270,7 @@ mod tests {
 
     #[tokio::test]
     async fn query_view_with_inline_alias() -> Result<()> {
-        let session_ctx = SessionContext::with_config(SessionConfig::new());
+        let session_ctx = 
SessionContext::new_with_config(SessionConfig::new());
 
         session_ctx
             .sql("CREATE TABLE abc AS VALUES (1,2,3), (4,5,6)")
@@ -303,7 +303,7 @@ mod tests {
 
     #[tokio::test]
     async fn query_view_with_projection() -> Result<()> {
-        let session_ctx = SessionContext::with_config(
+        let session_ctx = SessionContext::new_with_config(
             SessionConfig::new().with_information_schema(true),
         );
 
@@ -341,7 +341,7 @@ mod tests {
 
     #[tokio::test]
     async fn query_view_with_filter() -> Result<()> {
-        let session_ctx = SessionContext::with_config(
+        let session_ctx = SessionContext::new_with_config(
             SessionConfig::new().with_information_schema(true),
         );
 
@@ -378,7 +378,7 @@ mod tests {
 
     #[tokio::test]
     async fn query_join_views() -> Result<()> {
-        let session_ctx = SessionContext::with_config(
+        let session_ctx = SessionContext::new_with_config(
             SessionConfig::new().with_information_schema(true),
         );
 
@@ -481,7 +481,7 @@ mod tests {
 
     #[tokio::test]
     async fn create_view_plan() -> Result<()> {
-        let session_ctx = SessionContext::with_config(
+        let session_ctx = SessionContext::new_with_config(
             SessionConfig::new().with_information_schema(true),
         );
 
@@ -534,7 +534,7 @@ mod tests {
 
     #[tokio::test]
     async fn create_or_replace_view() -> Result<()> {
-        let session_ctx = SessionContext::with_config(
+        let session_ctx = SessionContext::new_with_config(
             SessionConfig::new().with_information_schema(true),
         );
 
diff --git a/datafusion/core/src/execution/context.rs 
b/datafusion/core/src/execution/context.rs
index 4bdd40a914..ca6da6cfa0 100644
--- a/datafusion/core/src/execution/context.rs
+++ b/datafusion/core/src/execution/context.rs
@@ -258,7 +258,7 @@ impl Default for SessionContext {
 impl SessionContext {
     /// Creates a new `SessionContext` using the default [`SessionConfig`].
     pub fn new() -> Self {
-        Self::with_config(SessionConfig::new())
+        Self::new_with_config(SessionConfig::new())
     }
 
     /// Finds any [`ListingSchemaProvider`]s and instructs them to reload 
tables from "disk"
@@ -284,11 +284,18 @@ impl SessionContext {
     /// Creates a new `SessionContext` using the provided
     /// [`SessionConfig`] and a new [`RuntimeEnv`].
     ///
-    /// See [`Self::with_config_rt`] for more details on resource
+    /// See [`Self::new_with_config_rt`] for more details on resource
     /// limits.
-    pub fn with_config(config: SessionConfig) -> Self {
+    pub fn new_with_config(config: SessionConfig) -> Self {
         let runtime = Arc::new(RuntimeEnv::default());
-        Self::with_config_rt(config, runtime)
+        Self::new_with_config_rt(config, runtime)
+    }
+
+    /// Creates a new `SessionContext` using the provided
+    /// [`SessionConfig`] and a new [`RuntimeEnv`].
+    #[deprecated(since = "32.0.0", note = "Use 
SessionContext::new_with_config")]
+    pub fn with_config(config: SessionConfig) -> Self {
+        Self::new_with_config(config)
     }
 
     /// Creates a new `SessionContext` using the provided
@@ -304,13 +311,20 @@ impl SessionContext {
     /// memory used) across all DataFusion queries in a process,
     /// all `SessionContext`'s should be configured with the
     /// same `RuntimeEnv`.
+    pub fn new_with_config_rt(config: SessionConfig, runtime: Arc<RuntimeEnv>) 
-> Self {
+        let state = SessionState::new_with_config_rt(config, runtime);
+        Self::new_with_state(state)
+    }
+
+    /// Creates a new `SessionContext` using the provided
+    /// [`SessionConfig`] and a [`RuntimeEnv`].
+    #[deprecated(since = "32.0.0", note = "Use 
SessionContext::new_with_config_rt")]
     pub fn with_config_rt(config: SessionConfig, runtime: Arc<RuntimeEnv>) -> 
Self {
-        let state = SessionState::with_config_rt(config, runtime);
-        Self::with_state(state)
+        Self::new_with_config_rt(config, runtime)
     }
 
     /// Creates a new `SessionContext` using the provided [`SessionState`]
-    pub fn with_state(state: SessionState) -> Self {
+    pub fn new_with_state(state: SessionState) -> Self {
         Self {
             session_id: state.session_id.clone(),
             session_start_time: Utc::now(),
@@ -318,6 +332,11 @@ impl SessionContext {
         }
     }
 
+    /// Creates a new `SessionContext` using the provided [`SessionState`]
+    #[deprecated(since = "32.0.0", note = "Use SessionContext::new_with_state")]
+    pub fn with_state(state: SessionState) -> Self {
+        Self::new_with_state(state)
+    }
     /// Returns the time this `SessionContext` was created
     pub fn session_start_time(&self) -> DateTime<Utc> {
         self.session_start_time
@@ -1401,25 +1420,24 @@ impl Debug for SessionState {
     }
 }
 
-/// Default session builder using the provided configuration
-#[deprecated(
-    since = "23.0.0",
-    note = "See SessionContext::with_config() or SessionState::with_config_rt"
-)]
-pub fn default_session_builder(config: SessionConfig) -> SessionState {
-    SessionState::with_config_rt(config, Arc::new(RuntimeEnv::default()))
-}
-
 impl SessionState {
     /// Returns new [`SessionState`] using the provided
     /// [`SessionConfig`] and [`RuntimeEnv`].
-    pub fn with_config_rt(config: SessionConfig, runtime: Arc<RuntimeEnv>) -> 
Self {
+    pub fn new_with_config_rt(config: SessionConfig, runtime: Arc<RuntimeEnv>) 
-> Self {
         let catalog_list = Arc::new(MemoryCatalogList::new()) as Arc<dyn 
CatalogList>;
-        Self::with_config_rt_and_catalog_list(config, runtime, catalog_list)
+        Self::new_with_config_rt_and_catalog_list(config, runtime, 
catalog_list)
     }
 
-    /// Returns new SessionState using the provided configuration, runtime and 
catalog list.
-    pub fn with_config_rt_and_catalog_list(
+    /// Returns new [`SessionState`] using the provided
+    /// [`SessionConfig`] and [`RuntimeEnv`].
+    #[deprecated(since = "32.0.0", note = "Use 
SessionState::new_with_config_rt")]
+    pub fn with_config_rt(config: SessionConfig, runtime: Arc<RuntimeEnv>) -> 
Self {
+        Self::new_with_config_rt(config, runtime)
+    }
+
+    /// Returns new [`SessionState`] using the provided
+    /// [`SessionConfig`], [`RuntimeEnv`], and [`CatalogList`]
+    pub fn new_with_config_rt_and_catalog_list(
         config: SessionConfig,
         runtime: Arc<RuntimeEnv>,
         catalog_list: Arc<dyn CatalogList>,
@@ -1476,7 +1494,19 @@ impl SessionState {
             table_factories,
         }
     }
-
+    /// Returns new [`SessionState`] using the provided
+    /// [`SessionConfig`] and [`RuntimeEnv`].
+    #[deprecated(
+        since = "32.0.0",
+        note = "Use SessionState::new_with_config_rt_and_catalog_list"
+    )]
+    pub fn with_config_rt_and_catalog_list(
+        config: SessionConfig,
+        runtime: Arc<RuntimeEnv>,
+        catalog_list: Arc<dyn CatalogList>,
+    ) -> Self {
+        Self::new_with_config_rt_and_catalog_list(config, runtime, 
catalog_list)
+    }
     fn register_default_schema(
         config: &SessionConfig,
         table_factories: &HashMap<String, Arc<dyn TableProviderFactory>>,
@@ -2250,7 +2280,7 @@ mod tests {
         let disk_manager = ctx1.runtime_env().disk_manager.clone();
 
         let ctx2 =
-            SessionContext::with_config_rt(SessionConfig::new(), 
ctx1.runtime_env());
+            SessionContext::new_with_config_rt(SessionConfig::new(), 
ctx1.runtime_env());
 
         assert_eq!(ctx1.runtime_env().memory_pool.reserved(), 100);
         assert_eq!(ctx2.runtime_env().memory_pool.reserved(), 100);
@@ -2397,8 +2427,8 @@ mod tests {
             .set_str("datafusion.catalog.location", url.as_str())
             .set_str("datafusion.catalog.format", "CSV")
             .set_str("datafusion.catalog.has_header", "true");
-        let session_state = SessionState::with_config_rt(cfg, runtime);
-        let ctx = SessionContext::with_state(session_state);
+        let session_state = SessionState::new_with_config_rt(cfg, runtime);
+        let ctx = SessionContext::new_with_state(session_state);
         ctx.refresh_catalogs().await?;
 
         let result =
@@ -2423,9 +2453,10 @@ mod tests {
     #[tokio::test]
     async fn custom_query_planner() -> Result<()> {
         let runtime = Arc::new(RuntimeEnv::default());
-        let session_state = SessionState::with_config_rt(SessionConfig::new(), 
runtime)
-            .with_query_planner(Arc::new(MyQueryPlanner {}));
-        let ctx = SessionContext::with_state(session_state);
+        let session_state =
+            SessionState::new_with_config_rt(SessionConfig::new(), runtime)
+                .with_query_planner(Arc::new(MyQueryPlanner {}));
+        let ctx = SessionContext::new_with_state(session_state);
 
         let df = ctx.sql("SELECT 1").await?;
         df.collect().await.expect_err("query not supported");
@@ -2434,7 +2465,7 @@ mod tests {
 
     #[tokio::test]
     async fn disabled_default_catalog_and_schema() -> Result<()> {
-        let ctx = SessionContext::with_config(
+        let ctx = SessionContext::new_with_config(
             SessionConfig::new().with_create_default_catalog_and_schema(false),
         );
 
@@ -2477,7 +2508,7 @@ mod tests {
     }
 
     async fn catalog_and_schema_test(config: SessionConfig) {
-        let ctx = SessionContext::with_config(config);
+        let ctx = SessionContext::new_with_config(config);
         let catalog = MemoryCatalogProvider::new();
         let schema = MemorySchemaProvider::new();
         schema
@@ -2554,7 +2585,7 @@ mod tests {
     #[tokio::test]
     async fn catalogs_not_leaked() {
         // the information schema used to introduce cyclic Arcs
-        let ctx = SessionContext::with_config(
+        let ctx = SessionContext::new_with_config(
             SessionConfig::new().with_information_schema(true),
         );
 
@@ -2577,7 +2608,7 @@ mod tests {
     #[tokio::test]
     async fn sql_create_schema() -> Result<()> {
         // the information schema used to introduce cyclic Arcs
-        let ctx = SessionContext::with_config(
+        let ctx = SessionContext::new_with_config(
             SessionConfig::new().with_information_schema(true),
         );
 
@@ -2600,7 +2631,7 @@ mod tests {
     #[tokio::test]
     async fn sql_create_catalog() -> Result<()> {
         // the information schema used to introduce cyclic Arcs
-        let ctx = SessionContext::with_config(
+        let ctx = SessionContext::new_with_config(
             SessionConfig::new().with_information_schema(true),
         );
 
@@ -2758,8 +2789,9 @@ mod tests {
         tmp_dir: &TempDir,
         partition_count: usize,
     ) -> Result<SessionContext> {
-        let ctx =
-            
SessionContext::with_config(SessionConfig::new().with_target_partitions(8));
+        let ctx = SessionContext::new_with_config(
+            SessionConfig::new().with_target_partitions(8),
+        );
 
         let schema = populate_csv_partitions(tmp_dir, partition_count, 
".csv")?;
 
diff --git a/datafusion/core/src/physical_optimizer/enforce_sorting.rs 
b/datafusion/core/src/physical_optimizer/enforce_sorting.rs
index c4b72a7cb3..6e3b160c89 100644
--- a/datafusion/core/src/physical_optimizer/enforce_sorting.rs
+++ b/datafusion/core/src/physical_optimizer/enforce_sorting.rs
@@ -810,7 +810,7 @@ mod tests {
     macro_rules! assert_optimized {
         ($EXPECTED_PLAN_LINES: expr, $EXPECTED_OPTIMIZED_PLAN_LINES: expr, 
$PLAN: expr, $REPARTITION_SORTS: expr) => {
             let config = 
SessionConfig::new().with_repartition_sorts($REPARTITION_SORTS);
-            let session_ctx = SessionContext::with_config(config);
+            let session_ctx = SessionContext::new_with_config(config);
             let state = session_ctx.state();
 
             let physical_plan = $PLAN;
diff --git a/datafusion/core/src/physical_planner.rs 
b/datafusion/core/src/physical_planner.rs
index 2328ffce23..84b5b9afa7 100644
--- a/datafusion/core/src/physical_planner.rs
+++ b/datafusion/core/src/physical_planner.rs
@@ -2087,7 +2087,7 @@ mod tests {
         let runtime = Arc::new(RuntimeEnv::default());
         let config = SessionConfig::new().with_target_partitions(4);
         let config = config.set_bool("datafusion.optimizer.skip_failed_rules", 
false);
-        SessionState::with_config_rt(config, runtime)
+        SessionState::new_with_config_rt(config, runtime)
     }
 
     async fn plan(logical_plan: &LogicalPlan) -> Result<Arc<dyn 
ExecutionPlan>> {
diff --git a/datafusion/core/tests/dataframe/mod.rs 
b/datafusion/core/tests/dataframe/mod.rs
index e1982761f0..845d77581b 100644
--- a/datafusion/core/tests/dataframe/mod.rs
+++ b/datafusion/core/tests/dataframe/mod.rs
@@ -1567,7 +1567,7 @@ async fn use_var_provider() -> Result<()> {
     let config = SessionConfig::new()
         .with_target_partitions(4)
         .set_bool("datafusion.optimizer.skip_failed_rules", false);
-    let ctx = SessionContext::with_config(config);
+    let ctx = SessionContext::new_with_config(config);
 
     ctx.register_table("csv_table", mem_table)?;
     ctx.register_variable(VarType::UserDefined, Arc::new(HardcodedIntProvider 
{}));
diff --git a/datafusion/core/tests/fifo.rs b/datafusion/core/tests/fifo.rs
index 754389a614..2c8b0b784f 100644
--- a/datafusion/core/tests/fifo.rs
+++ b/datafusion/core/tests/fifo.rs
@@ -99,7 +99,7 @@ mod unix_test {
             .with_batch_size(TEST_BATCH_SIZE)
             .with_collect_statistics(false)
             .with_target_partitions(1);
-        let ctx = SessionContext::with_config(config);
+        let ctx = SessionContext::new_with_config(config);
         // To make unbounded deterministic
         let waiting = Arc::new(AtomicBool::new(unbounded_file));
         // Create a new temporary FIFO file
@@ -211,7 +211,7 @@ mod unix_test {
             .with_batch_size(TEST_BATCH_SIZE)
             .set_bool("datafusion.execution.coalesce_batches", false)
             .with_target_partitions(1);
-        let ctx = SessionContext::with_config(config);
+        let ctx = SessionContext::new_with_config(config);
         // Tasks
         let mut tasks: Vec<JoinHandle<()>> = vec![];
 
@@ -342,7 +342,7 @@ mod unix_test {
         let waiting_thread = waiting.clone();
         // create local execution context
         let config = SessionConfig::new().with_batch_size(TEST_BATCH_SIZE);
-        let ctx = SessionContext::with_config(config);
+        let ctx = SessionContext::new_with_config(config);
         // Create a new temporary FIFO file
         let tmp_dir = TempDir::new()?;
         let source_fifo_path = create_fifo_file(&tmp_dir, "source.csv")?;
diff --git a/datafusion/core/tests/fuzz_cases/aggregate_fuzz.rs 
b/datafusion/core/tests/fuzz_cases/aggregate_fuzz.rs
index a0e9a50a22..50d3610dee 100644
--- a/datafusion/core/tests/fuzz_cases/aggregate_fuzz.rs
+++ b/datafusion/core/tests/fuzz_cases/aggregate_fuzz.rs
@@ -77,7 +77,7 @@ mod tests {
 async fn run_aggregate_test(input1: Vec<RecordBatch>, group_by_columns: 
Vec<&str>) {
     let schema = input1[0].schema();
     let session_config = SessionConfig::new().with_batch_size(50);
-    let ctx = SessionContext::with_config(session_config);
+    let ctx = SessionContext::new_with_config(session_config);
     let mut sort_keys = vec![];
     for ordering_col in ["a", "b", "c"] {
         sort_keys.push(PhysicalSortExpr {
diff --git a/datafusion/core/tests/fuzz_cases/join_fuzz.rs 
b/datafusion/core/tests/fuzz_cases/join_fuzz.rs
index 9b741440ff..ac86364f42 100644
--- a/datafusion/core/tests/fuzz_cases/join_fuzz.rs
+++ b/datafusion/core/tests/fuzz_cases/join_fuzz.rs
@@ -102,7 +102,7 @@ async fn run_join_test(
     let batch_sizes = [1, 2, 7, 49, 50, 51, 100];
     for batch_size in batch_sizes {
         let session_config = SessionConfig::new().with_batch_size(batch_size);
-        let ctx = SessionContext::with_config(session_config);
+        let ctx = SessionContext::new_with_config(session_config);
         let task_ctx = ctx.task_ctx();
 
         let schema1 = input1[0].schema();
diff --git a/datafusion/core/tests/fuzz_cases/merge_fuzz.rs 
b/datafusion/core/tests/fuzz_cases/merge_fuzz.rs
index 6411f31be0..c38ff41f57 100644
--- a/datafusion/core/tests/fuzz_cases/merge_fuzz.rs
+++ b/datafusion/core/tests/fuzz_cases/merge_fuzz.rs
@@ -118,7 +118,7 @@ async fn run_merge_test(input: Vec<Vec<RecordBatch>>) {
         let merge = Arc::new(SortPreservingMergeExec::new(sort, 
Arc::new(exec)));
 
         let session_config = SessionConfig::new().with_batch_size(batch_size);
-        let ctx = SessionContext::with_config(session_config);
+        let ctx = SessionContext::new_with_config(session_config);
         let task_ctx = ctx.task_ctx();
         let collected = collect(merge, task_ctx).await.unwrap();
 
diff --git a/datafusion/core/tests/fuzz_cases/sort_fuzz.rs 
b/datafusion/core/tests/fuzz_cases/sort_fuzz.rs
index 6c427c7fb7..d74144b0ab 100644
--- a/datafusion/core/tests/fuzz_cases/sort_fuzz.rs
+++ b/datafusion/core/tests/fuzz_cases/sort_fuzz.rs
@@ -174,9 +174,9 @@ impl SortTest {
             let runtime_config = RuntimeConfig::new()
                 .with_memory_pool(Arc::new(GreedyMemoryPool::new(pool_size)));
             let runtime = Arc::new(RuntimeEnv::new(runtime_config).unwrap());
-            SessionContext::with_config_rt(session_config, runtime)
+            SessionContext::new_with_config_rt(session_config, runtime)
         } else {
-            SessionContext::with_config(session_config)
+            SessionContext::new_with_config(session_config)
         };
 
         let task_ctx = session_ctx.task_ctx();
diff --git 
a/datafusion/core/tests/fuzz_cases/sort_preserving_repartition_fuzz.rs 
b/datafusion/core/tests/fuzz_cases/sort_preserving_repartition_fuzz.rs
index 6304e01c63..4d3a2a15c5 100644
--- a/datafusion/core/tests/fuzz_cases/sort_preserving_repartition_fuzz.rs
+++ b/datafusion/core/tests/fuzz_cases/sort_preserving_repartition_fuzz.rs
@@ -93,7 +93,7 @@ mod sp_repartition_fuzz_tests {
     ) {
         let schema = input1[0].schema();
         let session_config = SessionConfig::new().with_batch_size(50);
-        let ctx = SessionContext::with_config(session_config);
+        let ctx = SessionContext::new_with_config(session_config);
         let mut sort_keys = vec![];
         for ordering_col in ["a", "b", "c"] {
             sort_keys.push(PhysicalSortExpr {
diff --git a/datafusion/core/tests/fuzz_cases/window_fuzz.rs 
b/datafusion/core/tests/fuzz_cases/window_fuzz.rs
index 3d103ee70e..1f0a4b09b1 100644
--- a/datafusion/core/tests/fuzz_cases/window_fuzz.rs
+++ b/datafusion/core/tests/fuzz_cases/window_fuzz.rs
@@ -396,7 +396,7 @@ async fn run_window_test(
     let mut rng = StdRng::seed_from_u64(random_seed);
     let schema = input1[0].schema();
     let session_config = SessionConfig::new().with_batch_size(50);
-    let ctx = SessionContext::with_config(session_config);
+    let ctx = SessionContext::new_with_config(session_config);
     let (window_fn, args, fn_name) = get_random_function(&schema, &mut rng, 
is_linear);
 
     let window_frame = get_random_window_frame(&mut rng, is_linear);
diff --git a/datafusion/core/tests/memory_limit.rs 
b/datafusion/core/tests/memory_limit.rs
index 1041888b95..a98d097856 100644
--- a/datafusion/core/tests/memory_limit.rs
+++ b/datafusion/core/tests/memory_limit.rs
@@ -412,13 +412,13 @@ impl TestCase {
         let runtime = RuntimeEnv::new(rt_config).unwrap();
 
         // Configure execution
-        let state = SessionState::with_config_rt(config, Arc::new(runtime));
+        let state = SessionState::new_with_config_rt(config, 
Arc::new(runtime));
         let state = match scenario.rules() {
             Some(rules) => state.with_physical_optimizer_rules(rules),
             None => state,
         };
 
-        let ctx = SessionContext::with_state(state);
+        let ctx = SessionContext::new_with_state(state);
         ctx.register_table("t", table).expect("registering table");
 
         let query = query.expect("Test error: query not specified");
diff --git a/datafusion/core/tests/parquet/file_statistics.rs 
b/datafusion/core/tests/parquet/file_statistics.rs
index 90abbe9e21..58f81cc571 100644
--- a/datafusion/core/tests/parquet/file_statistics.rs
+++ b/datafusion/core/tests/parquet/file_statistics.rs
@@ -101,7 +101,7 @@ fn get_cache_runtime_state() -> 
(Arc<DefaultFileStatisticsCache>, SessionState)
     let rt = Arc::new(
         
RuntimeEnv::new(RuntimeConfig::new().with_cache_manager(cache_config)).unwrap(),
     );
-    let state = SessionContext::with_config_rt(SessionConfig::default(), 
rt).state();
+    let state = SessionContext::new_with_config_rt(SessionConfig::default(), 
rt).state();
 
     (cache1, state)
 }
diff --git a/datafusion/core/tests/parquet/filter_pushdown.rs 
b/datafusion/core/tests/parquet/filter_pushdown.rs
index 885834f939..61a8f87b9e 100644
--- a/datafusion/core/tests/parquet/filter_pushdown.rs
+++ b/datafusion/core/tests/parquet/filter_pushdown.rs
@@ -507,7 +507,7 @@ impl<'a> TestCase<'a> {
     ) -> RecordBatch {
         println!("  scan options: {scan_options:?}");
         println!("  reading with filter {filter:?}");
-        let ctx = SessionContext::with_config(scan_options.config());
+        let ctx = SessionContext::new_with_config(scan_options.config());
         let exec = self
             .test_parquet_file
             .create_scan(Some(filter.clone()))
diff --git a/datafusion/core/tests/parquet/mod.rs 
b/datafusion/core/tests/parquet/mod.rs
index 33a78660ab..3f003c077d 100644
--- a/datafusion/core/tests/parquet/mod.rs
+++ b/datafusion/core/tests/parquet/mod.rs
@@ -154,7 +154,7 @@ impl ContextWithParquet {
         let parquet_path = file.path().to_string_lossy();
 
         // now, setup a the file as a data source and run a query against it
-        let ctx = SessionContext::with_config(config);
+        let ctx = SessionContext::new_with_config(config);
 
         ctx.register_parquet("t", &parquet_path, ParquetReadOptions::default())
             .await
diff --git a/datafusion/core/tests/sql/aggregates.rs 
b/datafusion/core/tests/sql/aggregates.rs
index 5d42936232..63d5e58090 100644
--- a/datafusion/core/tests/sql/aggregates.rs
+++ b/datafusion/core/tests/sql/aggregates.rs
@@ -801,7 +801,7 @@ async fn aggregate_with_alias() -> Result<()> {
 #[tokio::test]
 async fn test_accumulator_row_accumulator() -> Result<()> {
     let config = SessionConfig::new();
-    let ctx = SessionContext::with_config(config);
+    let ctx = SessionContext::new_with_config(config);
     register_aggregate_csv(&ctx).await?;
 
     let sql = "SELECT c1, c2, MIN(c13) as min1, MIN(c9) as min2, MAX(c13) as 
max1, MAX(c9) as max2, AVG(c9) as avg1, MIN(c13) as min3, COUNT(C9) as cnt1, 
0.5*SUM(c9-c8) as sum1
diff --git a/datafusion/core/tests/sql/create_drop.rs 
b/datafusion/core/tests/sql/create_drop.rs
index aa34552044..b1434dddee 100644
--- a/datafusion/core/tests/sql/create_drop.rs
+++ b/datafusion/core/tests/sql/create_drop.rs
@@ -26,11 +26,11 @@ async fn create_custom_table() -> Result<()> {
     let cfg = RuntimeConfig::new();
     let env = RuntimeEnv::new(cfg).unwrap();
     let ses = SessionConfig::new();
-    let mut state = SessionState::with_config_rt(ses, Arc::new(env));
+    let mut state = SessionState::new_with_config_rt(ses, Arc::new(env));
     state
         .table_factories_mut()
         .insert("DELTATABLE".to_string(), Arc::new(TestTableFactory {}));
-    let ctx = SessionContext::with_state(state);
+    let ctx = SessionContext::new_with_state(state);
 
     let sql = "CREATE EXTERNAL TABLE dt STORED AS DELTATABLE LOCATION 
's3://bucket/schema/table';";
     ctx.sql(sql).await.unwrap();
@@ -48,11 +48,11 @@ async fn create_external_table_with_ddl() -> Result<()> {
     let cfg = RuntimeConfig::new();
     let env = RuntimeEnv::new(cfg).unwrap();
     let ses = SessionConfig::new();
-    let mut state = SessionState::with_config_rt(ses, Arc::new(env));
+    let mut state = SessionState::new_with_config_rt(ses, Arc::new(env));
     state
         .table_factories_mut()
         .insert("MOCKTABLE".to_string(), Arc::new(TestTableFactory {}));
-    let ctx = SessionContext::with_state(state);
+    let ctx = SessionContext::new_with_state(state);
 
     let sql = "CREATE EXTERNAL TABLE dt (a_id integer, a_str string, a_bool 
boolean) STORED AS MOCKTABLE LOCATION 'mockprotocol://path/to/table';";
     ctx.sql(sql).await.unwrap();
diff --git a/datafusion/core/tests/sql/displayable.rs 
b/datafusion/core/tests/sql/displayable.rs
index b736820009..3255d514c5 100644
--- a/datafusion/core/tests/sql/displayable.rs
+++ b/datafusion/core/tests/sql/displayable.rs
@@ -24,7 +24,7 @@ use datafusion_physical_plan::displayable;
 async fn teset_displayable() {
     // Hard code target_partitions as it appears in the RepartitionExec output
     let config = SessionConfig::new().with_target_partitions(3);
-    let ctx = SessionContext::with_config(config);
+    let ctx = SessionContext::new_with_config(config);
 
     // register the a table
     ctx.register_csv("example", "tests/data/example.csv", 
CsvReadOptions::new())
diff --git a/datafusion/core/tests/sql/explain_analyze.rs 
b/datafusion/core/tests/sql/explain_analyze.rs
index 06120c01ce..2b708df4f3 100644
--- a/datafusion/core/tests/sql/explain_analyze.rs
+++ b/datafusion/core/tests/sql/explain_analyze.rs
@@ -27,7 +27,7 @@ async fn explain_analyze_baseline_metrics() {
     let config = SessionConfig::new()
         .with_target_partitions(3)
         .with_batch_size(4096);
-    let ctx = SessionContext::with_config(config);
+    let ctx = SessionContext::new_with_config(config);
     register_aggregate_csv_by_sql(&ctx).await;
     // a query with as many operators as we have metrics for
     let sql = "EXPLAIN ANALYZE \
@@ -598,7 +598,7 @@ async fn test_physical_plan_display_indent() {
     let config = SessionConfig::new()
         .with_target_partitions(9000)
         .with_batch_size(4096);
-    let ctx = SessionContext::with_config(config);
+    let ctx = SessionContext::new_with_config(config);
     register_aggregate_csv(&ctx).await.unwrap();
     let sql = "SELECT c1, MAX(c12), MIN(c12) as the_min \
                FROM aggregate_test_100 \
@@ -642,7 +642,7 @@ async fn test_physical_plan_display_indent_multi_children() 
{
     let config = SessionConfig::new()
         .with_target_partitions(9000)
         .with_batch_size(4096);
-    let ctx = SessionContext::with_config(config);
+    let ctx = SessionContext::new_with_config(config);
     // ensure indenting works for nodes with multiple children
     register_aggregate_csv(&ctx).await.unwrap();
     let sql = "SELECT c1 \
@@ -777,7 +777,7 @@ async fn csv_explain_analyze_verbose() {
 async fn explain_logical_plan_only() {
     let mut config = ConfigOptions::new();
     config.explain.logical_plan_only = true;
-    let ctx = SessionContext::with_config(config.into());
+    let ctx = SessionContext::new_with_config(config.into());
     let sql = "EXPLAIN select count(*) from (values ('a', 1, 100), ('a', 2, 
150)) as t (c1,c2,c3)";
     let actual = execute(&ctx, sql).await;
     let actual = normalize_vec_for_explain(actual);
@@ -797,7 +797,7 @@ async fn explain_logical_plan_only() {
 async fn explain_physical_plan_only() {
     let mut config = ConfigOptions::new();
     config.explain.physical_plan_only = true;
-    let ctx = SessionContext::with_config(config.into());
+    let ctx = SessionContext::new_with_config(config.into());
     let sql = "EXPLAIN select count(*) from (values ('a', 1, 100), ('a', 2, 
150)) as t (c1,c2,c3)";
     let actual = execute(&ctx, sql).await;
     let actual = normalize_vec_for_explain(actual);
@@ -816,7 +816,7 @@ async fn csv_explain_analyze_with_statistics() {
     let mut config = ConfigOptions::new();
     config.explain.physical_plan_only = true;
     config.explain.show_statistics = true;
-    let ctx = SessionContext::with_config(config.into());
+    let ctx = SessionContext::new_with_config(config.into());
     register_aggregate_csv_by_sql(&ctx).await;
 
     let sql = "EXPLAIN ANALYZE SELECT c1 FROM aggregate_test_100";
diff --git a/datafusion/core/tests/sql/group_by.rs 
b/datafusion/core/tests/sql/group_by.rs
index 862d2275af..49903857b7 100644
--- a/datafusion/core/tests/sql/group_by.rs
+++ b/datafusion/core/tests/sql/group_by.rs
@@ -149,7 +149,7 @@ async fn create_groupby_context(tmp_dir: &TempDir) -> 
Result<SessionContext> {
     }
 
     let cfg = SessionConfig::new().with_target_partitions(1);
-    let ctx = SessionContext::with_config(cfg);
+    let ctx = SessionContext::new_with_config(cfg);
     ctx.register_csv(
         "traces",
         tmp_dir.path().to_str().unwrap(),
diff --git a/datafusion/core/tests/sql/joins.rs 
b/datafusion/core/tests/sql/joins.rs
index d08f09d3b6..528bde6323 100644
--- a/datafusion/core/tests/sql/joins.rs
+++ b/datafusion/core/tests/sql/joins.rs
@@ -81,7 +81,7 @@ async fn null_aware_left_anti_join() -> Result<()> {
 #[tokio::test]
 async fn join_change_in_planner() -> Result<()> {
     let config = SessionConfig::new().with_target_partitions(8);
-    let ctx = SessionContext::with_config(config);
+    let ctx = SessionContext::new_with_config(config);
     let tmp_dir = TempDir::new().unwrap();
     let left_file_path = tmp_dir.path().join("left.csv");
     File::create(left_file_path.clone()).unwrap();
@@ -152,7 +152,7 @@ async fn join_change_in_planner() -> Result<()> {
 #[tokio::test]
 async fn join_change_in_planner_without_sort() -> Result<()> {
     let config = SessionConfig::new().with_target_partitions(8);
-    let ctx = SessionContext::with_config(config);
+    let ctx = SessionContext::new_with_config(config);
     let tmp_dir = TempDir::new()?;
     let left_file_path = tmp_dir.path().join("left.csv");
     File::create(left_file_path.clone())?;
@@ -209,7 +209,7 @@ async fn join_change_in_planner_without_sort_not_allowed() 
-> Result<()> {
     let config = SessionConfig::new()
         .with_target_partitions(8)
         .with_allow_symmetric_joins_without_pruning(false);
-    let ctx = SessionContext::with_config(config);
+    let ctx = SessionContext::new_with_config(config);
     let tmp_dir = TempDir::new()?;
     let left_file_path = tmp_dir.path().join("left.csv");
     File::create(left_file_path.clone())?;
diff --git a/datafusion/core/tests/sql/mod.rs b/datafusion/core/tests/sql/mod.rs
index 4529889270..20d715b32c 100644
--- a/datafusion/core/tests/sql/mod.rs
+++ b/datafusion/core/tests/sql/mod.rs
@@ -102,7 +102,7 @@ fn create_join_context(
     column_right: &str,
     repartition_joins: bool,
 ) -> Result<SessionContext> {
-    let ctx = SessionContext::with_config(
+    let ctx = SessionContext::new_with_config(
         SessionConfig::new()
             .with_repartition_joins(repartition_joins)
             .with_target_partitions(2)
@@ -157,7 +157,7 @@ fn create_left_semi_anti_join_context_with_null_ids(
     column_right: &str,
     repartition_joins: bool,
 ) -> Result<SessionContext> {
-    let ctx = SessionContext::with_config(
+    let ctx = SessionContext::new_with_config(
         SessionConfig::new()
             .with_repartition_joins(repartition_joins)
             .with_target_partitions(2)
@@ -524,7 +524,8 @@ async fn create_ctx_with_partition(
     tmp_dir: &TempDir,
     partition_count: usize,
 ) -> Result<SessionContext> {
-    let ctx = 
SessionContext::with_config(SessionConfig::new().with_target_partitions(8));
+    let ctx =
+        
SessionContext::new_with_config(SessionConfig::new().with_target_partitions(8));
 
     let schema = populate_csv_partitions(tmp_dir, partition_count, ".csv")?;
 
diff --git a/datafusion/core/tests/sql/order.rs 
b/datafusion/core/tests/sql/order.rs
index c5497b4cc0..0142675bbd 100644
--- a/datafusion/core/tests/sql/order.rs
+++ b/datafusion/core/tests/sql/order.rs
@@ -180,7 +180,7 @@ async fn test_issue5970_mini() -> Result<()> {
     let config = SessionConfig::new()
         .with_target_partitions(2)
         .with_repartition_sorts(true);
-    let ctx = SessionContext::with_config(config);
+    let ctx = SessionContext::new_with_config(config);
     let sql = "
 WITH
     m0(t) AS (
diff --git a/datafusion/core/tests/sql/partitioned_csv.rs 
b/datafusion/core/tests/sql/partitioned_csv.rs
index 98cb3b1893..d5a1c2f0b4 100644
--- a/datafusion/core/tests/sql/partitioned_csv.rs
+++ b/datafusion/core/tests/sql/partitioned_csv.rs
@@ -78,7 +78,8 @@ pub async fn create_ctx(
     tmp_dir: &TempDir,
     partition_count: usize,
 ) -> Result<SessionContext> {
-    let ctx = 
SessionContext::with_config(SessionConfig::new().with_target_partitions(8));
+    let ctx =
+        
SessionContext::new_with_config(SessionConfig::new().with_target_partitions(8));
 
     let schema = populate_csv_partitions(tmp_dir, partition_count, ".csv")?;
 
diff --git a/datafusion/core/tests/sql/repartition.rs 
b/datafusion/core/tests/sql/repartition.rs
index 20e64b2eee..332f18e941 100644
--- a/datafusion/core/tests/sql/repartition.rs
+++ b/datafusion/core/tests/sql/repartition.rs
@@ -33,7 +33,7 @@ use std::sync::Arc;
 #[tokio::test]
 async fn unbounded_repartition() -> Result<()> {
     let config = SessionConfig::new();
-    let ctx = SessionContext::with_config(config);
+    let ctx = SessionContext::new_with_config(config);
     let task = ctx.task_ctx();
     let schema = Arc::new(Schema::new(vec![Field::new("a2", DataType::UInt32, 
false)]));
     let batch = RecordBatch::try_new(
diff --git a/datafusion/core/tests/sql/select.rs 
b/datafusion/core/tests/sql/select.rs
index cda5fba805..63f3e97930 100644
--- a/datafusion/core/tests/sql/select.rs
+++ b/datafusion/core/tests/sql/select.rs
@@ -407,7 +407,8 @@ async fn sort_on_window_null_string() -> Result<()> {
     ])
     .unwrap();
 
-    let ctx = 
SessionContext::with_config(SessionConfig::new().with_target_partitions(1));
+    let ctx =
+        
SessionContext::new_with_config(SessionConfig::new().with_target_partitions(1));
     ctx.register_batch("test", batch)?;
 
     let sql =
@@ -590,7 +591,7 @@ async fn boolean_literal() -> Result<()> {
 #[tokio::test]
 async fn unprojected_filter() {
     let config = SessionConfig::new();
-    let ctx = SessionContext::with_config(config);
+    let ctx = SessionContext::new_with_config(config);
     let df = ctx.read_table(table_with_sequence(1, 3).unwrap()).unwrap();
 
     let df = df
diff --git a/datafusion/core/tests/tpcds_planning.rs 
b/datafusion/core/tests/tpcds_planning.rs
index 3f55049ecd..4db97c75cb 100644
--- a/datafusion/core/tests/tpcds_planning.rs
+++ b/datafusion/core/tests/tpcds_planning.rs
@@ -1045,7 +1045,7 @@ async fn regression_test(query_no: u8, create_physical: 
bool) -> Result<()> {
     let sql = fs::read_to_string(filename).expect("Could not read query");
 
     let config = SessionConfig::default();
-    let ctx = SessionContext::with_config(config);
+    let ctx = SessionContext::new_with_config(config);
     let tables = get_table_definitions();
     for table in &tables {
         ctx.register_table(
diff --git a/datafusion/core/tests/user_defined/user_defined_plan.rs 
b/datafusion/core/tests/user_defined/user_defined_plan.rs
index 21ec20f0d4..d8c4bea174 100644
--- a/datafusion/core/tests/user_defined/user_defined_plan.rs
+++ b/datafusion/core/tests/user_defined/user_defined_plan.rs
@@ -247,10 +247,10 @@ async fn topk_plan() -> Result<()> {
 fn make_topk_context() -> SessionContext {
     let config = SessionConfig::new().with_target_partitions(48);
     let runtime = Arc::new(RuntimeEnv::default());
-    let state = SessionState::with_config_rt(config, runtime)
+    let state = SessionState::new_with_config_rt(config, runtime)
         .with_query_planner(Arc::new(TopKQueryPlanner {}))
         .add_optimizer_rule(Arc::new(TopKOptimizerRule {}));
-    SessionContext::with_state(state)
+    SessionContext::new_with_state(state)
 }
 
 // ------ The implementation of the TopK code follows -----
diff --git a/datafusion/proto/tests/cases/roundtrip_logical_plan.rs 
b/datafusion/proto/tests/cases/roundtrip_logical_plan.rs
index cd294b0e53..5b53946379 100644
--- a/datafusion/proto/tests/cases/roundtrip_logical_plan.rs
+++ b/datafusion/proto/tests/cases/roundtrip_logical_plan.rs
@@ -168,10 +168,10 @@ async fn roundtrip_custom_tables() -> Result<()> {
     let cfg = RuntimeConfig::new();
     let env = RuntimeEnv::new(cfg).unwrap();
     let ses = SessionConfig::new();
-    let mut state = SessionState::with_config_rt(ses, Arc::new(env));
+    let mut state = SessionState::new_with_config_rt(ses, Arc::new(env));
     // replace factories
     *state.table_factories_mut() = table_factories;
-    let ctx = SessionContext::with_state(state);
+    let ctx = SessionContext::new_with_state(state);
 
     let sql = "CREATE EXTERNAL TABLE t STORED AS testtable LOCATION 
's3://bucket/schema/table';";
     ctx.sql(sql).await.unwrap();
diff --git a/datafusion/sqllogictest/src/test_context.rs 
b/datafusion/sqllogictest/src/test_context.rs
index 9af2de1af4..b2314f34f3 100644
--- a/datafusion/sqllogictest/src/test_context.rs
+++ b/datafusion/sqllogictest/src/test_context.rs
@@ -67,7 +67,7 @@ impl TestContext {
             // hardcode target partitions so plans are deterministic
             .with_target_partitions(4);
 
-        let test_ctx = TestContext::new(SessionContext::with_config(config));
+        let test_ctx = 
TestContext::new(SessionContext::new_with_config(config));
 
         let file_name = relative_path.file_name().unwrap().to_str().unwrap();
         match file_name {
diff --git a/datafusion/substrait/tests/cases/roundtrip_logical_plan.rs 
b/datafusion/substrait/tests/cases/roundtrip_logical_plan.rs
index 2554d0667e..9b9afa159c 100644
--- a/datafusion/substrait/tests/cases/roundtrip_logical_plan.rs
+++ b/datafusion/substrait/tests/cases/roundtrip_logical_plan.rs
@@ -726,12 +726,12 @@ async fn function_extension_info(sql: &str) -> 
Result<(Vec<String>, Vec<u32>)> {
 }
 
 async fn create_context() -> Result<SessionContext> {
-    let state = SessionState::with_config_rt(
+    let state = SessionState::new_with_config_rt(
         SessionConfig::default(),
         Arc::new(RuntimeEnv::default()),
     )
     .with_serializer_registry(Arc::new(MockSerializerRegistry));
-    let ctx = SessionContext::with_state(state);
+    let ctx = SessionContext::new_with_state(state);
     let mut explicit_options = CsvReadOptions::new();
     let schema = Schema::new(vec![
         Field::new("a", DataType::Int64, true),

Reply via email to