This is an automated email from the ASF dual-hosted git repository.
jiayu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/sedona.git
The following commit(s) were added to refs/heads/master by this push:
new 6bd60da6ee [DOCS] Minor typo fixes (#2557)
6bd60da6ee is described below
commit 6bd60da6eeb259d9e4ce478a4937eca5ef902729
Author: John Bampton <[email protected]>
AuthorDate: Fri Dec 12 14:05:04 2025 +1000
[DOCS] Minor typo fixes (#2557)
---
docs/usecases/contrib/NdviSentinelApacheSedona.ipynb | 14 ++++++--------
python/sedona/spark/geopandas/base.py | 2 +-
2 files changed, 7 insertions(+), 9 deletions(-)
diff --git a/docs/usecases/contrib/NdviSentinelApacheSedona.ipynb b/docs/usecases/contrib/NdviSentinelApacheSedona.ipynb
index ed9a61857d..fe521ec8fa 100644
--- a/docs/usecases/contrib/NdviSentinelApacheSedona.ipynb
+++ b/docs/usecases/contrib/NdviSentinelApacheSedona.ipynb
@@ -85,9 +85,9 @@
}
],
"source": [
- "analise_folder = \"analise_teste_\" + str(date.today())\n",
+ "analysis_folder = \"analysis_tests_\" + str(date.today())\n",
"hdfs = PyWebHdfsClient(host=\"179.106.229.159\", port=\"50070\",
user_name=\"root\")\n",
- "hdfs.delete_file_dir(analise_folder, recursive=True)"
+ "hdfs.delete_file_dir(analysis_folder, recursive=True)"
]
},
{
@@ -353,8 +353,8 @@
"metadata": {},
"outputs": [],
"source": [
- "# df_export.repartition(\"origin\").write.format('csv').option('header',
True).partitionBy(\"origin\").mode('overwrite').option('sep',
',').save(\"hdfs://776faf4d6a1e:8020/\"+analise_folder)\n",
- "# df_export.write.format('csv').option('header', True).option('sep',
',').save(\"hdfs://776faf4d6a1e:8020/\"+analise_folder)\n",
+ "# df_export.repartition(\"origin\").write.format('csv').option('header',
True).partitionBy(\"origin\").mode('overwrite').option('sep',
',').save(\"hdfs://776faf4d6a1e:8020/\"+analysis_folder)\n",
+ "# df_export.write.format('csv').option('header', True).option('sep',
',').save(\"hdfs://776faf4d6a1e:8020/\"+analysis_folder)\n",
"# start = 0\n",
"# end = 10\n",
"# part_df_export = spark.sql('select * from df_export where id between
'+str(start)+' and '+str(end))\n",
@@ -369,7 +369,7 @@
"outputs": [],
"source": [
"# df_writer = part_df_export.write.format('csv').option('header',
True).option('sep', ',')\n",
- "# df_writer.save(\"hdfs://776faf4d6a1e:8020/\"+analise_folder)"
+ "# df_writer.save(\"hdfs://776faf4d6a1e:8020/\"+analysis_folder)"
]
},
{
@@ -809,9 +809,7 @@
"id": "aae72e9c",
"metadata": {},
"outputs": [],
- "source": [
- "#
calculated_mean.repartition(\"origin\").write.format('csv').option('header',
True).partitionBy(\"origin\").mode('overwrite').option('sep',
',').save(\"hdfs://776faf4d6a1e:8020/\"+analise_folder)"
- ]
+ "source": "#
calculated_mean.repartition(\"origin\").write.format('csv').option('header',
True).partitionBy(\"origin\").mode('overwrite').option('sep',
',').save(\"hdfs://776faf4d6a1e:8020/\"+analysis_folder)"
},
{
"cell_type": "code",
diff --git a/python/sedona/spark/geopandas/base.py b/python/sedona/spark/geopandas/base.py
index 0ad65c0cf6..1bd3ffabba 100644
--- a/python/sedona/spark/geopandas/base.py
+++ b/python/sedona/spark/geopandas/base.py
@@ -794,7 +794,7 @@ class GeoFrame(metaclass=ABCMeta):
See Also
--------
- GeoSeries.minumum_bounding_circle : minimum bounding circle (geometry)
+ GeoSeries.minimum_bounding_circle : minimum bounding circle (geometry)
"""
return _delegate_to_geometry_column("minimum_bounding_radius", self)