This is an automated email from the ASF dual-hosted git repository.

yhu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/beam.git


The following commit(s) were added to refs/heads/master by this push:
     new 783584d0773 Fix typo tranform; workaround non-ascii char (#25428)
783584d0773 is described below

commit 783584d0773d3a5ba532aca1dbc6f87ff4945786
Author: Yi Hu <[email protected]>
AuthorDate: Fri Feb 10 19:06:49 2023 -0500

    Fix typo tranform; workaround non-ascii char (#25428)
---
 .../apache/beam/runners/dataflow/BatchStatefulParDoOverrides.java | 2 +-
 .../runners/dataflow/options/DataflowPipelineDebugOptions.java    | 4 ++--
 .../org/apache/beam/runners/samza/SamzaExecutionEnvironment.java  | 2 +-
 sdks/go/examples/pingpong/pingpong.go                             | 2 +-
 .../main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIO.java | 2 +-
 .../java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java    | 8 ++++----
 .../kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java | 2 +-
 .../src/main/java/org/apache/beam/sdk/io/kinesis/KinesisIO.java   | 2 +-
 .../main/java/org/apache/beam/sdk/tpcds/SqlTransformRunner.java   | 2 +-
 sdks/python/apache_beam/io/gcp/bigquery_file_loads.py             | 2 +-
 sdks/python/apache_beam/ml/gcp/recommendations_ai.py              | 2 +-
 sdks/python/apache_beam/runners/interactive/cache_manager.py      | 2 +-
 .../python/apache_beam/runners/interactive/pipeline_instrument.py | 2 +-
 .../apache_beam/testing/benchmarks/chicago_taxi/trainer/model.py  | 2 +-
 .../apache_beam/testing/benchmarks/cloudml/criteo_tft/criteo.py   | 2 +-
 15 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/BatchStatefulParDoOverrides.java b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/BatchStatefulParDoOverrides.java
index 229fdf65b68..1b0008a6963 100644
--- a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/BatchStatefulParDoOverrides.java
+++ b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/BatchStatefulParDoOverrides.java
@@ -58,7 +58,7 @@ import org.joda.time.Instant;
  * grouping and expansion.
  *
  * <p>This implementation relies on implementation details of the Dataflow runner, specifically
- * standard fusion behavior of {@link ParDo} tranforms following a {@link GroupByKey}.
+ * standard fusion behavior of {@link ParDo} transforms following a {@link GroupByKey}.
  */
 public class BatchStatefulParDoOverrides {
 
diff --git a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/options/DataflowPipelineDebugOptions.java b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/options/DataflowPipelineDebugOptions.java
index 208de034a50..fb373d9aaf4 100644
--- a/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/options/DataflowPipelineDebugOptions.java
+++ b/runners/google-cloud-dataflow-java/src/main/java/org/apache/beam/runners/dataflow/options/DataflowPipelineDebugOptions.java
@@ -136,12 +136,12 @@ public interface DataflowPipelineDebugOptions extends ExperimentalOptions, Pipel
   }
 
   /**
-   * Mapping of old PTranform names to new ones, specified as JSON <code>{"oldName":"newName",...}
+   * Mapping of old PTransform names to new ones, specified as JSON <code>{"oldName":"newName",...}
    * </code>. To mark a transform as deleted, make newName the empty string.
    */
   @JsonIgnore
  @Description(
-      "Mapping of old PTranform names to new ones, specified as JSON "
+      "Mapping of old PTransform names to new ones, specified as JSON "
          + "{\"oldName\":\"newName\",...}. To mark a transform as deleted, make newName the empty "
          + "string.")
   Map<String, String> getTransformNameMapping();
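For illustration, a minimal sketch of supplying this mapping when updating a pipeline. The step names are placeholders, a setter matching the getter above is assumed per the PipelineOptions bean convention, and the same JSON can be passed on the command line as --transformNameMapping:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.beam.runners.dataflow.options.DataflowPipelineDebugOptions;
    import org.apache.beam.sdk.options.PipelineOptionsFactory;

    public class TransformNameMappingExample {
      public static void main(String[] args) {
        DataflowPipelineDebugOptions options =
            PipelineOptionsFactory.fromArgs(args).as(DataflowPipelineDebugOptions.class);
        Map<String, String> mapping = new HashMap<>();
        mapping.put("OldStepName", "NewStepName"); // rename a transform across an update
        mapping.put("ObsoleteStep", "");           // empty newName marks the transform as deleted
        options.setTransformNameMapping(mapping);
      }
    }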
diff --git a/runners/samza/src/main/java/org/apache/beam/runners/samza/SamzaExecutionEnvironment.java b/runners/samza/src/main/java/org/apache/beam/runners/samza/SamzaExecutionEnvironment.java
index 47d78e3d162..02c31edfd1d 100644
--- a/runners/samza/src/main/java/org/apache/beam/runners/samza/SamzaExecutionEnvironment.java
+++ b/runners/samza/src/main/java/org/apache/beam/runners/samza/SamzaExecutionEnvironment.java
@@ -35,7 +35,7 @@ public enum SamzaExecutionEnvironment {
 
   /**
   * Runs Samza job as a stand alone embedded library mode which can be imported into your Java
-   * application. You can increase your application’s capacity by spinning up multiple instances.
+   * application. You can increase your application's capacity by spinning up multiple instances.
    * These instances will then dynamically coordinate with each other and distribute work among
    * themselves. If an instance fails, the tasks running on it will be re-assigned to the remaining
    * ones. By default, Samza uses Zookeeper for coordination across individual instances.
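A rough sketch of selecting this mode through pipeline options. It assumes SamzaPipelineOptions exposes a setter for this enum (the usual Beam options convention) and that STANDALONE is the value this Javadoc describes:

    import org.apache.beam.runners.samza.SamzaExecutionEnvironment;
    import org.apache.beam.runners.samza.SamzaPipelineOptions;
    import org.apache.beam.sdk.options.PipelineOptionsFactory;

    public class SamzaEnvironmentExample {
      public static void main(String[] args) {
        SamzaPipelineOptions options =
            PipelineOptionsFactory.fromArgs(args).as(SamzaPipelineOptions.class);
        // Embedded library mode; instances coordinate through Zookeeper by default.
        options.setSamzaExecutionEnvironment(SamzaExecutionEnvironment.STANDALONE);
      }
    }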
diff --git a/sdks/go/examples/pingpong/pingpong.go b/sdks/go/examples/pingpong/pingpong.go
index 99286495c3f..6ea649272d2 100644
--- a/sdks/go/examples/pingpong/pingpong.go
+++ b/sdks/go/examples/pingpong/pingpong.go
@@ -44,7 +44,7 @@ func init() {
        register.Iter1[string]()
 }
 
-// stitch constructs two composite PTranformations that provide input to each other. It
+// stitch constructs two composite PTransforms that provide input to each other. It
 // is a (deliberately) complex DAG to show what kind of structures are possible.
 func stitch(s beam.Scope, words beam.PCollection) (beam.PCollection, beam.PCollection) {
        ping := s.Scope("ping")
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIO.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIO.java
index e871d156b09..478404de759 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIO.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIO.java
@@ -990,7 +990,7 @@ public class BigQueryIO {
        * Specifies that a query should be run with a BATCH priority.
        *
       * <p>Batch mode queries are queued by BigQuery. These are started as soon as idle resources
-       * are available, usually within a few minutes. Batch queries don’t count towards your
+       * are available, usually within a few minutes. Batch queries don't count towards your
        * concurrent rate limit.
        */
       BATCH
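A minimal sketch of requesting this priority on a query read; the pipeline variable and the sample query are assumptions for illustration:

    import com.google.api.services.bigquery.model.TableRow;
    import org.apache.beam.sdk.io.gcp.bigquery.BigQueryIO;
    import org.apache.beam.sdk.io.gcp.bigquery.BigQueryIO.TypedRead;
    import org.apache.beam.sdk.values.PCollection;

    // Given a Pipeline named "pipeline":
    PCollection<TableRow> rows =
        pipeline.apply(
            BigQueryIO.readTableRows()
                .fromQuery("SELECT word, word_count FROM `bigquery-public-data.samples.shakespeare`")
                .usingStandardSql()
                // Queue at BATCH priority; does not count against the concurrent rate limit.
                .withQueryPriority(TypedRead.QueryPriority.BATCH));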
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java
index a8870e4d612..d876d00a500 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java
@@ -1304,10 +1304,10 @@ public class DatastoreV1 {
   /**
    * A {@link PTransform} that writes mutations to Cloud Datastore.
    *
-   * <p>It requires a {@link DoFn} that tranforms an object of type {@code T} to a {@link Mutation}.
-   * {@code T} is usually either an {@link Entity} or a {@link Key} <b>Note:</b> Only idempotent
-   * Cloud Datastore mutation operations (upsert and delete) should be used by the {@code DoFn}
-   * provided, as the commits are retried when failures occur.
+   * <p>It requires a {@link DoFn} that transforms an object of type {@code T} to a {@link
+   * Mutation}. {@code T} is usually either an {@link Entity} or a {@link Key} <b>Note:</b> Only
+   * idempotent Cloud Datastore mutation operations (upsert and delete) should be used by the {@code
+   * DoFn} provided, as the commits are retried when failures occur.
   */
  private abstract static class Mutate<T> extends PTransform<PCollection<T>, PDone> {
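For context, a minimal sketch of a write built on this transform; the input PCollection and project ID are placeholders:

    import com.google.datastore.v1.Entity;
    import org.apache.beam.sdk.io.gcp.datastore.DatastoreIO;
    import org.apache.beam.sdk.values.PCollection;

    // Given a PCollection<Entity> named "entities".
    // Upserts are idempotent, so Datastore's retried commits are safe here.
    entities.apply(DatastoreIO.v1().write().withProjectId("my-project-id"));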
 
diff --git a/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java b/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java
index e5c40ddf4d1..26c895ddc89 100644
--- a/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java
+++ b/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java
@@ -229,7 +229,7 @@ import org.slf4j.LoggerFactory;
 *       TopicPartition} has been stopped/removed, so it stops reading from it and returns {@code
 *       ProcessContinuation.stop()}.
 *   <li>At 10:45 the pipeline author wants to read from TopicPartition A again.
- *   <li>At 11:00AM when {@link WatchForKafkaTopicPartitions} is invoked by firing timer, it doesn’t
+ *   <li>At 11:00AM when {@link WatchForKafkaTopicPartitions} is invoked by firing timer, it doesn't
 *       know that TopicPartition A has been stopped/removed. All it knows is that TopicPartition A
 *       is still an active TopicPartition and it will not emit TopicPartition A again.
  * </ul>
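A sketch of enabling this discovery on a read. The dynamic-read knobs shown (withDynamicRead, withWatchTopicPartitionDuration) are the ones that pair with WatchForKafkaTopicPartitions; the broker, topic, and pipeline variable are placeholders:

    import java.util.Arrays;
    import org.apache.beam.sdk.io.kafka.KafkaIO;
    import org.apache.kafka.common.serialization.LongDeserializer;
    import org.apache.kafka.common.serialization.StringDeserializer;
    import org.joda.time.Duration;

    // Given a Pipeline named "pipeline":
    pipeline.apply(
        KafkaIO.<Long, String>read()
            .withBootstrapServers("broker-1:9092")
            .withTopics(Arrays.asList("events"))
            .withKeyDeserializer(LongDeserializer.class)
            .withValueDeserializer(StringDeserializer.class)
            // Re-run topic/partition discovery every hour.
            .withDynamicRead(true)
            .withWatchTopicPartitionDuration(Duration.standardHours(1)));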
diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisIO.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisIO.java
index 637bd84b7a6..4cf8f9f4e31 100644
--- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisIO.java
+++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisIO.java
@@ -134,7 +134,7 @@ import org.slf4j.LoggerFactory;
  *  .apply( ... ) // other transformations
  * }</pre>
  *
- * <p>There’s also possibility to start reading using arbitrary point in time - in this case you
+ * <p>There's also possibility to start reading using arbitrary point in time - in this case you
  * need to provide {@link Instant} object:
  *
  * <pre>{@code
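The hunk ends at the opening of that code block; a minimal sketch of such a read, with the stream name, timestamp, and pipeline variable as placeholders:

    import org.apache.beam.sdk.io.kinesis.KinesisIO;
    import org.joda.time.Instant;

    // Given a Pipeline named "pipeline":
    pipeline.apply(
        KinesisIO.read()
            .withStreamName("my-stream")
            // Start reading at an arbitrary point in time.
            .withInitialTimestampInStream(Instant.parse("2023-02-01T00:00:00Z")));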
diff --git a/sdks/java/testing/tpcds/src/main/java/org/apache/beam/sdk/tpcds/SqlTransformRunner.java b/sdks/java/testing/tpcds/src/main/java/org/apache/beam/sdk/tpcds/SqlTransformRunner.java
index cd337e87d87..f8287fc3c99 100644
--- a/sdks/java/testing/tpcds/src/main/java/org/apache/beam/sdk/tpcds/SqlTransformRunner.java
+++ b/sdks/java/testing/tpcds/src/main/java/org/apache/beam/sdk/tpcds/SqlTransformRunner.java
@@ -262,7 +262,7 @@ public class SqlTransformRunner {
   }
 
   /**
-   * This is the default method in BeamTpcds.main method. Run job using SqlTranform.query() method.
+   * This is the default method in BeamTpcds.main method. Run job using SqlTransform.query() method.
    *
    * @param args Command line arguments
    * @throws Exception
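For reference, a minimal sketch of the SqlTransform.query() call this runner wraps; the input tuple and the query are illustrative, not an actual TPC-DS query:

    import org.apache.beam.sdk.extensions.sql.SqlTransform;
    import org.apache.beam.sdk.values.PCollection;
    import org.apache.beam.sdk.values.PCollectionTuple;
    import org.apache.beam.sdk.values.Row;

    // Given a PCollectionTuple "tables" whose tags name the input tables:
    PCollection<Row> result =
        tables.apply(SqlTransform.query("SELECT item_id, COUNT(*) FROM orders GROUP BY item_id"));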
diff --git a/sdks/python/apache_beam/io/gcp/bigquery_file_loads.py b/sdks/python/apache_beam/io/gcp/bigquery_file_loads.py
index 0e06dc94c9a..72421bd0e11 100644
--- a/sdks/python/apache_beam/io/gcp/bigquery_file_loads.py
+++ b/sdks/python/apache_beam/io/gcp/bigquery_file_loads.py
@@ -333,7 +333,7 @@ class UpdateDestinationSchema(beam.DoFn):
   regardless of whether data is loaded directly to the destination table or
   loaded into temporary tables before being copied into the destination.
 
-  This tranform takes as input a (destination, job_reference) pair where the
+  This transform takes as input a (destination, job_reference) pair where the
   job_reference refers to a completed load job into a temporary table.
 
   This transform emits (destination, job_reference) pairs where the
diff --git a/sdks/python/apache_beam/ml/gcp/recommendations_ai.py b/sdks/python/apache_beam/ml/gcp/recommendations_ai.py
index b6eb4cfb4bc..696ea5e322e 100644
--- a/sdks/python/apache_beam/ml/gcp/recommendations_ai.py
+++ b/sdks/python/apache_beam/ml/gcp/recommendations_ai.py
@@ -78,7 +78,7 @@ def get_recommendation_user_event_client():
 
 class CreateCatalogItem(PTransform):
   """Creates catalogitem information.
-    The ``PTranform`` returns a PCollectionTuple with a PCollections of
+    The ``PTransform`` returns a PCollectionTuple with a PCollections of
     successfully and failed created CatalogItems.
 
     Example usage::
diff --git a/sdks/python/apache_beam/runners/interactive/cache_manager.py b/sdks/python/apache_beam/runners/interactive/cache_manager.py
index 1960733ba38..b04eb92132a 100644
--- a/sdks/python/apache_beam/runners/interactive/cache_manager.py
+++ b/sdks/python/apache_beam/runners/interactive/cache_manager.py
@@ -236,7 +236,7 @@ class FileBasedCacheManager(CacheManager):
     return reader, version
 
   def write(self, values, *labels):
-    """Imitates how a WriteCache tranform works without running a pipeline.
+    """Imitates how a WriteCache transform works without running a pipeline.
 
     For testing and cache manager development, not for production usage because
     the write is not sharded and does not use Beam execution model.
diff --git a/sdks/python/apache_beam/runners/interactive/pipeline_instrument.py b/sdks/python/apache_beam/runners/interactive/pipeline_instrument.py
index 540d54d1c39..8e5d50ed3f3 100644
--- a/sdks/python/apache_beam/runners/interactive/pipeline_instrument.py
+++ b/sdks/python/apache_beam/runners/interactive/pipeline_instrument.py
@@ -300,7 +300,7 @@ class PipelineInstrument(object):
         v in transforms.items() if v.unique_name in required_transform_labels
     ]
 
-    # The required transforms are the tranforms that we want to cut out of
+    # The required transforms are the transforms that we want to cut out of
     # the pipeline_proto and insert into a new pipeline to return.
     required_transform_ids = (
         roots + caching_transform_ids + unbounded_source_ids)
diff --git a/sdks/python/apache_beam/testing/benchmarks/chicago_taxi/trainer/model.py b/sdks/python/apache_beam/testing/benchmarks/chicago_taxi/trainer/model.py
index 8297863d567..3d112bea0a8 100644
--- a/sdks/python/apache_beam/testing/benchmarks/chicago_taxi/trainer/model.py
+++ b/sdks/python/apache_beam/testing/benchmarks/chicago_taxi/trainer/model.py
@@ -99,7 +99,7 @@ def eval_input_receiver_fn(tf_transform_output, schema):
 
   Returns:
     EvalInputReceiver function, which contains:
-      - Tensorflow graph which parses raw untranformed features, applies the
+      - Tensorflow graph which parses raw untransformed features, applies the
         tf-transform preprocessing operators.
       - Set of raw, untransformed features.
       - Label against which predictions will be compared.
diff --git a/sdks/python/apache_beam/testing/benchmarks/cloudml/criteo_tft/criteo.py b/sdks/python/apache_beam/testing/benchmarks/cloudml/criteo_tft/criteo.py
index cd14bd9e659..d2a0b652ca6 100644
--- a/sdks/python/apache_beam/testing/benchmarks/cloudml/criteo_tft/criteo.py
+++ b/sdks/python/apache_beam/testing/benchmarks/cloudml/criteo_tft/criteo.py
@@ -15,7 +15,7 @@
 # limitations under the License.
 #
 
-"""Schema and tranform definition for the Criteo dataset."""
+"""Schema and transform definition for the Criteo dataset."""
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
