[ 
https://issues.apache.org/jira/browse/BEAM-3060?focusedWorklogId=107478&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-107478
 ]

ASF GitHub Bot logged work on BEAM-3060:
----------------------------------------

                Author: ASF GitHub Bot
            Created on: 30/May/18 23:25
            Start Date: 30/May/18 23:25
    Worklog Time Spent: 10m 
      Work Description: chamikaramj closed pull request #5441: [BEAM-3060] HDFS 
large cluster configuration. Jenkins job updated to use large cl…
URL: https://github.com/apache/beam/pull/5441
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

As this is a foreign pull request (from a fork), the diff is supplied
below (as it won't show otherwise due to GitHub magic):

diff --git 
a/.test-infra/jenkins/job_PerformanceTests_FileBasedIO_IT_HDFS.groovy 
b/.test-infra/jenkins/job_PerformanceTests_FileBasedIO_IT_HDFS.groovy
index 7aa5c3251c0..62a2346fa17 100644
--- a/.test-infra/jenkins/job_PerformanceTests_FileBasedIO_IT_HDFS.groovy
+++ b/.test-infra/jenkins/job_PerformanceTests_FileBasedIO_IT_HDFS.groovy
@@ -140,7 +140,7 @@ private void 
create_filebasedio_performance_test_job(testConfiguration) {
                 beam_extra_mvn_properties: '["filesystem=hdfs"]',
                 bigquery_table           : testConfiguration.bqTable,
                 beam_options_config_file : makePathAbsolute('pkb-config.yml'),
-                beam_kubernetes_scripts  : 
makePathAbsolute('hdfs-single-datanode-cluster.yml') + ',' + 
makePathAbsolute('hdfs-single-datanode-cluster-for-local-dev.yml')
+                beam_kubernetes_scripts  : 
makePathAbsolute('hdfs-multi-datanode-cluster.yml')
         ]
         common_job_properties.setupKubernetes(delegate, namespace, kubeconfig)
         common_job_properties.buildPerformanceTest(delegate, argMap)
@@ -149,5 +149,5 @@ private void 
create_filebasedio_performance_test_job(testConfiguration) {
 }
 
 static def makePathAbsolute(String path) {
-    return '"$WORKSPACE/src/.test-infra/kubernetes/hadoop/SmallITCluster/' + 
path + '"'
+    return '"$WORKSPACE/src/.test-infra/kubernetes/hadoop/LargeITCluster/' + 
path + '"'
 }
\ No newline at end of file
diff --git 
a/.test-infra/kubernetes/hadoop/LargeITCluster/hdfs-multi-datanode-cluster-for-local-dev.yml
 
b/.test-infra/kubernetes/hadoop/LargeITCluster/hdfs-multi-datanode-cluster-for-local-dev.yml
new file mode 100644
index 00000000000..7cb891bcd99
--- /dev/null
+++ 
b/.test-infra/kubernetes/hadoop/LargeITCluster/hdfs-multi-datanode-cluster-for-local-dev.yml
@@ -0,0 +1,73 @@
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+#
+# This cluster is intended to be run additionally to 
hdfs-multi-datanode-cluster.yml.
+# It provides an additional setup to access large hdfs cluster by DirectRunner 
or any
+# external application. Services created by this setup need to be properly 
included in
+# /etc/hosts file, so it is strongly suggested to run start-all.sh script 
instead of
+# running this file manually.
+#
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: datanode-0
+  labels:
+    name: datanode-0
+spec:
+  ports:
+    - name: hdfs
+      port: 9000
+    - name: web
+      port: 50010
+  selector:
+    statefulset.kubernetes.io/pod-name: datanode-0
+  type: LoadBalancer
+
+---
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: datanode-1
+  labels:
+    name: datanode-1
+spec:
+  ports:
+    - name: hdfs
+      port: 9000
+    - name: web
+      port: 50010
+  selector:
+    statefulset.kubernetes.io/pod-name: datanode-1
+  type: LoadBalancer
+
+---
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: datanode-2
+  labels:
+    name: datanode-2
+spec:
+  ports:
+    - name: hdfs
+      port: 9000
+    - name: web
+      port: 50010
+  selector:
+    statefulset.kubernetes.io/pod-name: datanode-2
+  type: LoadBalancer
diff --git 
a/.test-infra/kubernetes/hadoop/LargeITCluster/hdfs-multi-datanode-cluster.yml 
b/.test-infra/kubernetes/hadoop/LargeITCluster/hdfs-multi-datanode-cluster.yml
new file mode 100644
index 00000000000..e796243d389
--- /dev/null
+++ 
b/.test-infra/kubernetes/hadoop/LargeITCluster/hdfs-multi-datanode-cluster.yml
@@ -0,0 +1,135 @@
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+#
+# This scripts creates one namenode and three datanode cluster.
+# Service "hadoop" allow to access namenode from outside cluster and is 
sufficient to
+# run tests using Dataflow runner(for Direct runner please use setup-all.sh 
script).
+# Service "datanodes" allow to reach any datanode using its FQDN.
+# StatefulSet "datanode" creates 3 replicas of hadoop datanode. Env variable 
NODE_TYPE
+# is being used to autodetect by kubernetes pod his role in the cluster.
+# Pod "namenode-0" is our master node - hadoop namenode. NODE_TYPE is set to 
namenode.
+# Additionally we specify number of datanodes that namenode will provision 
with hdfs
+# configuration and add to hadoop cluster by setting DATANODE_COUNT to 3.
+# On Jenkins we run tests in namespaces. This value is passed to pod 
environment in
+# NODE_NAMESPACE variable and being used to create list of FQDN of datanodes:
+#  datanode-x.hadoop-datanodes.NODE_NAMESPACE.svc.cluster.local
+#
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: hadoop
+  labels:
+    name: hadoop
+spec:
+  ports:
+    - name: hdfs
+      port: 9000
+    - name: web
+      port: 50070
+  selector:
+    name: namenode
+  type: LoadBalancer
+
+---
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: hadoop-datanodes
+spec:
+  selector:
+    name: datanode
+  clusterIP: None
+  ports:
+   - name: hdfs
+     port: 9000
+     targetPort: 9000
+
+---
+
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: datanode
+spec:
+  selector:
+    matchLabels:
+      name: datanode
+  serviceName: "hadoop-datanodes"
+  replicas: 3
+  template:
+    metadata:
+      labels:
+        name: datanode
+    spec:
+      containers:
+      - name: datanode
+        image: szewi/kubernetes-hadoop:2.7.1
+        imagePullPolicy: Always
+        env:
+          - name: NODE_TYPE
+            value: datanode
+        ports:
+          - name: sshd
+            containerPort: 22
+          - name: namenode-hdfs
+            containerPort: 9000
+          - name: web
+            containerPort: 50070
+          - name: datanode
+            containerPort: 50010
+          - name: datanode-icp
+            containerPort: 50020
+          - name: datanode-http
+            containerPort: 50075
+
+---
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: namenode-0
+  labels:
+    name: namenode
+spec:
+  hostname: namenode-0
+  subdomain: hadoop-namenodes
+  containers:
+  - image: szewi/kubernetes-hadoop:2.7.1
+    imagePullPolicy: Always
+    name: namenode
+    env:
+      - name: NODE_TYPE
+        value: namenode
+      - name: DATANODE_COUNT
+        value: '3'
+      - name: NODE_NAMESPACE
+        valueFrom:
+          fieldRef:
+            fieldPath: metadata.namespace
+    ports:
+      - name: sshd
+        containerPort: 22
+      - name: namenode-hdfs
+        containerPort: 9000
+      - name: web
+        containerPort: 50070
+      - name: datanode
+        containerPort: 50010
+      - name: datanode-icp
+        containerPort: 50020
+      - name: datanode-http
+        containerPort: 50075
\ No newline at end of file
diff --git a/.test-infra/kubernetes/hadoop/LargeITCluster/pkb-config.yml 
b/.test-infra/kubernetes/hadoop/LargeITCluster/pkb-config.yml
new file mode 100644
index 00000000000..c829e05e7cc
--- /dev/null
+++ b/.test-infra/kubernetes/hadoop/LargeITCluster/pkb-config.yml
@@ -0,0 +1,44 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This file is a pkb benchmark configuration file, used when running the IO ITs
+# that use this data store. It allows users to run tests when they are on a
+# separate network from the kubernetes cluster by reading the hadoop namenode 
IP
+# address from the LoadBalancer service.
+#
+# When running Perfkit with DirectRunner - format pattern must additionally 
contain
+# dfs.client.use.datanode.hostname set to true:
+#   format: 
'[{\"fs.defaultFS\":\"hdfs://{{LoadBalancerIp}}:9000\",\"dfs.replication\":1,\"dfs.client.use.datanode.hostname\":\"true\"
 }]'
+# and /etc/hosts should be modified with an entries containing:
+#   LoadBalancerIp HadoopMasterPodName
+#   LoadBalancerIp FQDN-HadoopDatanode-0
+#   LoadBalancerIp FQDN-HadoopDatanode-1
+#   LoadBalancerIp FQDN-HadoopDatanode-2
+# otherwise hdfs client won't be able to reach datanodes. Proper configuration 
to add
+# will be generated when setup-all.sh script will be used to create cluster.
+# FilenamePrefix is used in file-based-io-tests.
+
+static_pipeline_options:
+dynamic_pipeline_options:
+  - name: hdfsConfiguration
+    format: 
'[{\"fs.defaultFS\":\"hdfs://{{LoadBalancerIp}}:9000\",\"dfs.replication\":1}]'
+    type: LoadBalancerIp
+    serviceName: hadoop
+  - name: filenamePrefix
+    format: 'hdfs://{{LoadBalancerIp}}:9000/TEXTIO_IT_'
+    type: LoadBalancerIp
+    serviceName: hadoop
diff --git a/.test-infra/kubernetes/hadoop/LargeITCluster/setup-all.sh 
b/.test-infra/kubernetes/hadoop/LargeITCluster/setup-all.sh
new file mode 100755
index 00000000000..9a435121405
--- /dev/null
+++ b/.test-infra/kubernetes/hadoop/LargeITCluster/setup-all.sh
@@ -0,0 +1,57 @@
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+#
+# This script starts hdfs cluster and hadoop service that allows to reach 
cluster
+# from developer's machine. Once the cluster is working, scripts waits till
+# external cluster endpoint will be available. It prints out configuration 
lines that
+# should be added to /etc/hosts file in order to work with hdfs cluster.
+#
+
+#!/bin/sh
+set -e
+
+kubectl create -f hdfs-multi-datanode-cluster.yml
+
+kubectl create -f hdfs-multi-datanode-cluster-for-local-dev.yml
+
+external_ip="$(kubectl get svc hadoop -o 
jsonpath='{.status.loadBalancer.ingress[0].ip}')"
+echo "Waiting for the hadoop service to come up ........"
+while [ -z "$external_ip" ]
+do
+ sleep 10s
+ external_ip="$(kubectl get svc hadoop -o 
jsonpath='{.status.loadBalancer.ingress[0].ip}')"
+ echo "."
+done
+
+hadoop_master_pod_name="$(kubectl get pods --selector=name=namenode -o 
jsonpath='{.items[*].metadata.name}')"
+
+hadoop_datanodes_pod_names="$(kubectl get pods --selector=name=datanode -o 
jsonpath='{.items[*].metadata.name}')"
+
+echo "For local tests please add the following 4 entries to /etc/hosts file"
+echo $external_ip$'\t'$hadoop_master_pod_name
+datanodes_pods=(${hadoop_datanodes_pod_names})
+
+for pod in "${datanodes_pods[@]}"; do
+  external_ip="$(kubectl get svc ${pod} -o 
jsonpath='{.status.loadBalancer.ingress[0].ip}')"
+  while [ -z "$external_ip" ]
+  do
+  sleep 10s
+  external_ip="$(kubectl get svc hadoop -o 
jsonpath='{.status.loadBalancer.ingress[0].ip}')"
+  done
+  echo $external_ip$'\t'$pod".hadoop-datanodes.default.svc.cluster.local"
+done
+
+echo "Done."
+
diff --git a/.test-infra/kubernetes/hadoop/LargeITCluster/setup.sh 
b/.test-infra/kubernetes/hadoop/LargeITCluster/setup.sh
new file mode 100755
index 00000000000..a95eb4675bd
--- /dev/null
+++ b/.test-infra/kubernetes/hadoop/LargeITCluster/setup.sh
@@ -0,0 +1,20 @@
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+#
+
+#!/bin/sh
+set -e
+
+kubectl create -f hdfs-multi-datanode-cluster.yml
diff --git a/.test-infra/kubernetes/hadoop/LargeITCluster/teardown-all.sh 
b/.test-infra/kubernetes/hadoop/LargeITCluster/teardown-all.sh
new file mode 100755
index 00000000000..e522be9a010
--- /dev/null
+++ b/.test-infra/kubernetes/hadoop/LargeITCluster/teardown-all.sh
@@ -0,0 +1,33 @@
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+#
+# This script terminates hdfs cluster and hadoop service. It checks /etc/hosts 
file
+# for any unneeded entries and notifies user about them.
+#
+
+#!/bin/sh
+set -e
+
+external_ip="$(kubectl get svc hadoop -o 
jsonpath='{.status.loadBalancer.ingress[0].ip}')"
+
+hadoop_master_pod_name="$(kubectl get pods --selector=name=namenode -o 
jsonpath='{.items[*].metadata.name}')"
+
+kubectl delete -f hdfs-multi-datanode-cluster.yml
+
+kubectl delete -f hdfs-multi-datanode-cluster-for-local-dev.yml
+
+if grep "$external_ip\|$hadoop_master_pod_name" /etc/hosts ; then
+    echo "Remove entries from /etc/hosts."
+fi
diff --git a/.test-infra/kubernetes/hadoop/LargeITCluster/teardown.sh 
b/.test-infra/kubernetes/hadoop/LargeITCluster/teardown.sh
new file mode 100755
index 00000000000..1480e369536
--- /dev/null
+++ b/.test-infra/kubernetes/hadoop/LargeITCluster/teardown.sh
@@ -0,0 +1,21 @@
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+#
+
+#!/bin/sh
+set -e
+
+kubectl delete -f hdfs-multi-datanode-cluster.yml
+
diff --git a/sdks/java/io/file-based-io-tests/pom.xml 
b/sdks/java/io/file-based-io-tests/pom.xml
index 3de4ba55ae1..6f7d5d11422 100644
--- a/sdks/java/io/file-based-io-tests/pom.xml
+++ b/sdks/java/io/file-based-io-tests/pom.xml
@@ -248,6 +248,89 @@
                 </plugins>
             </build>
         </profile>
+        <profile>
+            <id>io-it-hdfs-large</id>
+            <activation>
+                <property><name>io-it-suite-hdfs-large</name></property>
+            </activation>
+            <properties>
+                <!-- This is based on the location of the current pom relative 
to the root
+                     See discussion in BEAM-2460 -->
+                
<beamRootProjectDir>${project.parent.parent.parent.parent.basedir}</beamRootProjectDir>
+            </properties>
+            <build>
+                <plugins>
+                    <plugin>
+                        <groupId>org.codehaus.gmaven</groupId>
+                        <artifactId>groovy-maven-plugin</artifactId>
+                        <version>${groovy-maven-plugin.version}</version>
+                        <executions>
+                            <execution>
+                                <id>find-supported-python-for-compile</id>
+                                <phase>initialize</phase>
+                                <goals>
+                                    <goal>execute</goal>
+                                </goals>
+                                <configuration>
+                                    
<source>${beamRootProjectDir}/sdks/python/findSupportedPython.groovy</source>
+                                </configuration>
+                            </execution>
+                        </executions>
+                    </plugin>
+
+                    <plugin>
+                        <groupId>org.codehaus.mojo</groupId>
+                        <artifactId>exec-maven-plugin</artifactId>
+                        <version>${maven-exec-plugin.version}</version>
+                        <executions>
+                            <execution>
+                                <phase>verify</phase>
+                                <goals>
+                                    <goal>exec</goal>
+                                </goals>
+                            </execution>
+                        </executions>
+                        <configuration>
+                            <executable>${python.interpreter.bin}</executable>
+                            <arguments>
+                                <argument>${pkbLocation}</argument>
+                                
<argument>-benchmarks=beam_integration_benchmark</argument>
+                                <argument>-beam_it_profile=io-it</argument>
+                                
<argument>-beam_location=${beamRootProjectDir}</argument>
+                                <argument>-beam_prebuilt=true</argument>
+                                <argument>-beam_sdk=java</argument>
+                                <argument>-kubeconfig=${kubeconfig}</argument>
+                                <argument>-kubectl=${kubectl}</argument>
+                                <!-- runner overrides, controlled via 
forceDirectRunner -->
+                                <argument>${pkbBeamRunnerProfile}</argument>
+                                <argument>${pkbBeamRunnerOption}</argument>
+                                <!-- specific to this IO -->
+                                
<argument>-beam_it_module=sdks/java/io/file-based-io-tests</argument>
+                                
<argument>-beam_it_class=${fileBasedIoItClass}</argument>
+                                <!-- arguments typically defined by user -->
+                                
<argument>-beam_it_options=${integrationTestPipelineOptions}</argument>
+                                
<argument>-beam_options_config_file=${beamRootProjectDir}/.test-infra/kubernetes/hadoop/LargeITCluster/pkb-config.yml</argument>
+                                
<argument>-beam_kubernetes_scripts=${beamRootProjectDir}/.test-infra/kubernetes/hadoop/LargeITCluster/hdfs-multi-datanode-cluster.yml</argument>
+                                <!--
+                                optional array of key=value items. It will be 
passed to
+                                target mvn command by pkb. eg. 
-DpkbExtraProperties='["filesystem=local"]'
+                                -->
+                                
<argument>-beam_extra_mvn_properties=${pkbExtraProperties}</argument>
+                            </arguments>
+                        </configuration>
+                    </plugin>
+
+                    <plugin>
+                        <groupId>org.apache.maven.plugins</groupId>
+                        <artifactId>maven-surefire-plugin</artifactId>
+                        <version>${maven-surefire-plugin.version}</version>
+                        <configuration>
+                            <skipTests>true</skipTests>
+                        </configuration>
+                    </plugin>
+                </plugins>
+            </build>
+        </profile>
         <profile>
             <!-- Include the google-cloud-platform activated by 
-Dfilesystem=gcs
             Support for protocol scheme gs:// - allow to read/write to google 
storage -->


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


Issue Time Tracking
-------------------

    Worklog Id:     (was: 107478)
    Time Spent: 10h 20m  (was: 10h 10m)

> Add performance tests for commonly used file-based I/O PTransforms
> ------------------------------------------------------------------
>
>                 Key: BEAM-3060
>                 URL: https://issues.apache.org/jira/browse/BEAM-3060
>             Project: Beam
>          Issue Type: Test
>          Components: sdk-java-core
>            Reporter: Chamikara Jayalath
>            Assignee: Szymon Nieradka
>            Priority: Major
>          Time Spent: 10h 20m
>  Remaining Estimate: 0h
>
> We recently added a performance testing framework [1] that can be used to do 
> following.
> (1) Execute Beam tests using PerfkitBenchmarker
> (2) Manage Kubernetes-based deployments of data stores.
> (3) Easily publish benchmark results. 
> I think it will be useful to add performance tests for commonly used 
> file-based I/O PTransforms using this framework. I suggest looking into 
> following formats initially.
> (1) AvroIO
> (2) TextIO
> (3) Compressed text using TextIO
> (4) TFRecordIO
> It should be possible to run these tests for various Beam runners (Direct, 
> Dataflow, Flink, Spark, etc.) and file-systems (GCS, local, HDFS, etc.) 
> easily.
> In the initial version, tests can be made manually triggerable for PRs 
> through Jenkins. Later, we could make some of these tests run periodically 
> and publish benchmark results (to BigQuery) through PerfkitBenchmarker.
> [1] https://beam.apache.org/documentation/io/testing/



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

Reply via email to