This is an automated email from the ASF dual-hosted git repository.

vjasani pushed a commit to branch 5.1
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit c2c382f674296375efdfed8ce405f2414d8b9905
Author: jpisaac <jacobpisaa...@gmail.com>
AuthorDate: Fri Apr 2 09:30:00 2021 -0700

    PHOENIX-6430 Added support for full row update for tables when no columns specified in scenario
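
    In short: when an upsert in the scenario does not list any columns, the
    operation now resolves the full column list from Phoenix metadata once and
    caches it on the Upsert config, so the generated UPSERT covers the whole
    row. A simplified sketch of that pattern, using the method names that
    appear in the diff below (surrounding types and error handling elided):

        // Guarded lazy load: only the first operation pays for the metadata lookup.
        if (upsert.getColumn().isEmpty()) {
            rwLock.writeLock().lock();
            try {
                // Re-check under the lock so concurrent handlers load the list only once.
                if (upsert.getColumn().isEmpty()) {
                    List<Column> allCols = phoenixUtil.getColumnsFromPhoenix(
                            scenario.getSchemaName(),
                            scenario.getTableNameWithoutSchemaName(),
                            connection);
                    upsert.setColumn(allCols);
                }
            } finally {
                rwLock.writeLock().unlock();
            }
        }
        // buildSql now sees every column, producing a full-row UPSERT.
        String sql = phoenixUtil.buildSql(upsert.getColumn(), tableName);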
---
 ...T.java => MultiTenantTableOperationBaseIT.java} |  22 +-
 .../mt/tenantoperation/MultiTenantTestUtils.java   | 267 +++++++++++++++++++++
 ...IT.java => MultiTenantViewOperationBaseIT.java} |  11 +-
 .../mt/tenantoperation/TenantOperationIT.java      | 112 ---------
 .../tenantoperation/TenantOperationWorkloadIT.java | 166 -------------
 .../TenantTableOperationWorkloadIT.java            |  72 ++++++
 .../TenantViewOperationWorkloadIT.java             |  61 +++++
 .../apache/phoenix/pherf/configuration/Upsert.java |  22 +-
 .../mt/tenantoperation/QueryOperationSupplier.java |   2 +-
 .../mt/tenantoperation/TenantOperationFactory.java |  94 +++++---
 .../tenantoperation/UpsertOperationSupplier.java   |  32 ++-
 .../org/apache/phoenix/pherf/ResultBaseTest.java   |   5 -
 ..._mt_table.sql => test_schema_mt_base_table.sql} |   2 +-
 ...schema_mt_view.sql => test_schema_mt_view1.sql} |   4 +-
 ...schema_mt_view.sql => test_schema_mt_view2.sql} |   4 +-
 ...ema_mt_table.sql => test_schema_tbl_simple.sql} |  22 +-
 .../test/resources/scenario/test_mt_workload.xml   |  95 +++++++-
 .../test/resources/scenario/test_tbl_workload.xml  | 169 +++++++++++++
 18 files changed, 772 insertions(+), 390 deletions(-)

diff --git a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/MultiTenantOperationBaseIT.java b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/MultiTenantTableOperationBaseIT.java
similarity index 81%
copy from phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/MultiTenantOperationBaseIT.java
copy to phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/MultiTenantTableOperationBaseIT.java
index f200381..5fe1a9e 100644
--- a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/MultiTenantOperationBaseIT.java
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/MultiTenantTableOperationBaseIT.java
@@ -19,8 +19,6 @@
 
 package org.apache.phoenix.pherf.workload.mt.tenantoperation;
 
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.pherf.PherfConstants;
 import org.apache.phoenix.pherf.XMLConfigParserTest;
@@ -28,10 +26,7 @@ import org.apache.phoenix.pherf.configuration.DataModel;
 import org.apache.phoenix.pherf.configuration.XMLConfigParser;
 import org.apache.phoenix.pherf.schema.SchemaReader;
 import org.apache.phoenix.pherf.util.PhoenixUtil;
-import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.BeforeClass;
-import org.junit.experimental.categories.Category;
 
 import java.net.URL;
 import java.nio.file.Path;
@@ -43,17 +38,10 @@ import java.util.Properties;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
-@Category(NeedsOwnMiniClusterTest.class)
-public class MultiTenantOperationBaseIT extends ParallelStatsDisabledIT {
-    enum TestOperationGroup {
-        upsertOp, queryOp1, queryOp2, idleOp, udfOp
-    }
+public class MultiTenantTableOperationBaseIT extends ParallelStatsDisabledIT {
 
-    static enum  TestTenantGroup {
-        tg1, tg2, tg3
-    }
-    protected static final String matcherScenario = ".*scenario/.*test_mt_workload.*xml";
-    protected static final String matcherSchema = ".*datamodel/.*test_schema_mt*.*sql";
+    protected static final String matcherScenario = ".*scenario/.*test_tbl_workload.*xml";
+    protected static final String matcherSchema = ".*datamodel/.*test_schema_tbl*.*sql";
 
     protected static PhoenixUtil util = PhoenixUtil.create(true);
     protected static Properties properties;
@@ -83,8 +71,4 @@ public class MultiTenantOperationBaseIT extends ParallelStatsDisabledIT {
         return XMLConfigParser.readDataModel(p);
     }
 
-    @AfterClass public static synchronized void tearDown() throws Exception {
-        dropNonSystemTables();
-    }
-
 }
diff --git a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/MultiTenantTestUtils.java b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/MultiTenantTestUtils.java
new file mode 100644
index 0000000..873e061
--- /dev/null
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/MultiTenantTestUtils.java
@@ -0,0 +1,267 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.pherf.workload.mt.tenantoperation;
+
+import com.lmax.disruptor.LifecycleAware;
+import com.lmax.disruptor.WorkHandler;
+import org.apache.phoenix.pherf.configuration.DataModel;
+import org.apache.phoenix.pherf.configuration.LoadProfile;
+import org.apache.phoenix.pherf.configuration.Scenario;
+import org.apache.phoenix.pherf.util.PhoenixUtil;
+import org.apache.phoenix.pherf.workload.Workload;
+import org.apache.phoenix.pherf.workload.mt.Operation;
+import org.apache.phoenix.pherf.workload.mt.OperationStats;
+import org.apache.phoenix.thirdparty.com.google.common.base.Function;
+import org.apache.phoenix.thirdparty.com.google.common.base.Supplier;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
+import org.junit.Assert;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.InetAddress;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class MultiTenantTestUtils {
+    private static final Logger LOGGER = LoggerFactory.getLogger(MultiTenantTestUtils.class);
+    enum TestOperationGroup {
+        upsertOp, queryOp1, queryOp2, idleOp, udfOp
+    }
+
+    public void testWorkloadWithOneHandler(Properties properties, DataModel model,
+            String scenarioName, int expectedTenantGroups, int expectedOpGroups) throws Exception {
+
+        int numHandlers = 1;
+        int totalOperations = 50;
+        int perHandlerCount = 50;
+
+        ExecutorService executor = null;
+        try {
+            executor = Executors.newFixedThreadPool(numHandlers);
+            PhoenixUtil pUtil = PhoenixUtil.create();
+            for (Scenario scenario : model.getScenarios()) {
+                if (scenarioName != null && !scenarioName.isEmpty()
+                        && scenario.getName().compareTo(scenarioName) != 0) {
+                    continue;
+                }
+                LOGGER.debug(String.format("Testing %s", scenario.getName()));
+                LoadProfile loadProfile = scenario.getLoadProfile();
+
+                // Set the total number of operations for this load profile
+                loadProfile.setNumOperations(totalOperations);
+                TenantOperationFactory opFactory = new TenantOperationFactory(pUtil, model,
+                        scenario);
+                assertEquals("tenant group size is not as expected: ", expectedTenantGroups,
+                        loadProfile.getTenantDistribution().size());
+                assertEquals("operation group size from the factory is not as expected: ",
+                        expectedOpGroups, opFactory.getOperations().size());
+
+                // populate the handlers and countdown latches.
+                String handlerId = String
+                        .format("%s.%d", InetAddress.getLocalHost().getHostName(), numHandlers);
+                List<WorkHandler> workers = Lists.newArrayList();
+                Map<String, CountDownLatch> latches = Maps.newConcurrentMap();
+                workers.add(new EventCountingWorkHandler(opFactory, handlerId, latches));
+                latches.put(handlerId, new CountDownLatch(perHandlerCount));
+                // submit the workload
+                Workload workload = new TenantOperationWorkload(pUtil, model, scenario, workers,
+                        properties);
+                Future status = executor.submit(workload.execute());
+                // Just make sure there are no exceptions
+                status.get();
+
+                // Wait for the handlers to count down
+                for (Map.Entry<String, CountDownLatch> latch : latches.entrySet()) {
+                    assertTrue(latch.getValue().await(60, TimeUnit.SECONDS));
+                }
+            }
+        } finally {
+            if (executor != null) {
+                executor.shutdown();
+            }
+        }
+    }
+
+    public void testWorkloadWithManyHandlers(Properties properties, DataModel model,
+            String scenarioName, int expectedTenantGroups, int expectedOpGroups) throws Exception {
+
+        int numHandlers = 5;
+        int totalOperations = 500;
+        int perHandlerCount = 50;
+
+        ExecutorService executor = Executors.newFixedThreadPool(numHandlers);
+        PhoenixUtil pUtil = PhoenixUtil.create();
+        for (Scenario scenario : model.getScenarios()) {
+            if (scenarioName != null && !scenarioName.isEmpty()
+                    && scenario.getName().compareTo(scenarioName) != 0) {
+                continue;
+            }
+            LOGGER.debug(String.format("Testing %s", scenario.getName()));
+            LoadProfile loadProfile = scenario.getLoadProfile();
+
+            // Set the total number of operations for this load profile
+            loadProfile.setNumOperations(totalOperations);
+            TenantOperationFactory opFactory = new TenantOperationFactory(pUtil, model, scenario);
+            assertEquals("tenant group size is not as expected: ", expectedTenantGroups,
+                    loadProfile.getTenantDistribution().size());
+
+            assertEquals("operation group size from the factory is not as expected: ",
+                    expectedOpGroups, opFactory.getOperations().size());
+
+            // populate the handlers and countdown latches.
+            List<WorkHandler> workers = Lists.newArrayList();
+            Map<String, CountDownLatch> latches = Maps.newConcurrentMap();
+            for (int i = 0; i < numHandlers; i++) {
+                String handlerId = String
+                        .format("%s.%d", InetAddress.getLocalHost().getHostName(), i);
+                workers.add(new EventCountingWorkHandler(opFactory, handlerId, latches));
+                latches.put(handlerId, new CountDownLatch(perHandlerCount));
+            }
+            // submit the workload
+            Workload workload = new TenantOperationWorkload(pUtil, model, scenario, workers,
+                    properties);
+            Future status = executor.submit(workload.execute());
+            // Just make sure there are no exceptions
+            status.get();
+            // Wait for the handlers to count down
+            for (Map.Entry<String, CountDownLatch> latch : latches.entrySet()) {
+                assertTrue(latch.getValue().await(60, TimeUnit.SECONDS));
+            }
+        }
+        executor.shutdown();
+    }
+
+    public void testVariousOperations(Properties properties, DataModel model, String scenarioName,
+            int expectedTenantGroups, int expectedOpGroups) throws Exception {
+
+        int numRuns = 10;
+        int numOperations = 10;
+
+        PhoenixUtil pUtil = PhoenixUtil.create();
+        for (Scenario scenario : model.getScenarios()) {
+            if (scenarioName != null && !scenarioName.isEmpty()
+                    && scenario.getName().compareTo(scenarioName) != 0) {
+                continue;
+            }
+            LOGGER.debug(String.format("Testing %s", scenario.getName()));
+            LoadProfile loadProfile = scenario.getLoadProfile();
+            assertEquals("tenant group size is not as expected: ", 
expectedTenantGroups,
+                    loadProfile.getTenantDistribution().size());
+            assertEquals("operation group size is not as expected: ", 
expectedOpGroups,
+                    loadProfile.getOpDistribution().size());
+
+            TenantOperationFactory opFactory = new 
TenantOperationFactory(pUtil, model, scenario);
+            TenantOperationEventGenerator evtGen = new 
TenantOperationEventGenerator(
+                    opFactory.getOperations(), model, scenario);
+
+            assertEquals("operation group size from the factory is not as 
expected: ",
+                    expectedOpGroups, opFactory.getOperations().size());
+
+            int numRowsInserted = 0;
+            for (int i = 0; i < numRuns; i++) {
+                int ops = numOperations;
+                loadProfile.setNumOperations(ops);
+                while (ops-- > 0) {
+                    TenantOperationInfo info = evtGen.next();
+                    Supplier<Function<TenantOperationInfo, OperationStats>> opSupplier = opFactory
+                            .getOperationSupplier(info);
+                    OperationStats stats = opSupplier.get().apply(info);
+                    LOGGER.info(PhoenixUtil.getGSON().toJson(stats));
+                    if (info.getOperation().getType() == Operation.OperationType.PRE_RUN) continue;
+                    assertTrue(stats.getStatus() != -1);
+                    switch (TestOperationGroup
+                            .valueOf(info.getOperationGroupId())) {
+                    case upsertOp:
+                        assertTrue(opSupplier.getClass()
+                                .isAssignableFrom(UpsertOperationSupplier.class));
+                        numRowsInserted += stats.getRowCount();
+                        break;
+                    case queryOp1:
+                    case queryOp2:
+                        assertTrue(opFactory.getOperationSupplier(info).getClass()
+                                .isAssignableFrom(QueryOperationSupplier.class));
+
+                        // Expected row count > 0.
+                        // Since the same view/table is shared by many tests,
+                        // tracking exact query return values would require a lot of housekeeping.
+                        assertTrue(stats.getRowCount() > 0);
+                        break;
+                    case idleOp:
+                        assertTrue(opFactory.getOperationSupplier(info).getClass()
+                                .isAssignableFrom(IdleTimeOperationSupplier.class));
+                        assertEquals(0, stats.getRowCount());
+                        // expected think time (no-op) to be ~50ms
+                        assertTrue(40 < stats.getDurationInMs() && stats.getDurationInMs() < 60);
+                        break;
+                    case udfOp:
+                        assertTrue(opFactory.getOperationSupplier(info).getClass()
+                                .isAssignableFrom(UserDefinedOperationSupplier.class));
+                        assertEquals(0, stats.getRowCount());
+                        break;
+                    default:
+                        Assert.fail();
+                    }
+                }
+            }
+        }
+    }
+
+    private static class EventCountingWorkHandler
+            implements WorkHandler<TenantOperationWorkload.TenantOperationEvent>, LifecycleAware {
+        private final String handlerId;
+        private final TenantOperationFactory tenantOperationFactory;
+        private final Map<String, CountDownLatch> latches;
+
+        public EventCountingWorkHandler(TenantOperationFactory tenantOperationFactory,
+                String handlerId, Map<String, CountDownLatch> latches) {
+            this.handlerId = handlerId;
+            this.tenantOperationFactory = tenantOperationFactory;
+            this.latches = latches;
+        }
+
+        @Override public void onStart() {
+        }
+
+        @Override public void onShutdown() {
+        }
+
+        @Override public void onEvent(TenantOperationWorkload.TenantOperationEvent event)
+                throws Exception {
+            TenantOperationInfo input = event.getTenantOperationInfo();
+            Supplier<Function<TenantOperationInfo, OperationStats>>
+                    opSupplier
+                    = tenantOperationFactory.getOperationSupplier(input);
+            OperationStats stats = opSupplier.get().apply(input);
+            LOGGER.info(PhoenixUtil.getGSON().toJson(stats));
+            assertEquals(0, stats.getStatus());
+            latches.get(handlerId).countDown();
+        }
+    }
+
+}
diff --git a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/MultiTenantOperationBaseIT.java b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/MultiTenantViewOperationBaseIT.java
similarity index 91%
rename from phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/MultiTenantOperationBaseIT.java
rename to phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/MultiTenantViewOperationBaseIT.java
index f200381..ba71977 100644
--- a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/MultiTenantOperationBaseIT.java
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/MultiTenantViewOperationBaseIT.java
@@ -19,7 +19,6 @@
 
 package org.apache.phoenix.pherf.workload.mt.tenantoperation;
 
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.pherf.PherfConstants;
@@ -28,7 +27,6 @@ import org.apache.phoenix.pherf.configuration.DataModel;
 import org.apache.phoenix.pherf.configuration.XMLConfigParser;
 import org.apache.phoenix.pherf.schema.SchemaReader;
 import org.apache.phoenix.pherf.util.PhoenixUtil;
-import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.experimental.categories.Category;
@@ -44,14 +42,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 @Category(NeedsOwnMiniClusterTest.class)
-public class MultiTenantOperationBaseIT extends ParallelStatsDisabledIT {
-    enum TestOperationGroup {
-        upsertOp, queryOp1, queryOp2, idleOp, udfOp
-    }
-
-    static enum  TestTenantGroup {
-        tg1, tg2, tg3
-    }
+public class MultiTenantViewOperationBaseIT extends ParallelStatsDisabledIT {
     protected static final String matcherScenario = ".*scenario/.*test_mt_workload.*xml";
     protected static final String matcherSchema = ".*datamodel/.*test_schema_mt*.*sql";
 
diff --git a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/TenantOperationIT.java b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/TenantOperationIT.java
deleted file mode 100644
index 737080a..0000000
--- a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/TenantOperationIT.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.phoenix.pherf.workload.mt.tenantoperation;
-
-import org.apache.phoenix.pherf.configuration.DataModel;
-import org.apache.phoenix.pherf.configuration.LoadProfile;
-import org.apache.phoenix.pherf.configuration.Scenario;
-import org.apache.phoenix.pherf.util.PhoenixUtil;
-import org.apache.phoenix.pherf.workload.mt.Operation;
-import org.apache.phoenix.pherf.workload.mt.OperationStats;
-import org.apache.phoenix.thirdparty.com.google.common.base.Function;
-import org.apache.phoenix.thirdparty.com.google.common.base.Supplier;
-import org.junit.Assert;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Tests focused on tenant operations and their validations
- */
-public class TenantOperationIT extends MultiTenantOperationBaseIT {
-    private static final Logger LOGGER = LoggerFactory.getLogger(TenantOperationIT.class);
-
-    @Test
-    public void testVariousOperations() throws Exception {
-        int numTenantGroups = 3;
-        int numOpGroups = 5;
-        int numRuns = 10;
-        int numOperations = 10;
-
-        PhoenixUtil pUtil = PhoenixUtil.create();
-        DataModel model = readTestDataModel("/scenario/test_mt_workload.xml");
-        for (Scenario scenario : model.getScenarios()) {
-            LOGGER.debug(String.format("Testing %s", scenario.getName()));
-            LoadProfile loadProfile = scenario.getLoadProfile();
-            assertEquals("tenant group size is not as expected: ",
-                    numTenantGroups, loadProfile.getTenantDistribution().size());
-            assertEquals("operation group size is not as expected: ",
-                    numOpGroups, loadProfile.getOpDistribution().size());
-
-            TenantOperationFactory opFactory = new TenantOperationFactory(pUtil, model, scenario);
-            TenantOperationEventGenerator evtGen = new TenantOperationEventGenerator(
-                    opFactory.getOperations(), model, scenario);
-
-            assertEquals("operation group size from the factory is not as expected: ",
-                    numOpGroups, opFactory.getOperations().size());
-
-            int numRowsInserted = 0;
-            for (int i = 0; i < numRuns; i++) {
-                int ops = numOperations;
-                loadProfile.setNumOperations(ops);
-                while (ops-- > 0) {
-                    TenantOperationInfo info = evtGen.next();
-                    Supplier<Function<TenantOperationInfo, OperationStats>> opSupplier =
-                            opFactory.getOperationSupplier(info);
-                    OperationStats stats = opSupplier.get().apply(info);
-                    LOGGER.info(pUtil.getGSON().toJson(stats));
-                    if (info.getOperation().getType() == Operation.OperationType.PRE_RUN) continue;
-                    switch (TestOperationGroup.valueOf(info.getOperationGroupId())) {
-                    case upsertOp:
-                        assertTrue(opSupplier.getClass()
-                                .isAssignableFrom(UpsertOperationSupplier.class));
-                        numRowsInserted += stats.getRowCount();
-                        break;
-                    case queryOp1:
-                    case queryOp2:
-                        assertTrue(opFactory.getOperationSupplier(info).getClass()
-                                .isAssignableFrom(QueryOperationSupplier.class));
-
-                        // expected row count == num rows inserted
-                        assertEquals(numRowsInserted, stats.getRowCount());
-                        break;
-                    case idleOp:
-                        assertTrue(opFactory.getOperationSupplier(info).getClass()
-                                .isAssignableFrom(IdleTimeOperationSupplier.class));
-                        assertEquals(0, stats.getRowCount());
-                        // expected think time (no-op) to be ~50ms
-                        assertTrue(40 < stats.getDurationInMs() && stats.getDurationInMs() < 60);
-                        break;
-                    case udfOp:
-                        assertTrue(opFactory.getOperationSupplier(info).getClass()
-                                .isAssignableFrom(UserDefinedOperationSupplier.class));
-                        assertEquals(0, stats.getRowCount());
-                        break;
-                    default:
-                        Assert.fail();
-                    }
-                }
-            }
-        }
-    }
-}
diff --git a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/TenantOperationWorkloadIT.java b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/TenantOperationWorkloadIT.java
deleted file mode 100644
index 4918d9c..0000000
--- a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/TenantOperationWorkloadIT.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.phoenix.pherf.workload.mt.tenantoperation;
-
-import org.apache.phoenix.thirdparty.com.google.common.base.Function;
-import org.apache.phoenix.thirdparty.com.google.common.base.Supplier;
-import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
-import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
-import com.lmax.disruptor.LifecycleAware;
-import com.lmax.disruptor.WorkHandler;
-import org.apache.phoenix.pherf.configuration.DataModel;
-import org.apache.phoenix.pherf.configuration.Scenario;
-import org.apache.phoenix.pherf.util.PhoenixUtil;
-import org.apache.phoenix.pherf.workload.Workload;
-import org.apache.phoenix.pherf.workload.mt.OperationStats;
-import org.apache.phoenix.pherf.workload.mt.tenantoperation.TenantOperationWorkload.TenantOperationEvent;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.InetAddress;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Tests focused on tenant operation workloads {@link TenantOperationWorkload}
- * and workload handlers {@link WorkHandler}
- */
-public class TenantOperationWorkloadIT extends MultiTenantOperationBaseIT {
-
-    private static class EventCountingWorkHandler implements
-            WorkHandler<TenantOperationEvent>, LifecycleAware {
-        private final String handlerId;
-        private final TenantOperationFactory tenantOperationFactory;
-        private static final Logger LOGGER = LoggerFactory.getLogger(EventCountingWorkHandler.class);
-        private final Map<String, CountDownLatch> latches;
-        public EventCountingWorkHandler(TenantOperationFactory tenantOperationFactory,
-                String handlerId, Map<String, CountDownLatch> latches) {
-            this.handlerId = handlerId;
-            this.tenantOperationFactory = tenantOperationFactory;
-            this.latches = latches;
-        }
-
-        @Override public void onStart() {}
-
-        @Override public void onShutdown() {}
-
-        @Override public void onEvent(TenantOperationEvent event)
-                throws Exception {
-            TenantOperationInfo input = event.getTenantOperationInfo();
-            Supplier<Function<TenantOperationInfo, OperationStats>>
-                    opSupplier = tenantOperationFactory.getOperationSupplier(input);
-            OperationStats stats = opSupplier.get().apply(input);
-            LOGGER.info(tenantOperationFactory.getPhoenixUtil().getGSON().toJson(stats));
-            assertEquals(0, stats.getStatus());
-            latches.get(handlerId).countDown();
-        }
-    }
-
-    @Test
-    public void testWorkloadWithOneHandler() throws Exception {
-        int numOpGroups = 5;
-        int numHandlers = 1;
-        int totalOperations = 50;
-        int perHandlerCount = 50;
-
-        ExecutorService executor = null;
-        try {
-            executor = Executors.newFixedThreadPool(numHandlers);
-            PhoenixUtil pUtil = PhoenixUtil.create();
-            DataModel model = readTestDataModel("/scenario/test_mt_workload.xml");
-            for (Scenario scenario : model.getScenarios()) {
-                // Set the total number of operations for this load profile
-                scenario.getLoadProfile().setNumOperations(totalOperations);
-                TenantOperationFactory opFactory = new TenantOperationFactory(pUtil, model, scenario);
-                assertEquals("operation group size from the factory is not as expected: ",
-                        numOpGroups, opFactory.getOperations().size());
-
-                // populate the handlers and countdown latches.
-                String handlerId = String.format("%s.%d", InetAddress.getLocalHost().getHostName(), numHandlers);
-                List<WorkHandler> workers = Lists.newArrayList();
-                Map<String, CountDownLatch> latches = Maps.newConcurrentMap();
-                workers.add(new EventCountingWorkHandler(opFactory, handlerId, latches));
-                latches.put(handlerId, new CountDownLatch(perHandlerCount));
-                // submit the workload
-                Workload workload = new TenantOperationWorkload(pUtil, model, scenario, workers, properties);
-                Future status = executor.submit(workload.execute());
-                // Just make sure there are no exceptions
-                status.get();
-
-                // Wait for the handlers to count down
-                for (Map.Entry<String, CountDownLatch> latch : latches.entrySet()) {
-                    assertTrue(latch.getValue().await(60, TimeUnit.SECONDS));
-                }
-            }
-        } finally {
-            if (executor != null) {
-                executor.shutdown();
-            }
-        }
-    }
-
-    @Test
-    public void testWorkloadWithManyHandlers() throws Exception {
-        int numOpGroups = 5;
-        int numHandlers = 5;
-        int totalOperations = 500;
-        int perHandlerCount = 50;
-
-        ExecutorService executor = Executors.newFixedThreadPool(numHandlers);
-        PhoenixUtil pUtil = PhoenixUtil.create();
-        DataModel model = readTestDataModel("/scenario/test_mt_workload.xml");
-        for (Scenario scenario : model.getScenarios()) {
-            // Set the total number of operations for this load profile
-            scenario.getLoadProfile().setNumOperations(totalOperations);
-            TenantOperationFactory opFactory = new TenantOperationFactory(pUtil, model, scenario);
-            assertEquals("operation group size from the factory is not as expected: ",
-                    numOpGroups, opFactory.getOperations().size());
-
-            // populate the handlers and countdown latches.
-            List<WorkHandler> workers = Lists.newArrayList();
-            Map<String, CountDownLatch> latches = Maps.newConcurrentMap();
-            for (int i=0;i<numHandlers;i++) {
-                String handlerId = String.format("%s.%d", InetAddress.getLocalHost().getHostName(), i);
-                workers.add(new EventCountingWorkHandler(opFactory, handlerId, latches));
-                latches.put(handlerId, new CountDownLatch(perHandlerCount));
-            }
-            // submit the workload
-            Workload workload = new TenantOperationWorkload(pUtil, model, scenario, workers, properties);
-            Future status = executor.submit(workload.execute());
-            // Just make sure there are no exceptions
-            status.get();
-            // Wait for the handlers to count down
-            for (Map.Entry<String, CountDownLatch> latch : latches.entrySet()) {
-                assertTrue(latch.getValue().await(60, TimeUnit.SECONDS));
-            }
-        }
-        executor.shutdown();
-    }
-
-}
diff --git a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/TenantTableOperationWorkloadIT.java b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/TenantTableOperationWorkloadIT.java
new file mode 100644
index 0000000..dc98848
--- /dev/null
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/TenantTableOperationWorkloadIT.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.phoenix.pherf.workload.mt.tenantoperation;
+
+import com.lmax.disruptor.WorkHandler;
+import org.apache.phoenix.pherf.configuration.DataModel;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Tests focused on tenant table operations and their validations
+ * Tests focused on tenant operation workloads {@link TenantOperationWorkload}
+ * and workload handlers {@link WorkHandler}
+ */
+public class TenantTableOperationWorkloadIT extends MultiTenantTableOperationBaseIT {
+    private final MultiTenantTestUtils multiTenantTestUtils = new MultiTenantTestUtils();
+    private final DataModel model;
+
+    public TenantTableOperationWorkloadIT() throws Exception {
+        model = readTestDataModel("/scenario/test_tbl_workload.xml");
+    }
+
+    @Test public void testVariousOperations() throws Exception {
+        int expectedTenantGroups = 1;
+        int expectedWriteOpGroups = 1;
+        int expectedReadOpGroups = 2;
+        multiTenantTestUtils.testVariousOperations(properties, model, "TEST_TABLE_WRITE",
+                expectedTenantGroups, expectedWriteOpGroups);
+        multiTenantTestUtils.testVariousOperations(properties, model, "TEST_TABLE_READ",
+                expectedTenantGroups, expectedReadOpGroups);
+    }
+
+    @Test public void testWorkloadWithOneHandler() throws Exception {
+        int expectedTenantGroups = 1;
+        int expectedWriteOpGroups = 1;
+        int expectedReadOpGroups = 2;
+        multiTenantTestUtils.testWorkloadWithOneHandler(properties, model, "TEST_TABLE_WRITE",
+                expectedTenantGroups, expectedWriteOpGroups);
+        multiTenantTestUtils.testWorkloadWithOneHandler(properties, model, "TEST_TABLE_READ",
+                expectedTenantGroups, expectedReadOpGroups);
+    }
+
+    @Test public void testWorkloadWithManyHandlers() throws Exception {
+        int expectedTenantGroups = 1;
+        int expectedWriteOpGroups = 1;
+        int expectedReadOpGroups = 2;
+        multiTenantTestUtils.testWorkloadWithManyHandlers(properties, model, "TEST_TABLE_WRITE",
+                expectedTenantGroups, expectedWriteOpGroups);
+        multiTenantTestUtils.testWorkloadWithManyHandlers(properties, model, "TEST_TABLE_READ",
+                expectedTenantGroups, expectedReadOpGroups);
+    }
+
+}
diff --git a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/TenantViewOperationWorkloadIT.java b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/TenantViewOperationWorkloadIT.java
new file mode 100644
index 0000000..66bbe38
--- /dev/null
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/TenantViewOperationWorkloadIT.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.phoenix.pherf.workload.mt.tenantoperation;
+
+import com.lmax.disruptor.WorkHandler;
+import org.apache.phoenix.pherf.configuration.DataModel;
+import org.junit.Test;
+
+/**
+ * Tests focused on tenant view operations and their validations
+ * Tests focused on tenant operation workloads {@link TenantOperationWorkload}
+ * and workload handlers {@link WorkHandler}
+ */
+public class TenantViewOperationWorkloadIT extends MultiTenantViewOperationBaseIT {
+
+    private final MultiTenantTestUtils multiTenantTestUtils = new MultiTenantTestUtils();
+    private final DataModel model;
+
+    public TenantViewOperationWorkloadIT() throws Exception {
+        model = readTestDataModel("/scenario/test_mt_workload.xml");
+    }
+
+    @Test public void testVariousOperations() throws Exception {
+        int expectedTenantGroups = 3;
+        int expectedOpGroups = 5;
+        multiTenantTestUtils.testVariousOperations(properties, model, null,
+                expectedTenantGroups, expectedOpGroups);
+    }
+
+    @Test public void testWorkloadWithOneHandler() throws Exception {
+        int expectedTenantGroups = 3;
+        int expectedOpGroups = 5;
+        multiTenantTestUtils.testWorkloadWithOneHandler(properties, model, null,
+                expectedTenantGroups, expectedOpGroups);
+
+    }
+
+    @Test public void testWorkloadWithManyHandlers() throws Exception {
+        int expectedTenantGroups = 3;
+        int expectedOpGroups = 5;
+        multiTenantTestUtils.testWorkloadWithManyHandlers(properties, model, null,
+                expectedTenantGroups, expectedOpGroups);
+    }
+}
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Upsert.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Upsert.java
index dfbe9e6..810a742 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Upsert.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Upsert.java
@@ -19,8 +19,11 @@
 package org.apache.phoenix.pherf.configuration;
 
 import org.apache.phoenix.pherf.rules.RulesApplier;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
 
 import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlElementWrapper;
 import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -30,7 +33,7 @@ public class Upsert {
     private String id;
     private String upsertGroup;
     private String statement;
-    private List<Column> columns;
+    private List<Column> column;
     private boolean useGlobalConnection;
     private Pattern pattern;
     private long timeoutDuration = Long.MAX_VALUE;
@@ -90,12 +93,13 @@ public class Upsert {
         this.id = id;
     }
 
-    public List<Column> getColumns() {
-        return columns;
+    public List<Column> getColumn() {
+        if (column == null) return Lists.newArrayList();
+        return column;
     }
 
-    public void setColumns(List<Column> columns) {
-        this.columns = columns;
+    public void setColumn(List<Column> column) {
+        this.column = column;
     }
 
     @XmlAttribute
@@ -124,12 +128,4 @@ public class Upsert {
         // normalize statement - merge all consecutive spaces into one
         this.statement = statement.replaceAll("\\s+", " ");
     }
-
-    public List<Column> getColumn() {
-        return columns;
-    }
-
-    public void setColumn(List<Column> columns) {
-        this.columns = columns;
-    }
 }
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/QueryOperationSupplier.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/QueryOperationSupplier.java
index 99188ce..e149661 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/QueryOperationSupplier.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/QueryOperationSupplier.java
@@ -69,7 +69,7 @@ public class QueryOperationSupplier extends BaseOperationSupplier {
 
                 String opName = String.format("%s:%s:%s:%s:%s", scenarioName, tableName,
                         opGroup, tenantGroup, input.getTenantId());
-                LOGGER.info("\nExecuting query " + query.getStatement());
+                LOGGER.debug("\nExecuting query " + query.getStatement());
 
                 long startTime = 0;
                 int status = 0;
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/TenantOperationFactory.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/TenantOperationFactory.java
index 6e86e14..ee74fad 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/TenantOperationFactory.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/TenantOperationFactory.java
@@ -18,6 +18,7 @@
 
 package org.apache.phoenix.pherf.workload.mt.tenantoperation;
 
+import org.apache.phoenix.pherf.configuration.Column;
 import org.apache.phoenix.thirdparty.com.google.common.base.Charsets;
 import org.apache.phoenix.thirdparty.com.google.common.base.Function;
 import org.apache.phoenix.thirdparty.com.google.common.base.Supplier;
@@ -49,6 +50,8 @@ import org.slf4j.LoggerFactory;
 
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 /**
  * Factory class for operation suppliers.
@@ -94,6 +97,7 @@ public class TenantOperationFactory {
             Maps.newEnumMap(Operation.OperationType.class);
 
     private final BloomFilter<TenantView> tenantsLoaded;
+    private ReadWriteLock rwLock = new ReentrantReadWriteLock();
 
     public TenantOperationFactory(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario) {
         this.phoenixUtil = phoenixUtil;
@@ -269,48 +273,15 @@ public class TenantOperationFactory {
 
         // Check if pre run ddls are needed.
         if (!tenantsLoaded.mightContain(tenantView)) {
-
-            Supplier<Function<TenantOperationInfo, OperationStats>> preRunOpSupplier =
-                    operationSuppliers.get(Operation.OperationType.PRE_RUN);
-            // Check if the scenario has a PRE_RUN operation.
-            if (preRunOpSupplier != null) {
-                // Initialize the tenant using the pre scenario ddls.
-                final PreScenarioOperation
-                        operation = new PreScenarioOperation() {
-                    @Override public List<Ddl> getPreScenarioDdls() {
-                        List<Ddl> ddls = scenario.getPreScenarioDdls();
-                        return ddls == null ? Lists.<Ddl>newArrayList() : ddls;
-                    }
-
-                    @Override public String getId() {
-                        return OperationType.PRE_RUN.name();
-                    }
-
-                    @Override public OperationType getType() {
-                        return OperationType.PRE_RUN;
-                    }
-                };
-                // Initialize with the pre run operation.
-                TenantOperationInfo preRunSample = new TenantOperationInfo(
-                        input.getModelName(),
-                        input.getScenarioName(),
-                        input.getTableName(),
-                        input.getTenantGroupId(),
-                        Operation.OperationType.PRE_RUN.name(),
-                        input.getTenantId(), operation);
-
-                try {
-                    // Run the initialization operation.
-                    OperationStats stats = preRunOpSupplier.get().apply(preRunSample);
-                    LOGGER.info(phoenixUtil.getGSON().toJson(stats));
-                } catch (Exception e) {
-                    LOGGER.error(String.format("Failed to initialize tenant. [%s, %s] ",
-                            tenantView.tenantId,
-                            tenantView.viewName), e);
+            rwLock.writeLock().lock();
+            try {
+                if (!tenantsLoaded.mightContain(tenantView)) {
+                    executePreRunOpsForTenant(tenantView, input);
+                    tenantsLoaded.put(tenantView);
                 }
+            } finally {
+                rwLock.writeLock().unlock();
             }
-
-            tenantsLoaded.put(tenantView);
         }
 
         Supplier<Function<TenantOperationInfo, OperationStats>> opSupplier =
@@ -321,4 +292,47 @@ public class TenantOperationFactory {
         return opSupplier;
     }
 
+    private void executePreRunOpsForTenant(TenantView tenantView, TenantOperationInfo input) {
+
+        Supplier<Function<TenantOperationInfo, OperationStats>> preRunOpSupplier =
+                operationSuppliers.get(Operation.OperationType.PRE_RUN);
+        // Check if the scenario has a PRE_RUN operation.
+        if (preRunOpSupplier != null) {
+            // Initialize the tenant using the pre scenario ddls.
+            final PreScenarioOperation
+                    operation = new PreScenarioOperation() {
+                @Override public List<Ddl> getPreScenarioDdls() {
+                    List<Ddl> ddls = scenario.getPreScenarioDdls();
+                    return ddls == null ? Lists.<Ddl>newArrayList() : ddls;
+                }
+
+                @Override public String getId() {
+                    return OperationType.PRE_RUN.name();
+                }
+
+                @Override public OperationType getType() {
+                    return OperationType.PRE_RUN;
+                }
+            };
+            // Initialize with the pre run operation.
+            TenantOperationInfo preRunSample = new TenantOperationInfo(
+                    input.getModelName(),
+                    input.getScenarioName(),
+                    input.getTableName(),
+                    input.getTenantGroupId(),
+                    Operation.OperationType.PRE_RUN.name(),
+                    input.getTenantId(), operation);
+
+            try {
+                // Run the initialization operation.
+                OperationStats stats = preRunOpSupplier.get().apply(preRunSample);
+                LOGGER.info(phoenixUtil.getGSON().toJson(stats));
+            } catch (Exception e) {
+                LOGGER.error(String.format("Failed to initialize tenant. [%s, %s] ",
+                        tenantView.tenantId,
+                        tenantView.viewName), e);
+            }
+        }
+    }
+
 }
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/UpsertOperationSupplier.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/UpsertOperationSupplier.java
index 30f953b..f7ff24a 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/UpsertOperationSupplier.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/tenantoperation/UpsertOperationSupplier.java
@@ -37,17 +37,19 @@ import java.sql.PreparedStatement;
 import java.sql.SQLException;
 import java.text.SimpleDateFormat;
 import java.util.List;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 /**
  * A supplier of {@link Function} that takes {@link UpsertOperation} as an input
  */
 class UpsertOperationSupplier extends BaseOperationSupplier {
     private static final Logger LOGGER = LoggerFactory.getLogger(UpsertOperationSupplier.class);
+    private ReadWriteLock rwLock = new ReentrantReadWriteLock();
 
     public UpsertOperationSupplier(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario) {
         super(phoenixUtil, model, scenario);
     }
-
     @Override
     public Function<TenantOperationInfo, OperationStats> get() {
         return new Function<TenantOperationInfo, OperationStats>() {
@@ -65,7 +67,7 @@ class UpsertOperationSupplier extends BaseOperationSupplier {
                 final String opGroup = input.getOperationGroupId();
                 final String tableName = input.getTableName();
                 final String scenarioName = input.getScenarioName();
-                final List<Column> columns = upsert.getColumn();
+
                 // TODO:
                 // Ideally the fact that the op needs to executed using global connection
                 // needs to be built into the framework and injected during event generation.
@@ -73,23 +75,39 @@ class UpsertOperationSupplier extends BaseOperationSupplier {
 
                 final boolean isTenantGroupGlobal = (tenantGroup.compareTo(TenantGroup.DEFAULT_GLOBAL_ID) == 0);
                 final String tenantId = isTenantGroupGlobal || upsert.isUseGlobalConnection() ? null : input.getTenantId();
-
                 final String opName = String.format("%s:%s:%s:%s:%s",
                         scenarioName, tableName, opGroup, tenantGroup, input.getTenantId());
-
                 long rowsCreated = 0;
                 long startTime = 0, duration, totalDuration;
                 int status = 0;
                 SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+
                 try (Connection connection = phoenixUtil.getConnection(tenantId)) {
-                    String sql = phoenixUtil.buildSql(columns, tableName);
+                    // If the list of columns has not been provided or lazily loaded yet,
+                    // then use the metadata call to get the column list.
+                    if (upsert.getColumn().isEmpty()) {
+                        rwLock.writeLock().lock();
+                        try {
+                            if (upsert.getColumn().isEmpty()) {
+                                LOGGER.info("Fetching columns metadata from db for operation : " + opName);
+                                List<Column> allCols = phoenixUtil.getColumnsFromPhoenix(scenario.getSchemaName(),
+                                        scenario.getTableNameWithoutSchemaName(),
+                                        connection);
+                                upsert.setColumn(allCols);
+                            }
+                        } finally {
+                            rwLock.writeLock().unlock();
+                        }
+                    }
+
+                    String sql = phoenixUtil.buildSql(upsert.getColumn(), tableName);
+                    LOGGER.info("Operation " + opName + " executing " + sql);
                     startTime = EnvironmentEdgeManager.currentTimeMillis();
                     PreparedStatement stmt = null;
                     try {
                         stmt = connection.prepareStatement(sql);
                         for (long i = rowCount; i > 0; i--) {
-                            LOGGER.debug("Operation " + opName + " executing ");
-                            stmt = phoenixUtil.buildStatement(rulesApplier, scenario, columns, stmt, simpleDateFormat);
+                            stmt = phoenixUtil.buildStatement(rulesApplier, scenario, upsert.getColumn(), stmt, simpleDateFormat);
                             if (useBatchApi) {
                                 stmt.addBatch();
                             } else {
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultBaseTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultBaseTest.java
index 531af26..a4b7648 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultBaseTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultBaseTest.java
@@ -18,15 +18,10 @@
 
 package org.apache.phoenix.pherf;
 
-import org.apache.commons.io.FileUtils;
 import org.apache.phoenix.pherf.result.ResultUtil;
-import org.apache.phoenix.pherf.workload.mt.tenantoperation.TenantOperationIT;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
-import java.io.File;
 import java.util.Properties;
 
 public class ResultBaseTest {
diff --git a/phoenix-pherf/src/test/resources/datamodel/test_schema_mt_table.sql b/phoenix-pherf/src/test/resources/datamodel/test_schema_mt_base_table.sql
similarity index 94%
copy from phoenix-pherf/src/test/resources/datamodel/test_schema_mt_table.sql
copy to phoenix-pherf/src/test/resources/datamodel/test_schema_mt_base_table.sql
index b6791bf..184180a 100644
--- a/phoenix-pherf/src/test/resources/datamodel/test_schema_mt_table.sql
+++ b/phoenix-pherf/src/test/resources/datamodel/test_schema_mt_base_table.sql
@@ -15,7 +15,7 @@
   -- See the License for the specific language governing permissions and
   -- limitations under the License.
 */
-CREATE TABLE IF NOT EXISTS PHERF.TEST_MULTI_TENANT_TABLE (
+CREATE TABLE IF NOT EXISTS PHERF.TEST_BASE_TABLE (
     TENANT_ID CHAR(15) NOT NULL,
     IDENTIFIER CHAR(3) NOT NULL,
     ID CHAR(15) NOT NULL,
diff --git a/phoenix-pherf/src/test/resources/datamodel/test_schema_mt_view.sql b/phoenix-pherf/src/test/resources/datamodel/test_schema_mt_view1.sql
similarity index 88%
copy from phoenix-pherf/src/test/resources/datamodel/test_schema_mt_view.sql
copy to phoenix-pherf/src/test/resources/datamodel/test_schema_mt_view1.sql
index ad25e9b..51080dd 100644
--- a/phoenix-pherf/src/test/resources/datamodel/test_schema_mt_view.sql
+++ b/phoenix-pherf/src/test/resources/datamodel/test_schema_mt_view1.sql
@@ -16,7 +16,7 @@
   -- limitations under the License.
 */
 
-CREATE VIEW IF NOT EXISTS PHERF.TEST_GLOBAL_VIEW (
+CREATE VIEW IF NOT EXISTS PHERF.TEST_GLOBAL_VIEW1 (
     GID CHAR(15) NOT NULL,
     FIELD1 VARCHAR,
     OTHER_INT INTEGER
@@ -24,4 +24,4 @@ CREATE VIEW IF NOT EXISTS PHERF.TEST_GLOBAL_VIEW (
     (
         GID
     )
-) AS SELECT * FROM PHERF.TEST_MULTI_TENANT_TABLE WHERE IDENTIFIER = 'EV1'
+) AS SELECT * FROM PHERF.TEST_BASE_TABLE WHERE IDENTIFIER = 'EV1'
diff --git a/phoenix-pherf/src/test/resources/datamodel/test_schema_mt_view.sql b/phoenix-pherf/src/test/resources/datamodel/test_schema_mt_view2.sql
similarity index 88%
rename from phoenix-pherf/src/test/resources/datamodel/test_schema_mt_view.sql
rename to phoenix-pherf/src/test/resources/datamodel/test_schema_mt_view2.sql
index ad25e9b..a435eb7 100644
--- a/phoenix-pherf/src/test/resources/datamodel/test_schema_mt_view.sql
+++ b/phoenix-pherf/src/test/resources/datamodel/test_schema_mt_view2.sql
@@ -16,7 +16,7 @@
   -- limitations under the License.
 */
 
-CREATE VIEW IF NOT EXISTS PHERF.TEST_GLOBAL_VIEW (
+CREATE VIEW IF NOT EXISTS PHERF.TEST_GLOBAL_VIEW2 (
     GID CHAR(15) NOT NULL,
     FIELD1 VARCHAR,
     OTHER_INT INTEGER
@@ -24,4 +24,4 @@ CREATE VIEW IF NOT EXISTS PHERF.TEST_GLOBAL_VIEW (
     (
         GID
     )
-) AS SELECT * FROM PHERF.TEST_MULTI_TENANT_TABLE WHERE IDENTIFIER = 'EV1'
+) AS SELECT * FROM PHERF.TEST_BASE_TABLE WHERE IDENTIFIER = 'EV2'
diff --git a/phoenix-pherf/src/test/resources/datamodel/test_schema_mt_table.sql b/phoenix-pherf/src/test/resources/datamodel/test_schema_tbl_simple.sql
similarity index 76%
rename from phoenix-pherf/src/test/resources/datamodel/test_schema_mt_table.sql
rename to phoenix-pherf/src/test/resources/datamodel/test_schema_tbl_simple.sql
index b6791bf..8b76820 100644
--- a/phoenix-pherf/src/test/resources/datamodel/test_schema_mt_table.sql
+++ b/phoenix-pherf/src/test/resources/datamodel/test_schema_tbl_simple.sql
@@ -16,16 +16,18 @@
   -- limitations under the License.
 */
 CREATE TABLE IF NOT EXISTS PHERF.TEST_MULTI_TENANT_TABLE (
-    TENANT_ID CHAR(15) NOT NULL,
-    IDENTIFIER CHAR(3) NOT NULL,
-    ID CHAR(15) NOT NULL,
-    CREATED_DATE DATE,
-    FIELD VARCHAR,
-    SOME_INT INTEGER
+    HOST CHAR(2) NOT NULL,
+    DOMAIN VARCHAR NOT NULL,
+    FEATURE VARCHAR NOT NULL,
+    DATE DATE NOT NULL,
+    USAGE.CORE BIGINT,
+    USAGE.DB BIGINT,
+    STATS.ACTIVE_VISITOR INTEGER
     CONSTRAINT PK PRIMARY KEY
     (
-        TENANT_ID,
-        IDENTIFIER,
-        ID
+        HOST,
+        DOMAIN,
+        FEATURE,
+        DATE
     )
-) VERSIONS=1,MULTI_TENANT=true
+) VERSIONS=1, MULTI_TENANT=true
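
To make the renamed simple-table schema above concrete: a minimal JDBC sketch of one full-row upsert against it. The connection URL is an assumption for illustration; Pherf itself obtains connections through PhoenixUtil.

    // Illustrative only: upsert one complete row into the simple test table defined above.
    import java.sql.Connection;
    import java.sql.Date;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;

    public class SimpleTableUpsertSketch {
        public static void main(String[] args) throws Exception {
            // Assumed local quorum; adjust to the cluster under test.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
                 PreparedStatement stmt = conn.prepareStatement(
                     "UPSERT INTO PHERF.TEST_MULTI_TENANT_TABLE "
                         + "(HOST, DOMAIN, FEATURE, DATE, USAGE.CORE, USAGE.DB, STATS.ACTIVE_VISITOR) "
                         + "VALUES (?, ?, ?, ?, ?, ?, ?)")) {
                stmt.setString(1, "NA");
                stmt.setString(2, "Salesforce.com");
                stmt.setString(3, "Login");
                stmt.setDate(4, new Date(System.currentTimeMillis()));
                stmt.setLong(5, 42L);
                stmt.setLong(6, 7L);
                stmt.setInt(7, 100);
                stmt.executeUpdate();
                conn.commit(); // Phoenix connections are not auto-commit by default.
            }
        }
    }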
diff --git a/phoenix-pherf/src/test/resources/scenario/test_mt_workload.xml b/phoenix-pherf/src/test/resources/scenario/test_mt_workload.xml
index d3b83a2..00d613b 100644
--- a/phoenix-pherf/src/test/resources/scenario/test_mt_workload.xml
+++ b/phoenix-pherf/src/test/resources/scenario/test_mt_workload.xml
@@ -45,6 +45,26 @@
             <name>GENERAL_INTEGER</name>
         </column>
         <column>
+            <type>INTEGER</type>
+            <dataSequence>SEQUENTIAL</dataSequence>
+            <!-- Number [0-100] that represents the probability of creating a null value -->
+            <!-- The higher the number, the more likely the returned value will be null -->
+            <!-- Leaving this tag out is equivalent to having a 0 probability, i.e. never null -->
+            <nullChance>0</nullChance>
+            <name>INT_ID</name>
+        </column>
+        <column>
+            <type>DATE</type>
+            <!--SEQUENTIAL is unsupported for DATE -->
+            <dataSequence>RANDOM</dataSequence>
+            <!-- Number [0-100] that represents the probability of creating a null value -->
+            <!-- The higher the number, the more likely the returned value will be null -->
+            <!-- Leaving this tag out is equivalent to having a 0 probability, i.e. never null -->
+            <nullChance>0</nullChance>
+            <useCurrentDate>true</useCurrentDate>
+            <name>GENERAL_DATE</name>
+        </column>
+        <column>
             <type>CHAR</type>
             <length>3</length>
             <userDefined>true</userDefined>
@@ -63,9 +83,14 @@
                 </datavalue>
             </valuelist>
         </column>
+        <column>
+            <type>CHAR</type>
+            <length>3</length>
+            <name>IDENTIFIER</name>
+        </column>
     </datamapping>
     <scenarios>
-        <scenario tableName="PHERF.EVT_1" name="EVT_1">
+        <scenario tableName="PHERF.EVT_1" name="EVT_11">
             <loadProfile>
                 <batchSize>1</batchSize>
                 <numOperations>10</numOperations>
@@ -80,7 +105,7 @@
                 <opDistribution id="udfOp" weight="20"></opDistribution>
             </loadProfile>
             <preScenarioDdls>
-                <ddl statement="CREATE VIEW IF NOT EXISTS PHERF.EVT_1 (ZID 
CHAR(15), TYPE VARCHAR) AS SELECT * FROM PHERF.TEST_GLOBAL_VIEW" />
+                <ddl statement="CREATE VIEW IF NOT EXISTS PHERF.EVT_1 (ZID 
CHAR(15), TYPE VARCHAR) AS SELECT * FROM PHERF.TEST_GLOBAL_VIEW1" />
             </preScenarioDdls>
 
             <upserts>
@@ -131,5 +156,71 @@
                 </udf>
             </udfs>
         </scenario>
+        <scenario tableName="PHERF.EVT_2" name="EVT_21">
+            <loadProfile>
+                <batchSize>1</batchSize>
+                <numOperations>10</numOperations>
+                <!-- Case 1 : Upsert Operation test -->
+                <tenantDistribution id="tg1" weight="100" 
numTenants="1"></tenantDistribution>
+                <tenantDistribution id="tg2" weight="0" 
numTenants="0"></tenantDistribution>
+                <tenantDistribution id="tg3" weight="0" 
numTenants="0"></tenantDistribution>
+                <opDistribution id="upsertOp" weight="20"></opDistribution>
+                <opDistribution id="queryOp1" weight="20"></opDistribution>
+                <opDistribution id="queryOp2" weight="20"></opDistribution>
+                <opDistribution id="idleOp" weight="20"></opDistribution>
+                <opDistribution id="udfOp" weight="20"></opDistribution>
+            </loadProfile>
+            <preScenarioDdls>
+                <ddl statement="CREATE VIEW IF NOT EXISTS PHERF.EVT_2 (INT_ID 
INTEGER, TYPE VARCHAR) AS SELECT * FROM PHERF.TEST_GLOBAL_VIEW2" />
+            </preScenarioDdls>
+
+            <upserts>
+                <upsert id="upsertOp">
+                <column>
+                    <type>CHAR</type>
+                    <name>ID</name>
+                </column>
+                <column>
+                    <type>INTEGER</type>
+                    <name>SOME_INT</name>
+                </column>
+                <column>
+                    <type>CHAR</type>
+                    <name>GID</name>
+                </column>
+                <column>
+                    <type>VARCHAR</type>
+                    <name>FIELD1</name>
+                </column>
+                <column>
+                    <type>INTEGER</type>
+                    <name>OTHER_INT</name>
+                </column>
+                <column>
+                    <type>INTEGER</type>
+                    <name>INT_ID</name>
+                </column>
+                <column>
+                    <type>CHAR</type>
+                    <name>TYPE</name>
+                </column>
+                </upsert>
+            </upserts>
+
+            <querySet>
+                <query id="queryOp1" statement="select count(*) from 
PHERF.EVT_2"/>
+                <query id="queryOp2" statement="select * from PHERF.EVT_2"/>
+            </querySet>
+            <idleTimes>
+                <idleTime id="idleOp" idleTime="50"/>
+            </idleTimes>
+            <udfs>
+                <udf id="udfOp" >
+                    <clazzName>org.apache.phoenix.pherf.ConfigurationParserTest.TestUDF</clazzName>
+                    <args>Hello</args>
+                    <args>World</args>
+                </udf>
+            </udfs>
+        </scenario>
     </scenarios>
 </datamodel>
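
The nullChance tag documented in the comments of the new columns above is a probability in [0, 100]; a minimal sketch (illustrative only, not Pherf's actual rules engine) of that semantics:

    // Illustrative sketch of <nullChance>: higher values make the generated field
    // more likely to come back null; 0 (or omitting the tag) means never null.
    import java.util.concurrent.ThreadLocalRandom;

    final class NullChanceSketch {
        static String maybeNull(String generatedValue, int nullChance) {
            return ThreadLocalRandom.current().nextInt(100) < nullChance ? null : generatedValue;
        }
    }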
diff --git a/phoenix-pherf/src/test/resources/scenario/test_tbl_workload.xml b/phoenix-pherf/src/test/resources/scenario/test_tbl_workload.xml
new file mode 100644
index 0000000..286dcf8
--- /dev/null
+++ b/phoenix-pherf/src/test/resources/scenario/test_tbl_workload.xml
@@ -0,0 +1,169 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~   or more contributor license agreements.  See the NOTICE file
+  ~   distributed with this work for additional information
+  ~   regarding copyright ownership.  The ASF licenses this file
+  ~   to you under the Apache License, Version 2.0 (the
+  ~   "License"); you may not use this file except in compliance
+  ~   with the License.  You may obtain a copy of the License at
+  ~
+  ~   http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~   Unless required by applicable law or agreed to in writing, software
+  ~   distributed under the License is distributed on an "AS IS" BASIS,
+  ~   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~   See the License for the specific language governing permissions and
+  ~   limitations under the License.
+  -->
+
+<datamodel name="TEST_MT_TABLE_MODEL">
+    <datamapping>
+        <column>
+            <type>CHAR</type>
+            <length>2</length>
+            <userDefined>true</userDefined>
+            <dataSequence>SEQUENTIAL</dataSequence>
+            <name>HOST</name>
+            <valuelist>
+                <!-- Distributes according to specified values. These must total 100 -->
+                <!-- ["NA","CS","EU"] -->
+                <datavalue distribution="34">
+                    <value>NA</value>
+                </datavalue>
+                <datavalue distribution="33">
+                    <value>CS</value>
+                </datavalue>
+                <datavalue distribution="33">
+                    <value>EU</value>
+                </datavalue>
+            </valuelist>
+        </column>
+        <column>
+            <type>VARCHAR</type>
+            <userDefined>true</userDefined>
+            <dataSequence>SEQUENTIAL</dataSequence>
+            <name>DOMAIN</name>
+            <valuelist>
+                <!-- Distributes according to specified values. These must total 100 -->
+                <!-- ["Salesforce.com","Apple.com","Google.com","Amazon.com"]-->
+                <datavalue distribution="25">
+                    <value>Salesforce.com</value>
+                </datavalue>
+                <datavalue distribution="25">
+                    <value>Apple.com</value>
+                </datavalue>
+                <datavalue distribution="25">
+                    <value>Google.com</value>
+                </datavalue>
+                <datavalue distribution="25">
+                    <value>Amazon.com</value>
+                </datavalue>
+            </valuelist>
+        </column>
+        <column>
+            <type>VARCHAR</type>
+            <userDefined>true</userDefined>
+            <dataSequence>SEQUENTIAL</dataSequence>
+            <name>FEATURE</name>
+            <valuelist>
+                <!-- Distributes according to specified values. These must total 100 -->
+                <!-- ["Login","Report","Dashboard","Sales","UI"]-->
+                <datavalue distribution="20">
+                    <value>Login</value>
+                </datavalue>
+                <datavalue distribution="20">
+                    <value>Report</value>
+                </datavalue>
+                <datavalue distribution="20">
+                    <value>Dashboard</value>
+                </datavalue>
+                <datavalue distribution="20">
+                    <value>Sales</value>
+                </datavalue>
+                <datavalue distribution="20">
+                    <value>UI</value>
+                </datavalue>
+            </valuelist>
+        </column>
+        <column>
+            <type>DATE</type>
+            <userDefined>true</userDefined>
+            <dataSequence>SEQUENTIAL</dataSequence>
+            <name>DATE</name>
+        </column>
+        <column>
+            <type>DATE</type>
+            <userDefined>true</userDefined>
+            <!--SEQUENTIAL is unsupported for DATE -->
+            <dataSequence>RANDOM</dataSequence>
+            <!-- Number [0-100] that represents the probability of creating a null value -->
+            <!-- The higher the number, the more likely the returned value will be null -->
+            <!-- Leaving this tag out is equivalent to having a 0 probability, i.e. never null -->
+            <nullChance>0</nullChance>
+            <minValue>2020</minValue>
+            <maxValue>2025</maxValue>
+            <name>DATE_O</name>
+        </column>
+
+        <column>
+            <type>BIGINT</type>
+            <dataSequence>RANDOM</dataSequence>
+            <minValue>1</minValue>
+            <maxValue>100</maxValue>
+            <!-- Number [0-100] that represents the probability of creating a null value -->
+            <!-- The higher the number, the more likely the returned value will be null -->
+            <!-- Leaving this tag out is equivalent to having a 0 probability, i.e. never null -->
+            <nullChance>0</nullChance>
+            <name>USAGE.CORE</name>
+        </column>
+        <column>
+            <type>BIGINT</type>
+            <dataSequence>RANDOM</dataSequence>
+            <minValue>1</minValue>
+            <maxValue>2000</maxValue>
+            <!-- Number [0-100] that represents the probability of creating a null value -->
+            <!-- The higher the number, the more likely the returned value will be null -->
+            <!-- Leaving this tag out is equivalent to having a 0 probability, i.e. never null -->
+            <nullChance>0</nullChance>
+            <name>USAGE.DB</name>
+        </column>
+        <column>
+            <type>INTEGER</type>
+            <dataSequence>RANDOM</dataSequence>
+            <minValue>1</minValue>
+            <maxValue>10000</maxValue>
+            <!-- Number [0-100] that represents the probability of creating a null value -->
+            <!-- The higher the number, the more likely the returned value will be null -->
+            <!-- Leaving this tag out is equivalent to having a 0 probability, i.e. never null -->
+            <nullChance>0</nullChance>
+            <name>STATS.ACTIVE_VISITOR</name>
+        </column>
+    </datamapping>
+    <scenarios>
+        <scenario tableName="PHERF.TEST_MULTI_TENANT_TABLE" 
name="TEST_TABLE_WRITE">
+            <loadProfile>
+                <numOperations>1</numOperations>
+                <tenantDistribution useGlobalConnection="true"/>
+                <opDistribution id="upsertOp" weight="100"/>
+            </loadProfile>
+
+            <upserts>
+                <upsert id="upsertOp" useGlobalConnection="true" 
upsertGroup="write"/>
+            </upserts>
+        </scenario>
+        <scenario tableName="PHERF.TEST_MULTI_TENANT_TABLE" 
name="TEST_TABLE_READ">
+            <loadProfile>
+                <numOperations>1</numOperations>
+                <tenantDistribution useGlobalConnection="true"/>
+                <opDistribution id="queryOp1" weight="20"/>
+                <opDistribution id="queryOp2" weight="20"/>
+            </loadProfile>
+
+            <querySet>
+                <query id="queryOp1" useGlobalConnection="true" 
statement="select count(*) from PHERF.TEST_MULTI_TENANT_TABLE" 
queryGroup="Aggregation"/>
+                <query id="queryOp2" useGlobalConnection="true" 
statement="select * from PHERF.TEST_MULTI_TENANT_TABLE LIMIT 100" 
queryGroup="LIMIT"/>
+            </querySet>
+        </scenario>
+    </scenarios>
+</datamodel>
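
For the TEST_TABLE_READ scenario just above, the two declared operations amount to plain JDBC reads over a single non-tenant connection; a minimal sketch with an assumed connection URL:

    // Illustrative only: the aggregation and LIMIT reads from TEST_TABLE_READ.
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class TableReadScenarioSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
                 Statement stmt = conn.createStatement()) {
                try (ResultSet rs = stmt.executeQuery("select count(*) from PHERF.TEST_MULTI_TENANT_TABLE")) {
                    while (rs.next()) {
                        System.out.println("row count = " + rs.getLong(1));
                    }
                }
                try (ResultSet rs = stmt.executeQuery("select * from PHERF.TEST_MULTI_TENANT_TABLE LIMIT 100")) {
                    while (rs.next()) {
                        System.out.println(rs.getString("HOST") + "\t" + rs.getString("DOMAIN"));
                    }
                }
            }
        }
    }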
