Repository: phoenix
Updated Branches:
  refs/heads/4.0 82d6fb9bd -> 46d365a96


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4b516559/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java
----------------------------------------------------------------------
diff --git 
a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java 
b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java
new file mode 100644
index 0000000..8152390
--- /dev/null
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *   or more contributor license agreements.  See the NOTICE file
+ *   distributed with this work for additional information
+ *   regarding copyright ownership.  The ASF licenses this file
+ *   to you under the Apache License, Version 2.0 (the
+ *   "License"); you may not use this file except in compliance
+ *   with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ */
+
+package org.apache.phoenix.pherf;
+
+import static org.junit.Assert.*;
+
+import java.util.*;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+import org.apache.phoenix.pherf.PherfConstants;
+import org.apache.phoenix.pherf.PherfConstants.RunMode;
+import org.apache.phoenix.pherf.jmx.MonitorManager;
+
+import org.apache.phoenix.pherf.result.file.Extension;
+import org.apache.phoenix.pherf.result.file.ResultFileDetails;
+import org.apache.phoenix.pherf.result.impl.CSVResultHandler;
+import org.apache.phoenix.pherf.result.impl.XMLResultHandler;
+import org.apache.phoenix.pherf.result.*;
+import org.junit.Test;
+
+import org.apache.phoenix.pherf.configuration.Query;
+
+public class ResultTest {
+
+    @Test
+    public void testWriters() throws Exception {
+        ExecutorService executorService = Executors.newFixedThreadPool(1);
+        List<ResultHandler> writers = new ArrayList<>();
+//        Monitor monitor = new Monitor(new XMLConfigParser("test.*xml"), 100);
+//        Future future = executorService.submit(monitor);
+
+        writers.add(new CSVResultHandler(PherfConstants.MONITOR_FILE_NAME, ResultFileDetails.CSV_DETAILED_PERFORMANCE));
+
+    }
+
+    @Test
+    public void testMonitorWriter() throws Exception {
+        String[] row = "org.apache.phoenix.pherf:type=PherfWriteThreads,6,Mon Jan 05 15:14:00 PST 2015".split(PherfConstants.RESULT_FILE_DELIMETER);
+        ResultHandler resultMonitorWriter = null;
+        List<ResultValue> resultValues = new ArrayList<>();
+        for (String val : row) {
+            resultValues.add(new ResultValue(val));
+        }
+
+        try {
+            resultMonitorWriter = new CSVResultHandler(PherfConstants.MONITOR_FILE_NAME, ResultFileDetails.CSV_MONITOR);
+            Result result = new Result(ResultFileDetails.CSV_MONITOR, ResultFileDetails.CSV_MONITOR.getHeader().toString(), resultValues);
+            resultMonitorWriter.write(result);
+            resultMonitorWriter.write(result);
+            resultMonitorWriter.write(result);
+            resultMonitorWriter.flush();
+            List<Result> results = resultMonitorWriter.read();
+            assertEquals("Results did not contain row.", results.size(), 3);
+
+        } finally {
+            if (resultMonitorWriter != null) {
+                resultMonitorWriter.flush();
+                resultMonitorWriter.close();
+            }
+        }
+    }
+
+    @Test
+    public void testMonitorResult() throws Exception {
+        ExecutorService executorService = Executors.newFixedThreadPool(1);
+        MonitorManager monitor = new MonitorManager(100);
+        Future future = executorService.submit(monitor);
+        List<Result> records = null;
+        final int TIMEOUT = 30;
+
+        int ct = 0;
+        int max = 30;
+        // Wait while we write some rows.
+        while (!future.isDone()) {
+            Thread.sleep(100);
+            if (ct == max) {
+                int timer = 0;
+                monitor.stop();
+                while (monitor.isRunning() && (timer < TIMEOUT)) {
+                    System.out.println("Waiting for monitor to finish. Seconds 
Waited :" + timer);
+                    Thread.sleep(1000);
+                    timer++;
+                }
+            }
+
+            ct++;
+        }
+        executorService.shutdown();
+        records = monitor.readResults();
+
+        assertNotNull("Could not retrieve records", records);
+        assertEquals("Failed to get correct amount of CSV records.", 
records.size(), monitor.getRowCount());
+        assertFalse("Monitor was not stopped correctly.", monitor.isRunning());
+    }
+
+    @Test
+    public void testExtensionEnum() {
+        assertEquals("Extension did not match", Extension.CSV.toString(), 
".csv");
+        assertEquals("Extension did not match", 
Extension.DETAILED_CSV.toString(), "_detail.csv");
+    }
+
+    @Test
+    public void testResult() throws Exception {
+        String filename = "testresult";
+        ResultHandler xmlResultHandler = null;
+        ResultManager resultManager = new ResultManager(filename, RunMode.PERFORMANCE);
+
+        // write result to file
+        DataModelResult dataModelResult = new DataModelResult();
+        dataModelResult.setZookeeper("mytestzk");
+        ScenarioResult scenarioResult = new ScenarioResult();
+        scenarioResult.setTableName("MY_TABLE_NAME");
+
+        // Scenario Name left blank on purpose to test that null values get generated correctly.
+        //scenarioResult.setName("MY_TEST_SCENARIO");
+
+        dataModelResult.getScenarioResult().add(scenarioResult);
+        scenarioResult.setRowCount(999);
+        QuerySetResult querySetResult = new QuerySetResult();
+        querySetResult.setConcurrency("50");
+        scenarioResult.getQuerySetResult().add(querySetResult);
+        Query query = new Query();
+        Query query2 = new Query();
+
+        // add some spaces so we can test that the query gets normalized
+        query.setQueryGroup("g123");
+        query.setTenantId("tennantID123");
+        query.setStatement("Select    * \n" + "from    FHA");
+        query2.setStatement("Select a, b, c  * \n" + "from    FHA2");
+        assertEquals("Expected consecutive spaces to be normalized", "Select * 
from FHA", query.getStatement());
+
+        QueryResult queryResult = new QueryResult(query);
+        QueryResult queryResult2 = new QueryResult(query2);
+        querySetResult.getQueryResults().add(queryResult);
+        querySetResult.getQueryResults().add(queryResult2);
+
+        ThreadTime tt = new ThreadTime();
+        tt.setThreadName("thread1");
+        Calendar calendar = Calendar.getInstance();
+        Date startTime1 = calendar.getTime();
+        RunTime runtime1 = new RunTime(startTime1, 1000L, 10);
+        tt.getRunTimesInMs().add(runtime1);
+        calendar.add(Calendar.MINUTE, -1);
+        RunTime runtime2 = new RunTime(calendar.getTime(), 2000L, 20);
+        tt.getRunTimesInMs().add(runtime2);
+        calendar.add(Calendar.MINUTE, -1);
+        RunTime runtime3 = new RunTime(calendar.getTime(), 3000L, 30);
+        tt.getRunTimesInMs().add(runtime3);
+        queryResult.getThreadTimes().add(tt);
+        queryResult2.getThreadTimes().add(tt);
+
+        //resultUtil.writeResultToFile(dataModelResult, filename, RunMode.PERFORMANCE);
+        resultManager.write(dataModelResult);
+
+        // Put some stuff in a combined file
+        List<DataModelResult> modelResults = new ArrayList<>();
+        modelResults.add(dataModelResult);
+        modelResults.add(dataModelResult);
+        resultManager.write(modelResults);
+
+        // read result from file
+        xmlResultHandler = new XMLResultHandler(filename, ResultFileDetails.XML);
+        List<Result> resultList = xmlResultHandler.read();
+        ResultValue<DataModelResult> resultValue = resultList.get(0).getResultValues().get(0);
+        DataModelResult dataModelResultFromFile = resultValue.getResultValue();
+
+        ScenarioResult scenarioResultFromFile = dataModelResultFromFile.getScenarioResult().get(0);
+        QuerySetResult querySetResultFromFile = scenarioResultFromFile.getQuerySetResult().get(0);
+        QueryResult queryResultFromFile = querySetResultFromFile.getQueryResults().get(0);
+        ThreadTime ttFromFile = queryResultFromFile.getThreadTimes().get(0);
+
+        // thread level verification
+        assertEquals(10, (int) ttFromFile.getMinTimeInMs().getElapsedDurationInMs());
+        assertEquals(30, (int) ttFromFile.getMaxTimeInMs().getElapsedDurationInMs());
+        assertEquals(20, (int) ttFromFile.getAvgTimeInMs());
+        // 3rd runtime has the earliest start time, therefore that's what's expected.
+        assertEquals(runtime3.getStartTime(), ttFromFile.getStartTime());
+
+        assertEquals(runtime1.getResultRowCount(), ttFromFile.getRunTimesInMs().get(0).getResultRowCount());
+        assertEquals(runtime2.getResultRowCount(), ttFromFile.getRunTimesInMs().get(1).getResultRowCount());
+        assertEquals(runtime3.getResultRowCount(), ttFromFile.getRunTimesInMs().get(2).getResultRowCount());
+
+        // query result level verification
+        assertEquals(10, queryResultFromFile.getAvgMinRunTimeInMs());
+        assertEquals(30, queryResultFromFile.getAvgMaxRunTimeInMs());
+        assertEquals(20, queryResultFromFile.getAvgRunTimeInMs());
+        // 3rd runtime has the earliest start time, therefore that's what's expected.
+        assertEquals(runtime3.getStartTime(), queryResultFromFile.getStartTime());
+    }
+}
\ No newline at end of file
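
For reference, the write/flush/read/close lifecycle that testMonitorWriter exercises reduces to the sketch below. It reuses only classes shown in the diff above (CSVResultHandler, Result, ResultValue, ResultFileDetails) and assumes the same imports as ResultTest; it is not additional code from this commit.

    ResultHandler handler = new CSVResultHandler(
            PherfConstants.MONITOR_FILE_NAME, ResultFileDetails.CSV_MONITOR);
    try {
        // One ResultValue per CSV cell, wrapped in a Result row.
        List<ResultValue> cells = new ArrayList<>();
        cells.add(new ResultValue("org.apache.phoenix.pherf:type=PherfWriteThreads"));
        Result row = new Result(ResultFileDetails.CSV_MONITOR,
                ResultFileDetails.CSV_MONITOR.getHeader().toString(), cells);
        handler.write(row);      // may be called repeatedly before flushing
        handler.flush();         // pushes buffered rows to disk so read() can see them
        List<Result> rowsRead = handler.read();
    } finally {
        handler.close();         // always release the underlying file
    }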

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4b516559/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
----------------------------------------------------------------------
diff --git 
a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java 
b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
new file mode 100644
index 0000000..8eafd56
--- /dev/null
+++ 
b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
@@ -0,0 +1,213 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *   or more contributor license agreements.  See the NOTICE file
+ *   distributed with this work for additional information
+ *   regarding copyright ownership.  The ASF licenses this file
+ *   to you under the Apache License, Version 2.0 (the
+ *   "License"); you may not use this file except in compliance
+ *   with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ */
+
+package org.apache.phoenix.pherf;
+
+import org.apache.phoenix.pherf.configuration.*;
+import org.apache.phoenix.pherf.loaddata.DataLoader;
+import org.apache.phoenix.pherf.rules.DataValue;
+import org.apache.phoenix.pherf.rules.RulesApplier;
+import org.joda.time.DateTime;
+import org.joda.time.format.DateTimeFormat;
+import org.joda.time.format.DateTimeFormatter;
+import org.junit.Test;
+
+import java.sql.Types;
+import java.util.*;
+
+import static org.junit.Assert.*;
+
+public class RuleGeneratorTest extends BaseTestWithCluster {
+
+    @Test
+    public void testDateGenerator() throws Exception {
+        XMLConfigParser parser = new XMLConfigParser(matcherScenario);
+        DataModel model = parser.getDataModels().get(0);
+        DataLoader loader = new DataLoader(parser);
+        RulesApplier rulesApplier = loader.getRulesApplier();
+        int sampleSize = 100;
+        List<String> values = new ArrayList<>(sampleSize);
+
+        for (Column dataMapping : model.getDataMappingColumns()) {
+            if ((dataMapping.getType() == DataTypeMapping.DATE) && (dataMapping.getName().equals("CREATED_DATE"))) {
+                // Test directly through generator method and that it converts to Phoenix type
+                assertRandomDateValue(dataMapping, rulesApplier);
+
+                // Test through data value method, which is normal path
+                // Do this 20 times and we should hit each possibility at least once.
+                for (int i = 0; i < 20; i++) {
+                    DataValue value = rulesApplier.getDataValue(dataMapping);
+                    assertNotNull("Could not retrieve DataValue for random 
DATE.", value);
+                    assertNotNull("Could not retrieve a value in DataValue for 
random DATE.", value.getValue());
+                    if (value.getMinValue() != null) {
+                        // Check that dates are between min/max
+                        assertDateBetween(value);
+                    }
+                }
+            }
+        }
+    }
+
+    @Test
+    public void testNullChance() throws Exception {
+        XMLConfigParser parser = new XMLConfigParser(matcherScenario);
+        DataModel model = parser.getDataModels().get(0);
+        DataLoader loader = new DataLoader(parser);
+        RulesApplier rulesApplier = loader.getRulesApplier();
+        int sampleSize = 100;
+        List<String> values = new ArrayList<>(sampleSize);
+
+        for (Column dataMapping : model.getDataMappingColumns()) {
+            DataValue value = rulesApplier.getDataValue(dataMapping);
+            if (dataMapping.getNullChance() == 0) {
+                // A 0% chance of getting null means we should never have an empty string returned
+                assertFalse("Got an empty value for a column with a nullChance of 0.", value.getValue().equals(""));
+            } else if (dataMapping.getNullChance() == 100) {
+                // A 100% chance of getting null means we should always have an empty string returned
+                assertTrue("Expected an empty value for a column with a nullChance of 100.", value.getValue().equals(""));
+            } else if ((dataMapping.getNullChance() == 90)) {
+                // You can't really assert on this, but you can eyeball it when debugging.
+                for (int i = 0; i < sampleSize; i++) {
+                    DataValue tVal = rulesApplier.getDataValue(dataMapping);
+                    values.add(tVal.getValue());
+                }
+                Collections.sort(values);
+            }
+        }
+    }
+
+    @Test
+    public void testSequentialDataSequence() throws Exception {
+        XMLConfigParser parser = new XMLConfigParser(matcherScenario);
+        DataModel model = parser.getDataModels().get(0);
+        DataLoader loader = new DataLoader(parser);
+        RulesApplier rulesApplier = loader.getRulesApplier();
+
+        Column targetColumn = null;
+        for (Column column : model.getDataMappingColumns()) {
+            DataSequence sequence = column.getDataSequence();
+            if (sequence == DataSequence.SEQUENTIAL) {
+                targetColumn = column;
+                break;
+            }
+        }
+        assertNotNull("Could not find a DataSequence.SEQENTIAL rule.", 
targetColumn);
+        assertMultiThreadedIncrementValue(targetColumn, rulesApplier);
+    }
+
+    /**
+     * Verifies that we can generate a date between two specific dates.
+     *
+     * @param dataMapping
+     * @param rulesApplier
+     * @throws Exception
+     */
+    private void assertRandomDateValue(Column dataMapping, RulesApplier rulesApplier) throws Exception {
+        List<DataValue> dataValues = dataMapping.getDataValues();
+        DataValue ruleValue = dataValues.get(2);
+        String dt = rulesApplier.generateRandomDate(ruleValue.getMinValue(), 
ruleValue.getMaxValue());
+        ruleValue.setValue(dt);
+        assertDateBetween(ruleValue);
+    }
+
+    /**
+     * This method tests {@link org.apache.phoenix.pherf.configuration.DataSequence} SEQUENTIAL.
+     * It ensures the values returned always increase uniquely. The RulesApplier will be accessed by multiple writers,
+     * so we must ensure the increment is thread-safe.
+     */
+    private void assertMultiThreadedIncrementValue(final Column column, final RulesApplier rulesApplier) throws Exception {
+        final int threadCount = 30;
+        final int increments = 100;
+        final Set<String> testSet = new TreeSet<>();
+        List<Thread> threadList = new ArrayList<>();
+        for (int i = 0; i < threadCount; i++) {
+            Thread t = new Thread() {
+
+                @Override
+                public void run() {
+                    for (int i = 0; i < increments; i++) {
+                        try {
+                            DataValue value = rulesApplier.getDataValue(column);
+                            String strValue = value.getValue();
+                            synchronized (testSet) {
+                                assertFalse("Incrementer gave a duplicate 
value: " + strValue, testSet.contains(strValue));
+                                assertTrue("Length did not equal expected.",
+                                        strValue.length() == 
column.getLength());
+                                testSet.add(strValue);
+                            }
+                        } catch (Exception e) {
+                            fail("Caught an exception during test: " + 
e.getMessage());
+                        }
+                    }
+                }
+            };
+            t.start();
+            threadList.add(t);
+        }
+
+        // Wait for threads to finish
+        for (Thread t : threadList) {
+            try {
+                t.join();
+            } catch (InterruptedException e) {
+                fail("There was a problem reading thread: " + e.getMessage());
+            }
+        }
+
+        assertTrue("Expected count in increments did not match expected", 
testSet.size() == (threadCount * increments));
+    }
+
+    @Test
+    public void testValueListRule() throws Exception {
+        List<String> expectedValues = new ArrayList<>();
+        expectedValues.add("aAAyYhnNbBs9kWk");
+        expectedValues.add("bBByYhnNbBs9kWu");
+        expectedValues.add("cCCyYhnNbBs9kWr");
+
+        XMLConfigParser parser = new XMLConfigParser(".*test_scenario.xml");
+        DataLoader loader = new DataLoader(parser);
+        RulesApplier rulesApplier = loader.getRulesApplier();
+        Scenario scenario = parser.getScenarios().get(0);
+
+        Column simPhxCol = new Column();
+        simPhxCol.setName("PARENT_ID");
+        simPhxCol.setType(DataTypeMapping.CHAR);
+
+        // Running this 10 times gives a reasonable chance that all the values will appear at least once
+        for (int i = 0; i < 10; i++) {
+            DataValue value = rulesApplier.getDataForRule(scenario, simPhxCol);
+            assertTrue("Got a value not in the list for the rule. :" + 
value.getValue(), expectedValues.contains(value.getValue()));
+        }
+    }
+
+    /**
+     * Asserts that the value field is between the min/max value fields
+     *
+     * @param value
+     */
+    private void assertDateBetween(DataValue value) {
+        DateTimeFormatter fmtr = DateTimeFormat.forPattern(PherfConstants.DEFAULT_DATE_PATTERN);
+
+        DateTime dt = fmtr.parseDateTime(value.getValue());
+        DateTime min = fmtr.parseDateTime(value.getMinValue());
+        DateTime max = fmtr.parseDateTime(value.getMaxValue());
+
+        assertTrue("Value " + dt + " is not after minValue", dt.isAfter(min));
+        assertTrue("Value " + dt + " is not before maxValue", 
dt.isBefore(max));
+    }
+}
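
A note on assertMultiThreadedIncrementValue above: an assertion that fails inside run() throws an AssertionError in the worker thread, which the catch (Exception e) block does not catch and the main JUnit thread never observes, so the specific failure message is lost and the problem only surfaces indirectly through the final size check. A hypothetical restructuring (not part of this commit, and assuming the java.util.concurrent imports) could submit Callables and propagate worker failures through Future.get():

    ExecutorService pool = Executors.newFixedThreadPool(threadCount);
    List<Future<Void>> futures = new ArrayList<>();
    for (int i = 0; i < threadCount; i++) {
        futures.add(pool.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                for (int j = 0; j < increments; j++) {
                    DataValue value = rulesApplier.getDataValue(column);
                    synchronized (testSet) {
                        // TreeSet.add() returns false on duplicates, so this doubles as the uniqueness check
                        assertTrue("Incrementer gave a duplicate value", testSet.add(value.getValue()));
                    }
                }
                return null;
            }
        }));
    }
    for (Future<Void> f : futures) {
        f.get();    // any worker AssertionError or Exception resurfaces here, wrapped in an ExecutionException
    }
    pool.shutdown();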

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4b516559/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/SchemaReaderTest.java
----------------------------------------------------------------------
diff --git 
a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/SchemaReaderTest.java 
b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/SchemaReaderTest.java
new file mode 100644
index 0000000..e8d1321
--- /dev/null
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/SchemaReaderTest.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *   or more contributor license agreements.  See the NOTICE file
+ *   distributed with this work for additional information
+ *   regarding copyright ownership.  The ASF licenses this file
+ *   to you under the Apache License, Version 2.0 (the
+ *   "License"); you may not use this file except in compliance
+ *   with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ */
+
+package org.apache.phoenix.pherf;
+
+import org.apache.phoenix.pherf.configuration.Column;
+import org.apache.phoenix.pherf.configuration.DataModel;
+import org.apache.phoenix.pherf.configuration.Scenario;
+import org.apache.phoenix.pherf.configuration.XMLConfigParser;
+import org.apache.phoenix.pherf.schema.SchemaReader;
+import org.junit.Test;
+
+import java.net.URL;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.sql.Connection;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public class SchemaReaderTest extends BaseTestWithCluster {
+
+    @Test
+    public void testSchemaReader() {
+        // Test for the unit test version of the schema files.
+        assertApplySchemaTest();
+    }
+
+    private void assertApplySchemaTest() {
+        try {
+            SchemaReader reader = new SchemaReader(".*datamodel/.*test.*sql");
+
+            List<Path> resources = new ArrayList<>(reader.getResourceList());
+            assertTrue("Could not pull list of schema files.", 
resources.size() > 0);
+            assertNotNull("Could not read schema file.", 
this.getClass().getResourceAsStream(
+                    PherfConstants.RESOURCE_DATAMODEL + "/" + 
resources.get(0).getFileName().toString()));
+            assertNotNull("Could not read schema file.", 
reader.resourceToString(resources.get(0)));
+            reader.applySchema();
+
+            Connection connection = null;
+            URL resourceUrl = getClass().getResource("/scenario/test_scenario.xml");
+            assertNotNull("Test data XML file is missing", resourceUrl);
+            connection = util.getConnection();
+            Path resourcePath = Paths.get(resourceUrl.toURI());
+            DataModel data = XMLConfigParser.readDataModel(resourcePath);
+            List<Scenario> scenarioList = data.getScenarios();
+            Scenario scenario = scenarioList.get(0);
+            List<Column> columnList = util.getColumnsFromPhoenix(scenario.getSchemaName(), scenario.getTableNameWithoutSchemaName(), connection);
+            assertTrue("Could not retrieve Metadata from Phoenix", columnList.size() > 0);
+        } catch (Exception e) {
+            e.printStackTrace();
+            fail("Could not initialize SchemaReader: " + e.getMessage());
+        }
+    }
+}
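
The metadata assertion at the end of assertApplySchemaTest goes through the util.getColumnsFromPhoenix() helper inherited from BaseTestWithCluster, which is not included in this patch. As a rough plain-JDBC equivalent (an illustrative sketch only, not the helper's actual implementation), the same check could be expressed against java.sql.DatabaseMetaData:

    DatabaseMetaData md = connection.getMetaData();
    ResultSet rs = md.getColumns(null, scenario.getSchemaName(),
            scenario.getTableNameWithoutSchemaName(), null);
    int columnCount = 0;
    try {
        while (rs.next()) {
            columnCount++;    // one row per column of the scenario's table
        }
    } finally {
        rs.close();
    }
    assertTrue("Could not retrieve Metadata from Phoenix", columnCount > 0);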

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4b516559/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/TestHBaseProps.java
----------------------------------------------------------------------
diff --git 
a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/TestHBaseProps.java 
b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/TestHBaseProps.java
new file mode 100644
index 0000000..0e8b6d3
--- /dev/null
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/TestHBaseProps.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *   or more contributor license agreements.  See the NOTICE file
+ *   distributed with this work for additional information
+ *   regarding copyright ownership.  The ASF licenses this file
+ *   to you under the Apache License, Version 2.0 (the
+ *   "License"); you may not use this file except in compliance
+ *   with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ */
+
+package org.apache.phoenix.pherf;
+
+import static org.junit.Assert.*;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.junit.Test;
+
+public class TestHBaseProps {
+       
+    @Test
+    public void testCheckHBaseProps() {
+        Configuration conf = HBaseConfiguration.create();
+        assertTrue("did not get correct threadpool size", conf.get("phoenix.query.threadPoolSize").equals("128"));
+        assertTrue("did not get correct concurrentrequests size", conf.get("hbase.sfdc.concurrentrequests.max").equals("2147483647"));
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4b516559/phoenix-pherf/src/test/resources/datamodel/test_schema.sql
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/test/resources/datamodel/test_schema.sql 
b/phoenix-pherf/src/test/resources/datamodel/test_schema.sql
new file mode 100644
index 0000000..498f832
--- /dev/null
+++ b/phoenix-pherf/src/test/resources/datamodel/test_schema.sql
@@ -0,0 +1,15 @@
+CREATE TABLE IF NOT EXISTS PHERF.TEST_TABLE (
+    TENANT_ID CHAR(15) NOT NULL,
+    PARENT_ID CHAR(15) NOT NULL,
+    CREATED_DATE DATE NOT NULL,
+    FIELD VARCHAR,
+    OLDVAL_STRING VARCHAR,
+    NEWVAL_STRING VARCHAR,
+    SOME_INT INTEGER
+    CONSTRAINT PK PRIMARY KEY
+    (
+        TENANT_ID,
+        PARENT_ID,
+        CREATED_DATE DESC
+    )
+) VERSIONS=1,MULTI_TENANT=true,SALT_BUCKETS=16
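
SchemaReader.applySchema() in the test above is what executes this DDL against the test cluster. A hypothetical stand-alone sketch of applying the same file over a Phoenix JDBC connection (the connection URL and file path here are illustrative, not taken from this commit):

    String ddl = new String(Files.readAllBytes(
            Paths.get("src/test/resources/datamodel/test_schema.sql")));
    Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
    try {
        conn.createStatement().execute(ddl);   // CREATE TABLE IF NOT EXISTS PHERF.TEST_TABLE ...
        conn.commit();
    } finally {
        conn.close();
    }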

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4b516559/phoenix-pherf/src/test/resources/hbase-site.xml
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/test/resources/hbase-site.xml 
b/phoenix-pherf/src/test/resources/hbase-site.xml
new file mode 100644
index 0000000..4972828
--- /dev/null
+++ b/phoenix-pherf/src/test/resources/hbase-site.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~   or more contributor license agreements.  See the NOTICE file
+  ~   distributed with this work for additional information
+  ~   regarding copyright ownership.  The ASF licenses this file
+  ~   to you under the Apache License, Version 2.0 (the
+  ~   "License"); you may not use this file except in compliance
+  ~   with the License.  You may obtain a copy of the License at
+  ~
+  ~   http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~   Unless required by applicable law or agreed to in writing, software
+  ~   distributed under the License is distributed on an "AS IS" BASIS,
+  ~   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~   See the License for the specific language governing permissions and
+  ~   limitations under the License.
+  -->
+<configuration>
+    <property>
+        <name>phoenix.query.threadPoolSize</name>
+        <value>128</value>
+    </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4b516559/phoenix-pherf/src/test/resources/pherf.test.properties
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/test/resources/pherf.test.properties 
b/phoenix-pherf/src/test/resources/pherf.test.properties
new file mode 100644
index 0000000..9816fc8
--- /dev/null
+++ b/phoenix-pherf/src/test/resources/pherf.test.properties
@@ -0,0 +1,47 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+#   or more contributor license agreements.  See the NOTICE file
+#   distributed with this work for additional information
+#   regarding copyright ownership.  The ASF licenses this file
+#   to you under the Apache License, Version 2.0 (the
+#   "License"); you may not use this file except in compliance
+#   with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+SOME_TEST_PROP=test_prop
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4b516559/phoenix-pherf/src/test/resources/scenario/test_scenario.xml
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/test/resources/scenario/test_scenario.xml 
b/phoenix-pherf/src/test/resources/scenario/test_scenario.xml
new file mode 100644
index 0000000..a13497f
--- /dev/null
+++ b/phoenix-pherf/src/test/resources/scenario/test_scenario.xml
@@ -0,0 +1,161 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~   or more contributor license agreements.  See the NOTICE file
+  ~   distributed with this work for additional information
+  ~   regarding copyright ownership.  The ASF licenses this file
+  ~   to you under the Apache License, Version 2.0 (the
+  ~   "License"); you may not use this file except in compliance
+  ~   with the License.  You may obtain a copy of the License at
+  ~
+  ~   http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~   Unless required by applicable law or agreed to in writing, software
+  ~   distributed under the License is distributed on an "AS IS" BASIS,
+  ~   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~   See the License for the specific language governing permissions and
+  ~   limitations under the License.
+  -->
+
+<datamodel release="192" name="test_scenario">
+    <datamapping>
+        <column>
+            <!-- This column type defines what will generally happen to VARCHAR fields unless they are explicitly defined or overridden elsewhere -->
+            <type>VARCHAR</type>
+            <dataSequence>RANDOM</dataSequence>
+            <length>15</length>
+            <name>GENERAL_VARCHAR</name>
+        </column>
+        <column>
+            <type>CHAR</type>
+            <dataSequence>SEQUENTIAL</dataSequence>
+            <length>15</length>
+            <name>GENERAL_CHAR</name>
+        </column>
+        <column>
+            <type>DATE</type>
+            <!--SEQUENTIAL is unsupported for DATE -->
+            <dataSequence>RANDOM</dataSequence>
+            <!-- Number [0-100] that represents the probability of creating a null value -->
+            <!-- The higher the number, the more likely the returned value will be null -->
+            <!-- Leaving this tag out is equivalent to having a 0 probability, i.e. never null -->
+            <nullChance>0</nullChance>
+            <minValue>1975</minValue>
+            <maxValue>2025</maxValue>
+            <name>GENERAL_DATE</name>
+        </column>
+        <column>
+            <type>DECIMAL</type>
+            <dataSequence>RANDOM</dataSequence>
+            <minValue>0</minValue>
+            <maxValue>1</maxValue>
+
+            <!-- Precision is limited to 18 -->
+            <precision>18</precision>
+            <!-- Number [0-100] that represents the probability of creating a null value -->
+            <!-- The higher the number, the more likely the returned value will be null -->
+            <!-- Leaving this tag out is equivalent to having a 0 probability, i.e. never null -->
+            <nullChance>10</nullChance>
+            <name>GENERAL_DECIMAL</name>
+        </column>
+        <column>
+            <type>INTEGER</type>
+            <dataSequence>RANDOM</dataSequence>
+            <minValue>1</minValue>
+            <maxValue>50000000</maxValue>
+            <!-- Number [0-100] that represents the probability of creating a null value -->
+            <!-- The higher the number, the more likely the returned value will be null -->
+            <!-- Leaving this tag out is equivalent to having a 0 probability, i.e. never null -->
+            <nullChance>100</nullChance>
+            <name>GENERAL_INTEGER</name>
+        </column>
+        <column>
+            <type>DATE</type>
+            <name>CREATED_DATE</name>
+            <minValue>1975</minValue>
+            <maxValue>2025</maxValue>
+            <valuelist>
+                <!-- Distribution weights control how often each value or range below is picked -->
+                <datavalue distribution="80">
+                    <!-- Joda time format: yyyy-MM-dd HH:mm:ss.SSS ZZZ -->
+                    <minValue>2019-09-15 00:01:00.000</minValue>
+                    <maxValue>2019-09-15 11:00:00.000</maxValue>
+                </datavalue>
+                <datavalue distribution="10">
+                    <value>2019-09-19 00:01:00</value>
+                </datavalue>
+                <datavalue distribution="10">
+                    <minValue>2019-09-22 00:01:00.000</minValue>
+                    <maxValue>2019-09-22 00:01:00.300</maxValue>
+                </datavalue>
+            </valuelist>
+        </column>
+        <column>
+            <type>CHAR</type>
+            <userDefined>true</userDefined>
+            <dataSequence>LIST</dataSequence>
+            <length>15</length>
+            <name>PARENT_ID</name>
+            <valuelist>
+                <!-- Distributes according to specified values. These must total 100 -->
+                <datavalue distribution="60">
+                    <value>aAAyYhnNbBs9kWk</value>
+                </datavalue>
+                <datavalue distribution="20">
+                    <value>bBByYhnNbBs9kWu</value>
+                </datavalue>
+                <datavalue distribution="20">
+                    <value>cCCyYhnNbBs9kWr</value>
+                </datavalue>
+            </valuelist>
+        </column>
+        <column>
+            <!-- This column type defines what will generally happen to VARCHAR fields unless they are explicitly defined or overridden elsewhere -->
+            <type>VARCHAR</type>
+            <length>15</length>
+            <userDefined>true</userDefined>
+            <dataSequence>RANDOM</dataSequence>
+            <name>OLDVAL_STRING</name>
+            <prefix>MYPRFX</prefix>
+        </column>
+        <column>
+            <!-- This column type defines what will generally happen to VARCHAR fields unless they are explicitly defined or overridden elsewhere -->
+            <type>VARCHAR</type>
+            <length>15</length>
+            <userDefined>true</userDefined>
+            <dataSequence>SEQUENTIAL</dataSequence>
+            <name>NEWVAL_STRING</name>
+            <prefix>TSTPRFX</prefix>
+        </column>
+
+    </datamapping>
+    <scenarios>
+        <scenario tableName="PHERF.TEST_TABLE" rowCount="50" name="testScenario">
+            <!-- Scenario-level rule overrides will be unsupported in V1.
+                    You can use the general datamappings in the meantime -->
+            <dataOverride>
+                <column>
+                    <type>VARCHAR</type>
+                    <userDefined>true</userDefined>
+                    <dataSequence>RANDOM</dataSequence>
+                    <length>10</length>
+                    <name>FIELD</name>
+                </column>
+            </dataOverride>
+            <!--Note: 1. Execution stops at the minimum of executionDurationInMs or numberOfExecutions, whichever is reached first.
+                      2. DDL included in a query is executed only once, at the start of querySet execution.
+            -->
+            <querySet concurrency="1-3" executionType="SERIAL" executionDurationInMs="5000" numberOfExecutions="100">
+                <query id="q1" tenantId="123456789012345" expectedAggregateRowCount="0" statement="select count(*) from PHERF.TEST_TABLE"/>
+                <!-- queryGroup is a way to organize queries across tables or scenario files.
+                    The value will be dumped to the results. This gives a value to group by when reporting, so queries can be compared -->
+                <query id="q2" queryGroup="g1" statement="select sum(SOME_INT) from PHERF.TEST_TABLE"/>
+            </querySet>
+            <!--Execution stops at the minimum of executionDurationInMs or numberOfExecutions, whichever is reached first -->
+            <querySet concurrency="2-3" executionType="PARALLEL" executionDurationInMs="10000" numberOfExecutions="10">
+                <query id="q3" statement="select count(*) from PHERF.TEST_TABLE"/>
+                <query id="q4" statement="select sum(DIVISION) from 
PHERF.TEST_TABLE"/>
+            </querySet>
+        </scenario>
+    </scenarios>
+</datamodel>
\ No newline at end of file
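
The scenario file above is what the tests in this patch load through XMLConfigParser. A minimal sketch of that consumption path, using only calls that appear in the tests earlier in this commit:

    XMLConfigParser parser = new XMLConfigParser(".*test_scenario.xml");
    DataModel model = parser.getDataModels().get(0);     // exposes the <datamapping> columns
    Scenario scenario = parser.getScenarios().get(0);    // tableName="PHERF.TEST_TABLE", rowCount="50"
    DataLoader loader = new DataLoader(parser);
    RulesApplier rules = loader.getRulesApplier();
    DataValue generated = rules.getDataValue(model.getDataMappingColumns().get(0));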

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4b516559/phoenix-pherf/standalone/pherf.sh
----------------------------------------------------------------------
diff --git a/phoenix-pherf/standalone/pherf.sh 
b/phoenix-pherf/standalone/pherf.sh
new file mode 100755
index 0000000..483830c
--- /dev/null
+++ b/phoenix-pherf/standalone/pherf.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+source config/env.sh
+PHERF_HOME=$(cd "`dirname $0`" && pwd)
+CLASSPATH=${PHERF_HOME}/config:${CLASSPATH}
+
+for f in $PHERF_HOME/lib/*.jar; do
+  CLASSPATH=${CLASSPATH}:$f;
+done
+
+CMD="time $JAVA_HOME/bin/java $REMOTE_DEBUG -Dapp.home=$PHERF_HOME $ENV_PROPS -Xms512m -Xmx3072m -cp $CLASSPATH org.apache.phoenix.pherf.Pherf ${@}"
+eval $CMD
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4b516559/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index dee91b8..a77a594 100644
--- a/pom.xml
+++ b/pom.xml
@@ -27,6 +27,7 @@
     <module>phoenix-flume</module>
     <module>phoenix-pig</module>
     <module>phoenix-assembly</module>
+    <module>phoenix-pherf</module>
   </modules>
 
   <repositories>
