sakshamgangwar commented on a change in pull request #896:
URL: https://github.com/apache/phoenix/pull/896#discussion_r494574352



##########
File path: 
phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
##########
@@ -127,6 +146,77 @@ public void testMapReduceSnapshotWithLimit() throws 
Exception {
     configureJob(job, tableName, inputQuery, null, false);
   }
 
+  @Test
+  public void testSnapshotMapReduceJobNotImpactingTableMapReduceJob() throws 
Exception {
+    //Submitting and asserting successful Map Reduce Job over snapshots
+    PhoenixMapReduceUtil
+            .setInput(job, PhoenixIndexDBWritable.class, SNAPSHOT_NAME, 
tableName, tmpDir, null,
+                    FIELD1, FIELD2, FIELD3);
+    configureJob(job, tableName, null, null, false);
+
+    // create table
+    Connection conn = DriverManager.getConnection(getUrl());
+    tableName = generateUniqueName();
+    conn.createStatement().execute(String.format(CREATE_TABLE, tableName));
+    conn.commit();
+
+    //Submitting next map reduce job over table and making sure that it does 
not fail with
+    // any wrong snapshot properties set in common configurations which are
+    // used across all jobs.
+    createAndTestJob(conn);
+  }
+
+  private void createAndTestJob(Connection conn)
+          throws SQLException, IOException, InterruptedException, 
ClassNotFoundException {
+    String stockTableName = generateUniqueName();
+    String stockStatsTableName = generateUniqueName();
+    conn.createStatement().execute(String.format(CREATE_STOCK_TABLE, 
stockTableName));
+    conn.createStatement().execute(String.format(CREATE_STOCK_STATS_TABLE, 
stockStatsTableName));
+    conn.commit();
+    final Configuration conf = ((PhoenixConnection) 
conn).getQueryServices().getConfiguration();
+    Job job = Job.getInstance(conf);
+    PhoenixMapReduceUtil.setInput(job, MapReduceIT.StockWritable.class, 
PhoenixTestingInputFormat.class,
+            stockTableName, null, STOCK_NAME, RECORDING_YEAR, "0." + 
RECORDINGS_QUARTER);
+    testJob(conn, job, stockTableName, stockStatsTableName);
+  }
+
+  private void testJob(Connection conn, Job job, String stockTableName, String 
stockStatsTableName)
+          throws SQLException, InterruptedException, IOException, 
ClassNotFoundException {
+    assertEquals("Failed to reset getRegionBoundaries counter for 
scanGrouper", 0,
+            
TestingMapReduceParallelScanGrouper.getNumCallsToGetRegionBoundaries());
+    upsertData(conn, stockTableName);
+
+    // only run locally, rather than having to spin up a MiniMapReduce cluster 
and lets us use breakpoints
+    job.getConfiguration().set("mapreduce.framework.name", "local");
+
+    setOutput(job, stockStatsTableName);
+
+    job.setMapperClass(MapReduceIT.StockMapper.class);
+    job.setReducerClass(MapReduceIT.StockReducer.class);
+    job.setOutputFormatClass(PhoenixOutputFormat.class);
+
+    job.setMapOutputKeyClass(Text.class);
+    job.setMapOutputValueClass(DoubleWritable.class);
+    job.setOutputKeyClass(NullWritable.class);
+    job.setOutputValueClass(MapReduceIT.StockWritable.class);
+
+    // run job and assert if success
+    assertTrue("Job didn't complete successfully! Check logs for reason.", 
job.waitForCompletion(true));

Review comment:
       @ChinmaySKulkarni Added the assertion both after the snapshot-based MR 
job and after the table-based MR job.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to