This is an automated email from the ASF dual-hosted git repository.
vjasani pushed a commit to branch 5.1
in repository https://gitbox.apache.org/repos/asf/phoenix.git
The following commit(s) were added to refs/heads/5.1 by this push:
new 52e0739dd6 PHOENIX-6826 Don't invalidate meta cache if
CQSI#getAllTableRegions and CQSI#getTableRegionLocation encounters
IOException (#1522)
52e0739dd6 is described below
commit 52e0739dd6df921988d63623625eab3e0c4a1b62
Author: Rushabh Shah
AuthorDate: Tue Nov 1 18:52:47 2022 -0700
PHOENIX-6826 Don't invalidate meta cache if CQSI#getAllTableRegions and
CQSI#getTableRegionLocation encounters IOException (#1522)
---
.gitignore | 1 +
.../phoenix/query/ConnectionQueryServicesImpl.java | 17 +
2 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/.gitignore b/.gitignore
index 39216267f3..1d33a1cbe0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,3 +31,4 @@ CSV_EXPORT/
phoenix-hbase-compat-1.3.0/
phoenix-hbase-compat-1.4.0/
phoenix-hbase-compat-1.5.0/
+*/hbase.log
diff --git
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 5234695077..5275364f59 100644
---
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -671,7 +671,7 @@ public class ConnectionQueryServicesImpl extends
DelegateQueryServices implement
* all region locations from the HTable doesn't.
*/
int retryCount = 0, maxRetryCount = 1;
-boolean reload =false;
+TableName table = TableName.valueOf(tableName);
while (true) {
try {
// We could surface the package projected
HConnectionImplementation.getNumberOfCachedRegionLocations
@@ -681,17 +681,17 @@ public class ConnectionQueryServicesImpl extends
DelegateQueryServices implement
byte[] currentKey = HConstants.EMPTY_START_ROW;
do {
HRegionLocation regionLocation =
((ClusterConnection)connection).getRegionLocation(
-TableName.valueOf(tableName), currentKey, reload);
+table, currentKey, false);
currentKey = getNextRegionStartKey(regionLocation,
currentKey);
locations.add(regionLocation);
} while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW));
return locations;
} catch (org.apache.hadoop.hbase.TableNotFoundException e) {
-String fullName = Bytes.toString(tableName);
-throw new TableNotFoundException(fullName);
+throw new TableNotFoundException(table.getNameAsString());
} catch (IOException e) {
+LOGGER.error("Exception encountered in getAllTableRegions for "
++ "table: {}, retryCount: {}",
table.getNameAsString(), retryCount, e);
if (retryCount++ < maxRetryCount) { // One retry, in case
split occurs while navigating
-reload = true;
continue;
}
throw new
SQLExceptionInfo.Builder(SQLExceptionCode.GET_TABLE_REGIONS_FAIL)
@@ -5697,16 +5697,17 @@ public class ConnectionQueryServicesImpl extends
DelegateQueryServices implement
* to which specified row belongs to.
*/
int retryCount = 0, maxRetryCount = 1;
-boolean reload =false;
while (true) {
+TableName table = TableName.valueOf(tableName);
try {
-return
connection.getRegionLocator(TableName.valueOf(tableName)).getRegionLocation(row,
reload);
+return
connection.getRegionLocator(table).getRegionLocation(row, false);
} catch (org.apache.hadoop.hbase.TableNotFoundException e) {
String fullName = Bytes.toString(tableName);
throw new
TableNotFoundException(SchemaUtil.getSchemaNameFromFullName(fullName),
SchemaUtil.getTableNameFromFullName(fullName));
} catch (IOException e) {
+LOGGER.error("Exception encountered in getTableRegionLocation
for "
++ "table: {}, retryCount: {}",
table.getNameAsString(), retryCount, e);
if (retryCount++ < maxRetryCount) { // One retry, in case
split occurs while navigating
-reload = true;
continue;
}
throw new
SQLExceptionInfo.Builder(SQLExceptionCode.GET_TABLE_REGIONS_FAIL)