tkhurana commented on code in PR #1883:
URL: https://github.com/apache/phoenix/pull/1883#discussion_r1638967374
##########
phoenix-core-client/src/main/java/org/apache/phoenix/util/ValidateLastDDLTimestampUtil.java:
##########

@@ -0,0 +1,231 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.util;
+
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.phoenix.coprocessor.generated.RegionServerEndpointProtos;
+import org.apache.phoenix.exception.StaleMetadataCacheException;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.TableRef;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Utility class for last ddl timestamp validation from the client.
+ */
+public class ValidateLastDDLTimestampUtil {
+
+    private ValidateLastDDLTimestampUtil() {}
+
+    private static final Logger LOGGER = LoggerFactory
+            .getLogger(ValidateLastDDLTimestampUtil.class);
+    private static final List<PTableType> ALLOWED_PTABLE_TYPES = Arrays.asList(new PTableType[]
+            {PTableType.TABLE, PTableType.VIEW, PTableType.INDEX, PTableType.SYSTEM});
+
+    public static String getInfoString(PName tenantId, List<TableRef> tableRefs) {
+        StringBuilder sb = new StringBuilder();
+        sb.append(String.format("Tenant: %s, ", tenantId));
+        for (TableRef tableRef : tableRefs) {
+            sb.append(String.format("{Schema: %s, Table: %s},",
+                    tableRef.getTable().getSchemaName(),
+                    tableRef.getTable().getTableName()));
+        }
+        return sb.toString();
+    }
+
+    /**
+     * Get whether last ddl timestamp validation is enabled on the connection
+     * @param connection
+     * @return true if it is enabled, false otherwise
+     */
+    public static boolean getValidateLastDdlTimestampEnabled(PhoenixConnection connection) {
+        return connection.getQueryServices().getProps()
+                .getBoolean(QueryServices.LAST_DDL_TIMESTAMP_VALIDATION_ENABLED,
+                        QueryServicesOptions.DEFAULT_LAST_DDL_TIMESTAMP_VALIDATION_ENABLED);
+    }
+
+    /**
+     * Get whether last ddl timestamp validation is enabled in the Configuration
+     * @param config
+     * @return true if it is enabled, false otherwise
+     */
+    public static boolean getValidateLastDdlTimestampEnabled(Configuration config) {
+        return config.getBoolean(
+                QueryServices.LAST_DDL_TIMESTAMP_VALIDATION_ENABLED,
+                QueryServicesOptions.DEFAULT_LAST_DDL_TIMESTAMP_VALIDATION_ENABLED);
+    }
+
+    /**
+     * Verifies that table metadata for given tables is up-to-date in client cache with server.
+     * A random live region server is picked for invoking the RPC to validate LastDDLTimestamp.
+     * Retry once if there was an error performing the RPC, otherwise throw the Exception.
+     *
+     * @param allTableRefs
+     * @param doRetry
+     * @throws SQLException
+     */
+    public static void validateLastDDLTimestamp(PhoenixConnection conn,
+                                                List<TableRef> allTableRefs,
+                                                boolean doRetry) throws SQLException {
+        List<TableRef> tableRefs = filterTableRefs(allTableRefs);
+        String infoString = getInfoString(conn.getTenantId(), tableRefs);
+        try (Admin admin = conn.getQueryServices().getAdmin()) {
+            // get all live region servers
+            List<ServerName> regionServers
+                    = conn.getQueryServices().getLiveRegionServers();
+            // pick one at random
+            ServerName regionServer
+                    = regionServers.get(ThreadLocalRandom.current().nextInt(regionServers.size()));
+
+            LOGGER.debug("Sending DDL timestamp validation request for {} to regionserver {}",
+                    infoString, regionServer);
+
+            // RPC
+            RegionServerEndpointProtos.RegionServerEndpointService.BlockingInterface
+                    service = RegionServerEndpointProtos.RegionServerEndpointService
+                    .newBlockingStub(admin.coprocessorService(regionServer));
+            RegionServerEndpointProtos.ValidateLastDDLTimestampRequest request
+                    = getValidateDDLTimestampRequest(tableRefs);
+            service.validateLastDDLTimestamp(null, request);
+        } catch (Exception e) {
+            SQLException parsedException = ClientUtil.parseServerException(e);
+            if (parsedException instanceof StaleMetadataCacheException) {
+                throw parsedException;
+            }
+            //retry once for any exceptions other than StaleMetadataCacheException
+            LOGGER.error("Error in validating DDL timestamp for {}", infoString, parsedException);
+            if (doRetry) {
+                // update the list of live region servers
+                conn.getQueryServices().refreshLiveRegionServers();
+                validateLastDDLTimestamp(conn, tableRefs, false);
+                return;
+            }
+            throw parsedException;
+        }
+    }
+
+    /**
+     * Build a request for the validateLastDDLTimestamp RPC for the given tables.
+     * 1. For a view, we need to add all its ancestors to the request
+     *    in case something changed in the hierarchy.
+     * 2. For an index, we need to add its parent table to the request
+     *    in case the index was dropped.
+     * 3. Add all indexes of a table/view in case index state was changed.
+     *
+     * @param tableRefs
+     * @return ValidateLastDDLTimestampRequest for the table in tableRef
+     */
+    private static RegionServerEndpointProtos.ValidateLastDDLTimestampRequest
+            getValidateDDLTimestampRequest(List<TableRef> tableRefs) {
+
+        RegionServerEndpointProtos.ValidateLastDDLTimestampRequest.Builder requestBuilder
+                = RegionServerEndpointProtos.ValidateLastDDLTimestampRequest.newBuilder();
+        RegionServerEndpointProtos.LastDDLTimestampRequest.Builder innerBuilder;
+
+        for (TableRef tableRef : tableRefs) {
+
+            // validate all ancestors of this PTable if any
+            // index -> base table
+            // view -> parent view and its ancestors
+            // view index -> view and its ancestors
+            for (Map.Entry<PTableKey, Long> entry
+                    : tableRef.getTable().getAncestorLastDDLTimestampMap().entrySet()) {
+                innerBuilder = RegionServerEndpointProtos.LastDDLTimestampRequest.newBuilder();
+                PTableKey ancestorKey = entry.getKey();
+                setLastDDLTimestampRequestParameters(innerBuilder, ancestorKey, entry.getValue());
+                requestBuilder.addLastDDLTimestampRequests(innerBuilder);
+            }
+
+            // add the current table to the request
+            PTable ptable = tableRef.getTable();
+            innerBuilder = RegionServerEndpointProtos.LastDDLTimestampRequest.newBuilder();
+            setLastDDLTimestampRequestParameters(innerBuilder, ptable.getKey(),
+                    ptable.getLastDDLTimestamp());
+            requestBuilder.addLastDDLTimestampRequests(innerBuilder);
+
+            // add all indexes of the current table
+            for (PTable idxPTable : tableRef.getTable().getIndexes()) {

Review Comment:
   When validating timestamps, we validate the timestamps of all the ancestors and the current table, as well as its indexes. When the server returns StaleMetadataCacheException, are we also re-populating the client cache for all the objects that we are validating? Are we also re-populating the indexes of the current table?
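   For illustration only, a minimal caller-side sketch of what "re-populating every validated object" could look like; `StaleCacheRefreshSketch` and `refreshCachedTable` are hypothetical placeholders, not APIs introduced by this PR, and the only point is that a refresh would need to cover the ancestors and the indexes, not just the table itself:

   ```java
   import java.sql.SQLException;
   import java.util.List;

   import org.apache.phoenix.exception.StaleMetadataCacheException;
   import org.apache.phoenix.jdbc.PhoenixConnection;
   import org.apache.phoenix.schema.PTable;
   import org.apache.phoenix.schema.PTableKey;
   import org.apache.phoenix.schema.TableRef;
   import org.apache.phoenix.util.ValidateLastDDLTimestampUtil;

   public class StaleCacheRefreshSketch {

       static void validateAndRefresh(PhoenixConnection conn, List<TableRef> tableRefs)
               throws SQLException {
           try {
               ValidateLastDDLTimestampUtil.validateLastDDLTimestamp(conn, tableRefs, true);
           } catch (StaleMetadataCacheException e) {
               // Refresh every object whose timestamp was validated: the table itself,
               // all of its ancestors, and all of its indexes.
               for (TableRef tableRef : tableRefs) {
                   PTable table = tableRef.getTable();
                   refreshCachedTable(conn, table.getKey());
                   for (PTableKey ancestorKey
                           : table.getAncestorLastDDLTimestampMap().keySet()) {
                       refreshCachedTable(conn, ancestorKey);
                   }
                   for (PTable index : table.getIndexes()) {
                       refreshCachedTable(conn, index.getKey());
                   }
               }
               throw e;
           }
       }

       // Placeholder: stands in for whatever mechanism the PR uses to re-resolve a
       // table from SYSTEM.CATALOG and replace the stale client-side cache entry.
       static void refreshCachedTable(PhoenixConnection conn, PTableKey key)
               throws SQLException {
           // intentionally left abstract in this sketch
       }
   }
   ```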
