rawlinp commented on a change in pull request #2785: In Traffic Router Support Snapshots which only update Delivery Services
URL: https://github.com/apache/trafficcontrol/pull/2785#discussion_r237172350
##########
File path: traffic_router/core/src/main/java/com/comcast/cdn/traffic_control/traffic_router/core/config/ConfigHandler.java
##########
@@ -128,136 +139,213 @@ public AnonymousIpDatabaseUpdater getAnonymousIpDatabaseUpdater() {
         return anonymousIpDatabaseUpdater;
     }
-    @SuppressWarnings({"PMD.CyclomaticComplexity", "PMD.NPathComplexity", "PMD.AvoidCatchingThrowable"})
-    public boolean processConfig(final String jsonStr) throws JsonUtilsException, IOException {
+    @SuppressWarnings({"PMD.AvoidCatchingThrowable"})
+    public boolean processConfig(final String snapJson, final String compJson) throws JsonUtilsException, IOException {
         isProcessing.set(true);
-        LOGGER.info("Entered processConfig");
-        if (jsonStr == null) {
+        LOGGER.debug("Entered processConfig");
+        if (snapJson == null) {
             trafficRouterManager.setCacheRegister(null);
             cancelled.set(false);
             isProcessing.set(false);
             publishStatusQueue.clear();
-            LOGGER.info("Exiting processConfig: No json data to process");
+            LOGGER.info("Exiting processConfig: No json data to process because snapshot was NULL.");
             return false;
         }
         Date date;
-        synchronized(configSync) {
+        synchronized (configSync) {
             final ObjectMapper mapper = new ObjectMapper();
-            final JsonNode jo = mapper.readTree(jsonStr);
-            final JsonNode config = JsonUtils.getJsonNode(jo, "config");
+            final JsonNode jo = mapper.readTree(snapJson);
             final JsonNode stats = JsonUtils.getJsonNode(jo, "stats");
-
+            final ObjectMapper compmapper = new ObjectMapper();
+            JsonNode cjo = null;
+            if (compJson != null) {
+                cjo = compmapper.readTree(compJson);
+            }
+            // Check to see if this is a new Snapshot
             final long sts = getSnapshotTimestamp(stats);
             date = new Date(sts * 1000L);
-
-            if (sts <= getLastSnapshotTimestamp()) {
+            if (sts < getLastSnapshotTimestamp()) {
                 cancelled.set(false);
                 isProcessing.set(false);
                 publishStatusQueue.clear();
-                LOGGER.info("Exiting processConfig: Incoming TrConfig snapshot timestamp (" + sts + ") is older or equal to the loaded timestamp (" + getLastSnapshotTimestamp() + "); unable to process");
+                LOGGER.info("Exiting processConfig: Incoming CrConfig snapshot timestamp (" + sts + ") is older than " +
+                        "the loaded timestamp (" + getLastSnapshotTimestamp() + "); unable to process");
                 return false;
             }
-
             try {
-                parseGeolocationConfig(config);
-                parseCoverageZoneNetworkConfig(config);
-                parseDeepCoverageZoneNetworkConfig(config);
-                parseRegionalGeoConfig(jo);
-                parseAnonymousIpConfig(jo);
-
-                final CacheRegister cacheRegister = new CacheRegister();
-                final JsonNode deliveryServicesJson = JsonUtils.getJsonNode(jo, "deliveryServices");
-                cacheRegister.setTrafficRouters(JsonUtils.getJsonNode(jo, "contentRouters"));
-                cacheRegister.setConfig(config);
-                cacheRegister.setStats(stats);
-                parseTrafficOpsConfig(config, stats);
-
-                final Map<String, DeliveryService> deliveryServiceMap = parseDeliveryServiceConfig(JsonUtils.getJsonNode(jo, deliveryServicesKey));
-
-                parseCertificatesConfig(config);
-                certificatesPublisher.setDeliveryServicesJson(deliveryServicesJson);
-                final ArrayList<DeliveryService> deliveryServices = new ArrayList<>();
-
-                if (deliveryServiceMap != null && !deliveryServiceMap.values().isEmpty()) {
-                    deliveryServices.addAll(deliveryServiceMap.values());
+                // Search for updates, adds and deletes to delivery services
+                final SnapshotEventsProcessor snapshotEventsProcessor = SnapshotEventsProcessor
+                        .diffCrConfigs(jo, cjo);
+
+                if (snapshotEventsProcessor.shouldLoadAll()) {
+                    if (loadEntireSnapshot(jo, snapshotEventsProcessor)) {
+                        ConfigHandler.setLastSnapshotTimestamp(sts);
+                        return true;
+                    }
+                } else if(processChangeEvents(jo, snapshotEventsProcessor)) {
+                    ConfigHandler.setLastSnapshotTimestamp(sts);
+                    return true;
                 }
+            } catch (ParseException e) {
+                LOGGER.error("Exiting processConfig: Failed to process config for snapshot from " + date, e);
+                return false;
+            } finally {
+                isProcessing.set(false);
+                cancelled.set(false);
+                publishStatusQueue.clear();
+            }
+            return false;
+        }
+    }
-                if (deliveryServiceMap != null && !deliveryServiceMap.values().isEmpty()) {
-                    certificatesPublisher.setDeliveryServices(deliveryServices);
-                }
+    @SuppressWarnings({"PMD.AvoidCatchingThrowable"})
+    private boolean processChangeEvents(final JsonNode jo,
+            final SnapshotEventsProcessor snapshotEventsProcessor)
+            throws ParseException, JsonUtilsException, IOException {
+        LOGGER.debug("In processChangeEvents");
+        CacheRegister cacheRegister = null;
+        if (trafficRouterManager.getTrafficRouter() != null) {
+            cacheRegister = trafficRouterManager.getTrafficRouter().getCacheRegister();
+            final int i = cacheRegister.hashCode();
+            LOGGER.debug(i);
+        } else {
+            cacheRegister = new CacheRegister();
+        }
+        final JsonNode config = JsonUtils.getJsonNode(jo, configKey);
+        parseRegionalGeoConfig(config, snapshotEventsProcessor);
+        parseAnonymousIpConfig(config, snapshotEventsProcessor);
+        updateCertsPublisher(snapshotEventsProcessor);
+        final List<DeliveryService> httpsDeliveryServices = snapshotEventsProcessor.getSSLEnabledChangeEvents();
+        httpsDeliveryServices.forEach(ds -> LOGGER.info("Checking for certificate for " + ds.getId()));
+        if (!httpsDeliveryServices.isEmpty() && !waitForSslCerts()) {
+            return false;
+        }
+        // updates, creates and removes the DeliveryServices in cacheRegister
+        synchronized (cacheRegister) {
+            parseDeliveryServiceMatchSets(snapshotEventsProcessor, cacheRegister);
+            parseCacheConfig(snapshotEventsProcessor, cacheRegister);
+            trafficRouterManager.updateZones(snapshotEventsProcessor);
+        }
+        trafficRouterManager.getTrafficRouter().configurationChanged();
+        NetworkNode.getInstance().clearCacheLocations();
+        NetworkNode.getDeepInstance().clearCacheLocations(true);
+        return true;
+    }
-                certificatesPoller.restart();
+    private boolean waitForSslCerts() {
+        try {
+            publishStatusQueue.put(true);
+        } catch (InterruptedException e) {
+            LOGGER.warn("Failed to notify certificates publisher we're waiting for certificates", e);
+        }
+        while (!cancelled.get() && !publishStatusQueue.isEmpty()) {
+            try {
+                LOGGER.info("Waiting for https certificates to support new config " + String
+                        .format("%x", publishStatusQueue.hashCode()));
+                Thread.sleep(1000L);
+            } catch (Exception t) {
+                LOGGER.warn("Interrupted while waiting for status on publishing ssl certs", t);
+            }
+        }
+        if (cancelled.get()) {
+            LOGGER.info("Exiting waitForSslCerts: processing of config was CANCELED because a newer one is ready.");
+            return false;
+        }
+        return true;
+    }
-                final List<DeliveryService> httpsDeliveryServices = deliveryServices.stream().filter(ds -> !ds.isDns() && ds.isSslEnabled()).collect(Collectors.toList());
-                httpsDeliveryServices.forEach(ds -> LOGGER.info("Checking for certificate for " + ds.getId()));
+    public CertificatesPoller getCertificatesPoller() {
+        return certificatesPoller;
+    }
-                if (!httpsDeliveryServices.isEmpty()) {
-                    try {
-                        publishStatusQueue.put(true);
-                    } catch (InterruptedException e) {
-                        LOGGER.warn("Failed to notify certificates publisher we're waiting for certificates", e);
-                    }
+    private void updateCertsPublisher(final SnapshotEventsProcessor snapshotEventsProcessor) {
+        Collection<DeliveryService> deliveryServices = null;
-                    while (!cancelled.get() && !publishStatusQueue.isEmpty()) {
-                        try {
-                            LOGGER.info("Waiting for https certificates to support new config " + String.format("%x", publishStatusQueue.hashCode()));
-                            Thread.sleep(1000L);
-                        } catch (Throwable t) {
-                            LOGGER.warn("Interrupted while waiting for status on publishing ssl certs", t);
-                        }
-                    }
-                }
+        if (snapshotEventsProcessor.getCreationEvents() != null && !snapshotEventsProcessor.getCreationEvents()
+                .isEmpty()) {
+            deliveryServices = snapshotEventsProcessor.getCreationEvents().values();
+            getCertificatesPublisher().getDeliveryServices().addAll(deliveryServices);
+        }
-                if (cancelled.get()) {
-                    cancelled.set(false);
-                    isProcessing.set(false);
-                    publishStatusQueue.clear();
-                    LOGGER.info("Exiting processConfig: processing of config with timestamp " + date + " was cancelled");
-                    return false;
-                }
+        if (snapshotEventsProcessor.getUpdateEvents() != null && !snapshotEventsProcessor.getUpdateEvents().isEmpty()) {
+            getCertificatesPublisher().getDeliveryServices().replaceAll(ds ->
+                    getFirst(snapshotEventsProcessor.getUpdateEvents().values(), uds -> uds.getId().equals(ds.getId()))
+                            .orElse(ds));
+        }
-                parseDeliveryServiceMatchSets(deliveryServicesJson, deliveryServiceMap, cacheRegister);
-                parseLocationConfig(JsonUtils.getJsonNode(jo, "edgeLocations"), cacheRegister);
-                parseCacheConfig(JsonUtils.getJsonNode(jo, "contentServers"), cacheRegister);
-                parseMonitorConfig(JsonUtils.getJsonNode(jo, "monitors"));
-
-                federationsWatcher.configure(config);
-                steeringWatcher.configure(config);
-                trafficRouterManager.setCacheRegister(cacheRegister);
-                trafficRouterManager.getNameServer().setEcsEnable(JsonUtils.optBoolean(config, "ecsEnable", false));
-                trafficRouterManager.getTrafficRouter().setRequestHeaders(parseRequestHeaders(config.get("requestHeaders")));
-                trafficRouterManager.getTrafficRouter().configurationChanged();
-
-                /*
-                 * NetworkNode uses lazy loading to associate CacheLocations with NetworkNodes at request time in TrafficRouter.
-                 * Therefore this must be done last, as any thread that holds a reference to the CacheRegister might contain a reference
-                 * to a Cache that no longer exists. In that case, the old CacheLocation and List<Cache> will be set on a
-                 * given CacheLocation within a NetworkNode, leading to an OFFLINE cache to be served, or an ONLINE cache to
-                 * never have traffic routed to it, as the old List<Cache> does not contain the Cache that was moved to ONLINE.
-                 * NetworkNode is a singleton and is managed asynchronously. As long as we swap out the CacheRegister first,
-                 * then clear cache locations, the lazy loading should work as designed. See issue TC-401 for details.
-                 *
-                 * Update for DDC (Dynamic Deep Caching): NetworkNode now has a 2nd singleton (deepInstance) that is managed
-                 * similarly to the non-deep instance. However, instead of clearing a NetworkNode's CacheLocation, only the
-                 * Caches are cleared from the CacheLocation then lazily loaded at request time.
-                 */
-                NetworkNode.getInstance().clearCacheLocations();
-                NetworkNode.getDeepInstance().clearCacheLocations(true);
-                setLastSnapshotTimestamp(sts);
-            } catch (ParseException e) {
-                isProcessing.set(false);
-                cancelled.set(false);
-                publishStatusQueue.clear();
-                LOGGER.error("Exiting processConfig: Failed to process config for snapshot from " + date, e);
-                return false;
-            }
+        if (snapshotEventsProcessor.getDeleteEvents() != null && !snapshotEventsProcessor.getDeleteEvents().isEmpty()) {
+            getCertificatesPublisher().getDeliveryServices().removeIf(ds ->
+                    getFirst(snapshotEventsProcessor.getDeleteEvents().values(),
+                            uds -> uds.getId().equals(ds.getId())).isPresent());
+        }
+
+        getCertificatesPoller().restart();
+    }
+
+
+    @SuppressWarnings({"PMD.CyclomaticComplexity", "PMD.NPathComplexity", "PMD.AvoidCatchingThrowable"})
+    private boolean loadEntireSnapshot(final JsonNode jo, final SnapshotEventsProcessor snapshotEventsProcessor ) throws JsonUtilsException, ParseException, IOException {
+        final Map<String, DeliveryService> deliveryServiceMap = snapshotEventsProcessor.getCreationEvents();
Review comment:
Looks like this line got mixed up with tabs and spaces.
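
   For context on the flow the hunk above introduces, the sketch below is a minimal, illustrative driver (not code from this PR): it diffs an incoming CrConfig snapshot against the previously applied one via SnapshotEventsProcessor.diffCrConfigs and then branches the same way processConfig does, either reloading everything or walking only the delivery-service change events. The package/import for SnapshotEventsProcessor, the class name, and the JSON inputs are assumptions made for the example.

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
// Assumed package: SnapshotEventsProcessor is introduced by this PR alongside ConfigHandler.
import com.comcast.cdn.traffic_control.traffic_router.core.config.SnapshotEventsProcessor;

public class SnapshotDiffSketch {
    public static void main(final String[] args) throws Exception {
        // Hypothetical inputs: the freshly fetched CrConfig snapshot and the one
        // currently applied by Traffic Router (null on the very first load).
        final String snapJson = args[0];
        final String compJson = args.length > 1 ? args[1] : null;

        final ObjectMapper mapper = new ObjectMapper();
        final JsonNode jo = mapper.readTree(snapJson);
        final JsonNode cjo = (compJson == null) ? null : mapper.readTree(compJson);

        // Mirrors the branch in processConfig(): diff the snapshots, then either
        // load the whole snapshot or apply only the delivery-service change events.
        final SnapshotEventsProcessor events = SnapshotEventsProcessor.diffCrConfigs(jo, cjo);
        if (events.shouldLoadAll()) {
            System.out.println("full snapshot reload required");
        } else {
            events.getCreationEvents().values().forEach(ds -> System.out.println("create: " + ds.getId()));
            events.getUpdateEvents().values().forEach(ds -> System.out.println("update: " + ds.getId()));
            events.getDeleteEvents().values().forEach(ds -> System.out.println("delete: " + ds.getId()));
            events.getSSLEnabledChangeEvents().forEach(ds -> System.out.println("needs cert: " + ds.getId()));
        }
    }
}

   Note also that the timestamp guard in processConfig now rejects only strictly older snapshots (sts < getLastSnapshotTimestamp()), so a snapshot carrying the same timestamp as the loaded one is no longer skipped.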
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services