github-advanced-security[bot] commented on code in PR #6452:
URL: https://github.com/apache/hive/pull/6452#discussion_r3198637636


##########
packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/HiveDependentResource.java:
##########
@@ -0,0 +1,429 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.dependent;
+
+import java.nio.charset.StandardCharsets;
+import java.security.MessageDigest;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentHashMap;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
+import com.fasterxml.jackson.databind.node.ArrayNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import io.fabric8.kubernetes.api.model.Container;
+import io.fabric8.kubernetes.api.model.ContainerBuilder;
+import io.fabric8.kubernetes.api.model.EnvVar;
+import io.fabric8.kubernetes.api.model.EnvVarBuilder;
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.fabric8.kubernetes.api.model.Quantity;
+import io.fabric8.kubernetes.api.model.ResourceRequirements;
+import io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder;
+import io.fabric8.kubernetes.api.model.Volume;
+import io.fabric8.kubernetes.api.model.VolumeBuilder;
+import io.fabric8.kubernetes.api.model.VolumeMountBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.Matcher;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.CRUDKubernetesDependentResource;
+import org.apache.hive.kubernetes.operator.model.HiveCluster;
+import org.apache.hive.kubernetes.operator.model.spec.DatabaseConfig;
+import org.apache.hive.kubernetes.operator.model.spec.ResourceRequirementsSpec;
+import org.apache.hive.kubernetes.operator.model.spec.SecretKeyRef;
+import org.apache.hive.kubernetes.operator.model.spec.StorageSpec;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Base class for all Hive operator dependent resources.
+ * <p>
+ * Overrides {@link #getSecondaryResource} to use this dependent's own
+ * event source instead of the generic type-based lookup. This is
+ * required because JOSDK 4.9.x's default implementation calls
+ * {@code context.getSecondaryResource(type)}, which throws when
+ * multiple dependents manage the same Kubernetes resource type
+ * (e.g. multiple ConfigMap or Service dependents).
+ */
+public abstract class HiveDependentResource<R extends HasMetadata,
+    P extends HasMetadata>
+    extends CRUDKubernetesDependentResource<R, P> {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(HiveDependentResource.class);
+  private static final ObjectMapper MAPPER = new ObjectMapper()
+      .configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true);
+
+  protected static final String CONF_MOUNT_PATH = "/etc/hive/conf";
+  protected static final String EXT_JARS_PATH = "/tmp/ext-jars";
+  protected static final String HADOOP_CLASSPATH_VALUE =
+      "/opt/hadoop/share/hadoop/tools/lib/*";
+  /**
+   * Stores SHA-256 hashes of the last desired spec that was applied
+   * for each resource, keyed by kind/namespace/name.  This lets us skip
+   * updates when the desired state has not changed, avoiding the
+   * annotation writes and generation bumps that cause infinite
+   * reconciliation loops on Docker Desktop Kubernetes.
+   */
+  private static final ConcurrentHashMap<String, String>
+      LAST_DESIRED_HASHES = new ConcurrentHashMap<>();
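+  // Key format: "<kind>/<namespace>/<name>", e.g. (illustrative names)
+  // "Deployment/hive/hive-metastore"; see match() below for where the
+  // key is built.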
+
+  protected HiveDependentResource(Class<R> resourceType) {
+    super(resourceType);
+  }
+
+  @Override
+  public Optional<R> getSecondaryResource(P primary,
+      Context<P> context) {
+    return eventSource()
+        .flatMap(es -> es.getSecondaryResource(primary));
+  }
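+  // Why the override above matters, sketched with hypothetical
+  // dependents: if two dependents both manage ConfigMaps for the same
+  // primary, the default lookup
+  //   context.getSecondaryResource(ConfigMap.class)
+  // cannot tell which ConfigMap is meant and throws in JOSDK 4.9.x;
+  // resolving through this dependent's own event source is unambiguous.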
+
+  /**
+   * Custom match that compares an SHA-256 hash of the desired resource
+   * spec against the last applied hash.  Overrides the 3-arg entry
+   * point because that is what JOSDK's reconcile loop actually calls.
+   * <p>
+   * The parent's 3-arg match delegates to a 5-arg method that calls
+   * {@code addMetadata()} <em>unconditionally</em> — writing the
+   * {@code javaoperatorsdk.io/previous} annotation on every
+   * reconciliation.  On Docker Desktop, that annotation write bumps
+   * {@code metadata.generation}, which triggers a new informer event,
+   * causing an infinite reconciliation loop.
+   * <p>
+   * By intercepting here we avoid both the annotation write and the
+   * false-positive diffs from K8s-injected defaults (protocol: TCP,
+   * terminationGracePeriodSeconds, etc.) when the desired spec has
+   * not actually changed.
+   */
+  @Override
+  public Matcher.Result<R> match(R actual, P primary,
+      Context<P> context) {
+    R desired = desired(primary, context);
+    String resourceKey = desired.getKind()
+        + "/" + desired.getMetadata().getNamespace()
+        + "/" + desired.getMetadata().getName();
+    String desiredHash = computeHash(desired);
+    if (actual == null) {
+      if (desiredHash != null) {
+        String previousHash = LAST_DESIRED_HASHES.get(resourceKey);
+        if (Objects.equals(previousHash, desiredHash)) {
+          // Resource was created in a previous reconciliation but
+          // the informer hasn't indexed it yet.  Returning false
+          // would trigger another SSA apply, which fires another
+          // informer event, creating an infinite reconciliation
+          // loop on Docker Desktop.  Skip the re-creation.
+          LOG.debug("Resource {} already created (informer lag), "
+              + "skipping re-create", resourceKey);
+          return Matcher.Result.computed(true, desired);
+        }
+        // First creation — cache the hash so the next
+        // reconciliation can detect informer lag.
+        LOG.info("Creating resource {}", resourceKey);
+        LAST_DESIRED_HASHES.put(resourceKey, desiredHash);
+      }
+      return Matcher.Result.computed(false, desired);
+    }
+    if (desiredHash == null) {
+      // Serialization failed — delegate to parent which will
+      // call addMetadata + the real matcher
+      return super.match(actual, primary, context);
+    }
+    String previousHash = LAST_DESIRED_HASHES.get(resourceKey);
+    if (previousHash == null) {
+      // First reconciliation after operator start — the resource
+      // already exists so seed the cache without triggering an
+      // update.  This prevents a gratuitous rolling update caused
+      // by the annotation write that addMetadata() would perform.
+      LOG.info("Seeding hash for existing resource {}", resourceKey);
+      LAST_DESIRED_HASHES.put(resourceKey, desiredHash);
+      return Matcher.Result.computed(true, desired);
+    }
+    if (desiredHash.equals(previousHash)) {
+      LOG.debug("Desired spec unchanged for {}, skipping update",
+          resourceKey);
+      return Matcher.Result.computed(true, desired);
+    }
+    LOG.info("Desired spec changed for {}, will update", resourceKey);
+    LAST_DESIRED_HASHES.put(resourceKey, desiredHash);
+    return Matcher.Result.computed(false, desired);
+  }
+
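+  /**
+   * Serializes the resource to key-sorted JSON and returns the SHA-256
+   * digest as a lowercase hex string, or {@code null} when
+   * serialization or hashing fails.
+   */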
+  private String computeHash(R resource) {
+    try {
+      JsonNode tree = MAPPER.valueToTree(resource);
+      sortJsonNode(tree);
+      String json = MAPPER.writeValueAsString(tree);
+      MessageDigest digest = MessageDigest.getInstance("SHA-256");
+      byte[] hash = digest.digest(
+          json.getBytes(StandardCharsets.UTF_8));
+      StringBuilder sb = new StringBuilder(64);
+      for (byte b : hash) {
+        sb.append(String.format("%02x", b));
+      }
+      return sb.toString();
+    } catch (Exception e) {
+      LOG.warn("Failed to compute hash for resource {}: {}",
+          resource.getMetadata().getName(), e.getMessage());
+      return null;
+    }
+  }
+
+  /** Recursively sort all object node keys for deterministic JSON. */
+  private static void sortJsonNode(JsonNode node) {
+    if (node.isObject()) {
+      ObjectNode obj = (ObjectNode) node;
+      TreeMap<String, JsonNode> sorted = new TreeMap<>();
+      Iterator<String> fieldNames = obj.fieldNames();
+      while (fieldNames.hasNext()) {
+        String name = fieldNames.next();
+        JsonNode child = obj.get(name);
+        sortJsonNode(child);
+        sorted.put(name, child);
+      }
+      obj.removeAll();
+      sorted.forEach(obj::set);
+    } else if (node.isArray()) {
+      ArrayNode arr = (ArrayNode) node;
+      for (int i = 0; i < arr.size(); i++) {
+        sortJsonNode(arr.get(i));
+      }
+    }
+  }
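+  // For example, sorting turns {"b":1,"a":{"d":2,"c":3}} into
+  // {"a":{"c":3,"d":2},"b":1}, so logically equal resources always
+  // serialize, and therefore hash, identically.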
+
+  /**
+   * Builds the database connection env vars: DB_DRIVER, DBPASSWORD
+   * (from SecretKeyRef), and SERVICE_OPTS with javax.jdo connection
+   * properties. Shared by MetastoreDeploymentDependent and
+   * SchemaInitJobDependent.
+   */
+  protected static List<EnvVar> buildDbEnvVars(DatabaseConfig db) {
+    List<EnvVar> envVars = new ArrayList<>();
+    envVars.add(new EnvVar("DB_DRIVER", db.getType(), null));
+
+    // DBPASSWORD must be defined before SERVICE_OPTS so that
+    // Kubernetes $(DBPASSWORD) interpolation resolves correctly.
+    SecretKeyRef passwordRef = db.getPasswordSecretRef();
+    if (passwordRef != null) {
+      envVars.add(new EnvVarBuilder()
+          .withName("DBPASSWORD")
+          .withNewValueFrom()
+            .withNewSecretKeyRef()
+              .withName(passwordRef.getName())
+              .withKey(passwordRef.getKey())
+            .endSecretKeyRef()
+          .endValueFrom()
+          .build());
+    }
+
+    StringBuilder serviceOpts = new StringBuilder();
+    if (db.getUrl() != null) {
+      serviceOpts.append("-Djavax.jdo.option.ConnectionURL=")
+          .append(db.getUrl());
+    }
+    if (db.getDriver() != null) {
+      serviceOpts.append(" -Djavax.jdo.option.ConnectionDriverName=")
+          .append(db.getDriver());
+    }
+    if (db.getUsername() != null) {
+      serviceOpts.append(" -Djavax.jdo.option.ConnectionUserName=")
+          .append(db.getUsername());
+    }
+    if (passwordRef != null) {
+      serviceOpts.append(
+          " -Djavax.jdo.option.ConnectionPassword=$(DBPASSWORD)");

Review Comment:
   ## SonarCloud / Credentials should not be hard-coded
   
   'Password' detected in this expression, review this potentially
   hard-coded password. See more on SonarQube Cloud:
   https://sonarcloud.io/project/issues?id=apache_hive&issues=AZ38mSyeE9GCD649wgIp&open=AZ38mSyeE9GCD649wgIp&pullRequest=6452
   
   [Show more details](https://github.com/apache/hive/security/code-scanning/434)
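   
   For context, this looks like a false positive rather than a real
   hard-coded credential: "$(DBPASSWORD)" is a Kubernetes
   container-level variable reference that the kubelet expands at
   container start, and DBPASSWORD itself is sourced from a Secret via
   secretKeyRef in buildDbEnvVars above, so no literal password appears
   in source. A minimal sketch of the pattern, using hypothetical
   Secret/key names ("hive-db-secret", "password"):
   
       // DBPASSWORD is backed by a Secret; no literal value in code.
       // ("hive-db-secret" and "password" are illustrative names.)
       EnvVar dbPassword = new EnvVarBuilder()
           .withName("DBPASSWORD")
           .withNewValueFrom()
             .withNewSecretKeyRef()
               .withName("hive-db-secret")   // Secret name
               .withKey("password")          // key within the Secret
             .endSecretKeyRef()
           .endValueFrom()
           .build();
       // SERVICE_OPTS only references the variable; Kubernetes expands
       // $(DBPASSWORD) in the container environment, provided
       // DBPASSWORD is declared earlier in the env list.
       EnvVar serviceOpts = new EnvVar("SERVICE_OPTS",
           "-Djavax.jdo.option.ConnectionPassword=$(DBPASSWORD)", null);
   
   If suppressing the finding is preferred, SonarQube's inline
   "// NOSONAR" marker on the flagged line is one option, though it
   silences all rules for that line.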



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

