Ethanlm commented on a change in pull request #3366:
URL: https://github.com/apache/storm/pull/3366#discussion_r669935903



##########
File path: storm-core/src/native/worker-launcher/impl/oci/oci_base_ctx.c
##########
@@ -0,0 +1,301 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "utils/file-utils.h"
+
+#include "configuration.h"
+#include "worker-launcher.h"
+
+#include "oci_base_ctx.h"
+#include "oci_config.h"
+
+#define LAYER_MOUNT_SUFFIX      "/mnt"
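+/* subtract one so the length excludes the terminating NUL counted by sizeof */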
+#define LAYER_MOUNT_SUFFIX_LEN  (sizeof(LAYER_MOUNT_SUFFIX) - 1)
+
+/**
+ * Get the path to the runtime layers directory.
+ *
+ * Returns the heap-allocated path to the layers directory or NULL on error.
+ */
+char* get_oci_layers_path(const char* run_root) {
+  return get_full_path(run_root, "layers");
+}
+
+/**
+ * Get the path to a layer directory.
+ *
+ * Returns the heap-allocated path to the layer directory or NULL on error.
+ */
+char* get_oci_layer_path(const char* run_root, const char* layer_name) {
+  char* layer_path = NULL;
+  if (asprintf(&layer_path, "%s/layers/%s", run_root, layer_name) == -1) {
+    layer_path = NULL;
+  }
+  return layer_path;
+}
+
+/**
+ * Get the path to a layer's mountpoint.
+ *
+ * Returns the heap-allocated path to the layer's mountpoint or NULL on error.
+ */
+char* get_oci_layer_mount_path(const char* layer_path) {
+  char* mount_path = NULL;
+  if (asprintf(&mount_path, "%s" LAYER_MOUNT_SUFFIX, layer_path) == -1) {
+    mount_path = NULL;
+  }
+  return mount_path;
+}
+
+/**
+ * Get the layer path from a layer's mountpoint.
+ *
+ * Returns the heap-allocated path to the layer directory or NULL on error.
+ */
+char* get_oci_layer_path_from_mount_path(const char* mount_path) {
+  size_t mount_path_len = strlen(mount_path);
+  if (mount_path_len <= LAYER_MOUNT_SUFFIX_LEN) {
+    return NULL;
+  }
+  size_t layer_path_len = mount_path_len - LAYER_MOUNT_SUFFIX_LEN;
+  const char* suffix = mount_path + layer_path_len;
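+  /* verify the path actually ends with the mount suffix */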
+  if (strcmp(suffix, LAYER_MOUNT_SUFFIX) != 0) {
+    return NULL;
+  }
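+  /* duplicate everything before the suffix, i.e. the layer directory path */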
+  return strndup(mount_path, layer_path_len);
+}
+
+/**
+ * Creates the run root directory and layers directory structure
+ * underneath if necessary.
+ * Returns the malloc'd run root path or NULL if there was an error.
+ */
+static char* setup_oci_run_root_directories(void) {
+  char* layers_path = NULL;
+  char* run_root = get_value(OCI_RUN_ROOT_CONFIG_KEY);
+  if (run_root == NULL) {
+    run_root = strdup(DEFAULT_OCI_RUN_ROOT);
+    if (run_root == NULL) {
+      goto mem_fail;
+    }
+  }
+  
+  if (mkdir(run_root, S_IRWXU) != 0 && errno != EEXIST) {
+    fprintf(ERRORFILE, "ERROR: Error creating OCI run root at %s : %s\n",
+        run_root, strerror(errno));
+    goto fail;
+  }
+
+  layers_path = get_oci_layers_path(run_root);
+  if (layers_path == NULL) {
+    goto mem_fail;
+  }
+
+  if (mkdir(layers_path, S_IRWXU) != 0 && errno != EEXIST) {
+    fprintf(ERRORFILE, "ERROR: Error creating layers directory at %s : %s\n",
+        layers_path, strerror(errno));
+    goto fail;
+  }
+
+  free(layers_path);
+  return run_root;
+
+fail:
+  free(layers_path);
+  free(run_root);
+  return NULL;
+
+mem_fail:
+  fputs("ERROR: Cannot allocate memory in setup_oci_run_root_directories\n", 
ERRORFILE);
+  goto fail;
+}
+
+/**
+ * Initialize an uninitialized OCI base context.
+ */
+void init_oci_base_ctx(oci_base_ctx* ctx) {
+  memset(ctx, 0, sizeof(*ctx));
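+  /* sentinel values: no layers lock file open and no lock held yet */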
+  ctx->layers_lock_fd = -1;
+  ctx->layers_lock_state = F_UNLCK;
+}
+
+/**
+ * Releases the resources underneath an OCI base context but does NOT free the
+ * structure itself. This is particularly useful for stack-allocated contexts
+ * or structures that embed the context.
+ * free_oci_base_ctx should be used for heap-allocated contexts.
+ */
+void destroy_oci_base_ctx(oci_base_ctx* ctx) {
+  if (ctx != NULL) {
+    free(ctx->run_root);
+    if (ctx->layers_lock_fd != -1) {
+      close(ctx->layers_lock_fd);
+    }
+  }
+}
+
+/**
+ * Allocates and initializes an OCI base context.
+ *
+ * Returns a pointer to the allocated and initialized context or NULL on error.
+ */
+oci_base_ctx* alloc_oci_base_ctx(void) {
+  oci_base_ctx* ctx = malloc(sizeof(*ctx));
+  if (ctx != NULL) {
+    init_oci_base_ctx(ctx);
+  }
+  return ctx;
+}
+
+/**
+ * Free an OCI base context and all memory associated with it.
+ */
+void free_oci_base_ctx(oci_base_ctx* ctx) {
+  destroy_oci_base_ctx(ctx);
+  free(ctx);
+}
+
+/**
+ * Opens the base context for use. This will create the container runtime
+ * root directory and layer lock files, if necessary.
+ *
+ * Returns true on success or false if there was an error.
+ */
+bool open_oci_base_ctx(oci_base_ctx* ctx) {
+  ctx->run_root = setup_oci_run_root_directories();
+  if (ctx->run_root == NULL) {
+    return false;

Review comment:
       `setup_oci_run_root_directories` already has sufficient logging inside the method, so the caller doesn't need to print another error message here.
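
       For illustration, a minimal sketch of the caller after dropping the redundant message (the remainder of `open_oci_base_ctx` is elided in the hunk above, so only the error path is shown):

```c
/* Sketch only: setup_oci_run_root_directories() already prints the
 * failure cause to ERRORFILE, so the caller can simply propagate
 * the error without logging a second message. */
bool open_oci_base_ctx(oci_base_ctx* ctx) {
  ctx->run_root = setup_oci_run_root_directories();
  if (ctx->run_root == NULL) {
    return false;  /* helper already logged the error */
  }
  /* ... rest of the function as in the patch ... */
}
```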




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: dev-unsubscr...@storm.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

