msharee9 commented on a change in pull request #604: MINIFICPP-926 Create
nanofi tailfile processor for tailing file by configurable chunk size
URL: https://github.com/apache/nifi-minifi-cpp/pull/604#discussion_r303192749
##########
File path: nanofi/src/core/string_utils.c
##########
@@ -20,67 +20,145 @@
#include "core/string_utils.h"
#include <string.h>
#include <stdlib.h>
+#include <stdio.h>
-tokens tokenize_string(const char * str, char delim, tokenizer_mode_t mode) {
- tokens tks;
- tks.num_strings = 0;
- tks.total_bytes = 0;
+int validate_list(struct token_list * tk_list) {
+ if (tk_list && tk_list->head && tk_list->tail && tk_list->size > 0) {
+ return 1;
+ }
+ return 0;
+}
- if (!str) return tks;
+void add_token_to_list(struct token_list * tk_list, const char * begin,
uint64_t len) {
+ struct token_node * new_node = (struct token_node *)malloc(sizeof(struct
token_node));
+ new_node->data = (char *)malloc((len+1) * sizeof(char));
+ strncpy(new_node->data, begin, len);
+ new_node->data[len] = '\0';
+ new_node->next = NULL;
- char * begin = (char *)str;
- char * end = NULL;
- int num_strings = 0;
- while ((end = strchr(begin, delim))) {
- if (begin == end) {
- begin++;
- continue;
+ if (!tk_list->head) {
+ tk_list->head = tk_list->tail = new_node;
+ tk_list->size++;
+ tk_list->total_bytes += len;
+ return;
+ }
+
+ tk_list->tail->next = new_node;
+ tk_list->tail = new_node;
+ tk_list->size++;
+ tk_list->total_bytes += len;
+}
+
+void free_token_node(struct token_node * node) {
+ if (node) {
+ free(node->data);
+ }
+ free(node);
+}
+
+void free_all_tokens(struct token_list * tks) {
+ while (tks && tks->head) {
+ struct token_node * node = tks->head;
+ tks->head = tks->head->next;
+ free_token_node(node);
+ }
+}
+
+void print_token_list(token_list * tokens) {
+ if (tokens) {
+ token_node * head = tokens->head;
+ int i = 0;
+ while (head) {
+ printf("Token %d : %s Length = %lu\n", i, head->data,
strlen(head->data));
+ head = head->next;
+ ++i;
}
- begin = (end+1);
- num_strings++;
+ }
+}
+
+void remove_last_node(token_list * tks) {
+ if (!validate_list(tks)) {
+ return;
}
- if (mode == DEFAULT_MODE && (*begin != '\0')) {
- num_strings++;
+ if (tks->size == 1 || tks->head == tks->tail) {
+ tks->total_bytes -= strlen(tks->tail->data);
+ free_all_tokens(tks);
+ tks->head = NULL;
+ tks->tail = NULL;
+ tks->size = 0;
+ return;
}
- tks.str_list = calloc(num_strings, sizeof(char *));
- tks.num_strings = 0;
- tks.total_bytes = 0;
+ struct token_node * tmp_head = tks->head;
+ struct token_node * tmp_tail = tks->tail;
+
+ while (tmp_head->next && (tmp_head->next != tmp_tail)) {
+ tmp_head = tmp_head->next;
+ }
+
+ struct token_node * tail_node = tmp_tail;
+ tks->tail = tmp_head;
+ tks->tail->next = NULL;
+
+ tks->size--;
+ tks->total_bytes -= (strlen(tail_node->data));
+ free_token_node(tail_node);
+}
+
+void attach_lists(token_list * to, token_list * from) {
+ if (to && validate_list(from)) {
+ if (!to->head) {
+ to->head = from->head;
+ to->tail = from->tail;
+ to->size += from->size;
+ return;
+ }
+
+ if (!to->tail) return;
+
+ to->tail->next = from->head;
+ to->tail = from->tail;
+ to->size += from->size;
+ }
+}
+
+token_list tokenize_string(const char * begin, char delim) {
Review comment:
This function returns a list of tokens (strings) when called. However, it is
also used as part of tokenizing for log-aggregation usage. The log-aggregation
usage has special requirements, therefore this function is used in conjunction
with other functions in the same file. In order to tailor utlist to fulfill
those special requirements, some extra functions would be needed anyway.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services