This is an automated email from the ASF dual-hosted git repository.
morningman pushed a commit to branch hadoop-3.3.6
in repository https://gitbox.apache.org/repos/asf/doris-thirdparty.git
The following commit(s) were added to refs/heads/hadoop-3.3.6 by this push:
     new 4483660eaef [Feature](hdfs-native-client) Added logger to print errors to stdout instead. (#236)
4483660eaef is described below
commit 4483660eaefffb5323463adf81f58013648fcff5
Author: Qi Chen <[email protected]>
AuthorDate: Tue Aug 13 11:10:55 2024 +0800
    [Feature](hdfs-native-client) Added logger to print errors to stdout instead. (#236)
---
.../src/main/native/libhdfs/exception.c | 18 +++----
.../src/main/native/libhdfs/hdfs.c | 58 +++++++++++++++-------
.../src/main/native/libhdfs/include/hdfs/hdfs.h | 14 ++++++
3 files changed, 63 insertions(+), 27 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
index ed61b77fcad..b37c37cd7a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/exception.c
@@ -167,7 +167,7 @@ int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
jthr = classNameOfObject(exc, env, &className);
if (jthr) {
- fprintf(stderr, "PrintExceptionAndFree: error determining class name "
+ _hdfsLogger->errLogMessage("PrintExceptionAndFree: error determining
class name "
"of exception.\n");
className = strdup("(unknown)");
destroyLocalReference(env, jthr);
@@ -193,18 +193,18 @@ int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
setTLSExceptionStrings(rootCause, stackTrace);
if (!noPrint) {
- vfprintf(stderr, fmt, ap);
- fprintf(stderr, " error:\n");
+ _hdfsLogger->vaErrLogMessage(fmt, ap);
+ _hdfsLogger->errLogMessage(" error:\n");
if (!rootCause) {
- fprintf(stderr, "(unable to get root cause for %s)\n", className);
+ _hdfsLogger->errLogMessage("(unable to get root cause for %s)\n",
className);
} else {
- fprintf(stderr, "%s", rootCause);
+ _hdfsLogger->errLogMessage("%s", rootCause);
}
if (!stackTrace) {
- fprintf(stderr, "(unable to get stack trace for %s)\n", className);
+ _hdfsLogger->errLogMessage("(unable to get stack trace for %s)\n",
className);
} else {
- fprintf(stderr, "%s", stackTrace);
+ _hdfsLogger->errLogMessage("%s", stackTrace);
}
}
@@ -235,9 +235,9 @@ int printPendingExceptionAndFree(JNIEnv *env, int noPrintFlags,
exc = (*env)->ExceptionOccurred(env);
if (!exc) {
va_start(ap, fmt);
- vfprintf(stderr, fmt, ap);
+ _hdfsLogger->vaErrLogMessage(fmt, ap);
va_end(ap);
- fprintf(stderr, " error: (no exception)");
+ _hdfsLogger->errLogMessage(" error: (no exception)");
ret = 0;
} else {
(*env)->ExceptionClear(env);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
index b3ba27f8674..323275cb0d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
@@ -48,6 +48,28 @@
#define HDFS_FILE_SUPPORTS_DIRECT_READ (1<<0)
#define HDFS_FILE_SUPPORTS_DIRECT_PREAD (1<<1)
+void defaultErrLogMessage(const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ vfprintf(stderr, fmt, args);
+ va_end(args);
+}
+
+void defaultVaErrLogMessage(const char* fmt, va_list ap) {
+ vfprintf(stderr, fmt, ap);
+}
+
+struct hdfsLogger defaultLogger = {
+ .errLogMessage = defaultErrLogMessage,
+ .vaErrLogMessage = defaultVaErrLogMessage
+};
+
+struct hdfsLogger *_hdfsLogger = &defaultLogger;
+
+void hdfsSetLogger(struct hdfsLogger *hdfsLogger) {
+ _hdfsLogger = hdfsLogger;
+}
+
/**
* Reads bytes using the read(ByteBuffer) API. By using Java
* DirectByteBuffers we can avoid copying the bytes onto the Java heap.
@@ -727,7 +749,7 @@ static int calcEffectiveURI(struct hdfsBuilder *bld, char ** uri)
lastColon = strrchr(bld->nn, ':');
if (lastColon && (strspn(lastColon + 1, "0123456789") ==
strlen(lastColon + 1))) {
- fprintf(stderr, "port %d was given, but URI '%s' already "
+ _hdfsLogger->errLogMessage("port %d was given, but URI '%s'
already "
"contains a port!\n", bld->port, bld->nn);
return EINVAL;
}
@@ -737,7 +759,7 @@ static int calcEffectiveURI(struct hdfsBuilder *bld, char ** uri)
uriLen = strlen(scheme) + strlen(bld->nn) + strlen(suffix);
u = malloc((uriLen + 1) * (sizeof(char)));
if (!u) {
- fprintf(stderr, "calcEffectiveURI: out of memory");
+ _hdfsLogger->errLogMessage("calcEffectiveURI: out of memory");
return ENOMEM;
}
snprintf(u, uriLen + 1, "%s%s%s", scheme, bld->nn, suffix);
@@ -1290,18 +1312,18 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
if (accmode == O_RDONLY || accmode == O_WRONLY) {
/* yay */
} else if (accmode == O_RDWR) {
- fprintf(stderr, "ERROR: cannot open an hdfs file in O_RDWR mode\n");
+ _hdfsLogger->errLogMessage("ERROR: cannot open an hdfs file in O_RDWR
mode\n");
errno = ENOTSUP;
return NULL;
} else {
- fprintf(stderr, "ERROR: cannot open an hdfs file in mode 0x%x\n",
+ _hdfsLogger->errLogMessage("ERROR: cannot open an hdfs file in mode
0x%x\n",
accmode);
errno = EINVAL;
return NULL;
}
if ((flags & O_CREAT) && (flags & O_EXCL)) {
- fprintf(stderr,
+ _hdfsLogger->errLogMessage(
"WARN: hdfs does not truly support O_CREATE && O_EXCL\n");
}
@@ -1410,7 +1432,7 @@ static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
file = calloc(1, sizeof(struct hdfsFile_internal));
if (!file) {
- fprintf(stderr, "hdfsOpenFile(%s): OOM create hdfsFile\n", path);
+ _hdfsLogger->errLogMessage("hdfsOpenFile(%s): OOM create hdfsFile\n",
path);
ret = ENOMEM;
goto done;
}
@@ -1496,7 +1518,7 @@ hdfsOpenFileBuilder *hdfsOpenFileBuilderAlloc(hdfsFS fs,
hdfsOpenFileBuilder *builder;
builder = calloc(1, sizeof(hdfsOpenFileBuilder));
if (!builder) {
- fprintf(stderr, "hdfsOpenFileBuilderAlloc(%s): OOM when creating "
+ _hdfsLogger->errLogMessage("hdfsOpenFileBuilderAlloc(%s): OOM when
creating "
"hdfsOpenFileBuilder\n", path);
errno = ENOMEM;
goto done;
@@ -1682,7 +1704,7 @@ hdfsOpenFileFuture *hdfsOpenFileBuilderBuild(hdfsOpenFileBuilder *builder) {
hdfsOpenFileFuture *future;
future = calloc(1, sizeof(hdfsOpenFileFuture));
if (!future) {
- fprintf(stderr, "hdfsOpenFileBuilderBuild: OOM when creating "
+ _hdfsLogger->errLogMessage("hdfsOpenFileBuilderBuild: OOM when
creating "
"hdfsOpenFileFuture\n");
errno = ENOMEM;
goto done;
@@ -1777,7 +1799,7 @@ static hdfsFile fileFutureGetWithTimeout(hdfsOpenFileFuture *future,
file = calloc(1, sizeof(struct hdfsFile_internal));
if (!file) {
- fprintf(stderr, "hdfsOpenFileFutureGet(%s): OOM when creating "
+ _hdfsLogger->errLogMessage("hdfsOpenFileFutureGet(%s): OOM when
creating "
"hdfsFile\n", future->path);
ret = ENOMEM;
goto done;
@@ -2094,7 +2116,7 @@ static int readPrepare(JNIEnv* env, hdfsFS fs, hdfsFile f,
//Error checking... make sure that this file is 'readable'
if (f->type != HDFS_STREAM_INPUT) {
- fprintf(stderr, "Cannot read from a non-InputStream object!\n");
+ _hdfsLogger->errLogMessage("Cannot read from a non-InputStream
object!\n");
errno = EINVAL;
return -1;
}
@@ -2272,7 +2294,7 @@ tSize hdfsPread(hdfsFS fs, hdfsFile f, tOffset position,
//Error checking... make sure that this file is 'readable'
if (f->type != HDFS_STREAM_INPUT) {
- fprintf(stderr, "Cannot read from a non-InputStream object!\n");
+ _hdfsLogger->errLogMessage("Cannot read from a non-InputStream
object!\n");
errno = EINVAL;
return -1;
}
@@ -2335,7 +2357,7 @@ tSize preadDirect(hdfsFS fs, hdfsFile f, tOffset position, void* buffer,
//Error checking... make sure that this file is 'readable'
if (f->type != HDFS_STREAM_INPUT) {
- fprintf(stderr, "Cannot read from a non-InputStream object!\n");
+ _hdfsLogger->errLogMessage("Cannot read from a non-InputStream
object!\n");
errno = EINVAL;
return -1;
}
@@ -2403,7 +2425,7 @@ int hdfsPreadFully(hdfsFS fs, hdfsFile f, tOffset position,
//Error checking... make sure that this file is 'readable'
if (f->type != HDFS_STREAM_INPUT) {
- fprintf(stderr, "Cannot read from a non-InputStream object!\n");
+ _hdfsLogger->errLogMessage("Cannot read from a non-InputStream object!\n");
errno = EINVAL;
return -1;
}
@@ -2457,7 +2479,7 @@ int preadFullyDirect(hdfsFS fs, hdfsFile f, tOffset position, void* buffer,
//Error checking... make sure that this file is 'readable'
if (f->type != HDFS_STREAM_INPUT) {
- fprintf(stderr, "Cannot read from a non-InputStream object!\n");
+ _hdfsLogger->errLogMessage("Cannot read from a non-InputStream object!\n");
errno = EINVAL;
return -1;
}
@@ -2514,7 +2536,7 @@ tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
//Error checking... make sure that this file is 'writable'
if (f->type != HDFS_STREAM_OUTPUT) {
- fprintf(stderr, "Cannot write into a non-OutputStream object!\n");
+ _hdfsLogger->errLogMessage("Cannot write into a non-OutputStream object!\n");
errno = EINVAL;
return -1;
}
@@ -2970,7 +2992,7 @@ char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
}
jPath = jVal.l;
if (!jPath) {
- fprintf(stderr, "hdfsGetWorkingDirectory: "
+ _hdfsLogger->errLogMessage("hdfsGetWorkingDirectory: "
"FileSystem#getWorkingDirectory returned NULL");
ret = -EIO;
goto done;
@@ -3580,7 +3602,7 @@ static int hadoopReadZeroExtractBuffer(JNIEnv *env,
}
mallocBuf = malloc(buffer->length);
if (!mallocBuf) {
- fprintf(stderr, "hadoopReadZeroExtractBuffer: failed to allocate %d
bytes of memory\n",
+ _hdfsLogger->errLogMessage("hadoopReadZeroExtractBuffer: failed to
allocate %d bytes of memory\n",
buffer->length);
ret = ENOMEM;
goto done;
@@ -3838,7 +3860,7 @@ hdfsGetHosts(hdfsFS fs, const char *path, tOffset start, tOffset length)
}
jFileBlockHosts = jVal.l;
if (!jFileBlockHosts) {
- fprintf(stderr,
+ _hdfsLogger->errLogMessage(
"hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"):"
"BlockLocation#getHosts returned NULL", path, start, length);
ret = EINTERNAL;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
index f88545f3ba3..a34d65be194 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
@@ -68,6 +68,20 @@
#ifdef __cplusplus
extern "C" {
#endif
+
+ /**
+ * hdfs logger
+ */
+ struct hdfsLogger {
+ void (*errLogMessage)(const char* fmt, ...);
+ void (*vaErrLogMessage)(const char* fmt, va_list ap);
+ };
+
+ extern struct hdfsLogger *_hdfsLogger;
+
+ LIBHDFS_EXTERNAL
+ void hdfsSetLogger(struct hdfsLogger *hdfsLogger);
+
/**
* Some utility decls used in libhdfs.
*/
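
For anyone wiring this up from the client side, here is a minimal usage sketch. Only struct hdfsLogger and hdfsSetLogger() come from the patch above; the callback names, the "[hdfs-client]" prefix, routing to stdout, and the include path are illustrative assumptions, not part of this commit.

/*
 * Sketch: install a custom logger so libhdfs error messages go to
 * stdout with a tag. myErrLogMessage / myVaErrLogMessage and the
 * "[hdfs-client]" prefix are hypothetical names invented here.
 */
#include <stdarg.h>
#include <stdio.h>
#include "hdfs/hdfs.h"   /* assumed include path for the patched header */

static void myVaErrLogMessage(const char* fmt, va_list ap) {
    fprintf(stdout, "[hdfs-client] ");   /* tag every message */
    vfprintf(stdout, fmt, ap);           /* forward to stdout */
}

static void myErrLogMessage(const char* fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    myVaErrLogMessage(fmt, ap);
    va_end(ap);
}

/* Must outlive all libhdfs calls: libhdfs stores only the pointer. */
static struct hdfsLogger myLogger = {
    .errLogMessage   = myErrLogMessage,
    .vaErrLogMessage = myVaErrLogMessage,
};

int main(void) {
    hdfsSetLogger(&myLogger);   /* install before any libhdfs call */
    /* ... hdfsBuilderConnect(), hdfsOpenFile(), etc. ... */
    return 0;
}

Because hdfsSetLogger() only swaps the _hdfsLogger pointer, the logger object must remain valid for the lifetime of the process, and installing it before the first libhdfs call avoids losing early messages to the default stderr logger.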
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]