HDFS-6534. Fix build on macosx: HDFS parts (Binglin Chang via aw)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb74f396
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb74f396
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb74f396

Branch: refs/heads/HDFS-6581
Commit: cb74f39697b57ec1189073fe128ce5ed3e7d73f0
Parents: ef784a2
Author: Allen Wittenauer <a...@apache.org>
Authored: Wed Sep 24 08:22:02 2014 -0700
Committer: Allen Wittenauer <a...@apache.org>
Committed: Wed Sep 24 08:22:02 2014 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 ++
 .../hadoop-hdfs/src/CMakeLists.txt              | 17 +++++++---
 .../hadoop-hdfs/src/main/native/libhdfs/hdfs.c  |  2 +-
 .../main/native/libhdfs/test/test_libhdfs_ops.c | 22 ++++++-------
 .../src/main/native/libhdfs/test/vecsum.c       | 33 ++++++++++++++++++--
 .../main/native/libhdfs/test_libhdfs_threaded.c |  2 +-
 6 files changed, 58 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb74f396/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index af6c135..4a52d44 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -935,6 +935,8 @@ Release 2.6.0 - UNRELEASED
     HDFS-7130. TestDataTransferKeepalive fails intermittently on Windows.
     (cnauroth)
 
+    HDFS-6534. Fix build on macosx: HDFS parts (Binglin Chang via aw)
+
 Release 2.5.1 - 2014-09-05
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb74f396/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
index 854988b..227be45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
@@ -211,11 +211,18 @@ if (NOT WIN32)
     add_executable(test_libhdfs_vecsum
         main/native/libhdfs/test/vecsum.c
     )
-    target_link_libraries(test_libhdfs_vecsum
-        hdfs
-        pthread
-        rt
-    )
+    if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
+        target_link_libraries(test_libhdfs_vecsum
+            hdfs
+            pthread
+        )
+    else (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
+        target_link_libraries(test_libhdfs_vecsum
+            hdfs
+            pthread
+            rt
+        )
+    endif (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
 endif(NOT WIN32)
 
 IF(REQUIRE_LIBWEBHDFS)

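Why the link line splits: on Linux with older glibc (pre-2.17), clock_gettime()
lives in librt, so the vecsum test must link -lrt; Darwin ships no librt at all
(its timer APIs come from libSystem and Mach), so the old unconditional link
line broke the OS X build. A hypothetical probe illustrating the platform
difference (not part of the patch):

    /* Build on Linux (older glibc) with `cc probe.c -lrt`; on Darwin with
     * plain `cc probe.c` -- there is no librt, and in 2014 no clock_gettime()
     * either, hence the Mach shim added to vecsum.c below. */
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
    #ifdef __MACH__
        puts("Darwin: no librt; use mach_absolute_time() instead");
    #else
        struct timespec ts;
        if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
            printf("monotonic: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
    #endif
        return 0;
    }
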
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb74f396/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
index ebdcad3..dc8f39d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
@@ -3215,7 +3215,7 @@ static void hdfsFreeFileInfoEntry(hdfsFileInfo *hdfsFileInfo)
     free(hdfsFileInfo->mName);
     free(hdfsFileInfo->mOwner);
     free(hdfsFileInfo->mGroup);
-    memset(hdfsFileInfo, 0, sizeof(hdfsFileInfo));
+    memset(hdfsFileInfo, 0, sizeof(*hdfsFileInfo));
 }
 
 void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)

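The hdfs.c change is a one-character fix with real consequences: the parameter
shadows the hdfsFileInfo type name, so sizeof(hdfsFileInfo) measured the
pointer (8 bytes on LP64), not the struct, and the memset zeroed only the
first word of each entry. A standalone sketch of the bug, using a hypothetical
Entry struct in place of hdfsFileInfo:

    #include <stdio.h>
    #include <string.h>

    struct Entry { char name[64]; long size; };

    static void clear(struct Entry *e)
    {
        memset(e, 0, sizeof(e));   /* bug: zeroes only sizeof(pointer) bytes */
        memset(e, 0, sizeof(*e));  /* fix: zeroes the whole struct           */
    }

    int main(void)
    {
        struct Entry e = { "x", 1 };
        clear(&e);
        printf("sizeof(ptr)=%zu  sizeof(struct)=%zu\n",
               sizeof(struct Entry *), sizeof(struct Entry));
        return 0;
    }
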
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb74f396/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c
index a6e1a13..aa9441a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c
@@ -122,11 +122,11 @@ int main(int argc, char **argv) {
         currentPos = -1;
         if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
             fprintf(stderr, 
-                    "Failed to get current file position correctly! Got %ld!\n",
+                    "Failed to get current file position correctly! Got %" PRId64 "!\n",
                     currentPos);
             exit(-1);
         }
-        fprintf(stderr, "Current position: %ld\n", currentPos);
+        fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
 
         if (hdfsFlush(fs, writeFile)) {
             fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
@@ -177,11 +177,11 @@ int main(int argc, char **argv) {
         currentPos = -1;
         if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
             fprintf(stderr, 
-                    "Failed to get current file position correctly! Got %ld!\n", 
+                    "Failed to get current file position correctly! Got %" PRId64 "!\n",
                     currentPos);
             exit(-1);
         }
-        fprintf(stderr, "Current position: %ld\n", currentPos);
+        fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
 
         if (!hdfsFileUsesDirectRead(readFile)) {
           fprintf(stderr, "Direct read support incorrectly not detected "
@@ -283,9 +283,9 @@ int main(int argc, char **argv) {
         fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = 
hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : 
"Failed!"));
         totalResult += (resp ? 0 : 1);
 
-        fprintf(stderr, "hdfsGetDefaultBlockSize: %ld\n", hdfsGetDefaultBlockSize(fs));
-        fprintf(stderr, "hdfsGetCapacity: %ld\n", hdfsGetCapacity(fs));
-        fprintf(stderr, "hdfsGetUsed: %ld\n", hdfsGetUsed(fs));
+        fprintf(stderr, "hdfsGetDefaultBlockSize: %" PRId64 "\n", hdfsGetDefaultBlockSize(fs));
+        fprintf(stderr, "hdfsGetCapacity: %" PRId64 "\n", hdfsGetCapacity(fs));
+        fprintf(stderr, "hdfsGetUsed: %" PRId64 "\n", hdfsGetUsed(fs));
 
         fileInfo = NULL;
         if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
@@ -293,8 +293,8 @@ int main(int argc, char **argv) {
             fprintf(stderr, "Name: %s, ", fileInfo->mName);
             fprintf(stderr, "Type: %c, ", (char)(fileInfo->mKind));
             fprintf(stderr, "Replication: %d, ", fileInfo->mReplication);
-            fprintf(stderr, "BlockSize: %ld, ", fileInfo->mBlockSize);
-            fprintf(stderr, "Size: %ld, ", fileInfo->mSize);
+            fprintf(stderr, "BlockSize: %" PRId64 ", ", fileInfo->mBlockSize);
+            fprintf(stderr, "Size: %" PRId64 ", ", fileInfo->mSize);
             fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod)); 
             fprintf(stderr, "Owner: %s, ", fileInfo->mOwner);
             fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
@@ -312,8 +312,8 @@ int main(int argc, char **argv) {
                 fprintf(stderr, "Name: %s, ", fileList[i].mName);
                 fprintf(stderr, "Type: %c, ", (char)fileList[i].mKind);
                 fprintf(stderr, "Replication: %d, ", fileList[i].mReplication);
-                fprintf(stderr, "BlockSize: %ld, ", fileList[i].mBlockSize);
-                fprintf(stderr, "Size: %ld, ", fileList[i].mSize);
+                fprintf(stderr, "BlockSize: %" PRId64 ", ", fileList[i].mBlockSize);
+                fprintf(stderr, "Size: %" PRId64 ", ", fileList[i].mSize);
                 fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod));
                 fprintf(stderr, "Owner: %s, ", fileList[i].mOwner);
                 fprintf(stderr, "Group: %s, ", fileList[i].mGroup);

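All of the test_libhdfs_ops.c hunks are the same portability fix: hdfsTell(),
hdfsGetDefaultBlockSize(), and friends return tOffset, a signed 64-bit type,
and %ld only matches int64_t on LP64 platforms; where long is 32 bits the old
format strings invoke undefined behavior. The PRId64 macro from <inttypes.h>
expands to the right conversion everywhere. A minimal sketch of the idiom:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t pos = INT64_C(1) << 40;  /* e.g. an offset from hdfsTell() */
        /* Portable: PRId64 becomes "ld" or "lld" as the platform requires. */
        printf("Current position: %" PRId64 "\n", pos);
        return 0;
    }
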
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb74f396/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/vecsum.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/vecsum.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/vecsum.c
index fd18c9d..80a64b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/vecsum.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/vecsum.c
@@ -29,6 +29,12 @@
 #include <time.h>
 #include <unistd.h>
 
+#ifdef __MACH__ // OS X does not have clock_gettime
+#include <mach/clock.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#endif
+
 #include "config.h"
 #include "hdfs.h"
 
@@ -49,6 +55,29 @@ struct stopwatch {
     struct timespec stop;
 };
 
+
+#ifdef __MACH__
+static int clock_gettime_mono(struct timespec * ts) {
+    static mach_timebase_info_data_t tb;
+    static uint64_t timestart = 0;
+    uint64_t t = 0;
+    if (timestart == 0) {
+        mach_timebase_info(&tb);
+        timestart = mach_absolute_time();
+    }
+    t = mach_absolute_time() - timestart;
+    t *= tb.numer;
+    t /= tb.denom;
+    ts->tv_sec = t / 1000000000ULL;
+    ts->tv_nsec = t - (ts->tv_sec * 1000000000ULL);
+    return 0;
+}
+#else
+static int clock_gettime_mono(struct timespec * ts) {
+    return clock_gettime(CLOCK_MONOTONIC, ts);
+}
+#endif
+
 static struct stopwatch *stopwatch_create(void)
 {
     struct stopwatch *watch;
@@ -58,7 +87,7 @@ static struct stopwatch *stopwatch_create(void)
         fprintf(stderr, "failed to allocate memory for stopwatch\n");
         goto error;
     }
-    if (clock_gettime(CLOCK_MONOTONIC, &watch->start)) {
+    if (clock_gettime_mono(&watch->start)) {
         int err = errno;
         fprintf(stderr, "clock_gettime(CLOCK_MONOTONIC) failed with "
             "error %d (%s)\n", err, strerror(err));
@@ -76,7 +105,7 @@ static void stopwatch_stop(struct stopwatch *watch,
 {
     double elapsed, rate;
 
-    if (clock_gettime(CLOCK_MONOTONIC, &watch->stop)) {
+    if (clock_gettime_mono(&watch->stop)) {
         int err = errno;
         fprintf(stderr, "clock_gettime(CLOCK_MONOTONIC) failed with "
             "error %d (%s)\n", err, strerror(err));

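The vecsum.c change funnels every monotonic-clock read through
clock_gettime_mono(): on Mach it converts mach_absolute_time() ticks to
nanoseconds via the kernel timebase ratio; elsewhere it simply delegates to
clock_gettime(CLOCK_MONOTONIC, ...). (One caveat worth noting: the Mach path
cannot fail, so the errno-based messages in the callers apply only to the
POSIX branch.) A sketch of how the shim would be exercised, assuming the
clock_gettime_mono() definition above is in scope:

    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    int main(void)
    {
        struct timespec start, stop;
        if (clock_gettime_mono(&start)) return 1;
        sleep(1);                          /* stand-in for the timed work */
        if (clock_gettime_mono(&stop)) return 1;
        double elapsed = (stop.tv_sec - start.tv_sec) +
                         (stop.tv_nsec - start.tv_nsec) / 1e9;
        printf("elapsed: %.3f s\n", elapsed);  /* prints roughly 1.000 */
        return 0;
    }
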
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb74f396/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
index 64c1a8f..016f0b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
@@ -84,7 +84,7 @@ static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs
 
 static int doTestGetDefaultBlockSize(hdfsFS fs, const char *path)
 {
-    uint64_t blockSize;
+    int64_t blockSize;
     int ret;
 
     blockSize = hdfsGetDefaultBlockSize(fs);

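The test_libhdfs_threaded.c change fixes a signed/unsigned mismatch (most
likely surfaced as a clang warning on OS X): hdfsGetDefaultBlockSize() returns
tOffset, an int64_t, with -1 signaling failure. Stored in a uint64_t, a
negative return wraps to a huge positive value, so any blockSize < 0 error
check becomes unreachable. A standalone sketch, with a hypothetical
get_block_size() standing in for the libhdfs call:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for hdfsGetDefaultBlockSize(): -1 means failure. */
    static int64_t get_block_size(void) { return -1; }

    int main(void)
    {
        uint64_t wrong = get_block_size();  /* wraps to 18446744073709551615 */
        int64_t  right = get_block_size();
        /* With uint64_t, "< 0" is always false and the failure would be
         * treated as an absurdly large block size. */
        printf("as uint64_t: %llu\n", (unsigned long long)wrong);
        printf("as int64_t:  %lld (error detected: %s)\n",
               (long long)right, right < 0 ? "yes" : "no");
        return 0;
    }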