Author: dhruba
Date: Tue Jul 31 11:59:39 2007
New Revision: 561433

URL: http://svn.apache.org/viewvc?view=rev&rev=561433
Log:
HADOOP-1551.  libhdfs supports setting replication factor and
retrieving modification time of files.  (Sameer Paranjpye via dhruba)

Merge -r 561431:561432 from trunk to 0.14.
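For context, here is a minimal sketch of how a libhdfs client might exercise the two capabilities this change adds: setting a file's replication factor, and reading its modification time through the extended hdfsFileInfo struct. The path is illustrative, the "default"/0 connection arguments use the filesystem named in the Hadoop configuration, and error handling is trimmed to the essentials.

    #include "hdfs.h"
    #include <stdio.h>
    #include <time.h>

    int main(void) {
        /* "default" + port 0 connects to the filesystem configured in
         * hadoop-site.xml / hadoop-default.xml */
        hdfsFS fs = hdfsConnect("default", 0);
        if (!fs) {
            fprintf(stderr, "hdfsConnect failed\n");
            return 1;
        }

        /* New in this change: set the file's replication factor to 3 */
        if (hdfsSetReplication(fs, "/tmp/testfile.txt", 3) != 0) {
            fprintf(stderr, "hdfsSetReplication failed\n");
        }

        /* Also new: hdfsFileInfo now carries the last-modification time */
        hdfsFileInfo *info = hdfsGetPathInfo(fs, "/tmp/testfile.txt");
        if (info) {
            fprintf(stderr, "last modified: %s", ctime(&info->mLastMod));
            hdfsFreeFileInfo(info, 1);
        }

        hdfsDisconnect(fs);
        return 0;
    }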
Modified:
    lucene/hadoop/branches/branch-0.14/CHANGES.txt
    lucene/hadoop/branches/branch-0.14/src/c++/libhdfs/hdfs.c
    lucene/hadoop/branches/branch-0.14/src/c++/libhdfs/hdfs.h
    lucene/hadoop/branches/branch-0.14/src/c++/libhdfs/hdfsJniHelper.c
    lucene/hadoop/branches/branch-0.14/src/c++/libhdfs/hdfs_test.c

Modified: lucene/hadoop/branches/branch-0.14/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/branches/branch-0.14/CHANGES.txt?view=diff&rev=561433&r1=561432&r2=561433
==============================================================================
--- lucene/hadoop/branches/branch-0.14/CHANGES.txt (original)
+++ lucene/hadoop/branches/branch-0.14/CHANGES.txt Tue Jul 31 11:59:39 2007
@@ -418,6 +418,9 @@
 140. HADOOP-1066.  Restructure documentation to make more user
      friendly.  (Connie Kleinjans and Jeff Hammerbacher via cutting)
 
+141. HADOOP-1551.  libhdfs supports setting replication factor and
+     retrieving modification time of files.  (Sameer Paranjpye via dhruba)
+
 
 Release 0.13.0 - 2007-06-08
 

Modified: lucene/hadoop/branches/branch-0.14/src/c++/libhdfs/hdfs.c
URL: http://svn.apache.org/viewvc/lucene/hadoop/branches/branch-0.14/src/c%2B%2B/libhdfs/hdfs.c?view=diff&rev=561433&r1=561432&r2=561433
==============================================================================
--- lucene/hadoop/branches/branch-0.14/src/c++/libhdfs/hdfs.c (original)
+++ lucene/hadoop/branches/branch-0.14/src/c++/libhdfs/hdfs.c Tue Jul 31 11:59:39 2007
@@ -28,6 +28,7 @@
 #define HADOOP_DFS "org/apache/hadoop/dfs/DistributedFileSystem"
 #define HADOOP_ISTRM "org/apache/hadoop/fs/FSDataInputStream"
 #define HADOOP_OSTRM "org/apache/hadoop/fs/FSDataOutputStream"
+#define HADOOP_STAT "org/apache/hadoop/fs/FileStatus"
 #define JAVA_NET_ISA "java/net/InetSocketAddress"
 #define JAVA_NET_URI "java/net/URI"
 
@@ -1076,6 +1077,42 @@
 }
 
 
+int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication)
+{
+    // JAVA EQUIVALENT:
+    //  fs.setReplication(new Path(path), replication);
+
+    //Get the JNIEnv* corresponding to current thread
+    JNIEnv* env = getJNIEnv();
+
+    jobject jFS = (jobject)fs;
+
+    //Create an object of org.apache.hadoop.fs.Path
+    jobject jPath = constructNewObjectOfPath(env, path);
+    if (jPath == NULL) {
+        return -1;
+    }
+
+    //Set the replication on the file
+    jvalue jVal = {0};
+    if (invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
+                     "setReplication", "(Lorg/apache/hadoop/fs/Path;S)Z",
+                     jPath, replication) != 0) {
+        fprintf(stderr, "Call to org.apache.hadoop.fs.FileSystem::"
+                "setReplication failed!\n");
+        errno = EINTERNAL;
+        goto done;
+    }
+
+ done:
+
+    //Delete unnecessary local references
+    destroyLocalReference(env, jPath);
+
+    return (jVal.z) ? 0 : -1;
+}
+
+
 char*** hdfsGetHosts(hdfsFS fs, const char* path, 
             tOffset start, tOffset length)
 {
@@ -1287,8 +1324,8 @@
     //  fs.getLength(f)
     //  f.getPath()
 
-    jboolean jIsDir;
-    jvalue jVal;
+    jobject jStat;
+    jvalue jVal;
 
     if (invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
                      "exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"),
@@ -1300,47 +1337,69 @@
     }
 
     if (jVal.z == 0) {
-        errno = EINTERNAL;
+        errno = ENOENT;
         return -1;
     }
 
     if (invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
-                     "isDirectory", "(Lorg/apache/hadoop/fs/Path;)Z",
+                     "getFileStatus", JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_STAT)),
                      jPath) != 0) {
         fprintf(stderr, "Call to org.apache.hadoop.fs."
-                "FileSystem::isDirectory failed!\n");
+                "FileSystem::getFileStatus failed!\n");
         errno = EINTERNAL;
         return -1;
     }
-    jIsDir = jVal.z;
+    jStat = jVal.l;
 
-    /*
-    jlong jModTime = 0;
-    if (invokeMethod(env, (RetVal*)&jModTime, &jException, INSTANCE, jFS, 
-                "org/apache/hadoop/fs/FileSystem", "lastModified", 
-                "(Lorg/apache/hadoop/fs/Path;)J", jPath) != 0) {
-        fprintf(stderr,
-             "Call to org.apache.hadoop.fs.FileSystem::lastModified failed!\n"
-             );
+    if (invokeMethod(env, &jVal, INSTANCE, jStat,
+                     HADOOP_STAT, "isDir", "()Z") != 0) {
+        fprintf(stderr, "Call to org.apache.hadoop.fs."
+                "FileStatus::isDir failed!\n");
         errno = EINTERNAL;
         return -1;
     }
-    */
+    fileInfo->mKind = jVal.z ? kObjectKindDirectory : kObjectKindFile;
 
-    jlong jFileLength = 0;
-    if (!jIsDir) {
-        if (invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
-                         "getLength", "(Lorg/apache/hadoop/fs/Path;)J",
-                         jPath) != 0) {
+    if (invokeMethod(env, &jVal, INSTANCE, jStat,
+                     HADOOP_STAT, "getReplication", "()S") != 0) {
+        fprintf(stderr, "Call to org.apache.hadoop.fs."
+                "FileStatus::getReplication failed!\n");
+        errno = EINTERNAL;
+        return -1;
+    }
+    fileInfo->mReplication = jVal.s;
+
+    if (invokeMethod(env, &jVal, INSTANCE, jStat,
+                     HADOOP_STAT, "getBlockSize", "()J") != 0) {
+        fprintf(stderr, "Call to org.apache.hadoop.fs."
+                "FileStatus::getBlockSize failed!\n");
+        errno = EINTERNAL;
+        return -1;
+    }
+    fileInfo->mBlockSize = jVal.j;
+
+    if (invokeMethod(env, &jVal, INSTANCE, jStat,
+                     HADOOP_STAT, "getModificationTime", "()J") != 0) {
+        fprintf(stderr, "Call to org.apache.hadoop.fs."
+                "FileStatus::getModificationTime failed!\n");
+        errno = EINTERNAL;
+        return -1;
+    }
+    fileInfo->mLastMod = (tTime) (jVal.j / 1000);
+
+    if (fileInfo->mKind == kObjectKindFile) {
+        if (invokeMethod(env, &jVal, INSTANCE, jStat,
+                         HADOOP_STAT, "getLen", "()J") != 0) {
             fprintf(stderr, "Call to org.apache.hadoop.fs."
-                    "FileSystem::getLength failed!\n");
+                    "FileStatus::getLen failed!\n");
             errno = EINTERNAL;
             return -1;
         }
-        jFileLength = jVal.j;
+        fileInfo->mSize = jVal.j;
     }
 
-    jstring jPathName;
+    jstring jPathName;
+    const char *cPathName;
     if (invokeMethod(env, &jVal, INSTANCE, jPath, HADOOP_PATH,
                      "toString", "()Ljava/lang/String;")) { 
         fprintf(stderr, "Call to org.apache.hadoop.fs."
@@ -1349,18 +1408,9 @@
         return -1;
     }
     jPathName = jVal.l;
-
-    fileInfo->mKind = (jIsDir ? kObjectKindDirectory : kObjectKindFile);
-    //fileInfo->mCreationTime = jModTime;
-    fileInfo->mSize = jFileLength;
-
-    const char* cPathName = (const char*)
-      ((*env)->GetStringUTFChars(env, jPathName, NULL));
-
+    cPathName = (const char*) ((*env)->GetStringUTFChars(env, jPathName, NULL));
     fileInfo->mName = strdup(cPathName);
-
     (*env)->ReleaseStringUTFChars(env, jPathName, cPathName);
-
     destroyLocalReference(env, jPathName);
 
     return 0;
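One behavioral detail in the hdfs.c hunk above is worth calling out: hdfsGetPathInfo now sets errno to ENOENT when the path does not exist, instead of the catch-all EINTERNAL, so callers can finally distinguish a missing file from an internal failure. A small caller-side sketch (the helper name and semantics are illustrative, not part of the patch):

    #include "hdfs.h"
    #include <errno.h>

    /* Returns 1 if path exists, 0 if it does not, -1 on internal error.
     * Relies on the new errno semantics: ENOENT for a missing path,
     * anything else (e.g. EINTERNAL) for a genuine failure. */
    static int statPath(hdfsFS fs, const char *path) {
        hdfsFileInfo *info = hdfsGetPathInfo(fs, path);
        if (info) {
            hdfsFreeFileInfo(info, 1);
            return 1;
        }
        return (errno == ENOENT) ? 0 : -1;
    }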
- "FileSystem::isDirectory failed!\n"); + "FileSystem::getFileStatus failed!\n"); errno = EINTERNAL; return -1; } - jIsDir = jVal.z; + jStat = jVal.l; - /* - jlong jModTime = 0; - if (invokeMethod(env, (RetVal*)&jModTime, &jException, INSTANCE, jFS, - "org/apache/hadoop/fs/FileSystem", "lastModified", - "(Lorg/apache/hadoop/fs/Path;)J", jPath) != 0) { - fprintf(stderr, - "Call to org.apache.hadoop.fs.FileSystem::lastModified failed!\n" - ); + if (invokeMethod(env, &jVal, INSTANCE, jStat, + HADOOP_STAT, "isDir", "()Z") != 0) { + fprintf(stderr, "Call to org.apache.hadoop.fs." + "FileStatus::isDir failed!\n"); errno = EINTERNAL; return -1; } - */ + fileInfo->mKind = jVal.z ? kObjectKindDirectory : kObjectKindFile; - jlong jFileLength = 0; - if (!jIsDir) { - if (invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS, - "getLength", "(Lorg/apache/hadoop/fs/Path;)J", - jPath) != 0) { + if (invokeMethod(env, &jVal, INSTANCE, jStat, + HADOOP_STAT, "getReplication", "()S") != 0) { + fprintf(stderr, "Call to org.apache.hadoop.fs." + "FileStatus::getReplication failed!\n"); + errno = EINTERNAL; + return -1; + } + fileInfo->mReplication = jVal.s; + + if (invokeMethod(env, &jVal, INSTANCE, jStat, + HADOOP_STAT, "getBlockSize", "()J") != 0) { + fprintf(stderr, "Call to org.apache.hadoop.fs." + "FileStatus::getBlockSize failed!\n"); + errno = EINTERNAL; + return -1; + } + fileInfo->mBlockSize = jVal.j; + + if (invokeMethod(env, &jVal, INSTANCE, jStat, + HADOOP_STAT, "getModificationTime", "()J") != 0) { + fprintf(stderr, "Call to org.apache.hadoop.fs." + "FileStatus::getModificationTime failed!\n"); + errno = EINTERNAL; + return -1; + } + fileInfo->mLastMod = (tTime) (jVal.j / 1000); + + if (fileInfo->mKind == kObjectKindFile) { + if (invokeMethod(env, &jVal, INSTANCE, jStat, + HADOOP_STAT, "getLen", "()J") != 0) { fprintf(stderr, "Call to org.apache.hadoop.fs." - "FileSystem::getLength failed!\n"); + "FileStatus::getLen failed!\n"); errno = EINTERNAL; return -1; } - jFileLength = jVal.j; + fileInfo->mSize = jVal.j; } - jstring jPathName; + jstring jPathName; + const char *cPathName; if (invokeMethod(env, &jVal, INSTANCE, jPath, HADOOP_PATH, "toString", "()Ljava/lang/String;")) { fprintf(stderr, "Call to org.apache.hadoop.fs." @@ -1349,18 +1408,9 @@ return -1; } jPathName = jVal.l; - - fileInfo->mKind = (jIsDir ? kObjectKindDirectory : kObjectKindFile); - //fileInfo->mCreationTime = jModTime; - fileInfo->mSize = jFileLength; - - const char* cPathName = (const char*) - ((*env)->GetStringUTFChars(env, jPathName, NULL)); - + cPathName = (const char*) ((*env)->GetStringUTFChars(env, jPathName, NULL)); fileInfo->mName = strdup(cPathName); - (*env)->ReleaseStringUTFChars(env, jPathName, cPathName); - destroyLocalReference(env, jPathName); return 0; Modified: lucene/hadoop/branches/branch-0.14/src/c++/libhdfs/hdfs.h URL: http://svn.apache.org/viewvc/lucene/hadoop/branches/branch-0.14/src/c%2B%2B/libhdfs/hdfs.h?view=diff&rev=561433&r1=561432&r2=561433 ============================================================================== --- lucene/hadoop/branches/branch-0.14/src/c++/libhdfs/hdfs.h (original) +++ lucene/hadoop/branches/branch-0.14/src/c++/libhdfs/hdfs.h Tue Jul 31 11:59:39 2007 @@ -299,14 +299,25 @@ /** + * hdfsSetReplication - Set the replication of the specified + * file to the supplied value + * @param fs The configured filesystem handle. + * @param path The path of the file. + * @return Returns 0 on success, -1 on error. 
Modified: lucene/hadoop/branches/branch-0.14/src/c++/libhdfs/hdfsJniHelper.c
URL: http://svn.apache.org/viewvc/lucene/hadoop/branches/branch-0.14/src/c%2B%2B/libhdfs/hdfsJniHelper.c?view=diff&rev=561433&r1=561432&r2=561433
==============================================================================
--- lucene/hadoop/branches/branch-0.14/src/c++/libhdfs/hdfsJniHelper.c (original)
+++ lucene/hadoop/branches/branch-0.14/src/c++/libhdfs/hdfsJniHelper.c Tue Jul 31 11:59:39 2007
@@ -175,6 +175,17 @@
         CHECK_EXCEPTION_IN_METH_INVOC
         retval->z = jbool;
     }
+    else if (returnType == JSHORT) {
+        jshort js = 0;
+        if (methType == STATIC) {
+            js = (*env)->CallStaticShortMethodV(env, cls, mid, args);
+        }
+        else if (methType == INSTANCE) {
+            js = (*env)->CallShortMethodV(env, instObj, mid, args);
+        }
+        CHECK_EXCEPTION_IN_METH_INVOC
+        retval->s = js;
+    }
     else if (returnType == JLONG) {
         jlong jl = -1;
         if (methType == STATIC) {
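The new JSHORT branch is what lets invokeMethod marshal the "()S" return of FileStatus.getReplication. Stripped of the helper, the equivalent raw JNI looks roughly like this (hypothetical standalone fragment; env is a valid JNIEnv* and jStat a live FileStatus reference, as in hdfsGetPathInfo):

    #include <jni.h>

    /* A method with descriptor "()S" must be invoked through the Short
     * family of JNI Call* functions; going through CallIntMethod or
     * CallObjectMethod for a short return is incorrect per the JNI spec. */
    static jshort getReplicationOf(JNIEnv *env, jobject jStat) {
        jclass cls = (*env)->GetObjectClass(env, jStat);
        jmethodID mid = (*env)->GetMethodID(env, cls, "getReplication", "()S");
        if (mid == NULL) {
            return -1;  /* NoSuchMethodError is pending on the env */
        }
        jshort s = (*env)->CallShortMethod(env, jStat, mid);
        if ((*env)->ExceptionCheck(env)) {
            (*env)->ExceptionClear(env);
            return -1;
        }
        return s;
    }

Modified: lucene/hadoop/branches/branch-0.14/src/c++/libhdfs/hdfs_test.c
URL: http://svn.apache.org/viewvc/lucene/hadoop/branches/branch-0.14/src/c%2B%2B/libhdfs/hdfs_test.c?view=diff&rev=561433&r1=561432&r2=561433
==============================================================================
--- lucene/hadoop/branches/branch-0.14/src/c++/libhdfs/hdfs_test.c (original)
+++ lucene/hadoop/branches/branch-0.14/src/c++/libhdfs/hdfs_test.c Tue Jul 31 11:59:39 2007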
@@ -120,9 +120,7 @@
 
         //Generic file-system operations
         const char* srcPath = "/tmp/testfile.txt";
-        const char* localSrcPath = "testfile.txt";
         const char* dstPath = "/tmp/testfile2.txt";
-        const char* localDstPath = "testfile2.txt";
 
         fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath, lfs, srcPath)) ? "Failed!" : "Success!"));
         totalResult += result;
@@ -143,13 +141,17 @@
         fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) ? "Failed!" : "Success!"));
         totalResult += result;
+        fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 2)) ? "Failed!" : "Success!"));
+        totalResult += result;
+
         char buffer[256];
-        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((result = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
-        totalResult += (result ? 0 : 1);
+        const char *resp;
+        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
+        totalResult += (resp ? 0 : 1);
 
         fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) ? "Failed!" : "Success!"));
         totalResult += result;
-        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((result = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
-        totalResult += (result ? 0 : 1);
+        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
+        totalResult += (resp ? 0 : 1);
 
         fprintf(stderr, "hdfsGetDefaultBlockSize: %Ld\n", hdfsGetDefaultBlockSize(fs));
         fprintf(stderr, "hdfsGetCapacity: %Ld\n", hdfsGetCapacity(fs));
@@ -158,9 +160,12 @@
         hdfsFileInfo *fileInfo = NULL;
         if(fileInfo = hdfsGetPathInfo(fs, slashTmp)) {
             fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n");
-            fprintf(stderr, "Name: %s,", fileInfo->mName);
-            fprintf(stderr, "Type: %c,", (char)fileInfo->mKind);
-            fprintf(stderr, "Size: %ld\n", fileInfo->mSize);
+            fprintf(stderr, "Name: %s, ", fileInfo->mName);
+            fprintf(stderr, "Type: %c, ", (char)(fileInfo->mKind));
+            fprintf(stderr, "Replication: %d, ", fileInfo->mReplication);
+            fprintf(stderr, "BlockSize: %ld, ", fileInfo->mBlockSize);
+            fprintf(stderr, "Size: %ld, ", fileInfo->mSize);
+            fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod));
             hdfsFreeFileInfo(fileInfo, 1);
         } else {
             totalResult++;
@@ -172,9 +177,12 @@
         if(fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) {
             int i = 0;
             for(i=0; i < numEntries; ++i) {
-                fprintf(stderr, "Name: %s,", fileList[i].mName);
-                fprintf(stderr, "Type: %c,", (char)fileList[i].mKind);
-                fprintf(stderr, "Size: %ld\n", fileList[i].mSize);
+                fprintf(stderr, "Name: %s, ", fileList[i].mName);
+                fprintf(stderr, "Type: %c, ", (char)fileList[i].mKind);
+                fprintf(stderr, "Replication: %d, ", fileList[i].mReplication);
+                fprintf(stderr, "BlockSize: %ld, ", fileList[i].mBlockSize);
+                fprintf(stderr, "Size: %ld, ", fileList[i].mSize);
+                fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod));
             }
             hdfsFreeFileInfo(fileList, numEntries);
         } else {
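A closing note on the test output above: the "LastMod: %s" lines deliberately omit a trailing newline because ctime() appends one itself. ctime() also returns a pointer to a static buffer, which is fine in this single-threaded test; where thread safety matters, a sketch of the reentrant alternative (assuming the same hdfsFileInfo layout) would be:

    #include "hdfs.h"
    #include <stdio.h>
    #include <time.h>

    /* ctime_r() writes into a caller-supplied buffer of at least 26
     * bytes instead of ctime()'s shared static buffer. */
    static void printLastMod(const hdfsFileInfo *fi) {
        char buf[26];
        ctime_r(&fi->mLastMod, buf);
        fprintf(stderr, "LastMod: %s", buf);
    }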