Author: zshao
Date: Thu Sep 18 10:44:45 2008
New Revision: 696741
URL: http://svn.apache.org/viewvc?rev=696741&view=rev
Log:
HADOOP-4104. libhdfs: add time, permission and user attribute support. (Pete Wyckoff through zshao)
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/c++/libhdfs/hdfs.c
hadoop/core/trunk/src/c++/libhdfs/hdfs.h
hadoop/core/trunk/src/c++/libhdfs/hdfsJniHelper.c
hadoop/core/trunk/src/c++/libhdfs/hdfsJniHelper.h
hadoop/core/trunk/src/c++/libhdfs/hdfs_test.c
Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=696741&r1=696740&r2=696741&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Thu Sep 18 10:44:45 2008
@@ -177,7 +177,10 @@
IMPROVEMENTS
- HADOOP-3908. Fuse-dfs: better error message if llibhdfs.so doesn't exist.
+ HADOOP-4104. libhdfs: add time, permission and user attribute support.
+ (Pete Wyckoff through zshao)
+
+ HADOOP-3908. libhdfs: better error message if libhdfs.so doesn't exist.
(Pete Wyckoff through zshao)
HADOOP-3732. Delay initialization of datanode block verification till
Modified: hadoop/core/trunk/src/c++/libhdfs/hdfs.c
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/c%2B%2B/libhdfs/hdfs.c?rev=696741&r1=696740&r2=696741&view=diff
==============================================================================
--- hadoop/core/trunk/src/c++/libhdfs/hdfs.c (original)
+++ hadoop/core/trunk/src/c++/libhdfs/hdfs.c Thu Sep 18 10:44:45 2008
@@ -30,16 +30,21 @@
#define HADOOP_ISTRM "org/apache/hadoop/fs/FSDataInputStream"
#define HADOOP_OSTRM "org/apache/hadoop/fs/FSDataOutputStream"
#define HADOOP_STAT "org/apache/hadoop/fs/FileStatus"
+#define HADOOP_FSPERM "org/apache/hadoop/fs/permission/FsPermission"
+#define HADOOP_UNIX_USER_GROUP_INFO "org/apache/hadoop/security/UnixUserGroupInformation"
+#define HADOOP_USER_GROUP_INFO "org/apache/hadoop/security/UserGroupInformation"
#define JAVA_NET_ISA "java/net/InetSocketAddress"
#define JAVA_NET_URI "java/net/URI"
+#define JAVA_STRING "java/lang/String"
-
+#define JAVA_VOID "V"
/* Macros for constructing method signatures */
#define JPARAM(X) "L" X ";"
#define JARRPARAM(X) "[L" X ";"
#define JMETHOD1(X, R) "(" X ")" R
#define JMETHOD2(X, Y, R) "(" X Y ")" R
+#define JMETHOD3(X, Y, Z, R) "(" X Y Z ")" R
/**
@@ -148,7 +153,16 @@
return errnum;
}
-hdfsFS hdfsConnect(const char* host, tPort port)
+
+
+
+hdfsFS hdfsConnect(const char* host, tPort port) {
+ // connect with NULL as user name/groups
+ return hdfsConnectAsUser(host, port, NULL, NULL, 0);
+}
+
+
+hdfsFS hdfsConnectAsUser(const char* host, tPort port, const char *user, const char **groups, int groups_size)
{
// JAVA EQUIVALENT:
// FileSystem fs = FileSystem.get(new Configuration());
@@ -183,6 +197,82 @@
return NULL;
}
+ if (user != NULL) {
+
+ if (groups == NULL || groups_size <= 0) {
+ fprintf(stderr, "ERROR: groups must not be empty/null\n");
+ errno = EINVAL;
+ return NULL;
+ }
+
+ jstring jUserString = (*env)->NewStringUTF(env, user);
+ jarray jGroups = constructNewArrayString(env, &jExc, groups, groups_size);
+ if (jGroups == NULL) {
+ errno = EINTERNAL;
+ fprintf(stderr, "ERROR: could not construct groups array\n");
+ return NULL;
+ }
+
+ jobject jUgi;
+ if ((jUgi = constructNewObjectOfClass(env, &jExc, HADOOP_UNIX_USER_GROUP_INFO, JMETHOD2(JPARAM(JAVA_STRING), JARRPARAM(JAVA_STRING), JAVA_VOID), jUserString, jGroups)) == NULL) {
+ fprintf(stderr, "failed to construct hadoop user unix group info object\n");
+ errno = errnoFromException(jExc, env, HADOOP_UNIX_USER_GROUP_INFO,
+ "init");
+ destroyLocalReference(env, jConfiguration);
+ destroyLocalReference(env, jUserString);
+ if (jGroups != NULL) {
+ destroyLocalReference(env, jGroups);
+ }
+ return NULL;
+ }
+#define USE_UUGI
+#ifdef USE_UUGI
+
+ // UnixUserGroupInformation.UGI_PROPERTY_NAME
+ jstring jAttrString = (*env)->NewStringUTF(env,"hadoop.job.ugi");
+
+ if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_UNIX_USER_GROUP_INFO, "saveToConf",
+ JMETHOD3(JPARAM(HADOOP_CONF), JPARAM(JAVA_STRING), JPARAM(HADOOP_UNIX_USER_GROUP_INFO), JAVA_VOID),
+ jConfiguration, jAttrString, jUgi) != 0) {
+ errno = errnoFromException(jExc, env, HADOOP_UNIX_USER_GROUP_INFO,
+ "init");
+ destroyLocalReference(env, jConfiguration);
+ destroyLocalReference(env, jUserString);
+ if (jGroups != NULL) {
+ destroyLocalReference(env, jGroups);
+ }
+ destroyLocalReference(env, jUgi);
+ return NULL;
+ }
+
+ destroyLocalReference(env, jUserString);
+ destroyLocalReference(env, jGroups);
+ destroyLocalReference(env, jUgi);
+ }
+#else
+
+ // what does "current" mean in the context of libhdfs? does it mean for the last hdfs connection we used?
+ // that's why this code cannot be activated. We know the above use of the conf object should work well with
+ // multiple connections.
+ if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_USER_GROUP_INFO, "setCurrentUGI",
+ JMETHOD1(JPARAM(HADOOP_USER_GROUP_INFO), JAVA_VOID),
+ jUgi) != 0) {
+ errno = errnoFromException(jExc, env, HADOOP_USER_GROUP_INFO,
+ "setCurrentUGI");
+ destroyLocalReference(env, jConfiguration);
+ destroyLocalReference(env, jUserString);
+ if (jGroups != NULL) {
+ destroyLocalReference(env, jGroups);
+ }
+ destroyLocalReference(env, jUgi);
+ return NULL;
+ }
+
+ destroyLocalReference(env, jUserString);
+ destroyLocalReference(env, jGroups);
+ destroyLocalReference(env, jUgi);
+ }
+#endif
//Check what type of FileSystem the caller wants...
if (host == NULL) {
// fs = FileSystem::getLocal(conf);
@@ -314,7 +404,7 @@
jobject jFS = (jobject)fs;
- if(flags & O_RDWR) {
+ if (flags & O_RDWR) {
fprintf(stderr, "ERROR: cannot open an hdfs file in O_RDWR mode\n");
errno = ENOTSUP;
return NULL;
@@ -1261,6 +1351,145 @@
return (jVal.z) ? 0 : -1;
}
+int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group)
+{
+ // JAVA EQUIVALENT:
+ // fs.setOwner(path, owner, group)
+
+ //Get the JNIEnv* corresponding to current thread
+ JNIEnv* env = getJNIEnv();
+ if (env == NULL) {
+ errno = EINTERNAL;
+ return -1;
+ }
+
+ if (owner == NULL && group == NULL) {
+ fprintf(stderr, "Both owner and group cannot be null in chown");
+ errno = EINVAL;
+ return -1;
+ }
+
+ jobject jFS = (jobject)fs;
+
+ jobject jPath = constructNewObjectOfPath(env, path);
+ if (jPath == NULL) {
+ return -1;
+ }
+
+ jstring jOwnerString = (*env)->NewStringUTF(env, owner);
+ jstring jGroupString = (*env)->NewStringUTF(env, group);
+
+ //Set the owner and group
+ int ret = 0;
+ jthrowable jExc = NULL;
+ if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
+ "setOwner", JMETHOD3(JPARAM(HADOOP_PATH),
JPARAM(JAVA_STRING), JPARAM(JAVA_STRING), JAVA_VOID),
+ jPath, jOwnerString, jGroupString) != 0) {
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::setOwner");
+ ret = -1;
+ goto done;
+ }
+
+ done:
+ destroyLocalReference(env, jPath);
+ destroyLocalReference(env, jOwnerString);
+ destroyLocalReference(env, jGroupString);
+
+ return ret;
+}
+
+int hdfsChmod(hdfsFS fs, const char* path, short mode)
+{
+ // JAVA EQUIVALENT:
+ // fs.setPermission(path, FsPermission)
+
+ //Get the JNIEnv* corresponding to current thread
+ JNIEnv* env = getJNIEnv();
+ if (env == NULL) {
+ errno = EINTERNAL;
+ return -1;
+ }
+
+ jobject jFS = (jobject)fs;
+
+ // construct jPerm = new FsPermission((short) mode);
+
+ jshort jmode = mode;
+
+ jobject jPermObj =
+ constructNewObjectOfClass(env, NULL, HADOOP_FSPERM,"(S)V",jmode);
+ if (jPermObj == NULL) {
+ return -2;
+ }
+
+ //Create an object of org.apache.hadoop.fs.Path
+ jobject jPath = constructNewObjectOfPath(env, path);
+ if (jPath == NULL) {
+ destroyLocalReference(env, jPermObj); // avoid leaking the FsPermission local ref
+ return -3;
+ }
+
+ //Set the permission
+ int ret = 0;
+ jthrowable jExc = NULL;
+ if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
+ "setPermission", JMETHOD2(JPARAM(HADOOP_PATH),
JPARAM(HADOOP_FSPERM), JAVA_VOID),
+ jPath, jPermObj) != 0) {
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::setPermission");
+ ret = -1;
+ goto done;
+ }
+
+ done:
+ destroyLocalReference(env, jPath);
+ destroyLocalReference(env, jPermObj);
+
+ return ret;
+}
+
+int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime)
+{
+ // JAVA EQUIVALENT:
+ // fs.setTimes(src, mtime, atime)
+
+ //Get the JNIEnv* corresponding to current thread
+ JNIEnv* env = getJNIEnv();
+ if (env == NULL) {
+ errno = EINTERNAL;
+ return -1;
+ }
+
+ jobject jFS = (jobject)fs;
+
+ //Create an object of org.apache.hadoop.fs.Path
+ jobject jPath = constructNewObjectOfPath(env, path);
+ if (jPath == NULL) {
+ fprintf(stderr, "could not construct path object\n");
+ return -2;
+ }
+
+ jlong jmtime = mtime * 1000;
+ jlong jatime = atime * 1000;
+
+ int ret = 0;
+ jthrowable jExc = NULL;
+ if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
+ "setTimes", JMETHOD3(JPARAM(HADOOP_PATH), "J", "J",
JAVA_VOID),
+ jPath, jmtime, jatime) != 0) {
+ fprintf(stderr, "call to setTime failed\n");
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::setTimes");
+ ret = -1;
+ goto done;
+ }
+
+ done:
+ destroyLocalReference(env, jPath);
+ return ret;
+}
+
+
char***
@@ -1546,7 +1775,16 @@
"FileStatus::getModificationTime");
return -1;
}
- fileInfo->mLastMod = (tTime) (jVal.j / 1000);
+ fileInfo->mLastMod = (tTime) (jVal.j) / 1000;
+
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
+ HADOOP_STAT, "getAccessTime", "()J") != 0) {
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileStatus::getAccessTime");
+ return -1;
+ }
+ fileInfo->mLastAccess = (tTime) (jVal.j) / 1000;
+
if (fileInfo->mKind == kObjectKindFile) {
if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
@@ -1583,6 +1821,55 @@
(*env)->ReleaseStringUTFChars(env, jPathName, cPathName);
destroyLocalReference(env, jPath);
destroyLocalReference(env, jPathName);
+ jstring jUserName;
+ const char* cUserName;
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
+ "getOwner", "()Ljava/lang/String;")) {
+ fprintf(stderr, "Call to org.apache.hadoop.fs."
+ "FileStatus::getOwner failed!\n");
+ errno = EINTERNAL;
+ return -1;
+ }
+ jUserName = jVal.l;
+ cUserName = (const char*) ((*env)->GetStringUTFChars(env, jUserName, NULL));
+ fileInfo->mOwner = strdup(cUserName);
+ (*env)->ReleaseStringUTFChars(env, jUserName, cUserName);
+ destroyLocalReference(env, jUserName);
+
+ jstring jGroupName;
+ const char* cGroupName;
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
+ "getGroup", "()Ljava/lang/String;")) {
+ fprintf(stderr, "Call to org.apache.hadoop.fs."
+ "FileStatus::getGroup failed!\n");
+ errno = EINTERNAL;
+ return -1;
+ }
+ jGroupName = jVal.l;
+ cGroupName = (const char*) ((*env)->GetStringUTFChars(env, jGroupName, NULL));
+ fileInfo->mGroup = strdup(cGroupName);
+ (*env)->ReleaseStringUTFChars(env, jGroupName, cGroupName);
+ destroyLocalReference(env, jGroupName);
+
+ jobject jPermission;
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
+ "getPermission",
"()Lorg/apache/hadoop/fs/permission/FsPermission;") ||
+ jVal.l == NULL) {
+ fprintf(stderr, "Call to org.apache.hadoop.fs."
+ "FileStatus::getPermission failed!\n");
+ errno = EINTERNAL;
+ return -1;
+ }
+ jPermission = jVal.l;
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPermission, HADOOP_FSPERM,
+ "toShort", "()S") != 0) {
+ fprintf(stderr, "Call to org.apache.hadoop.fs.permission."
+ "FsPermission::toShort failed!\n");
+ errno = EINTERNAL;
+ return -1;
+ }
+ fileInfo->mPermissions = jVal.s;
+ destroyLocalReference(env, jPermission);
return 0;
}
@@ -1592,9 +1879,13 @@
{
// JAVA EQUIVALENT:
// fs.isDirectory(f)
- // fs.lastModified() ??
+ // fs.getModificationTime()
+ // fs.getAccessTime()
// fs.getLength(f)
// f.getPath()
+ // f.getOwner()
+ // f.getGroup()
+ // f.getPermission().toShort()
jobject jStat;
jvalue jVal;
Modified: hadoop/core/trunk/src/c++/libhdfs/hdfs.h
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/c%2B%2B/libhdfs/hdfs.h?rev=696741&r1=696740&r2=696741&view=diff
==============================================================================
--- hadoop/core/trunk/src/c++/libhdfs/hdfs.h (original)
+++ hadoop/core/trunk/src/c++/libhdfs/hdfs.h Thu Sep 18 10:44:45 2008
@@ -55,7 +55,7 @@
*/
typedef int32_t tSize; /// size of data for read/write io ops
- typedef time_t tTime; /// time type
+ typedef time_t tTime; /// time type in seconds
typedef int64_t tOffset;/// offset within the file
typedef uint16_t tPort; /// port
typedef enum tObjectKind {
@@ -92,6 +92,22 @@
/**
+ * hdfsConnectAsUser - Connect to a hdfs file system as a specific user
+ * Connect to the hdfs.
+ * @param host A string containing either a host name, or an ip address
+ * of the namenode of a hdfs cluster. 'host' should be passed as NULL if
+ * you want to connect to local filesystem. 'host' should be passed as
+ * 'default' (and port as 0) to use the 'configured' filesystem
+ * (hadoop-site/hadoop-default.xml).
+ * @param port The port on which the server is listening.
+ * @param user the user name (this is hadoop domain user). Or NULL is equivalent to hdfsConnect(host, port)
+ * @param groups the groups (these are hadoop domain groups)
+ * @return Returns a handle to the filesystem or NULL on error.
+ */
+ hdfsFS hdfsConnectAsUser(const char* host, tPort port, const char *user, const char *groups[], int groups_size);
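For illustration, a minimal caller sketch (the host, user, and group names here are hypothetical, and error handling is reduced to the NULL check):

    #include "hdfs.h"

    /* Connect to the configured namenode as user "nobody" with two groups. */
    const char *groups[] = { "users", "nobody" };
    hdfsFS fs = hdfsConnectAsUser("default", 0, "nobody", groups, 2);
    if (!fs) {
        /* errno is set, e.g. EINVAL when user is non-NULL but groups is empty */
        exit(-1);
    }
    /* ... use fs exactly like a handle returned by hdfsConnect ... */
    hdfsDisconnect(fs);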
+
+
+ /**
* hdfsConnect - Connect to a hdfs file system.
* Connect to the hdfs.
* @param host A string containing either a host name, or an ip address
@@ -102,7 +118,7 @@
* @param port The port on which the server is listening.
* @return Returns a handle to the filesystem or NULL on error.
*/
- hdfsFS hdfsConnect(const char* host, tPort port);
+ hdfsFS hdfsConnect(const char* host, tPort port);
/**
@@ -314,10 +330,14 @@
typedef struct {
tObjectKind mKind; /* file or directory */
char *mName; /* the name of the file */
- tTime mLastMod; /* the last modification time for the file*/
+ tTime mLastMod; /* the last modification time for the file in seconds */
tOffset mSize; /* the size of the file in bytes */
short mReplication; /* the count of replicas */
tOffset mBlockSize; /* the block size for the file */
+ char *mOwner; /* the owner of the file */
+ char *mGroup; /* the group associated with the file */
+ short mPermissions; /* the permissions associated with the file */
+ tTime mLastAccess; /* the last access time for the file in seconds */
} hdfsFileInfo;
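A short sketch of reading the new fields back (the path is hypothetical; hdfsFreeFileInfo also frees the strdup'ed mOwner/mGroup strings):

    hdfsFileInfo *info = hdfsGetPathInfo(fs, "/tmp/testfile.txt");
    if (info != NULL) {
        fprintf(stderr, "owner=%s group=%s perms=%03o atime=%ld\n",
                info->mOwner, info->mGroup,
                (unsigned)info->mPermissions, (long)info->mLastAccess);
        hdfsFreeFileInfo(info, 1);
    }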
@@ -402,6 +422,35 @@
* @return Returns the total-size; -1 on error.
*/
tOffset hdfsGetUsed(hdfsFS fs);
+
+ /**
+ * hdfsChown
+ * @param fs The configured filesystem handle.
+ * @param path the path to the file or directory
+ * @param owner this is a string in Hadoop land. Set to null or "" if only setting group
+ * @param group this is a string in Hadoop land. Set to null or "" if only setting user
+ * @return 0 on success else -1
+ */
+ int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group);
+
+ /**
+ * hdfsChmod
+ * @param fs The configured filesystem handle.
+ * @param path the path to the file or directory
+ * @param mode the bitmask to set it to
+ * @return 0 on success else -1
+ */
+ int hdfsChmod(hdfsFS fs, const char* path, short mode);
+
+ /**
+ * hdfsUtime
+ * @param fs The configured filesystem handle.
+ * @param path the path to the file or directory
+ * @param mtime new modification time in seconds, or 0 to set only the access time
+ * @param atime new access time in seconds, or 0 to set only the modification time
+ * @return 0 on success else -1
+ */
+ int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime);
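A hedged end-to-end sketch of the three new calls (path, owner, and group are hypothetical; times are passed in seconds and converted to milliseconds inside hdfsUtime):

    time_t now = time(NULL);  /* needs <time.h> */
    if (hdfsChown(fs, "/tmp/testfile.txt", "root", "users") != 0)
        fprintf(stderr, "hdfsChown failed\n");
    if (hdfsChmod(fs, "/tmp/testfile.txt", 0644) != 0)
        fprintf(stderr, "hdfsChmod failed\n");
    if (hdfsUtime(fs, "/tmp/testfile.txt", (tTime)now, (tTime)now) != 0)
        fprintf(stderr, "hdfsUtime failed\n");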
#ifdef __cplusplus
}
Modified: hadoop/core/trunk/src/c++/libhdfs/hdfsJniHelper.c
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/c%2B%2B/libhdfs/hdfsJniHelper.c?rev=696741&r1=696740&r2=696741&view=diff
==============================================================================
--- hadoop/core/trunk/src/c++/libhdfs/hdfsJniHelper.c (original)
+++ hadoop/core/trunk/src/c++/libhdfs/hdfsJniHelper.c Thu Sep 18 10:44:45 2008
@@ -80,7 +80,7 @@
if (key == NULL || data == NULL) {
return 0;
}
- if(! hashTableInit()) {
+ if (! hashTableInit()) {
return -1;
}
e.data = data;
@@ -218,6 +218,30 @@
return 0;
}
+jarray constructNewArrayString(JNIEnv *env, Exc *exc, const char **elements, int size) {
+ const char *className = "Ljava/lang/String;";
+ jobjectArray result;
+ int i;
+ jclass arrCls = (*env)->FindClass(env, className);
+ if (arrCls == NULL) {
+ fprintf(stderr, "could not find class %s\n",className);
+ return NULL; /* exception thrown */
+ }
+ result = (*env)->NewObjectArray(env, size, arrCls,
+ NULL);
+ if (result == NULL) {
+ fprintf(stderr, "ERROR: could not construct new array\n");
+ return NULL; /* out of memory error thrown */
+ }
+ for (i = 0; i < size; i++) {
+ jstring jelem = (*env)->NewStringUTF(env,elements[i]);
+ if (jelem == NULL) {
+ fprintf(stderr, "ERROR: jelem == NULL\n");
+ }
+ (*env)->SetObjectArrayElement(env, result, i, jelem);
+ }
+ return result;
+}
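For reference, a sketch of how hdfsConnectAsUser (above) uses this helper to build the Java String[] of group names (the group values are hypothetical):

    jthrowable jExc = NULL;
    const char *groups[] = { "users", "nobody" };
    jarray jGroups = constructNewArrayString(env, &jExc, groups, 2);
    if (jGroups == NULL) {
        /* FindClass or NewObjectArray failed; the caller reports EINTERNAL */
    }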
jobject constructNewObjectOfClass(JNIEnv *env, Exc *exc, const char *className,
const char *ctorSignature, ...)
@@ -262,11 +286,13 @@
{
jclass cls = globalClassReference(className, env);
if (cls == NULL) {
+ fprintf(stderr, "could not find class %s\n", className);
return NULL;
}
jmethodID mid = 0;
if (!validateMethodType(methType)) {
+ fprintf(stderr, "invalid method type\n");
return NULL;
}
@@ -276,6 +302,9 @@
else if (methType == INSTANCE) {
mid = (*env)->GetMethodID(env, cls, methName, methSignature);
}
+ if (mid == NULL) {
+ fprintf(stderr, "could not find method %s from class %s with signature
%s\n",methName, className, methSignature);
+ }
return mid;
}
@@ -389,7 +418,7 @@
if (hadoopJvmArgs != NULL) {
char *result = NULL;
result = strtok( hadoopJvmArgs, jvmArgDelims );
- while( result != NULL ) {
+ while ( result != NULL ) {
noArgs++;
result = strtok( NULL, jvmArgDelims);
}
@@ -401,7 +430,7 @@
char *result = NULL;
result = strtok( hadoopJvmArgs, jvmArgDelims );
int argNum = 1;
- for(;argNum < noArgs ; argNum++) {
+ for (;argNum < noArgs ; argNum++) {
options[argNum].optionString = result; //optHadoopArg;
}
}
Modified: hadoop/core/trunk/src/c++/libhdfs/hdfsJniHelper.h
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/c%2B%2B/libhdfs/hdfsJniHelper.h?rev=696741&r1=696740&r2=696741&view=diff
==============================================================================
--- hadoop/core/trunk/src/c++/libhdfs/hdfsJniHelper.h (original)
+++ hadoop/core/trunk/src/c++/libhdfs/hdfsJniHelper.h Thu Sep 18 10:44:45 2008
@@ -101,6 +101,8 @@
* */
JNIEnv* getJNIEnv(void);
+jarray constructNewArrayString(JNIEnv *env, Exc *exc, const char **elements, int size);
+
#endif /*LIBHDFS_JNI_HELPER_H*/
/**
Modified: hadoop/core/trunk/src/c++/libhdfs/hdfs_test.c
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/c%2B%2B/libhdfs/hdfs_test.c?rev=696741&r1=696740&r2=696741&view=diff
==============================================================================
--- hadoop/core/trunk/src/c++/libhdfs/hdfs_test.c (original)
+++ hadoop/core/trunk/src/c++/libhdfs/hdfs_test.c Thu Sep 18 10:44:45 2008
@@ -18,6 +18,38 @@
#include "hdfs.h"
+void permission_disp(short permissions, char *rtr) {
+ rtr[9] = '\0';
+ int i;
+ for(i=2;i>=0;i--)
+ {
+ short permissionsId = permissions >> (i * 3) & (short)7;
+ char* perm;
+ switch(permissionsId) {
+ case 7:
+ perm = "rwx"; break;
+ case 6:
+ perm = "rw-"; break;
+ case 5:
+ perm = "r-x"; break;
+ case 4:
+ perm = "r--"; break;
+ case 3:
+ perm = "-wx"; break;
+ case 2:
+ perm = "-w-"; break;
+ case 1:
+ perm = "--x"; break;
+ case 0:
+ perm = "---"; break;
+ default:
+ perm = "???";
+ }
+ strncpy(rtr, perm, 3);
+ rtr+=3;
+ }
+}
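For example, a mode of 0644 decodes as follows (the 10-byte buffer matches the '\0' written at rtr[9]):

    char perms[10];
    permission_disp((short)0644, perms);  /* perms now holds "rw-r--r--" */
    fprintf(stderr, "0644 -> %s\n", perms);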
+
int main(int argc, char **argv) {
hdfsFS fs = hdfsConnect("default", 0);
@@ -32,10 +64,10 @@
exit(-1);
}
+ const char* writePath = "/tmp/testfile.txt";
{
//Write tests
- const char* writePath = "/tmp/testfile.txt";
hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
if(!writeFile) {
@@ -51,7 +83,7 @@
tOffset currentPos = -1;
if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
fprintf(stderr,
- "Failed to get current file position correctly! Got %d!\n",
+ "Failed to get current file position correctly! Got
%ld!\n",
currentPos);
exit(-1);
}
@@ -94,7 +126,7 @@
tOffset currentPos = -1;
if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
fprintf(stderr,
- "Failed to get current file position correctly! Got
%d!\n",
+ "Failed to get current file position correctly! Got
%ld!\n",
currentPos);
exit(-1);
}
@@ -153,12 +185,12 @@
fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp =
hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
totalResult += (resp ? 0 : 1);
- fprintf(stderr, "hdfsGetDefaultBlockSize: %Ld\n",
hdfsGetDefaultBlockSize(fs));
- fprintf(stderr, "hdfsGetCapacity: %Ld\n", hdfsGetCapacity(fs));
- fprintf(stderr, "hdfsGetUsed: %Ld\n", hdfsGetUsed(fs));
+ fprintf(stderr, "hdfsGetDefaultBlockSize: %ld\n",
hdfsGetDefaultBlockSize(fs));
+ fprintf(stderr, "hdfsGetCapacity: %ld\n", hdfsGetCapacity(fs));
+ fprintf(stderr, "hdfsGetUsed: %ld\n", hdfsGetUsed(fs));
hdfsFileInfo *fileInfo = NULL;
- if(fileInfo = hdfsGetPathInfo(fs, slashTmp)) {
+ if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n");
fprintf(stderr, "Name: %s, ", fileInfo->mName);
fprintf(stderr, "Type: %c, ", (char)(fileInfo->mKind));
@@ -166,6 +198,11 @@
fprintf(stderr, "BlockSize: %ld, ", fileInfo->mBlockSize);
fprintf(stderr, "Size: %ld, ", fileInfo->mSize);
fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod));
+ fprintf(stderr, "Owner: %s, ", fileInfo->mOwner);
+ fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
+ char permissions[10];
+ permission_disp(fileInfo->mPermissions, permissions);
+ fprintf(stderr, "Permissions: %d (%s)\n", fileInfo->mPermissions,
permissions);
hdfsFreeFileInfo(fileInfo, 1);
} else {
totalResult++;
@@ -174,7 +211,7 @@
hdfsFileInfo *fileList = 0;
int numEntries = 0;
- if(fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) {
+ if((fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) != NULL) {
int i = 0;
for(i=0; i < numEntries; ++i) {
fprintf(stderr, "Name: %s, ", fileList[i].mName);
@@ -183,6 +220,11 @@
fprintf(stderr, "BlockSize: %ld, ", fileList[i].mBlockSize);
fprintf(stderr, "Size: %ld, ", fileList[i].mSize);
fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod));
+ fprintf(stderr, "Owner: %s, ", fileList[i].mOwner);
+ fprintf(stderr, "Group: %s, ", fileList[i].mGroup);
+ char permissions[10];
+ permission_disp(fileList[i].mPermissions, permissions);
+ fprintf(stderr, "Permissions: %d (%s)\n",
fileList[i].mPermissions, permissions);
}
hdfsFreeFileInfo(fileList, numEntries);
} else {
@@ -211,7 +253,57 @@
totalResult++;
fprintf(stderr, "waah! hdfsGetHosts - FAILED!\n");
}
-
+
+ char *newOwner = "root";
+ // /tmp is set to 777 later so that when we connectAsUser as "nobody" we can write to it
+ short newPerm = 0666;
+
+ // chown write
+ fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath,
NULL, "users")) ? "Failed!" : "Success!"));
+ totalResult += result;
+ fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath,
newOwner, NULL)) ? "Failed!" : "Success!"));
+ totalResult += result;
+ // chmod write
+ fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath,
newPerm)) ? "Failed!" : "Success!"));
+ totalResult += result;
+
+
+
+ sleep(2);
+ tTime newMtime = time(NULL);
+ tTime newAtime = time(NULL);
+
+ // utime write
+ fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath,
newMtime, newAtime)) ? "Failed!" : "Success!"));
+
+ totalResult += result;
+
+ // chown/chmod/utime read
+ hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
+
+ fprintf(stderr, "hdfsChown read: %s\n", ((result =
(strcmp(finfo->mOwner, newOwner) != 0)) ? "Failed!" : "Success!"));
+ totalResult += result;
+
+ fprintf(stderr, "hdfsChmod read: %s\n", ((result =
(finfo->mPermissions != newPerm)) ? "Failed!" : "Success!"));
+ totalResult += result;
+
+ // will later use /tmp/ as a different user so enable it
+ fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/",
0777)) ? "Failed!" : "Success!"));
+ totalResult += result;
+
+ fprintf(stderr,"newMTime=%ld\n",newMtime);
+ fprintf(stderr,"curMTime=%ld\n",finfo->mLastMod);
+
+
+ fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result =
(finfo->mLastMod != newMtime)) ? "Failed!" : "Success!"));
+ totalResult += result;
+
+ // No easy way to turn on access times from hdfs_test right now
+ // fprintf(stderr, "hdfsUtime read (atime): %s\n", ((result =
(finfo->mLastAccess != newAtime)) ? "Failed!" : "Success!"));
+ // totalResult += result;
+
+ hdfsFreeFileInfo(finfo, 1);
+
// Clean up
fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs,
newDirectory)) ? "Failed!" : "Success!"));
totalResult += result;
@@ -225,6 +317,54 @@
totalResult += (result ? 0 : 1);
}
+
+ totalResult += (hdfsDisconnect(fs) != 0);
+
+ {
+ //
+ // Now test connecting as a specific user.
+ // This is only meant to test that we connected as that user, not to test
+ // the actual fs user capabilities. Thus just create a file and check that
+ // the owner is correct.
+
+ const char *tuser = "nobody";
+ const char* writePath = "/tmp/usertestfile.txt";
+ const char **groups = (const char**)malloc(sizeof(char*)* 2);
+ groups[0] = "users";
+ groups[1] = "nobody";
+
+ fs = hdfsConnectAsUser("default", 0, tuser, groups, 2);
+ if(!fs) {
+ fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser);
+ exit(-1);
+ }
+
+ hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+ if(!writeFile) {
+ fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+ exit(-1);
+ }
+ fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+
+ char* buffer = "Hello, World!";
+ tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)+1);
+ fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
+
+ if (hdfsFlush(fs, writeFile)) {
+ fprintf(stderr, "Failed to 'flush' %s\n", writePath);
+ exit(-1);
+ }
+ fprintf(stderr, "Flushed %s successfully!\n", writePath);
+
+ hdfsCloseFile(fs, writeFile);
+
+ hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
+ fprintf(stderr, "hdfs new file user is correct: %s\n", ((result =
(strcmp(finfo->mOwner, tuser) != 0)) ? "Failed!" : "Success!"));
+ totalResult += result;
+ }
+
+ totalResult += (hdfsDisconnect(fs) != 0);
+
if (totalResult != 0) {
return -1;
} else {