Author: dhruba
Date: Fri Sep 5 13:44:19 2008
New Revision: 692542
URL: http://svn.apache.org/viewvc?rev=692542&view=rev
Log:
HADOOP-1869. Support access times for HDFS files. (dhruba)
Added:
hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestSetTimes.java
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/conf/hadoop-default.xml
hadoop/core/trunk/src/core/org/apache/hadoop/fs/FileStatus.java
hadoop/core/trunk/src/core/org/apache/hadoop/fs/FileSystem.java
hadoop/core/trunk/src/core/org/apache/hadoop/fs/HarFileSystem.java
hadoop/core/trunk/src/core/org/apache/hadoop/fs/ftp/FTPFileSystem.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DFSFileInfo.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INode.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
Modified: hadoop/core/trunk/CHANGES.txt
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Fri Sep 5 13:44:19 2008
@@ -130,6 +130,8 @@
HADOOP-3698. Add access control to control who is allowed to submit or
modify jobs in the JobTracker. (Hemanth Yamijala via omalley)
+ HADOOP-1869. Support access times for HDFS files. (dhruba)
+
IMPROVEMENTS
HADOOP-3908. Fuse-dfs: better error message if llibhdfs.so doesn't exist.
Modified: hadoop/core/trunk/conf/hadoop-default.xml
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/conf/hadoop-default.xml?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
--- hadoop/core/trunk/conf/hadoop-default.xml (original)
+++ hadoop/core/trunk/conf/hadoop-default.xml Fri Sep 5 13:44:19 2008
@@ -578,6 +578,15 @@
</property>
<property>
+ <name>dfs.access.time.precision</name>
+ <value>3600000</value>
+ <description>The access time for HDFS file is precise upto this value.
+ The default value is 1 hour. Setting a value of 0 disables
+ access times for HDFS.
+ </description>
+</property>
+
+<property>
<name>fs.s3.block.size</name>
<value>67108864</value>
<description>Block size to use when writing files to S3.</description>
Modified: hadoop/core/trunk/src/core/org/apache/hadoop/fs/FileStatus.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/core/org/apache/hadoop/fs/FileStatus.java?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
--- hadoop/core/trunk/src/core/org/apache/hadoop/fs/FileStatus.java (original)
+++ hadoop/core/trunk/src/core/org/apache/hadoop/fs/FileStatus.java Fri Sep 5
13:44:19 2008
@@ -35,22 +35,23 @@
private short block_replication;
private long blocksize;
private long modification_time;
+ private long access_time;
private FsPermission permission;
private String owner;
private String group;
- public FileStatus() { this(0, false, 0, 0, 0, null, null, null, null); }
+ public FileStatus() { this(0, false, 0, 0, 0, 0, null, null, null, null); }
//We should deprecate this soon?
public FileStatus(long length, boolean isdir, int block_replication,
long blocksize, long modification_time, Path path) {
this(length, isdir, block_replication, blocksize, modification_time,
- null, null, null, path);
+ 0, null, null, null, path);
}
public FileStatus(long length, boolean isdir, int block_replication,
- long blocksize, long modification_time,
+ long blocksize, long modification_time, long access_time,
FsPermission permission, String owner, String group,
Path path) {
this.length = length;
@@ -58,6 +59,7 @@
this.block_replication = (short)block_replication;
this.blocksize = blocksize;
this.modification_time = modification_time;
+ this.access_time = access_time;
this.permission = (permission == null) ?
FsPermission.getDefault() : permission;
this.owner = (owner == null) ? "" : owner;
@@ -105,6 +107,14 @@
}
/**
+ * Get the access time of the file.
+ * @return the access time of file in milliseconds since January 1, 1970 UTC.
+ */
+ public long getAccessTime() {
+ return access_time;
+ }
+
+ /**
* Get FsPermission associated with the file.
* @return permssion. If a filesystem does not have a notion of permissions
* or if permissions could not be determined, then default
@@ -166,7 +176,7 @@
protected void setGroup(String group) {
this.group = (group == null) ? "" : group;
}
-
+
//////////////////////////////////////////////////
// Writable
//////////////////////////////////////////////////
@@ -177,6 +187,7 @@
out.writeShort(block_replication);
out.writeLong(blocksize);
out.writeLong(modification_time);
+ out.writeLong(access_time);
permission.write(out);
Text.writeString(out, owner);
Text.writeString(out, group);
@@ -190,6 +201,7 @@
this.block_replication = in.readShort();
blocksize = in.readLong();
modification_time = in.readLong();
+ access_time = in.readLong();
permission.readFields(in);
owner = Text.readString(in);
group = Text.readString(in);
Modified: hadoop/core/trunk/src/core/org/apache/hadoop/fs/FileSystem.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/core/org/apache/hadoop/fs/FileSystem.java?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
--- hadoop/core/trunk/src/core/org/apache/hadoop/fs/FileSystem.java (original)
+++ hadoop/core/trunk/src/core/org/apache/hadoop/fs/FileSystem.java Fri Sep 5
13:44:19 2008
@@ -1329,6 +1329,20 @@
) throws IOException {
}
+ /**
+ * Set access time of a file
+ * @param p The path
+ * @param mtime Set the modification time of this file.
+ * The number of milliseconds since Jan 1, 1970.
+ * A value of -1 means that this call should not set modification time.
+ * @param atime Set the access time of this file.
+ * The number of milliseconds since Jan 1, 1970.
+ * A value of -1 means that this call should not set access time.
+ */
+ public void setTimes(Path p, long mtime, long atime
+ ) throws IOException {
+ }
+
private static FileSystem createFileSystem(URI uri, Configuration conf
) throws IOException {
Class<?> clazz = conf.getClass("fs." + uri.getScheme() + ".impl", null);
Modified: hadoop/core/trunk/src/core/org/apache/hadoop/fs/HarFileSystem.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/core/org/apache/hadoop/fs/HarFileSystem.java?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
--- hadoop/core/trunk/src/core/org/apache/hadoop/fs/HarFileSystem.java
(original)
+++ hadoop/core/trunk/src/core/org/apache/hadoop/fs/HarFileSystem.java Fri Sep
5 13:44:19 2008
@@ -535,7 +535,8 @@
hstatus = new HarStatus(readStr);
return new FileStatus(hstatus.isDir()?0:hstatus.getLength(),
hstatus.isDir(),
(int)archiveStatus.getReplication(), archiveStatus.getBlockSize(),
- archiveStatus.getModificationTime(), new FsPermission(
+ archiveStatus.getModificationTime(), archiveStatus.getAccessTime(),
+ new FsPermission(
archiveStatus.getPermission()), archiveStatus.getOwner(),
archiveStatus.getGroup(),
makeRelative(this.uri.toString(), new Path(hstatus.name)));
@@ -640,7 +641,7 @@
statuses.add(new FileStatus(hstatus.getLength(),
hstatus.isDir(),
archiveStatus.getReplication(), archiveStatus.getBlockSize(),
- archiveStatus.getModificationTime(),
+ archiveStatus.getModificationTime(), archiveStatus.getAccessTime(),
new FsPermission(archiveStatus.getPermission()),
archiveStatus.getOwner(), archiveStatus.getGroup(),
makeRelative(this.uri.toString(), new Path(hstatus.name))));
Modified: hadoop/core/trunk/src/core/org/apache/hadoop/fs/ftp/FTPFileSystem.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/core/org/apache/hadoop/fs/ftp/FTPFileSystem.java?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
--- hadoop/core/trunk/src/core/org/apache/hadoop/fs/ftp/FTPFileSystem.java
(original)
+++ hadoop/core/trunk/src/core/org/apache/hadoop/fs/ftp/FTPFileSystem.java Fri
Sep 5 13:44:19 2008
@@ -438,12 +438,13 @@
// block sizes on server. The assumption could be less than ideal.
long blockSize = DEFAULT_BLOCK_SIZE;
long modTime = ftpFile.getTimestamp().getTimeInMillis();
+ long accessTime = 0;
FsPermission permission = getPermissions(ftpFile);
String user = ftpFile.getUser();
String group = ftpFile.getGroup();
Path filePath = new Path(parentPath, ftpFile.getName());
return new FileStatus(length, isDir, blockReplication, blockSize, modTime,
- permission, user, group, filePath.makeQualified(this));
+ accessTime, permission, user, group, filePath.makeQualified(this));
}
@Override
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java Fri Sep 5
13:44:19 2008
@@ -754,6 +754,21 @@
QuotaExceededException.class);
}
}
+
+ /**
+ * set the modification and access time of a file
+ * @throws FileNotFoundException if the path is not a file
+ */
+ public void setTimes(String src, long mtime, long atime) throws IOException {
+ checkOpen();
+ try {
+ namenode.setTimes(src, mtime, atime);
+ } catch(RemoteException re) {
+ throw re.unwrapRemoteException(AccessControlException.class,
+ FileNotFoundException.class);
+ }
+ }
+
/**
* Pick the best node from which to stream the data.
* Entries in <i>nodes</i> are already in the priority order
Modified:
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
---
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
(original)
+++
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
Fri Sep 5 13:44:19 2008
@@ -228,6 +228,7 @@
private FileStatus makeQualified(FileStatus f) {
return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
f.getBlockSize(), f.getModificationTime(),
+ f.getAccessTime(),
f.getPermission(), f.getOwner(), f.getGroup(),
f.getPath().makeQualified(this)); // fully-qualify path
}
@@ -419,4 +420,10 @@
}
dfs.setOwner(getPathName(p), username, groupname);
}
+
+ /** {@inheritDoc} */
+ public void setTimes(Path p, long mtime, long atime
+ ) throws IOException {
+ dfs.setTimes(getPathName(p), mtime, atime);
+ }
}
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java
(original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/HftpFileSystem.java Fri
Sep 5 13:44:19 2008
@@ -159,20 +159,22 @@
throw new SAXException("Unrecognized entry: " + qname);
}
long modif;
+ long atime = 0;
try {
modif = df.parse(attrs.getValue("modified")).getTime();
+ atime = df.parse(attrs.getValue("accesstime")).getTime();
} catch (ParseException e) { throw new SAXException(e); }
FileStatus fs = "file".equals(qname)
? new FileStatus(
Long.valueOf(attrs.getValue("size")).longValue(), false,
Short.valueOf(attrs.getValue("replication")).shortValue(),
Long.valueOf(attrs.getValue("blocksize")).longValue(),
- modif, FsPermission.valueOf(attrs.getValue("permission")),
+ modif, atime, FsPermission.valueOf(attrs.getValue("permission")),
attrs.getValue("owner"), attrs.getValue("group"),
new Path(getUri().toString(), attrs.getValue("path"))
.makeQualified(HftpFileSystem.this))
: new FileStatus(0L, true, 0, 0L,
- modif, FsPermission.valueOf(attrs.getValue("permission")),
+ modif, atime, FsPermission.valueOf(attrs.getValue("permission")),
attrs.getValue("owner"), attrs.getValue("group"),
new Path(getUri().toString(), attrs.getValue("path"))
.makeQualified(HftpFileSystem.this));
Modified:
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
---
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
(original)
+++
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
Fri Sep 5 13:44:19 2008
@@ -39,9 +39,9 @@
* Compared to the previous version the following changes have been
introduced:
* (Only the latest change is reflected.
* The log of historical changes can be retrieved from the svn).
- * 36 : Added append(...).
+ * 37 : Added setTimes
*/
- public static final long versionID = 36L;
+ public static final long versionID = 37L;
///////////////////////////////////////
// File contents
@@ -456,4 +456,16 @@
* @param client The string representation of the client
*/
public void fsync(String src, String client) throws IOException;
+
+ /**
+ * Sets the modification and access time of the file to the specified time.
+ * @param src The string representation of the path
+ * @param mtime The number of milliseconds since Jan 1, 1970.
+ * Setting mtime to -1 means that modification time should not be set
+ * by this call.
+ * @param atime The number of milliseconds since Jan 1, 1970.
+ * Setting atime to -1 means that access time should not be set
+ * by this call.
+ */
+ public void setTimes(String src, long mtime, long atime) throws IOException;
}
Modified:
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DFSFileInfo.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DFSFileInfo.java?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DFSFileInfo.java
(original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DFSFileInfo.java
Fri Sep 5 13:44:19 2008
@@ -55,6 +55,7 @@
node.isDirectory() ? 0 : ((INodeFile)node).getReplication(),
node.isDirectory() ? 0 : ((INodeFile)node).getPreferredBlockSize(),
node.getModificationTime(),
+ node.getAccessTime(),
node.getFsPermission(),
node.getUserName(),
node.getGroupName(),
Modified:
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java
(original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java
Fri Sep 5 13:44:19 2008
@@ -189,7 +189,7 @@
// Version is reflected in the data storage file.
// Versions are negative.
// Decrement LAYOUT_VERSION to define a new version.
- public static final int LAYOUT_VERSION = -16;
+ public static final int LAYOUT_VERSION = -17;
// Current version:
- // Change edit log and fsimage to support quotas
+ // Support Access time on files
}
Modified:
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
---
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
(original)
+++
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
Fri Sep 5 13:44:19 2008
@@ -177,13 +177,14 @@
Block[] blocks,
short replication,
long modificationTime,
+ long atime,
long preferredBlockSize) {
INode newNode;
if (blocks == null)
newNode = new INodeDirectory(permissions, modificationTime);
else
newNode = new INodeFile(permissions, blocks.length, replication,
- modificationTime, preferredBlockSize);
+ modificationTime, atime, preferredBlockSize);
synchronized (rootDir) {
try {
newNode = addNode(path, newNode, false);
@@ -208,6 +209,7 @@
Block[] blocks,
short replication,
long modificationTime,
+ long atime,
long quota,
long preferredBlockSize) {
// create new inode
@@ -221,7 +223,7 @@
}
} else
newNode = new INodeFile(permissions, blocks.length, replication,
- modificationTime, preferredBlockSize);
+ modificationTime, atime, preferredBlockSize);
// add new node to the parent
INodeDirectory newParent = null;
synchronized (rootDir) {
@@ -1066,4 +1068,42 @@
return rootDir.numItemsInTree();
}
}
+
+ /**
+ * Sets the access time on the file. Logs it in the transaction log
+ */
+ void setTimes(String src, INodeFile inode, long mtime, long atime, boolean force)
+ throws IOException {
+ if (unprotectedSetTimes(src, inode, mtime, atime, force)) {
+ fsImage.getEditLog().logTimes(src, mtime, atime);
+ }
+ }
+
+ boolean unprotectedSetTimes(String src, long mtime, long atime, boolean force)
+ throws IOException {
+ INodeFile inode = getFileINode(src);
+ return unprotectedSetTimes(src, inode, mtime, atime, force);
+ }
+
+ private boolean unprotectedSetTimes(String src, INodeFile inode, long mtime,
+ long atime, boolean force) throws IOException {
+ boolean status = false;
+ if (mtime != -1) {
+ inode.setModificationTimeForce(mtime);
+ status = true;
+ }
+ if (atime != -1) {
+ long inodeTime = inode.getAccessTime();
+
+ // if the last access time update was within the last precision interval, then
+ // no need to store access time
+ if (atime <= inodeTime + namesystem.getAccessTimePrecision() && !force) {
+ status = false;
+ } else {
+ inode.setAccessTime(atime);
+ status = true;
+ }
+ }
+ return status;
+ }
}
Modified:
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
---
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
(original)
+++
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
Fri Sep 5 13:44:19 2008
@@ -64,6 +64,7 @@
private static final byte OP_SET_GENSTAMP = 10; // store genstamp
private static final byte OP_SET_QUOTA = 11; // set a directory's quota
private static final byte OP_CLEAR_QUOTA = 12; // clear a directory's quota
+ private static final byte OP_TIMES = 13; // sets mod & access time on a file
private static int sizeFlushBuffer = 512*1024;
private ArrayList<EditLogOutputStream> editStreams = null;
@@ -486,7 +487,7 @@
int numOpAdd = 0, numOpClose = 0, numOpDelete = 0,
numOpRename = 0, numOpSetRepl = 0, numOpMkDir = 0,
numOpSetPerm = 0, numOpSetOwner = 0, numOpSetGenStamp = 0,
- numOpOther = 0;
+ numOpTimes = 0, numOpOther = 0;
long startTime = FSNamesystem.now();
DataInputStream in = new DataInputStream(new BufferedInputStream(edits));
@@ -516,6 +517,7 @@
while (true) {
long timestamp = 0;
long mtime = 0;
+ long atime = 0;
long blockSize = 0;
byte opcode = -1;
try {
@@ -536,7 +538,8 @@
// get name and replication
int length = in.readInt();
if (-7 == logVersion && length != 3||
- logVersion < -7 && length != 4) {
+ -17 < logVersion && logVersion < -7 && length != 4 ||
+ logVersion <= -17 && length != 5) {
throw new IOException("Incorrect data format." +
" logVersion is " + logVersion +
" but writables.length is " +
@@ -545,6 +548,9 @@
path = FSImage.readString(in);
short replication = adjustReplication(readShort(in));
mtime = readLong(in);
+ if (logVersion <= -17) {
+ atime = readLong(in);
+ }
if (logVersion < -7) {
blockSize = readLong(in);
}
@@ -608,7 +614,7 @@
INodeFile node = (INodeFile)fsDir.unprotectedAddFile(
path, permissions,
blocks, replication,
- mtime, blockSize);
+ mtime, atime, blockSize);
if (opcode == OP_ADD) {
numOpAdd++;
//
@@ -681,13 +687,21 @@
numOpMkDir++;
PermissionStatus permissions = fsNamesys.getUpgradePermission();
int length = in.readInt();
- if (length != 2) {
+ if (-17 < logVersion && length != 2 ||
+ logVersion <= -17 && length != 3) {
throw new IOException("Incorrect data format. "
+ "Mkdir operation.");
}
path = FSImage.readString(in);
timestamp = readLong(in);
+ // The disk format stores atimes for directories as well.
+ // However, currently this is not being updated/used because of
+ // performance reasons.
+ if (logVersion <= -17) {
+ atime = readLong(in);
+ }
+
if (logVersion <= -11) {
permissions = PermissionStatus.read(in);
}
@@ -749,6 +763,19 @@
fsDir.unprotectedClearQuota(FSImage.readString(in));
break;
}
+ case OP_TIMES: {
+ numOpTimes++;
+ int length = in.readInt();
+ if (length != 3) {
+ throw new IOException("Incorrect data format. "
+ + "times operation.");
+ }
+ path = FSImage.readString(in);
+ mtime = readLong(in);
+ atime = readLong(in);
+ fsDir.unprotectedSetTimes(path, mtime, atime, true);
+ break;
+ }
default: {
throw new IOException("Never seen opcode " + opcode);
}
@@ -768,6 +795,7 @@
+ " numOpSetPerm = " + numOpSetPerm
+ " numOpSetOwner = " + numOpSetOwner
+ " numOpSetGenStamp = " + numOpSetGenStamp
+ + " numOpTimes = " + numOpTimes
+ " numOpOther = " + numOpOther);
}
@@ -942,6 +970,7 @@
new UTF8(path),
FSEditLog.toLogReplication(newNode.getReplication()),
FSEditLog.toLogLong(newNode.getModificationTime()),
+ FSEditLog.toLogLong(newNode.getModificationTime()),
FSEditLog.toLogLong(newNode.getPreferredBlockSize())};
logEdit(OP_ADD,
new ArrayWritable(UTF8.class, nameReplicationPair),
@@ -959,6 +988,7 @@
new UTF8(path),
FSEditLog.toLogReplication(newNode.getReplication()),
FSEditLog.toLogLong(newNode.getModificationTime()),
+ FSEditLog.toLogLong(newNode.getAccessTime()),
FSEditLog.toLogLong(newNode.getPreferredBlockSize())};
logEdit(OP_CLOSE,
new ArrayWritable(UTF8.class, nameReplicationPair),
@@ -972,7 +1002,8 @@
public void logMkDir(String path, INode newNode) {
UTF8 info[] = new UTF8[] {
new UTF8(path),
- FSEditLog.toLogLong(newNode.getModificationTime())
+ FSEditLog.toLogLong(newNode.getModificationTime()),
+ FSEditLog.toLogLong(newNode.getAccessTime())
};
logEdit(OP_MKDIR, new ArrayWritable(UTF8.class, info),
newNode.getPermissionStatus());
@@ -1044,6 +1075,17 @@
void logGenerationStamp(long genstamp) {
logEdit(OP_SET_GENSTAMP, new LongWritable(genstamp));
}
+
+ /**
+ * Add access time record to edit log
+ */
+ void logTimes(String src, long mtime, long atime) {
+ UTF8 info[] = new UTF8[] {
+ new UTF8(src),
+ FSEditLog.toLogLong(mtime),
+ FSEditLog.toLogLong(atime)};
+ logEdit(OP_TIMES, new ArrayWritable(UTF8.class, info));
+ }
static private UTF8 toLogReplication(short replication) {
return new UTF8(Short.toString(replication));
Modified:
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
---
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java
(original)
+++
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java
Fri Sep 5 13:44:19 2008
@@ -849,11 +849,15 @@
INodeDirectory parentINode = fsDir.rootDir;
for (long i = 0; i < numFiles; i++) {
long modificationTime = 0;
+ long atime = 0;
long blockSize = 0;
path = readString(in);
replication = in.readShort();
replication = FSEditLog.adjustReplication(replication);
modificationTime = in.readLong();
+ if (imgVersion <= -17) {
+ atime = in.readLong();
+ }
if (imgVersion <= -8) {
blockSize = in.readLong();
}
@@ -914,7 +918,7 @@
}
// add new inode
parentINode = fsDir.addToParent(path, parentINode, permissions,
- blocks, replication, modificationTime, quota, blockSize);
+ blocks, replication, modificationTime, atime, quota, blockSize);
}
// load datanode info
@@ -1089,6 +1093,7 @@
INodeFile fileINode = (INodeFile)node;
out.writeShort(fileINode.getReplication());
out.writeLong(fileINode.getModificationTime());
+ out.writeLong(fileINode.getAccessTime());
out.writeLong(fileINode.getPreferredBlockSize());
Block[] blocks = fileINode.getBlocks();
out.writeInt(blocks.length);
@@ -1101,6 +1106,7 @@
} else { // write directory inode
out.writeShort(0); // replication
out.writeLong(node.getModificationTime());
+ out.writeLong(0); // access time
out.writeLong(0); // preferred block size
out.writeInt(-1); // # of blocks
out.writeLong(node.getQuota());
Modified:
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
---
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
(original)
+++
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
Fri Sep 5 13:44:19 2008
@@ -276,6 +276,9 @@
// Ask Datanode only up to this many blocks to delete.
private int blockInvalidateLimit = FSConstants.BLOCK_INVALIDATE_CHUNK;
+ // precision of access times.
+ private long accessTimePrecision = 0;
+
/**
* FSNamesystem constructor.
*/
@@ -292,7 +295,7 @@
/**
* Initialize FSNamesystem.
*/
- private void initialize(NameNode nn, Configuration conf) throws IOException {
+ private synchronized void initialize(NameNode nn, Configuration conf) throws IOException {
this.systemStart = now();
this.startTime = new Date(systemStart);
setConfigurationParameters(conf);
@@ -411,7 +414,7 @@
/**
* Initializes some of the members from configuration
*/
- private void setConfigurationParameters(Configuration conf)
+ private synchronized void setConfigurationParameters(Configuration conf)
throws IOException {
fsNamesystemObject = this;
try {
@@ -466,6 +469,7 @@
this.maxFsObjects = conf.getLong("dfs.max.objects", 0);
this.blockInvalidateLimit = Math.max(this.blockInvalidateLimit,
20*(int)(heartbeatInterval/1000));
+ this.accessTimePrecision = conf.getLong("dfs.access.time.precision", 0);
}
/**
@@ -570,6 +574,14 @@
long getDefaultBlockSize() {
return defaultBlockSize;
}
+
+ long getAccessTimePrecision() {
+ return accessTimePrecision;
+ }
+
+ private boolean isAccessTimeSupported() {
+ return accessTimePrecision > 0;
+ }
/* get replication factor of a block */
private int getReplication(Block block) {
@@ -728,7 +740,7 @@
checkPathAccess(src, FsAction.READ);
}
- LocatedBlocks blocks = getBlockLocations(src, offset, length);
+ LocatedBlocks blocks = getBlockLocations(src, offset, length, true);
if (blocks != null) {
//sort the blocks
DatanodeDescriptor client = host2DataNodeMap.getDatanodeByHost(
@@ -746,14 +758,23 @@
*/
public LocatedBlocks getBlockLocations(String src, long offset, long length
) throws IOException {
+ return getBlockLocations(src, offset, length, false);
+ }
+
+ /**
+ * Get block locations within the specified range.
+ * @see ClientProtocol#getBlockLocations(String, long, long)
+ */
+ public LocatedBlocks getBlockLocations(String src, long offset, long length,
+ boolean doAccessTime) throws IOException {
if (offset < 0) {
throw new IOException("Negative offset is not supported. File: " + src );
}
if (length < 0) {
throw new IOException("Negative length is not supported. File: " + src );
}
- final LocatedBlocks ret = getBlockLocationsInternal(dir.getFileINode(src),
- offset, length, Integer.MAX_VALUE);
+ final LocatedBlocks ret = getBlockLocationsInternal(src, dir.getFileINode(src),
+ offset, length, Integer.MAX_VALUE, doAccessTime);
if (auditLog.isInfoEnabled()) {
logAuditEvent(UserGroupInformation.getCurrentUGI(),
Server.getRemoteIp(),
@@ -762,13 +783,19 @@
return ret;
}
- private synchronized LocatedBlocks getBlockLocationsInternal(INodeFile inode,
+ private synchronized LocatedBlocks getBlockLocationsInternal(String src,
+ INodeFile inode,
long offset,
long length,
- int nrBlocksToReturn) {
+ int nrBlocksToReturn,
+ boolean doAccessTime)
+ throws IOException {
if(inode == null) {
return null;
}
+ if (doAccessTime && isAccessTimeSupported()) {
+ dir.setTimes(src, inode, -1, now(), false);
+ }
Block[] blocks = inode.getBlocks();
if (blocks == null) {
return null;
@@ -829,6 +856,38 @@
}
/**
+ * stores the modification and access time for this inode.
+ * The access time is precise upto an hour. The transaction, if needed, is
+ * written to the edits log but is not flushed.
+ */
+ public synchronized void setTimes(String src, long mtime, long atime) throws IOException {
+ if (!isAccessTimeSupported() && atime != -1) {
+ throw new IOException("Access time for hdfs is not configured. " +
+ " Please set dfs.support.accessTime configuration parameter.");
+ }
+ //
+ // The caller needs to have read-access to set access times
+ // and write access to set modification times.
+ if (isPermissionEnabled) {
+ if (mtime == -1) {
+ checkPathAccess(src, FsAction.READ);
+ } else {
+ checkPathAccess(src, FsAction.WRITE);
+ }
+ }
+ INodeFile inode = dir.getFileINode(src);
+ if (inode != null) {
+ dir.setTimes(src, inode, mtime, atime, true);
+ if (auditLog.isInfoEnabled()) {
+ final FileStatus stat = dir.getFileInfo(src);
+ logAuditEvent(UserGroupInformation.getCurrentUGI(),
+ Server.getRemoteIp(),
+ "setTimes", src, null, stat);
+ }
+ }
+ }
+
+ /**
* Set replication for an existing file.
*
* The NameNode sets new replication and schedules either replication of
Modified:
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INode.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
---
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INode.java
(original)
+++
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INode.java
Fri Sep 5 13:44:19 2008
@@ -33,6 +33,7 @@
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
/**
* We keep an in-memory representation of the file/block hierarchy.
@@ -43,6 +44,7 @@
protected byte[] name;
protected INodeDirectory parent;
protected long modificationTime;
+ protected int accessTime; // precise to the last hour
//Only updated by updatePermissionStatus(...).
//Other codes should not modify it.
@@ -76,17 +78,19 @@
name = null;
parent = null;
modificationTime = 0;
+ accessTime = 0;
}
- INode(PermissionStatus permissions, long mTime) {
+ INode(PermissionStatus permissions, long mTime, long atime) {
this.name = null;
this.parent = null;
this.modificationTime = mTime;
+ setAccessTime(atime);
setPermissionStatus(permissions);
}
protected INode(String name, PermissionStatus permissions) {
- this(permissions, 0L);
+ this(permissions, 0L, 0L);
setLocalName(name);
}
@@ -99,6 +103,7 @@
this.parent = other.getParent();
setPermissionStatus(other.getPermissionStatus());
setModificationTime(other.getModificationTime());
+ setAccessTime(other.getAccessTime());
}
/**
@@ -237,7 +242,7 @@
return this.parent;
}
- /**
+ /**
* Get last modification time of inode.
* @return modification time
*/
@@ -256,6 +261,34 @@
}
/**
+ * Always set the last modification time of inode.
+ */
+ void setModificationTimeForce(long modtime) {
+ assert !isDirectory();
+ this.modificationTime = modtime;
+ }
+
+ /**
+ * Get access time of inode.
+ * @return access time
+ */
+ public long getAccessTime() {
+ return this.accessTime *
FSNamesystem.getFSNamesystem().getAccessTimePrecision();
+ }
+
+ /**
+ * Set last access time of inode.
+ */
+ void setAccessTime(long atime) {
+ long precision = FSNamesystem.getFSNamesystem().getAccessTimePrecision();
+ if (precision == 0) {
+ this.accessTime = 0;
+ } else {
+ this.accessTime = (int)(atime/precision);
+ }
+ }
+
+ /**
* Is this inode being constructed?
*/
boolean isUnderConstruction() {
Modified:
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
---
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
(original)
+++
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
Fri Sep 5 13:44:19 2008
@@ -42,7 +42,7 @@
}
public INodeDirectory(PermissionStatus permissions, long mTime) {
  // Directories carry no access time; pass 0 to the base inode.
  super(permissions, mTime, 0);
  children = null;
}
Modified:
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
---
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
(original)
+++
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
Fri Sep 5 13:44:19 2008
@@ -35,9 +35,9 @@
/**
 * Create a file inode with an empty block array of the given size,
 * delegating to the block-list constructor.
 */
INodeFile(PermissionStatus permissions,
          int nrBlocks, short replication, long modificationTime,
          long atime, long preferredBlockSize) {
  this(permissions, new BlockInfo[nrBlocks], replication,
       modificationTime, atime, preferredBlockSize);
}
protected INodeFile() {
@@ -48,8 +48,8 @@
protected INodeFile(PermissionStatus permissions, BlockInfo[] blklist,
short replication, long modificationTime,
- long preferredBlockSize) {
- super(permissions, modificationTime);
+ long atime, long preferredBlockSize) {
+ super(permissions, modificationTime, atime);
this.blockReplication = replication;
this.preferredBlockSize = preferredBlockSize;
blocks = blklist;
Modified:
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
---
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
(original)
+++
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
Fri Sep 5 13:44:19 2008
@@ -42,7 +42,7 @@
String clientMachine,
DatanodeDescriptor clientNode)
throws IOException {
- super(permissions.applyUMask(UMASK), 0, replication, modTime,
+ super(permissions.applyUMask(UMASK), 0, replication, modTime, modTime,
preferredBlockSize);
this.clientName = new StringBytesWritable(clientName);
this.clientMachine = new StringBytesWritable(clientMachine);
@@ -59,7 +59,7 @@
String clientMachine,
DatanodeDescriptor clientNode)
throws IOException {
- super(perm, blocks, blockReplication, modificationTime,
+ super(perm, blocks, blockReplication, modificationTime, modificationTime,
preferredBlockSize);
setLocalName(name);
this.clientName = new StringBytesWritable(clientName);
@@ -98,12 +98,14 @@
//
// converts a INodeFileUnderConstruction into a INodeFile
+ // use the modification time as the access time
//
INodeFile convertToInodeFile() {
INodeFile obj = new INodeFile(getPermissionStatus(),
getBlocks(),
getReplication(),
getModificationTime(),
+ getModificationTime(),
getPreferredBlockSize());
return obj;
Modified:
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
---
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java
(original)
+++
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java
Fri Sep 5 13:44:19 2008
@@ -62,6 +62,7 @@
doc.startTag(i.isDir() ? "directory" : "file");
doc.attribute("path", i.getPath().toUri().getPath());
doc.attribute("modified", df.format(new Date(i.getModificationTime())));
+ doc.attribute("accesstime", df.format(new Date(i.getAccessTime())));
if (!i.isDir()) {
doc.attribute("size", String.valueOf(i.getLen()));
doc.attribute("replication", String.valueOf(i.getReplication()));
@@ -114,7 +115,8 @@
* <listing path="..." recursive="(yes|no)" filter="..."
* time="yyyy-MM-dd hh:mm:ss UTC" version="...">
* <directory path="..." modified="yyyy-MM-dd hh:mm:ss"/>
- * <file path="..." modified="yyyy-MM-dd'T'hh:mm:ssZ" blocksize="..."
+ * <file path="..." modified="yyyy-MM-dd'T'hh:mm:ssZ"
accesstime="yyyy-MM-dd'T'hh:mm:ssZ"
+ * blocksize="..."
* replication="..." size="..."/>
* </listing>
* }
Modified:
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=692542&r1=692541&r2=692542&view=diff
==============================================================================
---
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
(original)
+++
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
Fri Sep 5 13:44:19 2008
@@ -593,6 +593,11 @@
namesystem.fsync(src, clientName);
}
+ /** @inheritDoc */
+ public void setTimes(String src, long mtime, long atime) throws IOException {
+ namesystem.setTimes(src, mtime, atime);
+ }
+
////////////////////////////////////////////////////////////////
// DatanodeProtocol
////////////////////////////////////////////////////////////////
Added: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestSetTimes.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestSetTimes.java?rev=692542&view=auto
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestSetTimes.java (added)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestSetTimes.java Fri Sep
5 13:44:19 2008
@@ -0,0 +1,189 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import junit.framework.TestCase;
+import java.io.*;
+import java.util.Random;
+import java.net.*;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileStatus;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
+/**
+ * This class tests the access time on files.
+ *
+ */
/**
 * This class tests the access time on files.
 *
 */
public class TestSetTimes extends TestCase {
  // Fixed seed so the generated file contents are reproducible.
  static final long seed = 0xDEADBEEFL;
  static final int blockSize = 8192;   // HDFS block size used for test files
  static final int fileSize = 16384;   // bytes written per file (two blocks)
  static final int numDatanodes = 1;   // size of the mini cluster

  // For human-readable time values in the test's stdout logging only.
  static final SimpleDateFormat dateForm = new SimpleDateFormat("yyyy-MM-dd HH:mm");

  Random myrand = new Random();  // not referenced by the test body
  Path hostsFile;                // not referenced by the test body
  Path excludeFile;              // not referenced by the test body

  /**
   * Creates {@code name} with the given replication, writes fileSize random
   * bytes, and returns the still-open stream (caller must close it).
   */
  private FSDataOutputStream writeFile(FileSystem fileSys, Path name, int repl)
    throws IOException {
    FSDataOutputStream stm = fileSys.create(name, true,
                                 fileSys.getConf().getInt("io.file.buffer.size", 4096),
                                 (short)repl, (long)blockSize);
    byte[] buffer = new byte[fileSize];
    Random rand = new Random(seed);
    rand.nextBytes(buffer);
    stm.write(buffer);
    return stm;
  }

  /** Deletes {@code name}, asserting it existed before and is gone after. */
  private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
    assertTrue(fileSys.exists(name));
    fileSys.delete(name, true);
    assertTrue(!fileSys.exists(name));
  }

  /** Dumps a report for each datanode to stdout (used on failure). */
  private void printDatanodeReport(DatanodeInfo[] info) {
    System.out.println("-------------------------------------------------");
    for (int i = 0; i < info.length; i++) {
      System.out.println(info[i].getDatanodeReport());
      System.out.println();
    }
  }

  /**
   * Tests mod & access time in DFS.
   */
  public void testTimes() throws IOException {
    Configuration conf = new Configuration();
    final int MAX_IDLE_TIME = 2000; // 2s
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt("heartbeat.recheck.interval", 1000);
    conf.setInt("dfs.heartbeat.interval", 1);


    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
    cluster.waitActive();
    final int nnport = cluster.getNameNodePort();
    InetSocketAddress addr = new InetSocketAddress("localhost",
                                                   cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    assertEquals("Number of Datanodes ", numDatanodes, info.length);
    FileSystem fileSys = cluster.getFileSystem();
    int replicas = 1;
    assertTrue(fileSys instanceof DistributedFileSystem);

    try {
      //
      // create file and record atime/mtime
      //
      System.out.println("Creating testdir1 and testdir1/test1.dat.");
      Path dir1 = new Path("testdir1");
      Path file1 = new Path(dir1, "test1.dat");
      FSDataOutputStream stm = writeFile(fileSys, file1, replicas);
      FileStatus stat = fileSys.getFileStatus(file1);
      long atimeBeforeClose = stat.getAccessTime();
      String adate = dateForm.format(new Date(atimeBeforeClose));
      System.out.println("atime on " + file1 + " before close is " +
                         adate + " (" + atimeBeforeClose + ")");
      // a nonzero access time must exist even while the file is still open
      assertTrue(atimeBeforeClose != 0);
      stm.close();

      stat = fileSys.getFileStatus(file1);
      long atime1 = stat.getAccessTime();
      long mtime1 = stat.getModificationTime();
      adate = dateForm.format(new Date(atime1));
      String mdate = dateForm.format(new Date(mtime1));
      System.out.println("atime on " + file1 + " is " + adate +
                         " (" + atime1 + ")");
      System.out.println("mtime on " + file1 + " is " + mdate +
                         " (" + mtime1 + ")");
      assertTrue(atime1 != 0);

      //
      // record dir times
      //
      stat = fileSys.getFileStatus(dir1);
      long mdir1 = stat.getAccessTime();
      // directories are expected to report a zero access time
      // NOTE(review): presumes directory atime is never tracked -- confirm
      assertTrue(mdir1 == 0);

      // set the access time to be one day in the past
      long atime2 = atime1 - (24L * 3600L * 1000L);
      fileSys.setTimes(file1, -1, atime2);

      // check new access time on file
      stat = fileSys.getFileStatus(file1);
      long atime3 = stat.getAccessTime();
      String adate3 = dateForm.format(new Date(atime3));
      System.out.println("new atime on " + file1 + " is " +
                         adate3 + " (" + atime3 + ")");
      assertTrue(atime2 == atime3);
      // mtime must be untouched when only atime is set (mtime == -1)
      assertTrue(mtime1 == stat.getModificationTime());

      // set the modification time to be 1 hour in the past
      long mtime2 = mtime1 - (3600L * 1000L);
      fileSys.setTimes(file1, mtime2, -1);

      // check new modification time on file
      stat = fileSys.getFileStatus(file1);
      long mtime3 = stat.getModificationTime();
      String mdate3 = dateForm.format(new Date(mtime3));
      System.out.println("new mtime on " + file1 + " is " +
                         mdate3 + " (" + mtime3 + ")");
      // atime must be untouched when only mtime is set (atime == -1)
      assertTrue(atime2 == stat.getAccessTime());
      assertTrue(mtime2 == mtime3);

      // shutdown cluster and restart
      cluster.shutdown();
      try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
      cluster = new MiniDFSCluster(nnport, conf, 1, false, true,
                                   null, null, null);
      cluster.waitActive();
      fileSys = cluster.getFileSystem();

      // verify that access times and modification times persist after a
      // cluster restart.
      System.out.println("Verifying times after cluster restart");
      stat = fileSys.getFileStatus(file1);
      assertTrue(atime2 == stat.getAccessTime());
      assertTrue(mtime3 == stat.getModificationTime());

      cleanupFile(fileSys, file1);
      cleanupFile(fileSys, dir1);
    } catch (IOException e) {
      // dump datanode state to aid debugging, then propagate the failure
      info = client.datanodeReport(DatanodeReportType.ALL);
      printDatanodeReport(info);
      throw e;
    } finally {
      fileSys.close();
      cluster.shutdown();
    }
  }

  /** Allows running this test directly from the command line. */
  public static void main(String[] args) throws Exception {
    new TestSetTimes().testTimes();
  }
}