Author: szetszwo
Date: Tue Sep 16 13:17:48 2008
New Revision: 696039
URL: http://svn.apache.org/viewvc?rev=696039&view=rev
Log:
HADOOP-4174. Move fs image/edit log methods from ClientProtocol to
NamenodeProtocol. (shv via szetszwo)
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/BlocksWithLocations.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestGetBlocks.java
Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=696039&r1=696038&r2=696039&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Tue Sep 16 13:17:48 2008
@@ -320,6 +320,9 @@
HADOOP-4151. Add a byte-comparable interface that both Text and
BytesWritable implement. (cdouglas via omalley)
+ HADOOP-4174. Move fs image/edit log methods from ClientProtocol to
+ NamenodeProtocol. (shv via szetszwo)
+
OPTIMIZATIONS
HADOOP-3556. Removed lock contention in MD5Hash by changing the
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/BlocksWithLocations.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/BlocksWithLocations.java?rev=696039&r1=696038&r2=696039&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/BlocksWithLocations.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/BlocksWithLocations.java Tue Sep 16 13:17:48 2008
@@ -1,116 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
-
-/** A class to implement an array of BlockLocations
- * It provide efficient customized serialization/deserialization methods
- * in stead of using the default array (de)serialization provided by RPC
- */
-public class BlocksWithLocations implements Writable {
-
- /**
- * A class to keep track of a block and its locations
- */
- public static class BlockWithLocations implements Writable {
- Block block;
- String datanodeIDs[];
-
- /** default constructor */
- public BlockWithLocations() {
- block = new Block();
- datanodeIDs = null;
- }
-
- /** constructor */
- public BlockWithLocations(Block b, String[] datanodes) {
- block = b;
- datanodeIDs = datanodes;
- }
-
- /** get the block */
- public Block getBlock() {
- return block;
- }
-
- /** get the block's locations */
- public String[] getDatanodes() {
- return datanodeIDs;
- }
-
- /** deserialization method */
- public void readFields(DataInput in) throws IOException {
- block.readFields(in);
- int len = WritableUtils.readVInt(in); // variable length integer
- datanodeIDs = new String[len];
- for(int i=0; i<len; i++) {
- datanodeIDs[i] = Text.readString(in);
- }
- }
-
- /** serialization method */
- public void write(DataOutput out) throws IOException {
- block.write(out);
- WritableUtils.writeVInt(out, datanodeIDs.length); // variable length int
- for(String id:datanodeIDs) {
- Text.writeString(out, id);
- }
- }
- }
-
- private BlockWithLocations[] blocks;
-
- /** default constructor */
- BlocksWithLocations() {
- }
-
- /** Constructor with one parameter */
- public BlocksWithLocations( BlockWithLocations[] blocks ) {
- this.blocks = blocks;
- }
-
- /** getter */
- public BlockWithLocations[] getBlocks() {
- return blocks;
- }
-
- /** serialization method */
- public void write( DataOutput out ) throws IOException {
- WritableUtils.writeVInt(out, blocks.length);
- for(int i=0; i<blocks.length; i++) {
- blocks[i].write(out);
- }
- }
-
- /** deserialization method */
- public void readFields(DataInput in) throws IOException {
- int len = WritableUtils.readVInt(in);
- blocks = new BlockWithLocations[len];
- for(int i=0; i<len; i++) {
- blocks[i] = new BlockWithLocations();
- blocks[i].readFields(in);
- }
- }
-}
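For reference, the class removed above (and re-added under org.apache.hadoop.hdfs.server.protocol, per the import changes below) hand-rolls its Writable encoding: a vint array length followed by each block and its vint-counted datanode IDs. A minimal round-trip sketch, assuming the relocated package; the harness class, block value, and datanode names are illustrative only:

// Hypothetical harness; it sits in the same package as the relocated class
// so the package-private BlocksWithLocations() constructor is accessible.
package org.apache.hadoop.hdfs.server.protocol;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;

public class BlocksWithLocationsRoundTrip {
  public static void main(String[] args) throws IOException {
    // One block reported on two made-up datanodes.
    BlockWithLocations one =
        new BlockWithLocations(new Block(), new String[] {"dn1", "dn2"});
    BlocksWithLocations original =
        new BlocksWithLocations(new BlockWithLocations[] {one});

    // write(): vint array length, then each entry's custom encoding.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bytes));

    // readFields(): rebuild from the stream, as the RPC layer would.
    BlocksWithLocations copy = new BlocksWithLocations();
    copy.readFields(new DataInputStream(
        new ByteArrayInputStream(bytes.toByteArray())));

    System.out.println(copy.getBlocks().length); // prints 1
  }
}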
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=696039&r1=696038&r2=696039&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Tue Sep 16 13:17:48 2008
@@ -22,7 +22,6 @@
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
-import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.fs.permission.*;
import org.apache.hadoop.fs.ContentSummary;
@@ -39,9 +38,10 @@
* Compared to the previous version the following changes have been introduced:
* (Only the latest change is reflected.
* The log of historical changes can be retrieved from the svn).
- * 37 : Added setTimes
+ * 38 : getEditLogSize(), rollEditLog(), rollFSImage()
+ * moved to NamenodeProtocol.
*/
- public static final long versionID = 37L;
+ public static final long versionID = 38L;
///////////////////////////////////////
// File contents
@@ -365,30 +365,6 @@
*/
public void refreshNodes() throws IOException;
-
- /**
- * Get the size of the current edit log (in bytes).
- * @return The number of bytes in the current edit log.
- * @throws IOException
- */
- public long getEditLogSize() throws IOException;
-
- /**
- * Closes the current edit log and opens a new one. The
- * call fails if the file system is in SafeMode.
- * @throws IOException
- * @return a unique token to identify this transaction.
- */
- public CheckpointSignature rollEditLog() throws IOException;
-
- /**
- * Rolls the fsImage log. It removes the old fsImage, copies the
- * new image to fsImage, removes the old edits and renames edits.new
- * to edits. The call fails if any of the four files are missing.
- * @throws IOException
- */
- public void rollFsImage() throws IOException;
-
/**
* Finalize previous upgrade.
* Remove file system state saved during the upgrade.
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=696039&r1=696038&r2=696039&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java Tue Sep 16 13:17:48 2008
@@ -54,10 +54,10 @@
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.*;
-import org.apache.hadoop.hdfs.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=696039&r1=696038&r2=696039&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Tue Sep 16 13:17:48 2008
@@ -26,7 +26,6 @@
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
-import org.apache.hadoop.hdfs.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics;
import org.apache.hadoop.security.UnixUserGroupInformation;
@@ -40,6 +39,8 @@
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.ScriptBasedMapping;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=696039&r1=696038&r2=696039&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java Tue Sep 16 13:17:48 2008
@@ -28,7 +28,7 @@
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
-import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=696039&r1=696038&r2=696039&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Tue Sep 16 13:17:48 2008
@@ -20,7 +20,7 @@
import org.apache.commons.logging.*;
import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.ipc.*;
@@ -51,7 +51,7 @@
* primary NameNode.
*
**********************************************************/
-public class SecondaryNameNode implements FSConstants, Runnable {
+public class SecondaryNameNode implements Runnable {
public static final Log LOG =
LogFactory.getLog(SecondaryNameNode.class.getName());
@@ -59,7 +59,7 @@
private String fsName;
private CheckpointStorage checkpointImage;
- private ClientProtocol namenode;
+ private NamenodeProtocol namenode;
private Configuration conf;
private InetSocketAddress nameNodeAddr;
private boolean shouldRun;
@@ -131,8 +131,8 @@
this.conf = conf;
this.namenode =
- (ClientProtocol) RPC.waitForProxy(ClientProtocol.class,
- ClientProtocol.versionID, nameNodeAddr, conf);
+ (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
+ NamenodeProtocol.versionID, nameNodeAddr, conf);
// initialize checkpoint directories
fsName = getInfoServer();
@@ -513,7 +513,7 @@
"cannot access checkpoint directory.");
StorageState curState;
try {
- curState = sd.analyzeStorage(StartupOption.REGULAR);
+ curState = sd.analyzeStorage(FSConstants.StartupOption.REGULAR);
// sd is locked but not opened
switch(curState) {
case NON_EXISTENT:
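The net effect of these hunks is that the secondary now speaks NamenodeProtocol end to end. A minimal sketch of the proxy setup the changed constructor performs (the wrapper class and method names here are illustrative, not code from the commit):

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.ipc.RPC;

public class NamenodeProxySketch {
  /** Blocks until the NameNode is reachable, then returns a typed RPC proxy. */
  public static NamenodeProtocol connect(InetSocketAddress nameNodeAddr,
      Configuration conf) throws IOException {
    return (NamenodeProtocol) RPC.waitForProxy(
        NamenodeProtocol.class, NamenodeProtocol.versionID, nameNodeAddr, conf);
  }
}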
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java?rev=696039&r1=696038&r2=696039&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java Tue Sep 16 13:17:48 2008
@@ -20,8 +20,8 @@
import java.io.IOException;
-import org.apache.hadoop.hdfs.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.ipc.VersionedProtocol;
/*****************************************************************************
@@ -30,9 +30,9 @@
*****************************************************************************/
public interface NamenodeProtocol extends VersionedProtocol {
/**
- * 1: changed the serialization in DatanodeInfo
+ * 2: Added getEditLogSize(), rollEditLog(), rollFSImage().
*/
- public static final long versionID = 1L;
+ public static final long versionID = 2L;
/** Get a list of blocks belonged to <code>datanode</code>
* whose total size is equal to <code>size</code>
@@ -45,4 +45,26 @@
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
throws IOException;
+ /**
+ * Get the size of the current edit log (in bytes).
+ * @return The number of bytes in the current edit log.
+ * @throws IOException
+ */
+ public long getEditLogSize() throws IOException;
+
+ /**
+ * Closes the current edit log and opens a new one. The
+ * call fails if the file system is in SafeMode.
+ * @throws IOException
+ * @return a unique token to identify this transaction.
+ */
+ public CheckpointSignature rollEditLog() throws IOException;
+
+ /**
+ * Rolls the fsImage log. It removes the old fsImage, copies the
+ * new image to fsImage, removes the old edits and renames edits.new
+ * to edits. The call fails if any of the four files are missing.
+ * @throws IOException
+ */
+ public void rollFsImage() throws IOException;
}
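Together, the three methods moved here give a checkpointer the full roll cycle. A hedged sketch of how a caller such as the SecondaryNameNode might drive them; the size threshold and the elided image/edits transfer step are placeholders, not part of this commit:

import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;

public class CheckpointSketch {
  // Illustrative threshold; the real trigger is configuration-driven.
  private static final long EDIT_LOG_TRIGGER_BYTES = 4 * 1024 * 1024;

  static void maybeCheckpoint(NamenodeProtocol namenode) throws IOException {
    // 1. Poll the edit log size to decide whether a checkpoint is due.
    if (namenode.getEditLogSize() < EDIT_LOG_TRIGGER_BYTES) {
      return;
    }
    // 2. Close the current edit log and open a new one; fails in SafeMode.
    CheckpointSignature sig = namenode.rollEditLog();

    // ... download fsimage and edits, merge locally, upload the new image,
    // validating against sig (omitted here) ...

    // 3. Promote the new image and rename edits.new to edits.
    namenode.rollFsImage();
  }
}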
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=696039&r1=696038&r2=696039&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestGetBlocks.java Tue Sep 16 13:17:48 2008
@@ -25,9 +25,9 @@
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
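For completeness, the import swap above tracks the one NamenodeProtocol call that predates this commit, getBlocks(), now paired with the relocated BlocksWithLocations types. A small usage sketch (the helper name and printing are illustrative only):

import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;

public class GetBlocksSketch {
  // Asks the NameNode for roughly `size` bytes worth of blocks stored on
  // `datanode` -- the call the Balancer uses to pick blocks to move.
  static void printBlocks(NamenodeProtocol namenode, DatanodeInfo datanode,
      long size) throws IOException {
    BlocksWithLocations blocks = namenode.getBlocks(datanode, size);
    for (BlockWithLocations b : blocks.getBlocks()) {
      System.out.println(b.getBlock() + " on "
          + Arrays.toString(b.getDatanodes()));
    }
  }
}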