Author: szetszwo
Date: Thu Jan 12 06:56:57 2012
New Revision: 1230417
URL: http://svn.apache.org/viewvc?rev=1230417&view=rev
Log:
svn merge -c 1212606 from trunk for HDFS-2642.
Added:
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
- copied unchanged from r1212606,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolPB.java
- copied unchanged from r1212606,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolPB.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
- copied unchanged from r1212606,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
Modified:
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/
(props changed)
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
(props changed)
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolPB.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
Propchange:
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Jan 12 06:56:57 2012
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1161777,1161781,1161992,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1165826,1166402,1166466,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1171711,1172916,1173402,1173468,1173488,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177161,1177487,1177531,1177757,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1183081,1183098,1183175,1183554,1186508,1186896,1187140,1187505,1188282,1188286,1188300,1188436,1188487,1189028,1189355,1189360,1189546,1189901,1189932,1189982,1190077,1190127,1190620,1190708,1195575,1195656,1195731,1195754,1196113,1196129,1196171,1197329,1198903,1199396,1200731,1204114,1204117,1204122,1204124,1204129,1204131,1204366,1204370,1204376,1204388,1204544,1204707,1204709,1204825,1205146,1205260,1205626,1206178,1206786,1206830,1207585,1207694,1208140,1208153,1208313,1210208,1210657,
1210719,1210746,1211206,1211249,1211769,1212021,1212062,1212073,1212084,1212299,1213537,1213586,1213592-1213593,1213954,1214027,1214046,1220510,1221106,1221348,1226211,1227091,1227423,1229897
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1161777,1161781,1161992,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1165826,1166402,1166466,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1171711,1172916,1173402,1173468,1173488,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177161,1177487,1177531,1177757,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1183081,1183098,1183175,1183554,1186508,1186896,1187140,1187505,1188282,1188286,1188300,1188436,1188487,1189028,1189355,1189360,1189546,1189901,1189932,1189982,1190077,1190127,1190620,1190708,1195575,1195656,1195731,1195754,1196113,1196129,1196171,1197329,1198903,1199396,1200731,1204114,1204117,1204122,1204124,1204129,1204131,1204366,1204370,1204376,1204388,1204544,1204707,1204709,1204825,1205146,1205260,1205626,1206178,1206786,1206830,1207585,1207694,1208140,1208153,1208313,1210208,1210657,
1210719,1210746,1211206,1211249,1211769,1212021,1212062,1212073,1212084,1212299,1212606,1213537,1213586,1213592-1213593,1213954,1214027,1214046,1220510,1221106,1221348,1226211,1227091,1227423,1229897
/hadoop/core/branches/branch-0.19/hdfs:713112
/hadoop/hdfs/branches/HDFS-1052:987665-1095512
/hadoop/hdfs/branches/HDFS-265:796829-820463
Modified:
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1230417&r1=1230416&r2=1230417&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
(original)
+++
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
Thu Jan 12 06:56:57 2012
@@ -23,6 +23,8 @@ Release 0.23-PB - Unreleased
HDFS-2636. Implement protobuf service for ClientDatanodeProtocol. (suresh)
+ HDFS-2642. Protobuf translators for DatanodeProtocol. (jitendra)
+
IMPROVEMENTS
HDFS-2018. Move all journal stream management code into one place.
Propchange:
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Jan 12 06:56:57 2012
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1161777,1161781,1161992,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1165826,1166402,1166466,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1171711,1172916,1173402,1173468,1173488,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177161,1177487,1177531,1177757,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1183081,1183098,1183175,1183554,1186508,1186896,1187140,1187505,1188282,1188286,1188300,1188436,1188487,1189028,1189355,1189360,1189546,1189901,1189932,1189982,1190077,1190127,1190620,1190708,1195575,1195656,1195731,1195754,1196113,1196129,1196171,1197329,1198903,1199396,1200731,1204114,1204117,1204122,1204124,1204129,1204131,1204366,1204370,1204376,1204388,1204544,1204707,1205146,1205260,1206786,1206830,1207694,1208140,1208153,1208313,1210208,1210657,1210719,1210746,1211206,12
11249,1211769,1212021,1212062,1212073,1212084,1212299,1213537,1213586,1213592-1213593,1213954,1214027,1214046,1220510,1221106,1221348,1226211,1227091,1227423,1229897
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1161777,1161781,1161992,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1165826,1166402,1166466,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1171711,1172916,1173402,1173468,1173488,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177161,1177487,1177531,1177757,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1183081,1183098,1183175,1183554,1186508,1186896,1187140,1187505,1188282,1188286,1188300,1188436,1188487,1189028,1189355,1189360,1189546,1189901,1189932,1189982,1190077,1190127,1190620,1190708,1195575,1195656,1195731,1195754,1196113,1196129,1196171,1197329,1198903,1199396,1200731,1204114,1204117,1204122,1204124,1204129,1204131,1204366,1204370,1204376,1204388,1204544,1204707,1205146,1205260,1206786,1206830,1207694,1208140,1208153,1208313,1210208,1210657,1210719,1210746,1211206,12
11249,1211769,1212021,1212062,1212073,1212084,1212299,1212606,1213537,1213586,1213592-1213593,1213954,1214027,1214046,1220510,1221106,1221348,1226211,1227091,1227423,1229897
/hadoop/core/branches/branch-0.19/hdfs/src/java:713112
/hadoop/core/trunk/src/hdfs:776175-785643,785929-786278
/hadoop/hdfs/branches/HDFS-1052/src/java:987665-1095512
Modified:
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolPB.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolPB.java?rev=1230417&r1=1230416&r2=1230417&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolPB.java
(original)
+++
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolPB.java
Thu Jan 12 06:56:57 2012
@@ -15,6 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package org.apache.hadoop.hdfs.protocolPB;
import java.io.IOException;
Modified:
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java?rev=1230417&r1=1230416&r2=1230417&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
(original)
+++
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
Thu Jan 12 06:56:57 2012
@@ -20,7 +20,8 @@ package org.apache.hadoop.hdfs.protocolP
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto;
import
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto;
import
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
@@ -39,8 +40,6 @@ import org.apache.hadoop.hdfs.protocol.p
import
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto;
import
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto;
-import
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto;
-import
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionResponseProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
@@ -240,14 +239,6 @@ public class NamenodeProtocolServerSideT
throw new ServiceException(e);
}
return VersionResponseProto.newBuilder()
- .setInfo(convert(info)).build();
- }
-
- private NamespaceInfoProto convert(NamespaceInfo info) {
- return NamespaceInfoProto.newBuilder()
- .setBlockPoolID(info.getBlockPoolID())
- .setBuildVersion(info.getBuildVersion())
- .setDistUpgradeVersion(info.getDistributedUpgradeVersion())
- .setStorageInfo(PBHelper.convert(info)).build();
+ .setInfo(PBHelper.convert(info)).build();
}
}
Modified:
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java?rev=1230417&r1=1230416&r2=1230417&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
(original)
+++
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
Thu Jan 12 06:56:57 2012
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.protocol.D
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto;
@@ -41,7 +42,6 @@ import org.apache.hadoop.hdfs.protocol.p
import
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto;
-import
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.VersionRequestProto;
import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
Modified:
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1230417&r1=1230416&r2=1230417&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
(original)
+++
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
Thu Jan 12 06:56:57 2012
@@ -27,6 +27,16 @@ import org.apache.hadoop.hdfs.protocol.D
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
+import
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
+import
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
+import
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
+import
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
+import
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto;
+import
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto;
+import
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
+import
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
+import
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto;
@@ -37,6 +47,7 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
import
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
@@ -57,15 +68,26 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
+import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
import
org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
+import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import
org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
@@ -337,10 +359,10 @@ class PBHelper {
}
public static DatanodeInfoProto convert(DatanodeInfo info) {
- return DatanodeInfoProto.newBuilder()
- .setAdminState(PBHelper.convert(info.getAdminState()))
- .setBlockPoolUsed(info.getBlockPoolUsed())
- .setCapacity(info.getCapacity())
+ DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder();
+ builder.setBlockPoolUsed(info.getBlockPoolUsed());
+ builder.setAdminState(PBHelper.convert(info.getAdminState()));
+ builder.setCapacity(info.getCapacity())
.setDfsUsed(info.getDfsUsed())
.setHostName(info.getHostName())
.setId(PBHelper.convert((DatanodeID)info))
@@ -349,6 +371,7 @@ class PBHelper {
.setRemaining(info.getRemaining())
.setXceiverCount(info.getXceiverCount())
.build();
+ return builder.build();
}
public static AdminStates convert(AdminState adminState) {
@@ -378,13 +401,25 @@ class PBHelper {
public static LocatedBlockProto convert(LocatedBlock b) {
Builder builder = LocatedBlockProto.newBuilder();
DatanodeInfo[] locs = b.getLocations();
- for(DatanodeInfo loc : locs) {
- builder.addLocs(PBHelper.convert(loc));
+ for (int i = 0; i < locs.length; i++) {
+ builder.addLocs(i, PBHelper.convert(locs[i]));
}
return builder.setB(PBHelper.convert(b.getBlock()))
.setBlockToken(PBHelper.convert(b.getBlockToken()))
.setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset()).build();
}
+
+ public static LocatedBlock convert(LocatedBlockProto proto) {
+ List<DatanodeInfoProto> locs = proto.getLocsList();
+ DatanodeInfo[] targets = new DatanodeInfo[locs.size()];
+ for (int i = 0; i < locs.size(); i++) {
+ targets[i] = PBHelper.convert(locs.get(i));
+ }
+ LocatedBlock lb = new LocatedBlock(PBHelper.convert(proto.getB()), targets,
+ proto.getOffset(), proto.getCorrupt());
+ lb.setBlockToken(PBHelper.convert(proto.getBlockToken()));
+ return lb;
+ }
public static BlockTokenIdentifierProto convert(
Token<BlockTokenIdentifier> token) {
@@ -417,4 +452,245 @@ class PBHelper {
return ReplicaState.FINALIZED;
}
}
+
+ public static DatanodeRegistrationProto convert(
+ DatanodeRegistration registration) {
+ DatanodeRegistrationProto.Builder builder = DatanodeRegistrationProto
+ .newBuilder();
+ return builder.setDatanodeID(PBHelper.convert((DatanodeID) registration))
+ .setStorageInfo(PBHelper.convert(registration.storageInfo))
+ .setKeys(PBHelper.convert(registration.exportedKeys)).build();
+ }
+
+ public static DatanodeRegistration convert(DatanodeRegistrationProto proto) {
+ return new DatanodeRegistration(PBHelper.convert(proto.getDatanodeID()),
+ PBHelper.convert(proto.getStorageInfo()), PBHelper.convert(proto
+ .getKeys()));
+ }
+
+ public static DatanodeCommand convert(DatanodeCommandProto proto) {
+ switch (proto.getCmdType()) {
+ case BalancerBandwidthCommand:
+ return PBHelper.convert(proto.getBalancerCmd());
+ case BlockCommand:
+ return PBHelper.convert(proto.getBlkCmd());
+ case BlockRecoveryCommand:
+ return PBHelper.convert(proto.getRecoveryCmd());
+ case FinalizeCommand:
+ return PBHelper.convert(proto.getFinalizeCmd());
+ case KeyUpdateCommand:
+ return PBHelper.convert(proto.getKeyUpdateCmd());
+ case RegisterCommand:
+ return PBHelper.convert(proto.getRegisterCmd());
+ case UpgradeCommand:
+ return PBHelper.convert(proto.getUpgradeCmd());
+ }
+ return null;
+ }
+
+ public static BalancerBandwidthCommandProto convert(
+ BalancerBandwidthCommand bbCmd) {
+ return BalancerBandwidthCommandProto.newBuilder()
+ .setBandwidth(bbCmd.getBalancerBandwidthValue()).build();
+ }
+
+ public static KeyUpdateCommandProto convert(KeyUpdateCommand cmd) {
+ return KeyUpdateCommandProto.newBuilder()
+ .setKeys(PBHelper.convert(cmd.getExportedKeys())).build();
+ }
+
+ public static BlockRecoveryCommandProto convert(BlockRecoveryCommand cmd) {
+ BlockRecoveryCommandProto.Builder builder = BlockRecoveryCommandProto
+ .newBuilder();
+ for (RecoveringBlock b : cmd.getRecoveringBlocks()) {
+ builder.addBlocks(PBHelper.convert(b));
+ }
+ return builder.build();
+ }
+
+ public static FinalizeCommandProto convert(FinalizeCommand cmd) {
+ return FinalizeCommandProto.newBuilder()
+ .setBlockPoolId(cmd.getBlockPoolId()).build();
+ }
+
+ public static RegisterCommandProto convert(RegisterCommand cmd) {
+ return RegisterCommandProto.newBuilder().build();
+ }
+
+ public static BlockCommandProto convert(BlockCommand cmd) {
+ BlockCommandProto.Builder builder = BlockCommandProto.newBuilder()
+ .setBlockPoolId(cmd.getBlockPoolId());
+ switch (cmd.getAction()) {
+ case DatanodeProtocol.DNA_TRANSFER:
+ builder.setAction(BlockCommandProto.Action.TRANSFER);
+ break;
+ case DatanodeProtocol.DNA_INVALIDATE:
+ builder.setAction(BlockCommandProto.Action.INVALIDATE);
+ break;
+ }
+ Block[] blocks = cmd.getBlocks();
+ for (int i = 0; i < blocks.length; i++) {
+ builder.addBlocks(PBHelper.convert(blocks[i]));
+ }
+ DatanodeInfo[][] infos = cmd.getTargets();
+ for (int i = 0; i < infos.length; i++) {
+ builder.addTargets(PBHelper.convert(infos[i]));
+ }
+ return builder.build();
+ }
+
+ public static DatanodeInfosProto convert(DatanodeInfo[] datanodeInfos) {
+ DatanodeInfosProto.Builder builder = DatanodeInfosProto.newBuilder();
+ for (int i = 0; i < datanodeInfos.length; i++) {
+ builder.addDatanodes(PBHelper.convert(datanodeInfos[i]));
+ }
+ return builder.build();
+ }
+
+ public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
+ DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
+ switch (datanodeCommand.getAction()) {
+ case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
+ builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
+ .setBalancerCmd(
+ PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
+ break;
+ case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
+ builder
+ .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
+ .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand)
datanodeCommand));
+ break;
+ case DatanodeProtocol.DNA_RECOVERBLOCK:
+ builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
+ .setRecoveryCmd(
+ PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
+ break;
+ case DatanodeProtocol.DNA_FINALIZE:
+ builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
+ .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
+ break;
+ case DatanodeProtocol.DNA_REGISTER:
+ builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
+ .setRegisterCmd(PBHelper.convert((RegisterCommand) datanodeCommand));
+ break;
+ case DatanodeProtocol.DNA_TRANSFER:
+ case DatanodeProtocol.DNA_INVALIDATE:
+ builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).setBlkCmd(
+ PBHelper.convert((BlockCommand) datanodeCommand));
+ break;
+ case DatanodeProtocol.DNA_SHUTDOWN: //Not expected
+ case DatanodeProtocol.DNA_UNKNOWN: //Not expected
+ }
+ return builder.build();
+ }
+
+ public static UpgradeCommand convert(UpgradeCommandProto upgradeCmd) {
+ int action = UpgradeCommand.UC_ACTION_UNKNOWN;
+ switch (upgradeCmd.getAction()) {
+ case REPORT_STATUS:
+ action = UpgradeCommand.UC_ACTION_REPORT_STATUS;
+ break;
+ case START_UPGRADE:
+ action = UpgradeCommand.UC_ACTION_START_UPGRADE;
+ }
+ return new UpgradeCommand(action, upgradeCmd.getVersion(),
+ (short) upgradeCmd.getUpgradeStatus());
+ }
+
+ public static RegisterCommand convert(RegisterCommandProto registerCmd) {
+ return new RegisterCommand();
+ }
+
+ public static KeyUpdateCommand convert(KeyUpdateCommandProto keyUpdateCmd) {
+ return new KeyUpdateCommand(PBHelper.convert(keyUpdateCmd.getKeys()));
+ }
+
+ public static FinalizeCommand convert(FinalizeCommandProto finalizeCmd) {
+ return new FinalizeCommand(finalizeCmd.getBlockPoolId());
+ }
+
+ public static BlockRecoveryCommand convert(
+ BlockRecoveryCommandProto recoveryCmd) {
+ List<RecoveringBlockProto> list = recoveryCmd.getBlocksList();
+ List<RecoveringBlock> recoveringBlocks = new ArrayList<RecoveringBlock>(
+ list.size());
+ for (int i = 0; i < list.size(); i++) {
+ recoveringBlocks.add(PBHelper.convert(list.get(0)));
+ }
+ return new BlockRecoveryCommand(recoveringBlocks);
+ }
+
+ public static BlockCommand convert(BlockCommandProto blkCmd) {
+ List<BlockProto> blockProtoList = blkCmd.getBlocksList();
+ List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
+ DatanodeInfo[][] targets = new DatanodeInfo[blockProtoList.size()][];
+ Block[] blocks = new Block[blockProtoList.size()];
+ for (int i = 0; i < blockProtoList.size(); i++) {
+ targets[i] = PBHelper.convert(targetList.get(i));
+ blocks[i] = PBHelper.convert(blockProtoList.get(i));
+ }
+ int action = DatanodeProtocol.DNA_UNKNOWN;
+ switch (blkCmd.getAction()) {
+ case TRANSFER:
+ action = DatanodeProtocol.DNA_TRANSFER;
+ break;
+ case INVALIDATE:
+ action = DatanodeProtocol.DNA_INVALIDATE;
+ break;
+ }
+ return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets);
+ }
+
+ public static DatanodeInfo[] convert(DatanodeInfosProto datanodeInfosProto) {
+ List<DatanodeInfoProto> proto = datanodeInfosProto.getDatanodesList();
+ DatanodeInfo[] infos = new DatanodeInfo[proto.size()];
+ for (int i = 0; i < infos.length; i++) {
+ infos[i] = PBHelper.convert(proto.get(i));
+ }
+ return infos;
+ }
+
+ public static BalancerBandwidthCommand convert(
+ BalancerBandwidthCommandProto balancerCmd) {
+ return new BalancerBandwidthCommand(balancerCmd.getBandwidth());
+ }
+
+ public static ReceivedDeletedBlockInfoProto convert(
+ ReceivedDeletedBlockInfo receivedDeletedBlockInfo) {
+ return ReceivedDeletedBlockInfoProto.newBuilder()
+ .setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock()))
+ .setDeleteHint(receivedDeletedBlockInfo.getDelHints()).build();
+ }
+
+ public static UpgradeCommandProto convert(UpgradeCommand comm) {
+ UpgradeCommandProto.Builder builder = UpgradeCommandProto.newBuilder()
+ .setVersion(comm.getVersion())
+ .setUpgradeStatus(comm.getCurrentStatus());
+ switch (comm.getAction()) {
+ case UpgradeCommand.UC_ACTION_REPORT_STATUS:
+ builder.setAction(UpgradeCommandProto.Action.REPORT_STATUS);
+ break;
+ case UpgradeCommand.UC_ACTION_START_UPGRADE:
+ builder.setAction(UpgradeCommandProto.Action.START_UPGRADE);
+ break;
+ default:
+ builder.setAction(UpgradeCommandProto.Action.UNKNOWN);
+ break;
+ }
+ return builder.build();
+ }
+
+ public static ReceivedDeletedBlockInfo convert(
+ ReceivedDeletedBlockInfoProto proto) {
+ return new ReceivedDeletedBlockInfo(PBHelper.convert(proto.getBlock()),
+ proto.getDeleteHint());
+ }
+
+ public static NamespaceInfoProto convert(NamespaceInfo info) {
+ return NamespaceInfoProto.newBuilder()
+ .setBlockPoolID(info.getBlockPoolID())
+ .setBuildVersion(info.getBuildVersion())
+ .setDistUpgradeVersion(info.getDistributedUpgradeVersion())
+ .setStorageInfo(PBHelper.convert((StorageInfo)info)).build();
+ }
}
Modified:
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java?rev=1230417&r1=1230416&r2=1230417&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
(original)
+++
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
Thu Jan 12 06:56:57 2012
@@ -66,6 +66,13 @@ implements Writable, NodeRegistration {
this(nodeName, new StorageInfo(), new ExportedBlockKeys());
}
+ public DatanodeRegistration(DatanodeID dn, StorageInfo info,
+ ExportedBlockKeys keys) {
+ super(dn);
+ this.storageInfo = info;
+ this.exportedKeys = keys;
+ }
+
public DatanodeRegistration(String nodeName, StorageInfo info,
ExportedBlockKeys keys) {
super(nodeName);
Modified:
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java?rev=1230417&r1=1230416&r2=1230417&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java
(original)
+++
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java
Thu Jan 12 06:56:57 2012
@@ -40,7 +40,7 @@ import org.apache.hadoop.io.WritableFact
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class UpgradeCommand extends DatanodeCommand {
- final static int UC_ACTION_UNKNOWN = DatanodeProtocol.DNA_UNKNOWN;
+ public final static int UC_ACTION_UNKNOWN = DatanodeProtocol.DNA_UNKNOWN;
public final static int UC_ACTION_REPORT_STATUS = 100; // report upgrade status
public final static int UC_ACTION_START_UPGRADE = 101; // start upgrade
Modified:
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto?rev=1230417&r1=1230416&r2=1230417&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
(original)
+++
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
Thu Jan 12 06:56:57 2012
@@ -31,7 +31,7 @@ import "hdfs.proto";
*/
message DatanodeRegistrationProto {
required DatanodeIDProto datanodeID = 1; // Datanode information
- required StorageInfoProto storateInfo = 2; // Node information
+ required StorageInfoProto storageInfo = 2; // Node information
required ExportedBlockKeysProto keys = 3; // Block keys
}
@@ -55,7 +55,7 @@ message DatanodeCommandProto {
// cmdType is set
optional BalancerBandwidthCommandProto balancerCmd = 2;
optional BlockCommandProto blkCmd = 3;
- optional BlockRecoveryCommndProto recoveryCmd = 4;
+ optional BlockRecoveryCommandProto recoveryCmd = 4;
optional FinalizeCommandProto finalizeCmd = 5;
optional KeyUpdateCommandProto keyUpdateCmd = 6;
optional RegisterCommandProto registerCmd = 7;
@@ -77,22 +77,20 @@ message BalancerBandwidthCommandProto {
* on the given set of blocks.
*/
message BlockCommandProto {
- enum Action {
- UNKNOWN = 0; // Unknown action
+ enum Action {
TRANSFER = 1; // Transfer blocks to another datanode
INVALIDATE = 2; // Invalidate blocks
- SHUTDOWN = 3; // Shutdown node
}
- required uint32 action = 1;
+ required Action action = 1;
required string blockPoolId = 2;
repeated BlockProto blocks = 3;
- repeated DatanodeIDsProto targets = 4;
+ repeated DatanodeInfosProto targets = 4;
}
/**
* List of blocks to be recovered by the datanode
*/
-message BlockRecoveryCommndProto {
+message BlockRecoveryCommandProto {
repeated RecoveringBlockProto blocks = 1;
}
@@ -126,7 +124,7 @@ message UpgradeCommandProto {
REPORT_STATUS = 100; // Report upgrade status
START_UPGRADE = 101; // Start upgrade
}
- required uint32 action = 1; // Upgrade action
+ required Action action = 1; // Upgrade action
required uint32 version = 2; // Version of the upgrade
required uint32 upgradeStatus = 3; // % completed in range 0 & 100
}
@@ -324,6 +322,11 @@ service DatanodeProtocolService {
* Used for debugging.
*/
rpc errorReport(ErrorReportRequestProto) returns(ErrorReportResponseProto);
+
+ /**
+ * Request the version
+ */
+ rpc versionRequest(VersionRequestProto) returns(VersionResponseProto);
/**
* Generic way to send commands from datanode to namenode during
Modified:
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto?rev=1230417&r1=1230416&r2=1230417&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
(original)
+++
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
Thu Jan 12 06:56:57 2012
@@ -85,19 +85,6 @@ message RollEditLogResponseProto {
}
/**
- * void request
- */
-message VersionRequestProto {
-}
-
-/**
- * void request
- */
-message VersionResponseProto {
- required NamespaceInfoProto info = 1;
-}
-
-/**
* registration - Namenode reporting the error
* errorCode - error code indicating the error
* msg - Free text description of the error
Modified:
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1230417&r1=1230416&r2=1230417&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
(original)
+++
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
Thu Jan 12 06:56:57 2012
@@ -54,10 +54,10 @@ message DatanodeIDProto {
}
/**
- * DatanodeID array
+ * DatanodeInfo array
*/
-message DatanodeIDsProto {
- repeated DatanodeIDProto datanodes = 1;
+message DatanodeInfosProto {
+ repeated DatanodeInfoProto datanodes = 1;
}
/**
@@ -345,3 +345,16 @@ message RecoveringBlockProto {
required LocatedBlockProto block = 2; // Block to be recovered
}
+/**
+ * void request
+ */
+message VersionRequestProto {
+}
+
+/**
+ * Version response from namenode.
+ */
+message VersionResponseProto {
+ required NamespaceInfoProto info = 1;
+}
+
Modified:
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1230417&r1=1230416&r2=1230417&view=diff
==============================================================================
---
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
(original)
+++
hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
Thu Jan 12 06:56:57 2012
@@ -17,29 +17,34 @@
*/
package org.apache.hadoop.hdfs.protocolPB;
-import static junit.framework.Assert.*;
+import static junit.framework.Assert.assertEquals;
+import static junit.framework.Assert.assertTrue;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
+import
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto;
import
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
import
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto;
import
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
import
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
import
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto;
import
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;
@@ -47,14 +52,17 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.security.token.block.BlockKey;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
-import
org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import
org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import
org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.io.Text;
@@ -120,6 +128,10 @@ public class TestPBHelper {
DatanodeID dn = new DatanodeID("node", "sid", 1, 2);
DatanodeIDProto dnProto = PBHelper.convert(dn);
DatanodeID dn2 = PBHelper.convert(dnProto);
+ compare(dn, dn2);
+ }
+
+ void compare(DatanodeID dn, DatanodeID dn2) {
assertEquals(dn.getHost(), dn2.getHost());
assertEquals(dn.getInfoPort(), dn2.getInfoPort());
assertEquals(dn.getIpcPort(), dn2.getIpcPort());
@@ -177,7 +189,6 @@ public class TestPBHelper {
assertEquals(k1.getExpiryDate(), k2.getExpiryDate());
assertEquals(k1.getKeyId(), k2.getKeyId());
assertTrue(Arrays.equals(k1.getEncodedKey(), k2.getEncodedKey()));
-
}
@Test
@@ -195,7 +206,10 @@ public class TestPBHelper {
getBlockKey(1), keys);
ExportedBlockKeysProto expKeysProto = PBHelper.convert(expKeys);
ExportedBlockKeys expKeys1 = PBHelper.convert(expKeysProto);
-
+ compare(expKeys, expKeys1);
+ }
+
+ void compare(ExportedBlockKeys expKeys, ExportedBlockKeys expKeys1) {
BlockKey[] allKeys = expKeys.getAllKeys();
BlockKey[] allKeys1 = expKeys1.getAllKeys();
assertEquals(allKeys.length, allKeys1.length);
@@ -314,15 +328,108 @@ public class TestPBHelper {
}
@Test
- public void testBlockTokenIdentifier() {
+ public void testConvertBlockToken() {
Token<BlockTokenIdentifier> token = new Token<BlockTokenIdentifier>(
"identifier".getBytes(), "password".getBytes(), new Text("kind"),
new Text("service"));
BlockTokenIdentifierProto tokenProto = PBHelper.convert(token);
Token<BlockTokenIdentifier> token2 = PBHelper.convert(tokenProto);
- assertTrue(Arrays.equals(token.getIdentifier(), token2.getIdentifier()));
- assertTrue(Arrays.equals(token.getPassword(), token2.getPassword()));
- assertEquals(token.getKind(), token2.getKind());
- assertEquals(token.getService(), token2.getService());
+ compare(token, token2);
+ }
+
+ @Test
+ public void testConvertNamespaceInfo() {
+ NamespaceInfo info = new NamespaceInfo(37, "clusterID", "bpID", 2300, 53);
+ NamespaceInfoProto proto = PBHelper.convert(info);
+ NamespaceInfo info2 = PBHelper.convert(proto);
+ compare(info, info2); //Compare the StorageInfo
+ assertEquals(info.getBlockPoolID(), info2.getBlockPoolID());
+ assertEquals(info.getBuildVersion(), info2.getBuildVersion());
+ assertEquals(info.getDistributedUpgradeVersion(),
+ info2.getDistributedUpgradeVersion());
+ }
+
+ private void compare(StorageInfo expected, StorageInfo actual) {
+ assertEquals(expected.clusterID, actual.clusterID);
+ assertEquals(expected.namespaceID, actual.namespaceID);
+ assertEquals(expected.cTime, actual.cTime);
+ assertEquals(expected.layoutVersion, actual.layoutVersion);
+ }
+
+ private void compare(Token<BlockTokenIdentifier> expected,
+ Token<BlockTokenIdentifier> actual) {
+ assertTrue(Arrays.equals(expected.getIdentifier(), actual.getIdentifier()));
+ assertTrue(Arrays.equals(expected.getPassword(), actual.getPassword()));
+ assertEquals(expected.getKind(), actual.getKind());
+ assertEquals(expected.getService(), actual.getService());
+ }
+
+ @Test
+ public void testConvertLocatedBlock() {
+ DatanodeInfo [] dnInfos = new DatanodeInfo[3];
+ dnInfos[0] = new DatanodeInfo("host0", "0", 5000, 5001, 20000, 10001, 9999,
+ 59, 69, 32, "local", "host0", AdminStates.DECOMMISSION_INPROGRESS);
+ dnInfos[1] = new DatanodeInfo("host1", "1", 5000, 5001, 20000, 10001, 9999,
+ 59, 69, 32, "local", "host1", AdminStates.DECOMMISSIONED);
+ dnInfos[2] = new DatanodeInfo("host2", "2", 5000, 5001, 20000, 10001, 9999,
+ 59, 69, 32, "local", "host1", AdminStates.NORMAL);
+ LocatedBlock lb = new LocatedBlock(
+ new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
+ LocatedBlockProto lbProto = PBHelper.convert(lb);
+ LocatedBlock lb2 = PBHelper.convert(lbProto);
+ assertEquals(lb.getBlock(), lb2.getBlock());
+ compare(lb.getBlockToken(), lb2.getBlockToken());
+ assertEquals(lb.getStartOffset(), lb2.getStartOffset());
+ assertEquals(lb.isCorrupt(), lb2.isCorrupt());
+ DatanodeInfo [] dnInfos2 = lb2.getLocations();
+ assertEquals(dnInfos.length, dnInfos2.length);
+ for (int i = 0; i < dnInfos.length ; i++) {
+ compare(dnInfos[i], dnInfos2[i]);
+ }
+ }
+
+ @Test
+ public void testConvertDatanodeRegistration() {
+ DatanodeID dnId = new DatanodeID("host", "xyz", 1, 0);
+ BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
+ ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
+ getBlockKey(1), keys);
+ DatanodeRegistration reg = new DatanodeRegistration(dnId,
+ new StorageInfo(), expKeys);
+ DatanodeRegistrationProto proto = PBHelper.convert(reg);
+ DatanodeRegistration reg2 = PBHelper.convert(proto);
+ compare(reg.storageInfo, reg2.storageInfo);
+ compare(reg.exportedKeys, reg2.exportedKeys);
+ compare((DatanodeID)reg, (DatanodeID)reg2);
+ }
+
+ @Test
+ public void testConvertBlockCommand() {
+ Block[] blocks = new Block[] { new Block(21), new Block(22) };
+ DatanodeInfo[][] dnInfos = new DatanodeInfo[][] { new DatanodeInfo[1],
+ new DatanodeInfo[2] };
+ dnInfos[0][0] = new DatanodeInfo();
+ dnInfos[1][0] = new DatanodeInfo();
+ dnInfos[1][1] = new DatanodeInfo();
+ BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1",
+ blocks, dnInfos);
+ BlockCommandProto bcProto = PBHelper.convert(bc);
+ BlockCommand bc2 = PBHelper.convert(bcProto);
+ assertEquals(bc.getAction(), bc2.getAction());
+ assertEquals(bc.getBlocks().length, bc2.getBlocks().length);
+ Block[] blocks2 = bc2.getBlocks();
+ for (int i = 0; i < blocks.length; i++) {
+ assertEquals(blocks[i], blocks2[i]);
+ }
+ DatanodeInfo[][] dnInfos2 = bc2.getTargets();
+ assertEquals(dnInfos.length, dnInfos2.length);
+ for (int i = 0; i < dnInfos.length; i++) {
+ DatanodeInfo[] d1 = dnInfos[i];
+ DatanodeInfo[] d2 = dnInfos2[i];
+ assertEquals(d1.length, d2.length);
+ for (int j = 0; j < d1.length; j++) {
+ compare(d1[j], d2[j]);
+ }
+ }
}
}