Repository: hadoop
Updated Branches:
  refs/heads/trunk 252e69f56 -> 12f4df043
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java
deleted file mode 100644
index a6de289..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java
+++ /dev/null
@@ -1,545 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.inotify;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.XAttr;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.FsPermission;
-
-import java.util.List;
-
-/**
- * Events sent by the inotify system. Note that no events are necessarily sent
- * when a file is opened for read (although a MetadataUpdateEvent will be sent
- * if the atime is updated).
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public abstract class Event {
-  public static enum EventType {
-    CREATE, CLOSE, APPEND, RENAME, METADATA, UNLINK
-  }
-
-  private EventType eventType;
-
-  public EventType getEventType() {
-    return eventType;
-  }
-
-  public Event(EventType eventType) {
-    this.eventType = eventType;
-  }
-
-  /**
-   * Sent when a file is closed after append or create.
-   */
-  public static class CloseEvent extends Event {
-    private String path;
-    private long fileSize;
-    private long timestamp;
-
-    public CloseEvent(String path, long fileSize, long timestamp) {
-      super(EventType.CLOSE);
-      this.path = path;
-      this.fileSize = fileSize;
-      this.timestamp = timestamp;
-    }
-
-    public String getPath() {
-      return path;
-    }
-
-    /**
-     * The size of the closed file in bytes. May be -1 if the size is not
-     * available (e.g. in the case of a close generated by a concat operation).
-     */
-    public long getFileSize() {
-      return fileSize;
-    }
-
-    /**
-     * The time when this event occurred, in milliseconds since the epoch.
-     */
-    public long getTimestamp() {
-      return timestamp;
-    }
-  }
-
-  /**
-   * Sent when a new file is created (including overwrite).
-   */
-  public static class CreateEvent extends Event {
-
-    public static enum INodeType {
-      FILE, DIRECTORY, SYMLINK;
-    }
-
-    private INodeType iNodeType;
-    private String path;
-    private long ctime;
-    private int replication;
-    private String ownerName;
-    private String groupName;
-    private FsPermission perms;
-    private String symlinkTarget;
-    private boolean overwrite;
-    private long defaultBlockSize;
-
-    public static class Builder {
-      private INodeType iNodeType;
-      private String path;
-      private long ctime;
-      private int replication;
-      private String ownerName;
-      private String groupName;
-      private FsPermission perms;
-      private String symlinkTarget;
-      private boolean overwrite;
-      private long defaultBlockSize = 0;
-
-      public Builder iNodeType(INodeType type) {
-        this.iNodeType = type;
-        return this;
-      }
-
-      public Builder path(String path) {
-        this.path = path;
-        return this;
-      }
-
-      public Builder ctime(long ctime) {
-        this.ctime = ctime;
-        return this;
-      }
-
-      public Builder replication(int replication) {
-        this.replication = replication;
-        return this;
-      }
-
-      public Builder ownerName(String ownerName) {
-        this.ownerName = ownerName;
-        return this;
-      }
-
-      public Builder groupName(String groupName) {
-        this.groupName = groupName;
-        return this;
-      }
-
-      public Builder perms(FsPermission perms) {
-        this.perms = perms;
-        return this;
-      }
-
-      public Builder symlinkTarget(String symlinkTarget) {
-        this.symlinkTarget = symlinkTarget;
-        return this;
-      }
-
-      public Builder overwrite(boolean overwrite) {
-        this.overwrite = overwrite;
-        return this;
-      }
-
-      public Builder defaultBlockSize(long defaultBlockSize) {
-        this.defaultBlockSize = defaultBlockSize;
-        return this;
-      }
-
-      public CreateEvent build() {
-        return new CreateEvent(this);
-      }
-    }
-
-    private CreateEvent(Builder b) {
-      super(EventType.CREATE);
-      this.iNodeType = b.iNodeType;
-      this.path = b.path;
-      this.ctime = b.ctime;
-      this.replication = b.replication;
-      this.ownerName = b.ownerName;
-      this.groupName = b.groupName;
-      this.perms = b.perms;
-      this.symlinkTarget = b.symlinkTarget;
-      this.overwrite = b.overwrite;
-      this.defaultBlockSize = b.defaultBlockSize;
-    }
-
-    public INodeType getiNodeType() {
-      return iNodeType;
-    }
-
-    public String getPath() {
-      return path;
-    }
-
-    /**
-     * Creation time of the file, directory, or symlink.
-     */
-    public long getCtime() {
-      return ctime;
-    }
-
-    /**
-     * Replication is zero if the CreateEvent iNodeType is directory or symlink.
-     */
-    public int getReplication() {
-      return replication;
-    }
-
-    public String getOwnerName() {
-      return ownerName;
-    }
-
-    public String getGroupName() {
-      return groupName;
-    }
-
-    public FsPermission getPerms() {
-      return perms;
-    }
-
-    /**
-     * Symlink target is null if the CreateEvent iNodeType is not symlink.
-     */
-    public String getSymlinkTarget() {
-      return symlinkTarget;
-    }
-
-    public boolean getOverwrite() {
-      return overwrite;
-    }
-
-    public long getDefaultBlockSize() {
-      return defaultBlockSize;
-    }
-  }
-
-  /**
-   * Sent when there is an update to directory or file (none of the metadata
-   * tracked here applies to symlinks) that is not associated with another
-   * inotify event. The tracked metadata includes atime/mtime, replication,
-   * owner/group, permissions, ACLs, and XAttributes. Fields not relevant to the
-   * metadataType of the MetadataUpdateEvent will be null or will have their default
-   * values.
-   */
-  public static class MetadataUpdateEvent extends Event {
-
-    public static enum MetadataType {
-      TIMES, REPLICATION, OWNER, PERMS, ACLS, XATTRS;
-    }
-
-    private String path;
-    private MetadataType metadataType;
-    private long mtime;
-    private long atime;
-    private int replication;
-    private String ownerName;
-    private String groupName;
-    private FsPermission perms;
-    private List<AclEntry> acls;
-    private List<XAttr> xAttrs;
-    private boolean xAttrsRemoved;
-
-    public static class Builder {
-      private String path;
-      private MetadataType metadataType;
-      private long mtime;
-      private long atime;
-      private int replication;
-      private String ownerName;
-      private String groupName;
-      private FsPermission perms;
-      private List<AclEntry> acls;
-      private List<XAttr> xAttrs;
-      private boolean xAttrsRemoved;
-
-      public Builder path(String path) {
-        this.path = path;
-        return this;
-      }
-
-      public Builder metadataType(MetadataType type) {
-        this.metadataType = type;
-        return this;
-      }
-
-      public Builder mtime(long mtime) {
-        this.mtime = mtime;
-        return this;
-      }
-
-      public Builder atime(long atime) {
-        this.atime = atime;
-        return this;
-      }
-
-      public Builder replication(int replication) {
-        this.replication = replication;
-        return this;
-      }
-
-      public Builder ownerName(String ownerName) {
-        this.ownerName = ownerName;
-        return this;
-      }
-
-      public Builder groupName(String groupName) {
-        this.groupName = groupName;
-        return this;
-      }
-
-      public Builder perms(FsPermission perms) {
-        this.perms = perms;
-        return this;
-      }
-
-      public Builder acls(List<AclEntry> acls) {
-        this.acls = acls;
-        return this;
-      }
-
-      public Builder xAttrs(List<XAttr> xAttrs) {
-        this.xAttrs = xAttrs;
-        return this;
-      }
-
-      public Builder xAttrsRemoved(boolean xAttrsRemoved) {
-        this.xAttrsRemoved = xAttrsRemoved;
-        return this;
-      }
-
-      public MetadataUpdateEvent build() {
-        return new MetadataUpdateEvent(this);
-      }
-    }
-
-    private MetadataUpdateEvent(Builder b) {
-      super(EventType.METADATA);
-      this.path = b.path;
-      this.metadataType = b.metadataType;
-      this.mtime = b.mtime;
-      this.atime = b.atime;
-      this.replication = b.replication;
-      this.ownerName = b.ownerName;
-      this.groupName = b.groupName;
-      this.perms = b.perms;
-      this.acls = b.acls;
-      this.xAttrs = b.xAttrs;
-      this.xAttrsRemoved = b.xAttrsRemoved;
-    }
-
-    public String getPath() {
-      return path;
-    }
-
-    public MetadataType getMetadataType() {
-      return metadataType;
-    }
-
-    public long getMtime() {
-      return mtime;
-    }
-
-    public long getAtime() {
-      return atime;
-    }
-
-    public int getReplication() {
-      return replication;
-    }
-
-    public String getOwnerName() {
-      return ownerName;
-    }
-
-    public String getGroupName() {
-      return groupName;
-    }
-
-    public FsPermission getPerms() {
-      return perms;
-    }
-
-    /**
-     * The full set of ACLs currently associated with this file or directory.
-     * May be null if all ACLs were removed.
-     */
-    public List<AclEntry> getAcls() {
-      return acls;
-    }
-
-    public List<XAttr> getxAttrs() {
-      return xAttrs;
-    }
-
-    /**
-     * Whether the xAttrs returned by getxAttrs() were removed (as opposed to
-     * added).
-     */
-    public boolean isxAttrsRemoved() {
-      return xAttrsRemoved;
-    }
-
-  }
-
-  /**
-   * Sent when a file, directory, or symlink is renamed.
-   */
-  public static class RenameEvent extends Event {
-    private String srcPath;
-    private String dstPath;
-    private long timestamp;
-
-    public static class Builder {
-      private String srcPath;
-      private String dstPath;
-      private long timestamp;
-
-      public Builder srcPath(String srcPath) {
-        this.srcPath = srcPath;
-        return this;
-      }
-
-      public Builder dstPath(String dstPath) {
-        this.dstPath = dstPath;
-        return this;
-      }
-
-      public Builder timestamp(long timestamp) {
-        this.timestamp = timestamp;
-        return this;
-      }
-
-      public RenameEvent build() {
-        return new RenameEvent(this);
-      }
-    }
-
-    private RenameEvent(Builder builder) {
-      super(EventType.RENAME);
-      this.srcPath = builder.srcPath;
-      this.dstPath = builder.dstPath;
-      this.timestamp = builder.timestamp;
-    }
-
-    public String getSrcPath() {
-      return srcPath;
-    }
-
-    public String getDstPath() {
-      return dstPath;
-    }
-
-    /**
-     * The time when this event occurred, in milliseconds since the epoch.
-     */
-    public long getTimestamp() {
-      return timestamp;
-    }
-  }
-
-  /**
-   * Sent when an existing file is opened for append.
-   */
-  public static class AppendEvent extends Event {
-    private String path;
-    private boolean newBlock;
-
-    public static class Builder {
-      private String path;
-      private boolean newBlock;
-
-      public Builder path(String path) {
-        this.path = path;
-        return this;
-      }
-
-      public Builder newBlock(boolean newBlock) {
-        this.newBlock = newBlock;
-        return this;
-      }
-
-      public AppendEvent build() {
-        return new AppendEvent(this);
-      }
-    }
-
-    private AppendEvent(Builder b) {
-      super(EventType.APPEND);
-      this.path = b.path;
-      this.newBlock = b.newBlock;
-    }
-
-    public String getPath() {
-      return path;
-    }
-
-    public boolean toNewBlock() {
-      return newBlock;
-    }
-  }
-
-  /**
-   * Sent when a file, directory, or symlink is deleted.
-   */
-  public static class UnlinkEvent extends Event {
-    private String path;
-    private long timestamp;
-
-    public static class Builder {
-      private String path;
-      private long timestamp;
-
-      public Builder path(String path) {
-        this.path = path;
-        return this;
-      }
-
-      public Builder timestamp(long timestamp) {
-        this.timestamp = timestamp;
-        return this;
-      }
-
-      public UnlinkEvent build() {
-        return new UnlinkEvent(this);
-      }
-    }
-
-    private UnlinkEvent(Builder builder) {
-      super(EventType.UNLINK);
-      this.path = builder.path;
-      this.timestamp = builder.timestamp;
-    }
-
-    public String getPath() {
-      return path;
-    }
-
-    /**
-     * The time when this event occurred, in milliseconds since the epoch.
-     */
-    public long getTimestamp() {
-      return timestamp;
-    }
-  }
-}
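The Event hierarchy deleted above is the payload of the HDFS inotify feature. A minimal consumer sketch, assuming the stream is obtained through HdfsAdmin.getInotifyEventStream() (the public entry point for this API) and that fs.defaultFS in the Configuration points at the NameNode of interest; the per-event handling is purely illustrative:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;

public class InotifyTail {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at the target NameNode.
    Configuration conf = new Configuration();
    HdfsAdmin admin = new HdfsAdmin(URI.create(conf.get("fs.defaultFS")), conf);
    DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
    while (true) {
      EventBatch batch = stream.take();  // blocks until events are available
      for (Event event : batch.getEvents()) {
        switch (event.getEventType()) {
        case CREATE:
          System.out.println("created " + ((Event.CreateEvent) event).getPath());
          break;
        case CLOSE:
          Event.CloseEvent close = (Event.CloseEvent) event;
          System.out.println("closed " + close.getPath()
              + " at " + close.getFileSize() + " bytes");
          break;
        case UNLINK:
          System.out.println("deleted " + ((Event.UnlinkEvent) event).getPath());
          break;
        default:
          break;  // APPEND, RENAME and METADATA are handled analogously
        }
      }
    }
  }
}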

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/EventBatch.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/EventBatch.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/EventBatch.java
deleted file mode 100644
index 0ad1070..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/EventBatch.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.inotify;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-/**
- * A batch of events that all happened on the same transaction ID.
- */
-@InterfaceAudience.Public
-public class EventBatch {
-  private final long txid;
-  private final Event[] events;
-
-  public EventBatch(long txid, Event[] events) {
-    this.txid = txid;
-    this.events = events;
-  }
-
-  public long getTxid() {
-    return txid;
-  }
-
-  public Event[] getEvents() { return events; }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/EventBatchList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/EventBatchList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/EventBatchList.java
deleted file mode 100644
index 9c97038..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/EventBatchList.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.inotify;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-import java.util.List;
-
-/**
- * Contains a list of event batches, the transaction ID in the edit log up to
- * which we read to produce these events, and the first txid we observed when
- * producing these events (the last of which is for the purpose of determining
- * whether we have missed events due to edit deletion). Also contains the most
- * recent txid that the NameNode has sync'ed, so the client can determine how
- * far behind in the edit log it is.
- */
-@InterfaceAudience.Private
-public class EventBatchList {
-  private List<EventBatch> batches;
-  private long firstTxid;
-  private long lastTxid;
-  private long syncTxid;
-
-  public EventBatchList(List<EventBatch> batches, long firstTxid,
-      long lastTxid, long syncTxid) {
-    this.batches = batches;
-    this.firstTxid = firstTxid;
-    this.lastTxid = lastTxid;
-    this.syncTxid = syncTxid;
-  }
-
-  public List<EventBatch> getBatches() {
-    return batches;
-  }
-
-  public long getFirstTxid() {
-    return firstTxid;
-  }
-
-  public long getLastTxid() {
-    return lastTxid;
-  }
-
-  public long getSyncTxid() {
-    return syncTxid;
-  }
-}
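The firstTxid bookkeeping described in the EventBatchList comment is what lets a client notice that it asked for events older than the oldest retained edits. A sketch of a resumable consumer, assuming the client-side entry points HdfsAdmin.getInotifyEventStream(long) and MissingEventsException; loadCheckpoint/saveCheckpoint/process/resyncFromScratch are hypothetical application hooks, not Hadoop APIs:

import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;
import org.apache.hadoop.hdfs.inotify.MissingEventsException;

public class ResumableTail {
  // Hypothetical persistence hooks; a real application would keep the txid
  // in ZooKeeper, a local file, etc.
  static long loadCheckpoint() { return 0L; }
  static void saveCheckpoint(long txid) { }
  static void process(Event[] events) { }
  static void resyncFromScratch() { }

  public static void run(HdfsAdmin admin) throws Exception {
    long lastReadTxid = loadCheckpoint();
    // Ask the NameNode to replay edits newer than our checkpoint.
    DFSInotifyEventInputStream stream =
        admin.getInotifyEventStream(lastReadTxid);
    try {
      EventBatch batch;
      while ((batch = stream.poll()) != null) {  // non-blocking variant of take()
        process(batch.getEvents());
        saveCheckpoint(batch.getTxid());  // all events in a batch share one txid
      }
    } catch (MissingEventsException e) {
      // The edits containing some requested transactions were already purged,
      // so events were missed; fall back to a full rescan of the namespace.
      resyncFromScratch();
    }
  }
}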

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AlreadyBeingCreatedException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AlreadyBeingCreatedException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AlreadyBeingCreatedException.java
deleted file mode 100644
index cfa4f10..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AlreadyBeingCreatedException.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.protocol;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * The exception that happens when you ask to create a file that already
- * is being created, but is not closed yet.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class AlreadyBeingCreatedException extends IOException {
-  static final long serialVersionUID = 0x12308AD009L;
-  public AlreadyBeingCreatedException(String msg) {
-    super(msg);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
deleted file mode 100644
index 4bfb33b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
+++ /dev/null
@@ -1,271 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.protocol;
-
-import java.util.Arrays;
-import java.util.EnumSet;
-import java.util.LinkedList;
-import java.util.List;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.StorageType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A block storage policy describes how to select the storage types
- * for the replicas of a block.
- */
-@InterfaceAudience.Private
-public class BlockStoragePolicy {
-  public static final Logger LOG = LoggerFactory.getLogger(BlockStoragePolicy
-      .class);
-
-  /** A 4-bit policy ID */
-  private final byte id;
-  /** Policy name */
-  private final String name;
-
-  /** The storage types to store the replicas of a new block. */
-  private final StorageType[] storageTypes;
-  /** The fallback storage type for block creation. */
-  private final StorageType[] creationFallbacks;
-  /** The fallback storage type for replication. */
-  private final StorageType[] replicationFallbacks;
-  /**
-   * Whether the policy is inherited during file creation.
-   * If set then the policy cannot be changed after file creation.
-   */
-  private boolean copyOnCreateFile;
-
-  @VisibleForTesting
-  public BlockStoragePolicy(byte id, String name, StorageType[] storageTypes,
-      StorageType[] creationFallbacks, StorageType[] replicationFallbacks) {
-    this(id, name, storageTypes, creationFallbacks, replicationFallbacks,
-        false);
-  }
-
-  @VisibleForTesting
-  public BlockStoragePolicy(byte id, String name, StorageType[] storageTypes,
-      StorageType[] creationFallbacks, StorageType[] replicationFallbacks,
-      boolean copyOnCreateFile) {
-    this.id = id;
-    this.name = name;
-    this.storageTypes = storageTypes;
-    this.creationFallbacks = creationFallbacks;
-    this.replicationFallbacks = replicationFallbacks;
-    this.copyOnCreateFile = copyOnCreateFile;
-  }
-
-  /**
-   * @return a list of {@link StorageType}s for storing the replicas of a block.
-   */
-  public List<StorageType> chooseStorageTypes(final short replication) {
-    final List<StorageType> types = new LinkedList<StorageType>();
-    int i = 0, j = 0;
-
-    // Do not return transient storage types. We will not have accurate
-    // usage information for transient types.
-    for (;i < replication && j < storageTypes.length; ++j) {
-      if (!storageTypes[j].isTransient()) {
-        types.add(storageTypes[j]);
-        ++i;
-      }
-    }
-
-    final StorageType last = storageTypes[storageTypes.length - 1];
-    if (!last.isTransient()) {
-      for (; i < replication; i++) {
-        types.add(last);
-      }
-    }
-    return types;
-  }
-
-  /**
-   * Choose the storage types for storing the remaining replicas, given the
-   * replication number and the storage types of the chosen replicas.
-   *
-   * @param replication the replication number.
-   * @param chosen the storage types of the chosen replicas.
-   * @return a list of {@link StorageType}s for storing the replicas of a block.
-   */
-  public List<StorageType> chooseStorageTypes(final short replication,
-      final Iterable<StorageType> chosen) {
-    return chooseStorageTypes(replication, chosen, null);
-  }
-
-  private List<StorageType> chooseStorageTypes(final short replication,
-      final Iterable<StorageType> chosen, final List<StorageType> excess) {
-    final List<StorageType> types = chooseStorageTypes(replication);
-    diff(types, chosen, excess);
-    return types;
-  }
-
-  /**
-   * Choose the storage types for storing the remaining replicas, given the
-   * replication number, the storage types of the chosen replicas and
-   * the unavailable storage types. It uses fallback storage in case that
-   * the desired storage type is unavailable.
-   *
-   * @param replication the replication number.
-   * @param chosen the storage types of the chosen replicas.
-   * @param unavailables the unavailable storage types.
-   * @param isNewBlock Is it for new block creation?
-   * @return a list of {@link StorageType}s for storing the replicas of a block.
-   */
-  public List<StorageType> chooseStorageTypes(final short replication,
-      final Iterable<StorageType> chosen,
-      final EnumSet<StorageType> unavailables,
-      final boolean isNewBlock) {
-    final List<StorageType> excess = new LinkedList<StorageType>();
-    final List<StorageType> storageTypes = chooseStorageTypes(
-        replication, chosen, excess);
-    final int expectedSize = storageTypes.size() - excess.size();
-    final List<StorageType> removed = new LinkedList<StorageType>();
-    for(int i = storageTypes.size() - 1; i >= 0; i--) {
-      // replace/remove unavailable storage types.
-      final StorageType t = storageTypes.get(i);
-      if (unavailables.contains(t)) {
-        final StorageType fallback = isNewBlock?
-            getCreationFallback(unavailables)
-            : getReplicationFallback(unavailables);
-        if (fallback == null) {
-          removed.add(storageTypes.remove(i));
-        } else {
-          storageTypes.set(i, fallback);
-        }
-      }
-    }
-    // remove excess storage types after fallback replacement.
-    diff(storageTypes, excess, null);
-    if (storageTypes.size() < expectedSize) {
-      LOG.warn("Failed to place enough replicas: expected size is " + expectedSize
-          + " but only " + storageTypes.size() + " storage types can be selected "
-          + "(replication=" + replication
-          + ", selected=" + storageTypes
-          + ", unavailable=" + unavailables
-          + ", removed=" + removed
-          + ", policy=" + this + ")");
-    }
-    return storageTypes;
-  }
-
-  /**
-   * Compute the difference between two lists t and c so that after the diff
-   * computation we have: t = t - c;
-   * Further, if e is not null, set e = e + c - t;
-   */
-  private static void diff(List<StorageType> t, Iterable<StorageType> c,
-      List<StorageType> e) {
-    for(StorageType storagetype : c) {
-      final int i = t.indexOf(storagetype);
-      if (i >= 0) {
-        t.remove(i);
-      } else if (e != null) {
-        e.add(storagetype);
-      }
-    }
-  }
-
-  /**
-   * Choose excess storage types for deletion, given the
-   * replication number and the storage types of the chosen replicas.
-   *
-   * @param replication the replication number.
-   * @param chosen the storage types of the chosen replicas.
-   * @return a list of {@link StorageType}s for deletion.
-   */
-  public List<StorageType> chooseExcess(final short replication,
-      final Iterable<StorageType> chosen) {
-    final List<StorageType> types = chooseStorageTypes(replication);
-    final List<StorageType> excess = new LinkedList<StorageType>();
-    diff(types, chosen, excess);
-    return excess;
-  }
-
-  /** @return the fallback {@link StorageType} for creation. */
-  public StorageType getCreationFallback(EnumSet<StorageType> unavailables) {
-    return getFallback(unavailables, creationFallbacks);
-  }
-
-  /** @return the fallback {@link StorageType} for replication. */
-  public StorageType getReplicationFallback(EnumSet<StorageType> unavailables) {
-    return getFallback(unavailables, replicationFallbacks);
-  }
-
-  @Override
-  public int hashCode() {
-    return Byte.valueOf(id).hashCode();
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == this) {
-      return true;
-    } else if (obj == null || !(obj instanceof BlockStoragePolicy)) {
-      return false;
-    }
-    final BlockStoragePolicy that = (BlockStoragePolicy)obj;
-    return this.id == that.id;
-  }
-
-  @Override
-  public String toString() {
-    return getClass().getSimpleName() + "{" + name + ":" + id
-        + ", storageTypes=" + Arrays.asList(storageTypes)
-        + ", creationFallbacks=" + Arrays.asList(creationFallbacks)
-        + ", replicationFallbacks=" + Arrays.asList(replicationFallbacks) + "}";
-  }
-
-  public byte getId() {
-    return id;
-  }
-
-  public String getName() {
-    return name;
-  }
-
-  public StorageType[] getStorageTypes() {
-    return this.storageTypes;
-  }
-
-  public StorageType[] getCreationFallbacks() {
-    return this.creationFallbacks;
-  }
-
-  public StorageType[] getReplicationFallbacks() {
-    return this.replicationFallbacks;
-  }
-
-  private static StorageType getFallback(EnumSet<StorageType> unavailables,
-      StorageType[] fallbacks) {
-    for(StorageType fb : fallbacks) {
-      if (!unavailables.contains(fb)) {
-        return fb;
-      }
-    }
-    return null;
-  }
-
-  public boolean isCopyOnCreateFile() {
-    return copyOnCreateFile;
-  }
-}
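The selection loop in chooseStorageTypes walks the policy's storage types in order and then pads the list with the last type, while the four-argument overload substitutes fallbacks for unavailable types. A small worked sketch built directly on the deleted class; the id/name are illustrative, not the production policy table's values:

import java.util.Collections;
import java.util.EnumSet;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

public class StoragePolicyDemo {
  public static void main(String[] args) {
    // ONE_SSD-like policy: first replica on SSD, the rest on DISK.
    BlockStoragePolicy oneSsd = new BlockStoragePolicy((byte) 10, "ONE_SSD",
        new StorageType[] { StorageType.SSD, StorageType.DISK },
        new StorageType[] { StorageType.SSD, StorageType.DISK },
        new StorageType[] { StorageType.SSD, StorageType.DISK });

    // The first pass consumes [SSD, DISK]; the last type (DISK) then pads the
    // list, so replication 3 yields [SSD, DISK, DISK].
    System.out.println(oneSsd.chooseStorageTypes((short) 3));

    // With SSD marked unavailable for a new block, the creation fallback
    // (DISK) is substituted, yielding [DISK, DISK, DISK].
    System.out.println(oneSsd.chooseStorageTypes((short) 3,
        Collections.<StorageType>emptyList(),
        EnumSet.of(StorageType.SSD), true));
  }
}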

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java
deleted file mode 100644
index 6065786..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import java.util.Arrays;
-
-/**
- * Contains a list of paths corresponding to corrupt files and a cookie
- * used for iterative calls to NameNode.listCorruptFileBlocks.
- *
- */
-public class CorruptFileBlocks {
-  // used for hashCode
-  private static final int PRIME = 16777619;
-
-  private final String[] files;
-  private final String cookie;
-
-  public CorruptFileBlocks() {
-    this(new String[0], "");
-  }
-
-  public CorruptFileBlocks(String[] files, String cookie) {
-    this.files = files;
-    this.cookie = cookie;
-  }
-
-  public String[] getFiles() {
-    return files;
-  }
-
-  public String getCookie() {
-    return cookie;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj) {
-      return true;
-    }
-    if (!(obj instanceof CorruptFileBlocks)) {
-      return false;
-    }
-    CorruptFileBlocks other = (CorruptFileBlocks) obj;
-    return cookie.equals(other.cookie) &&
-        Arrays.equals(files, other.files);
-  }
-
-
-  @Override
-  public int hashCode() {
-    int result = cookie.hashCode();
-
-    for (String file : files) {
-      result = PRIME * result + file.hashCode();
-    }
-
-    return result;
-  }
-}
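The cookie in CorruptFileBlocks is an opaque resume token for paging through the NameNode's corrupt-file list. A sketch of the iterative call pattern the class comment describes, assuming an already-wired ClientProtocol proxy named namenode:

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;

public class CorruptFileLister {
  public static void listAll(ClientProtocol namenode) throws IOException {
    String cookie = null;
    CorruptFileBlocks chunk;
    do {
      chunk = namenode.listCorruptFileBlocks("/", cookie);
      for (String file : chunk.getFiles()) {
        System.out.println("corrupt: " + file);
      }
      cookie = chunk.getCookie();  // opaque position marker for the next call
    } while (chunk.getFiles().length > 0);
  }
}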

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java
deleted file mode 100644
index 481c130..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import static org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix.long2String;
-
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class DSQuotaExceededException extends QuotaExceededException {
-  protected static final long serialVersionUID = 1L;
-
-  public DSQuotaExceededException() {}
-
-  public DSQuotaExceededException(String msg) {
-    super(msg);
-  }
-
-  public DSQuotaExceededException(long quota, long count) {
-    super(quota, count);
-  }
-
-  @Override
-  public String getMessage() {
-    String msg = super.getMessage();
-    if (msg == null) {
-      return "The DiskSpace quota" + (pathName==null?"": " of " + pathName)
-          + " is exceeded: quota = " + quota + " B = " + long2String(quota, "B", 2)
-          + " but diskspace consumed = " + count + " B = " + long2String(count, "B", 2);
-    } else {
-      return msg;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java
deleted file mode 100644
index b7b2289..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-
-/**
- * Locally available datanode information
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class DatanodeLocalInfo {
-  private final String softwareVersion;
-  private final String configVersion;
-  private final long uptime; // datanode uptime in seconds.
-
-  public DatanodeLocalInfo(String softwareVersion,
-      String configVersion, long uptime) {
-    this.softwareVersion = softwareVersion;
-    this.configVersion = configVersion;
-    this.uptime = uptime;
-  }
-
-  /** get software version */
-  public String getSoftwareVersion() {
-    return this.softwareVersion;
-  }
-
-  /** get config version */
-  public String getConfigVersion() {
-    return this.configVersion;
-  }
-
-  /** get uptime */
-  public long getUptime() {
-    return this.uptime;
-  }
-
-  /** A formatted string for printing the status of the DataNode. */
-  public String getDatanodeLocalReport() {
-    StringBuilder buffer = new StringBuilder();
-    buffer.append("Uptime: " + getUptime());
-    buffer.append(", Software version: " + getSoftwareVersion());
-    buffer.append(", Config version: " + getConfigVersion());
-    return buffer.toString();
-  }
-}
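DatanodeLocalInfo is what a DataNode returns about itself over ClientDatanodeProtocol. A sketch of the consumer side, assuming an already-wired ClientDatanodeProtocol proxy; this is roughly what `hdfs dfsadmin -getDatanodeInfo <host:port>` prints:

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;

public class DatanodeReport {
  public static void print(ClientDatanodeProtocol dn) throws IOException {
    DatanodeLocalInfo info = dn.getDatanodeInfo();
    System.out.println(info.getDatanodeLocalReport());
  }
}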

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DirectoryListing.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DirectoryListing.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DirectoryListing.java
deleted file mode 100644
index 169287b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DirectoryListing.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/* Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * This class defines a partial listing of a directory to support
- * iterative directory listing.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class DirectoryListing {
-  private HdfsFileStatus[] partialListing;
-  private int remainingEntries;
-
-  /**
-   * constructor
-   * @param partialListing a partial listing of a directory
-   * @param remainingEntries number of entries that are left to be listed
-   */
-  public DirectoryListing(HdfsFileStatus[] partialListing,
-      int remainingEntries) {
-    if (partialListing == null) {
-      throw new IllegalArgumentException("partial listing should not be null");
-    }
-    if (partialListing.length == 0 && remainingEntries != 0) {
-      throw new IllegalArgumentException("Partial listing is empty but " +
-          "the number of remaining entries is not zero");
-    }
-    this.partialListing = partialListing;
-    this.remainingEntries = remainingEntries;
-  }
-
-  /**
-   * Get the partial listing of file status
-   * @return the partial listing of file status
-   */
-  public HdfsFileStatus[] getPartialListing() {
-    return partialListing;
-  }
-
-  /**
-   * Get the number of remaining entries that are left to be listed
-   * @return the number of remaining entries that are left to be listed
-   */
-  public int getRemainingEntries() {
-    return remainingEntries;
-  }
-
-  /**
-   * Check if there are more entries that are left to be listed
-   * @return true if there are more entries that are left to be listed;
-   *         return false otherwise.
-   */
-  public boolean hasMore() {
-    return remainingEntries != 0;
-  }
-
-  /**
-   * Get the last name in this list
-   * @return the last name in the list if it is not empty; otherwise return null
-   */
-  public byte[] getLastName() {
-    if (partialListing.length == 0) {
-      return null;
-    }
-    return partialListing[partialListing.length-1].getLocalNameInBytes();
-  }
-}
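getLastName() and hasMore() together drive the paging protocol: each getListing call resumes after the last name returned by the previous page. A sketch, assuming an already-wired ClientProtocol proxy; the path "/user/alice" is illustrative:

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class PagedLister {
  public static void listDir(ClientProtocol namenode) throws IOException {
    byte[] startAfter = HdfsFileStatus.EMPTY_NAME;  // empty name = first page
    DirectoryListing page;
    do {
      page = namenode.getListing("/user/alice", startAfter, false);
      for (HdfsFileStatus stat : page.getPartialListing()) {
        System.out.println(stat.getLocalName());
      }
      startAfter = page.getLastName();  // resume after the last entry returned
    } while (page.hasMore());
  }
}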

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java
deleted file mode 100644
index f1441b5..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.crypto.CipherSuite;
-import org.apache.hadoop.crypto.CryptoProtocolVersion;
-
-/**
- * A simple class for representing an encryption zone. Presently an encryption
- * zone only has a path (the root of the encryption zone), a key name, and a
- * unique id. The id is used to implement batched listing of encryption zones.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public class EncryptionZone {
-
-  private final long id;
-  private final String path;
-  private final CipherSuite suite;
-  private final CryptoProtocolVersion version;
-  private final String keyName;
-
-  public EncryptionZone(long id, String path, CipherSuite suite,
-      CryptoProtocolVersion version, String keyName) {
-    this.id = id;
-    this.path = path;
-    this.suite = suite;
-    this.version = version;
-    this.keyName = keyName;
-  }
-
-  public long getId() {
-    return id;
-  }
-
-  public String getPath() {
-    return path;
-  }
-
-  public CipherSuite getSuite() {
-    return suite;
-  }
-
-  public CryptoProtocolVersion getVersion() { return version; }
-
-  public String getKeyName() {
-    return keyName;
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(13, 31)
-        .append(id)
-        .append(path)
-        .append(suite)
-        .append(version)
-        .append(keyName).
-        toHashCode();
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) {
-      return false;
-    }
-    if (obj == this) {
-      return true;
-    }
-    if (obj.getClass() != getClass()) {
-      return false;
-    }
-
-    EncryptionZone rhs = (EncryptionZone) obj;
-    return new EqualsBuilder().
-        append(id, rhs.id).
-        append(path, rhs.path).
-        append(suite, rhs.suite).
-        append(version, rhs.version).
-        append(keyName, rhs.keyName).
-        isEquals();
-  }
-
-  @Override
-  public String toString() {
-    return "EncryptionZone [id=" + id +
-        ", path=" + path +
-        ", suite=" + suite +
-        ", version=" + version +
-        ", keyName=" + keyName + "]";
-  }
-}
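EncryptionZone objects are what the client-side admin API hands back when zones are created and listed. A sketch using HdfsAdmin; "/secure" and "zoneKey" are illustrative, and the key must already exist in the configured KMS:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;

public class EzDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    HdfsAdmin admin = new HdfsAdmin(URI.create(conf.get("fs.defaultFS")), conf);

    // Make an existing empty directory an encryption zone keyed by "zoneKey".
    admin.createEncryptionZone(new Path("/secure"), "zoneKey");

    RemoteIterator<EncryptionZone> zones = admin.listEncryptionZones();
    while (zones.hasNext()) {
      EncryptionZone zone = zones.next();
      System.out.println(zone.getPath() + " -> " + zone.getKeyName());
    }
  }
}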

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java
deleted file mode 100644
index 1cd80f9..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Class to contain Lastblock and HdfsFileStatus for the Append operation
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class LastBlockWithStatus {
-
-  private final LocatedBlock lastBlock;
-
-  private final HdfsFileStatus fileStatus;
-
-  public LastBlockWithStatus(LocatedBlock lastBlock, HdfsFileStatus fileStatus) {
-    this.lastBlock = lastBlock;
-    this.fileStatus = fileStatus;
-  }
-
-  public LocatedBlock getLastBlock() {
-    return lastBlock;
-  }
-
-  public HdfsFileStatus getFileStatus() {
-    return fileStatus;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java
deleted file mode 100644
index 7c28cd9..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public final class NSQuotaExceededException extends QuotaExceededException {
-  protected static final long serialVersionUID = 1L;
-
-  private String prefix;
-
-  public NSQuotaExceededException() {}
-
-  public NSQuotaExceededException(String msg) {
-    super(msg);
-  }
-
-  public NSQuotaExceededException(long quota, long count) {
-    super(quota, count);
-  }
-
-  @Override
-  public String getMessage() {
-    String msg = super.getMessage();
-    if (msg == null) {
-      msg = "The NameSpace quota (directories and files)" +
-          (pathName==null?"":(" of directory " + pathName)) +
-          " is exceeded: quota=" + quota + " file count=" + count;
-
-      if (prefix != null) {
-        msg = prefix + ": " + msg;
-      }
-    }
-    return msg;
-  }
-
-  /** Set a prefix for the error message. */
-  public void setMessagePrefix(final String prefix) {
-    this.prefix = prefix;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
deleted file mode 100644
index 6dfed79..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.protocol;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * This exception is thrown when modification to HDFS results in violation
- * of a directory quota. A directory quota might be namespace quota (limit
- * on number of files and directories) or a diskspace quota (limit on space
- * taken by all the file under the directory tree). <br> <br>
- *
- * The message for the exception specifies the directory where the quota
- * was violated and actual quotas. Specific message is generated in the
- * corresponding Exception class:
- * DSQuotaExceededException or
- * NSQuotaExceededException
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class QuotaExceededException extends IOException {
-  protected static final long serialVersionUID = 1L;
-  protected String pathName=null;
-  protected long quota; // quota
-  protected long count; // actual value
-
-  protected QuotaExceededException() {}
-
-  protected QuotaExceededException(String msg) {
-    super(msg);
-  }
-
-  protected QuotaExceededException(long quota, long count) {
-    this.quota = quota;
-    this.count = count;
-  }
-
-  public void setPathName(String path) {
-    this.pathName = path;
-  }
-
-  @Override
-  public String getMessage() {
-    return super.getMessage();
-  }
-}
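The two subclasses above correspond to the two quota kinds the base class describes. A sketch of setting both quotas and handling the resulting exceptions, assuming an already-opened DistributedFileSystem; the path and limits are illustrative:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;

public class QuotaDemo {
  public static void demo(DistributedFileSystem fs) throws Exception {
    Path dir = new Path("/projects/reports");
    // 1000 names, 10 GB of space (space is charged for all replicas).
    fs.setQuota(dir, 1000, 10L * 1024 * 1024 * 1024);
    try {
      fs.create(new Path(dir, "data.bin")).close();
    } catch (NSQuotaExceededException e) {
      System.err.println("name quota hit: " + e.getMessage());
    } catch (DSQuotaExceededException e) {
      // Space quota violations can also surface later, on write or close.
      System.err.println("space quota hit: " + e.getMessage());
    }
  }
}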

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java
deleted file mode 100644
index 80e3e34..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import java.util.Date;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Rolling upgrade information
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class RollingUpgradeInfo extends RollingUpgradeStatus {
-  private final long startTime;
-  private long finalizeTime;
-  private boolean createdRollbackImages;
-
-  public RollingUpgradeInfo(String blockPoolId, boolean createdRollbackImages,
-      long startTime, long finalizeTime) {
-    super(blockPoolId, finalizeTime != 0);
-    this.createdRollbackImages = createdRollbackImages;
-    this.startTime = startTime;
-    this.finalizeTime = finalizeTime;
-  }
-
-  public boolean createdRollbackImages() {
-    return createdRollbackImages;
-  }
-
-  public void setCreatedRollbackImages(boolean created) {
-    this.createdRollbackImages = created;
-  }
-
-  public boolean isStarted() {
-    return startTime != 0;
-  }
-
-  /** @return The rolling upgrade starting time. */
-  public long getStartTime() {
-    return startTime;
-  }
-
-  @Override
-  public boolean isFinalized() {
-    return finalizeTime != 0;
-  }
-
-  /**
-   * Finalize the upgrade if not already finalized
-   * @param finalizeTime
-   */
-  public void finalize(long finalizeTime) {
-    if (finalizeTime != 0) {
-      this.finalizeTime = finalizeTime;
-      createdRollbackImages = false;
-    }
-  }
-
-  public long getFinalizeTime() {
-    return finalizeTime;
-  }
-
-  @Override
-  public int hashCode() {
-    //only use lower 32 bits
-    return super.hashCode() ^ (int)startTime ^ (int)finalizeTime;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == this) {
-      return true;
-    } else if (obj == null || !(obj instanceof RollingUpgradeInfo)) {
-      return false;
-    }
-    final RollingUpgradeInfo that = (RollingUpgradeInfo)obj;
-    return super.equals(that)
-        && this.startTime == that.startTime
-        && this.finalizeTime == that.finalizeTime;
-  }
-
-  @Override
-  public String toString() {
-    return super.toString()
-        + "\n Start Time: " + (startTime == 0? "<NOT STARTED>": timestamp2String(startTime))
-        + "\n Finalize Time: " + (finalizeTime == 0? "<NOT FINALIZED>": timestamp2String(finalizeTime));
-  }
-
-  private static String timestamp2String(long timestamp) {
-    return new Date(timestamp) + " (=" + timestamp + ")";
-  }
-
-  public static class Bean {
-    private final String blockPoolId;
-    private final long startTime;
-    private final long finalizeTime;
-    private final boolean createdRollbackImages;
-
-    public Bean(RollingUpgradeInfo f) {
-      this.blockPoolId = f.getBlockPoolId();
-      this.startTime = f.startTime;
-      this.finalizeTime = f.finalizeTime;
-      this.createdRollbackImages = f.createdRollbackImages();
-    }
-
-    public String getBlockPoolId() {
-      return blockPoolId;
-    }
-
-    public long getStartTime() {
-      return startTime;
-    }
-
-    public long getFinalizeTime() {
-      return finalizeTime;
-    }
-
-    public boolean isCreatedRollbackImages() {
-      return createdRollbackImages;
-    }
-  }
-}
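RollingUpgradeInfo is what clients get back when they query upgrade progress. A sketch, assuming an already-opened DistributedFileSystem; this mirrors `hdfs dfsadmin -rollingUpgrade query`:

import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;

public class UpgradeStatusCheck {
  public static void query(DistributedFileSystem fs) throws Exception {
    RollingUpgradeInfo info = fs.rollingUpgrade(RollingUpgradeAction.QUERY);
    if (info == null || !info.isStarted()) {
      System.out.println("No rolling upgrade is in progress.");
    } else if (!info.createdRollbackImages()) {
      System.out.println("Preparing rollback image; query again shortly.");
    } else {
      System.out.println(info);  // toString() includes start/finalize times
    }
  }
}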

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java
deleted file mode 100644
index 1f969fb..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Rolling upgrade status
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class RollingUpgradeStatus {
-  private final String blockPoolId;
-  private final boolean finalized;
-
-  public RollingUpgradeStatus(String blockPoolId, boolean finalized) {
-    this.blockPoolId = blockPoolId;
-    this.finalized = finalized;
-  }
-
-  public String getBlockPoolId() {
-    return blockPoolId;
-  }
-
-  public boolean isFinalized() {
-    return finalized;
-  }
-
-  @Override
-  public int hashCode() {
-    return blockPoolId.hashCode();
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == this) {
-      return true;
-    } else if (obj == null || !(obj instanceof RollingUpgradeStatus)) {
-      return false;
-    }
-    final RollingUpgradeStatus that = (RollingUpgradeStatus) obj;
-    return this.blockPoolId.equals(that.blockPoolId)
-        && this.isFinalized() == that.isFinalized();
-  }
-
-  @Override
-  public String toString() {
-    return " Block Pool ID: " + blockPoolId;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotAccessControlException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotAccessControlException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotAccessControlException.java
deleted file mode 100644
index 2def72f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotAccessControlException.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import org.apache.hadoop.security.AccessControlException;
-
-/** Snapshot access related exception. */
-public class SnapshotAccessControlException extends AccessControlException {
-  private static final long serialVersionUID = 1L;
-
-  public SnapshotAccessControlException(final String message) {
-    super(message);
-  }
-
-  public SnapshotAccessControlException(final Throwable cause) {
-    super(cause);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/DataEncryptionKey.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/DataEncryptionKey.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/DataEncryptionKey.java
deleted file mode 100644
index 41c84e2..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/DataEncryptionKey.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.security.token.block;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-/**
- * A little struct class to contain all fields required to perform encryption of
- * the DataTransferProtocol.
- */
-@InterfaceAudience.Private
-public class DataEncryptionKey {
-  public final int keyId;
-  public final String blockPoolId;
-  public final byte[] nonce;
-  public final byte[] encryptionKey;
-  public final long expiryDate;
-  public final String encryptionAlgorithm;
-
-  public DataEncryptionKey(int keyId, String blockPoolId, byte[] nonce,
-      byte[] encryptionKey, long expiryDate, String encryptionAlgorithm) {
-    this.keyId = keyId;
-    this.blockPoolId = blockPoolId;
-    this.nonce = nonce;
-    this.encryptionKey = encryptionKey;
-    this.expiryDate = expiryDate;
-    this.encryptionAlgorithm = encryptionAlgorithm;
-  }
-
-  @Override
-  public String toString() {
-    return keyId + "/" + blockPoolId + "/" + nonce.length + "/" +
-        encryptionKey.length;
-  }
-}
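DataEncryptionKey only carries key material; whether it is used at all is driven by cluster configuration. A sketch of the settings that, to my understanding, cause clients and DataNodes to negotiate encrypted DataTransferProtocol traffic; the algorithm value is illustrative:

import org.apache.hadoop.conf.Configuration;

public class EncryptedTransferConf {
  public static Configuration build() {
    Configuration conf = new Configuration();
    // With this enabled cluster-wide, clients obtain a DataEncryptionKey
    // from the NameNode and encrypt block transfers to/from DataNodes.
    conf.setBoolean("dfs.encrypt.data.transfer", true);
    // Optional cipher choice; "3des" and "rc4" are the classic values.
    conf.set("dfs.encrypt.data.transfer.algorithm", "3des");
    return conf;
  }
}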