http://git-wip-us.apache.org/repos/asf/hbase/blob/6ddb2f19/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java ---------------------------------------------------------------------- diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java new file mode 100644 index 0000000..3b044e7 --- /dev/null +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java @@ -0,0 +1,790 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.rest.model; + +import java.io.IOException; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; + +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlElementWrapper; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.hadoop.hbase.util.ByteStringer; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * Representation of the status of a storage cluster: + * <p> + * <ul> + * <li>regions: the total number of regions served by the cluster</li> + * <li>requests: the total number of requests per second handled by the + * cluster in the last reporting interval</li> + * <li>averageLoad: the average load of the region servers in the cluster</li> + * <li>liveNodes: detailed status of the live region servers</li> + * <li>deadNodes: the names of region servers declared dead</li> + * </ul> + * + * <pre> + * <complexType name="StorageClusterStatus"> + * <sequence> + * <element name="liveNode" type="tns:Node" + * maxOccurs="unbounded" minOccurs="0"> + * </element> + * <element name="deadNode" type="string" maxOccurs="unbounded" + * minOccurs="0"> + * </element> + * </sequence> + * <attribute name="regions" type="int"></attribute> + * <attribute name="requests" type="int"></attribute> + * <attribute name="averageLoad" type="float"></attribute> + * </complexType> + * + * <complexType name="Node"> + * <sequence> + * <element name="region" type="tns:Region" + * maxOccurs="unbounded" minOccurs="0"></element> + * </sequence> + * <attribute name="name" type="string"></attribute> + * <attribute name="startCode" type="int"></attribute> + * <attribute name="requests" type="int"></attribute> + * 
<attribute name="heapSizeMB" type="int"></attribute> + * <attribute name="maxHeapSizeMB" type="int"></attribute> + * </complexType> + * + * <complexType name="Region"> + * <attribute name="name" type="base64Binary"></attribute> + * <attribute name="stores" type="int"></attribute> + * <attribute name="storefiles" type="int"></attribute> + * <attribute name="storefileSizeMB" type="int"></attribute> + * <attribute name="memstoreSizeMB" type="int"></attribute> + * <attribute name="storefileIndexSizeMB" type="int"></attribute> + * <attribute name="readRequestsCount" type="int"></attribute> + * <attribute name="writeRequestsCount" type="int"></attribute> + * <attribute name="rootIndexSizeKB" type="int"></attribute> + * <attribute name="totalStaticIndexSizeKB" type="int"></attribute> + * <attribute name="totalStaticBloomSizeKB" type="int"></attribute> + * <attribute name="totalCompactingKVs" type="int"></attribute> + * <attribute name="currentCompactedKVs" type="int"></attribute> + * </complexType> + * </pre> + */ +@XmlRootElement(name="ClusterStatus") [email protected] +public class StorageClusterStatusModel + implements Serializable, ProtobufMessageHandler { + private static final long serialVersionUID = 1L; + + /** + * Represents a region server. + */ + public static class Node implements Serializable { + private static final long serialVersionUID = 1L; + + /** + * Represents a region hosted on a region server. 
+ */ + public static class Region { + private byte[] name; + private int stores; + private int storefiles; + private int storefileSizeMB; + private int memstoreSizeMB; + private int storefileIndexSizeMB; + private long readRequestsCount; + private long writeRequestsCount; + private int rootIndexSizeKB; + private int totalStaticIndexSizeKB; + private int totalStaticBloomSizeKB; + private long totalCompactingKVs; + private long currentCompactedKVs; + + /** + * Default constructor + */ + public Region() { + } + + /** + * Constructor + * @param name the region name + */ + public Region(byte[] name) { + this.name = name; + } + + /** + * Constructor + * @param name the region name + * @param stores the number of stores + * @param storefiles the number of store files + * @param storefileSizeMB total size of store files, in MB + * @param memstoreSizeMB total size of memstore, in MB + * @param storefileIndexSizeMB total size of store file indexes, in MB + */ + public Region(byte[] name, int stores, int storefiles, + int storefileSizeMB, int memstoreSizeMB, int storefileIndexSizeMB, + long readRequestsCount, long writeRequestsCount, int rootIndexSizeKB, + int totalStaticIndexSizeKB, int totalStaticBloomSizeKB, + long totalCompactingKVs, long currentCompactedKVs) { + this.name = name; + this.stores = stores; + this.storefiles = storefiles; + this.storefileSizeMB = storefileSizeMB; + this.memstoreSizeMB = memstoreSizeMB; + this.storefileIndexSizeMB = storefileIndexSizeMB; + this.readRequestsCount = readRequestsCount; + this.writeRequestsCount = writeRequestsCount; + this.rootIndexSizeKB = rootIndexSizeKB; + this.totalStaticIndexSizeKB = totalStaticIndexSizeKB; + this.totalStaticBloomSizeKB = totalStaticBloomSizeKB; + this.totalCompactingKVs = totalCompactingKVs; + this.currentCompactedKVs = currentCompactedKVs; + } + + /** + * @return the region name + */ + @XmlAttribute + public byte[] getName() { + return name; + } + + /** + * @return the number of stores + */ + 
@XmlAttribute + public int getStores() { + return stores; + } + + /** + * @return the number of store files + */ + @XmlAttribute + public int getStorefiles() { + return storefiles; + } + + /** + * @return the total size of store files, in MB + */ + @XmlAttribute + public int getStorefileSizeMB() { + return storefileSizeMB; + } + + /** + * @return memstore size, in MB + */ + @XmlAttribute + public int getMemstoreSizeMB() { + return memstoreSizeMB; + } + + /** + * @return the total size of store file indexes, in MB + */ + @XmlAttribute + public int getStorefileIndexSizeMB() { + return storefileIndexSizeMB; + } + + /** + * @return the current total read requests made to region + */ + @XmlAttribute + public long getReadRequestsCount() { + return readRequestsCount; + } + + /** + * @return the current total write requests made to region + */ + @XmlAttribute + public long getWriteRequestsCount() { + return writeRequestsCount; + } + + /** + * @return The current total size of root-level indexes for the region, in KB. 
+ */ + @XmlAttribute + public int getRootIndexSizeKB() { + return rootIndexSizeKB; + } + + /** + * @return The total size of static index, in KB + */ + @XmlAttribute + public int getTotalStaticIndexSizeKB() { + return totalStaticIndexSizeKB; + } + + /** + * @return The total size of static bloom, in KB + */ + @XmlAttribute + public int getTotalStaticBloomSizeKB() { + return totalStaticBloomSizeKB; + } + + /** + * @return The total number of compacting key-values + */ + @XmlAttribute + public long getTotalCompactingKVs() { + return totalCompactingKVs; + } + + /** + * @return The number of current compacted key-values + */ + @XmlAttribute + public long getCurrentCompactedKVs() { + return currentCompactedKVs; + } + + /** + * @param readRequestsCount The current total read requests made to region + */ + public void setReadRequestsCount(long readRequestsCount) { + this.readRequestsCount = readRequestsCount; + } + + /** + * @param rootIndexSizeKB The current total size of root-level indexes + * for the region, in KB + */ + public void setRootIndexSizeKB(int rootIndexSizeKB) { + this.rootIndexSizeKB = rootIndexSizeKB; + } + + /** + * @param writeRequestsCount The current total write requests made to region + */ + public void setWriteRequestsCount(long writeRequestsCount) { + this.writeRequestsCount = writeRequestsCount; + } + + /** + * @param currentCompactedKVs The completed count of key values + * in currently running compaction + */ + public void setCurrentCompactedKVs(long currentCompactedKVs) { + this.currentCompactedKVs = currentCompactedKVs; + } + + /** + * @param totalCompactingKVs The total compacting key values + * in currently running compaction + */ + public void setTotalCompactingKVs(long totalCompactingKVs) { + this.totalCompactingKVs = totalCompactingKVs; + } + + /** + * @param totalStaticBloomSizeKB The total size of all Bloom filter blocks, + * not just loaded into the block cache, in KB. 
+ */ + public void setTotalStaticBloomSizeKB(int totalStaticBloomSizeKB) { + this.totalStaticBloomSizeKB = totalStaticBloomSizeKB; + } + + /** + * @param totalStaticIndexSizeKB The total size of all index blocks, + * not just the root level, in KB. + */ + public void setTotalStaticIndexSizeKB(int totalStaticIndexSizeKB) { + this.totalStaticIndexSizeKB = totalStaticIndexSizeKB; + } + + /** + * @param name the region name + */ + public void setName(byte[] name) { + this.name = name; + } + + /** + * @param stores the number of stores + */ + public void setStores(int stores) { + this.stores = stores; + } + + /** + * @param storefiles the number of store files + */ + public void setStorefiles(int storefiles) { + this.storefiles = storefiles; + } + + /** + * @param storefileSizeMB total size of store files, in MB + */ + public void setStorefileSizeMB(int storefileSizeMB) { + this.storefileSizeMB = storefileSizeMB; + } + + /** + * @param memstoreSizeMB memstore size, in MB + */ + public void setMemstoreSizeMB(int memstoreSizeMB) { + this.memstoreSizeMB = memstoreSizeMB; + } + + /** + * @param storefileIndexSizeMB total size of store file indexes, in MB + */ + public void setStorefileIndexSizeMB(int storefileIndexSizeMB) { + this.storefileIndexSizeMB = storefileIndexSizeMB; + } + } + + private String name; + private long startCode; + private int requests; + private int heapSizeMB; + private int maxHeapSizeMB; + private List<Region> regions = new ArrayList<Region>(); + + /** + * Add a region name to the list + * @param name the region name + */ + public void addRegion(byte[] name, int stores, int storefiles, + int storefileSizeMB, int memstoreSizeMB, int storefileIndexSizeMB, + long readRequestsCount, long writeRequestsCount, int rootIndexSizeKB, + int totalStaticIndexSizeKB, int totalStaticBloomSizeKB, + long totalCompactingKVs, long currentCompactedKVs) { + regions.add(new Region(name, stores, storefiles, storefileSizeMB, + memstoreSizeMB, storefileIndexSizeMB, 
readRequestsCount, + writeRequestsCount, rootIndexSizeKB, totalStaticIndexSizeKB, + totalStaticBloomSizeKB, totalCompactingKVs, currentCompactedKVs)); + } + + /** + * @param index the index + * @return the region name + */ + public Region getRegion(int index) { + return regions.get(index); + } + + /** + * Default constructor + */ + public Node() {} + + /** + * Constructor + * @param name the region server name + * @param startCode the region server's start code + */ + public Node(String name, long startCode) { + this.name = name; + this.startCode = startCode; + } + + /** + * @return the region server's name + */ + @XmlAttribute + public String getName() { + return name; + } + + /** + * @return the region server's start code + */ + @XmlAttribute + public long getStartCode() { + return startCode; + } + + /** + * @return the current heap size, in MB + */ + @XmlAttribute + public int getHeapSizeMB() { + return heapSizeMB; + } + + /** + * @return the maximum heap size, in MB + */ + @XmlAttribute + public int getMaxHeapSizeMB() { + return maxHeapSizeMB; + } + + /** + * @return the list of regions served by the region server + */ + @XmlElement(name="Region") + public List<Region> getRegions() { + return regions; + } + + /** + * @return the number of requests per second processed by the region server + */ + @XmlAttribute + public int getRequests() { + return requests; + } + + /** + * @param name the region server's hostname + */ + public void setName(String name) { + this.name = name; + } + + /** + * @param startCode the region server's start code + */ + public void setStartCode(long startCode) { + this.startCode = startCode; + } + + /** + * @param heapSizeMB the current heap size, in MB + */ + public void setHeapSizeMB(int heapSizeMB) { + this.heapSizeMB = heapSizeMB; + } + + /** + * @param maxHeapSizeMB the maximum heap size, in MB + */ + public void setMaxHeapSizeMB(int maxHeapSizeMB) { + this.maxHeapSizeMB = maxHeapSizeMB; + } + + /** + * @param regions a list of 
regions served by the region server + */ + public void setRegions(List<Region> regions) { + this.regions = regions; + } + + /** + * @param requests the number of requests per second processed by the + * region server + */ + public void setRequests(int requests) { + this.requests = requests; + } + } + + private List<Node> liveNodes = new ArrayList<Node>(); + private List<String> deadNodes = new ArrayList<String>(); + private int regions; + private int requests; + private double averageLoad; + + /** + * Add a live node to the cluster representation. + * @param name the region server name + * @param startCode the region server's start code + * @param heapSizeMB the current heap size, in MB + * @param maxHeapSizeMB the maximum heap size, in MB + */ + public Node addLiveNode(String name, long startCode, int heapSizeMB, int maxHeapSizeMB) { + Node node = new Node(name, startCode); + node.setHeapSizeMB(heapSizeMB); + node.setMaxHeapSizeMB(maxHeapSizeMB); + liveNodes.add(node); + return node; + } + + /** + * @param index the index + * @return the region server model + */ + public Node getLiveNode(int index) { + return liveNodes.get(index); + } + + /** + * Add a dead node to the cluster representation. 
+ * @param node the dead region server's name + */ + public void addDeadNode(String node) { + deadNodes.add(node); + } + + /** + * @param index the index + * @return the dead region server's name + */ + public String getDeadNode(int index) { + return deadNodes.get(index); + } + + /** + * Default constructor + */ + public StorageClusterStatusModel() { + } + + /** + * @return the list of live nodes + */ + @XmlElement(name = "Node") + @XmlElementWrapper(name = "LiveNodes") + public List<Node> getLiveNodes() { + return liveNodes; + } + + /** + * @return the list of dead nodes + */ + @XmlElement(name = "Node") + @XmlElementWrapper(name = "DeadNodes") + public List<String> getDeadNodes() { + return deadNodes; + } + + /** + * @return the total number of regions served by the cluster + */ + @XmlAttribute + public int getRegions() { + return regions; + } + + /** + * @return the total number of requests per second handled by the cluster in + * the last reporting interval + */ + @XmlAttribute + public int getRequests() { + return requests; + } + + /** + * @return the average load of the region servers in the cluster + */ + @XmlAttribute + public double getAverageLoad() { + return averageLoad; + } + + /** + * @param nodes the list of live node models + */ + public void setLiveNodes(List<Node> nodes) { + this.liveNodes = nodes; + } + + /** + * @param nodes the list of dead node names + */ + public void setDeadNodes(List<String> nodes) { + this.deadNodes = nodes; + } + + /** + * @param regions the total number of regions served by the cluster + */ + public void setRegions(int regions) { + this.regions = regions; + } + + /** + * @param requests the total number of requests per second handled by the + * cluster + */ + public void setRequests(int requests) { + this.requests = requests; + } + + /** + * @param averageLoad the average load of region servers in the cluster + */ + public void setAverageLoad(double averageLoad) { + this.averageLoad = averageLoad; + } + + /* + * 
(non-Javadoc) + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(String.format("%d live servers, %d dead servers, " + + "%.4f average load%n%n", liveNodes.size(), deadNodes.size(), + averageLoad)); + if (!liveNodes.isEmpty()) { + sb.append(liveNodes.size()); + sb.append(" live servers\n"); + for (Node node: liveNodes) { + sb.append(" "); + sb.append(node.name); + sb.append(' '); + sb.append(node.startCode); + sb.append("\n requests="); + sb.append(node.requests); + sb.append(", regions="); + sb.append(node.regions.size()); + sb.append("\n heapSizeMB="); + sb.append(node.heapSizeMB); + sb.append("\n maxHeapSizeMB="); + sb.append(node.maxHeapSizeMB); + sb.append("\n\n"); + for (Node.Region region: node.regions) { + sb.append(" "); + sb.append(Bytes.toString(region.name)); + sb.append("\n stores="); + sb.append(region.stores); + sb.append("\n storefiless="); + sb.append(region.storefiles); + sb.append("\n storefileSizeMB="); + sb.append(region.storefileSizeMB); + sb.append("\n memstoreSizeMB="); + sb.append(region.memstoreSizeMB); + sb.append("\n storefileIndexSizeMB="); + sb.append(region.storefileIndexSizeMB); + sb.append("\n readRequestsCount="); + sb.append(region.readRequestsCount); + sb.append("\n writeRequestsCount="); + sb.append(region.writeRequestsCount); + sb.append("\n rootIndexSizeKB="); + sb.append(region.rootIndexSizeKB); + sb.append("\n totalStaticIndexSizeKB="); + sb.append(region.totalStaticIndexSizeKB); + sb.append("\n totalStaticBloomSizeKB="); + sb.append(region.totalStaticBloomSizeKB); + sb.append("\n totalCompactingKVs="); + sb.append(region.totalCompactingKVs); + sb.append("\n currentCompactedKVs="); + sb.append(region.currentCompactedKVs); + sb.append('\n'); + } + sb.append('\n'); + } + } + if (!deadNodes.isEmpty()) { + sb.append('\n'); + sb.append(deadNodes.size()); + sb.append(" dead servers\n"); + for (String node: deadNodes) { + sb.append(" "); + 
sb.append(node); + sb.append('\n'); + } + } + return sb.toString(); + } + + @Override + public byte[] createProtobufOutput() { + StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder(); + builder.setRegions(regions); + builder.setRequests(requests); + builder.setAverageLoad(averageLoad); + for (Node node: liveNodes) { + StorageClusterStatus.Node.Builder nodeBuilder = + StorageClusterStatus.Node.newBuilder(); + nodeBuilder.setName(node.name); + nodeBuilder.setStartCode(node.startCode); + nodeBuilder.setRequests(node.requests); + nodeBuilder.setHeapSizeMB(node.heapSizeMB); + nodeBuilder.setMaxHeapSizeMB(node.maxHeapSizeMB); + for (Node.Region region: node.regions) { + StorageClusterStatus.Region.Builder regionBuilder = + StorageClusterStatus.Region.newBuilder(); + regionBuilder.setName(ByteStringer.wrap(region.name)); + regionBuilder.setStores(region.stores); + regionBuilder.setStorefiles(region.storefiles); + regionBuilder.setStorefileSizeMB(region.storefileSizeMB); + regionBuilder.setMemstoreSizeMB(region.memstoreSizeMB); + regionBuilder.setStorefileIndexSizeMB(region.storefileIndexSizeMB); + regionBuilder.setReadRequestsCount(region.readRequestsCount); + regionBuilder.setWriteRequestsCount(region.writeRequestsCount); + regionBuilder.setRootIndexSizeKB(region.rootIndexSizeKB); + regionBuilder.setTotalStaticIndexSizeKB(region.totalStaticIndexSizeKB); + regionBuilder.setTotalStaticBloomSizeKB(region.totalStaticBloomSizeKB); + regionBuilder.setTotalCompactingKVs(region.totalCompactingKVs); + regionBuilder.setCurrentCompactedKVs(region.currentCompactedKVs); + nodeBuilder.addRegions(regionBuilder); + } + builder.addLiveNodes(nodeBuilder); + } + for (String node: deadNodes) { + builder.addDeadNodes(node); + } + return builder.build().toByteArray(); + } + + @Override + public ProtobufMessageHandler getObjectFromMessage(byte[] message) + throws IOException { + StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder(); + 
builder.mergeFrom(message); + if (builder.hasRegions()) { + regions = builder.getRegions(); + } + if (builder.hasRequests()) { + requests = builder.getRequests(); + } + if (builder.hasAverageLoad()) { + averageLoad = builder.getAverageLoad(); + } + for (StorageClusterStatus.Node node: builder.getLiveNodesList()) { + long startCode = node.hasStartCode() ? node.getStartCode() : -1; + StorageClusterStatusModel.Node nodeModel = + addLiveNode(node.getName(), startCode, node.getHeapSizeMB(), + node.getMaxHeapSizeMB()); + int requests = node.hasRequests() ? node.getRequests() : 0; + nodeModel.setRequests(requests); + for (StorageClusterStatus.Region region: node.getRegionsList()) { + nodeModel.addRegion( + region.getName().toByteArray(), + region.getStores(), + region.getStorefiles(), + region.getStorefileSizeMB(), + region.getMemstoreSizeMB(), + region.getStorefileIndexSizeMB(), + region.getReadRequestsCount(), + region.getWriteRequestsCount(), + region.getRootIndexSizeKB(), + region.getTotalStaticIndexSizeKB(), + region.getTotalStaticBloomSizeKB(), + region.getTotalCompactingKVs(), + region.getCurrentCompactedKVs()); + } + } + for (String node: builder.getDeadNodesList()) { + addDeadNode(node); + } + return this; + } +}
http://git-wip-us.apache.org/repos/asf/hbase/blob/6ddb2f19/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java ---------------------------------------------------------------------- diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java new file mode 100644 index 0000000..4321a8e --- /dev/null +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java @@ -0,0 +1,78 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.rest.model; + +import org.codehaus.jackson.annotate.JsonValue; + +import java.io.Serializable; + +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlValue; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +/** + * Simple representation of the version of the storage cluster + * + * <pre> + * <complexType name="StorageClusterVersion"> + * <attribute name="version" type="string"></attribute> + * </complexType> + * </pre> + */ +@XmlRootElement(name="ClusterVersion") [email protected] +public class StorageClusterVersionModel implements Serializable { + private static final long serialVersionUID = 1L; + + private String version; + + /** + * @return the storage cluster version + */ + @XmlValue + public String getVersion() { + return version; + } + + /** + * @param version the storage cluster version + */ + public void setVersion(String version) { + this.version = version; + } + + /* (non-Javadoc) + * @see java.lang.Object#toString() + */ + @JsonValue + @Override + public String toString() { + return version; + } + + //needed for jackson deserialization + private static StorageClusterVersionModel valueOf(String value) { + StorageClusterVersionModel versionModel + = new StorageClusterVersionModel(); + versionModel.setVersion(value); + return versionModel; + } +} http://git-wip-us.apache.org/repos/asf/hbase/blob/6ddb2f19/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java ---------------------------------------------------------------------- diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java new file mode 100644 index 0000000..700e766 --- /dev/null +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java @@ -0,0 +1,159 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest.model; + +import java.io.IOException; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; + +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.hadoop.hbase.util.ByteStringer; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.hadoop.hbase.rest.protobuf.generated.TableInfoMessage.TableInfo; + +/** + * Representation of a list of table regions. 
+ * + * <pre> + * <complexType name="TableInfo"> + * <sequence> + * <element name="region" type="tns:TableRegion" + * maxOccurs="unbounded" minOccurs="1"></element> + * </sequence> + * <attribute name="name" type="string"></attribute> + * </complexType> + * </pre> + */ +@XmlRootElement(name="TableInfo") [email protected] +public class TableInfoModel implements Serializable, ProtobufMessageHandler { + private static final long serialVersionUID = 1L; + + private String name; + private List<TableRegionModel> regions = new ArrayList<TableRegionModel>(); + + /** + * Default constructor + */ + public TableInfoModel() {} + + /** + * Constructor + * @param name + */ + public TableInfoModel(String name) { + this.name = name; + } + + /** + * Add a region model to the list + * @param region the region + */ + public void add(TableRegionModel region) { + regions.add(region); + } + + /** + * @param index the index + * @return the region model + */ + public TableRegionModel get(int index) { + return regions.get(index); + } + + /** + * @return the table name + */ + @XmlAttribute + public String getName() { + return name; + } + + /** + * @return the regions + */ + @XmlElement(name="Region") + public List<TableRegionModel> getRegions() { + return regions; + } + + /** + * @param name the table name + */ + public void setName(String name) { + this.name = name; + } + + /** + * @param regions the regions to set + */ + public void setRegions(List<TableRegionModel> regions) { + this.regions = regions; + } + + /* (non-Javadoc) + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + for(TableRegionModel aRegion : regions) { + sb.append(aRegion.toString()); + sb.append('\n'); + } + return sb.toString(); + } + + @Override + public byte[] createProtobufOutput() { + TableInfo.Builder builder = TableInfo.newBuilder(); + builder.setName(name); + for (TableRegionModel aRegion: regions) { + TableInfo.Region.Builder regionBuilder 
= TableInfo.Region.newBuilder(); + regionBuilder.setName(aRegion.getName()); + regionBuilder.setId(aRegion.getId()); + regionBuilder.setStartKey(ByteStringer.wrap(aRegion.getStartKey())); + regionBuilder.setEndKey(ByteStringer.wrap(aRegion.getEndKey())); + regionBuilder.setLocation(aRegion.getLocation()); + builder.addRegions(regionBuilder); + } + return builder.build().toByteArray(); + } + + @Override + public ProtobufMessageHandler getObjectFromMessage(byte[] message) + throws IOException { + TableInfo.Builder builder = TableInfo.newBuilder(); + builder.mergeFrom(message); + setName(builder.getName()); + for (TableInfo.Region region: builder.getRegionsList()) { + add(new TableRegionModel(builder.getName(), region.getId(), + region.getStartKey().toByteArray(), + region.getEndKey().toByteArray(), + region.getLocation())); + } + return this; + } +} http://git-wip-us.apache.org/repos/asf/hbase/blob/6ddb2f19/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java ---------------------------------------------------------------------- diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java new file mode 100644 index 0000000..596adac --- /dev/null +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java @@ -0,0 +1,113 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.rest.model;

import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;

import javax.xml.bind.annotation.XmlElementRef;
import javax.xml.bind.annotation.XmlRootElement;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
import org.apache.hadoop.hbase.rest.protobuf.generated.TableListMessage.TableList;

/**
 * Simple representation of a list of table names.
 */
@XmlRootElement(name="TableList")
@InterfaceAudience.Private
public class TableListModel implements Serializable, ProtobufMessageHandler {

  private static final long serialVersionUID = 1L;

  // Backing list; left mutable so JAXB/Jackson can replace it via setTables().
  private List<TableModel> tables = new ArrayList<TableModel>();

  /**
   * Default constructor, required for bean instantiation.
   */
  public TableListModel() {}

  /**
   * Append a table name model to the list.
   * @param table the table model
   */
  public void add(TableModel table) {
    tables.add(table);
  }

  /**
   * @param index the index
   * @return the table model at that position
   */
  public TableModel get(int index) {
    return tables.get(index);
  }

  /**
   * @return the tables
   */
  @XmlElementRef(name="table")
  public List<TableModel> getTables() {
    return tables;
  }

  /**
   * @param tables the tables to set
   */
  public void setTables(List<TableModel> tables) {
    this.tables = tables;
  }

  /* (non-Javadoc)
   * @see java.lang.Object#toString()
   */
  @Override
  public String toString() {
    // One table name per line, each line newline-terminated.
    StringBuilder buffer = new StringBuilder();
    for (int i = 0; i < tables.size(); i++) {
      buffer.append(tables.get(i).toString());
      buffer.append('\n');
    }
    return buffer.toString();
  }

  @Override
  public byte[] createProtobufOutput() {
    TableList.Builder list = TableList.newBuilder();
    for (TableModel model : tables) {
      list.addName(model.getName());
    }
    return list.build().toByteArray();
  }

  @Override
  public ProtobufMessageHandler getObjectFromMessage(byte[] message)
      throws IOException {
    TableList.Builder list = TableList.newBuilder();
    list.mergeFrom(message);
    for (String name : list.getNameList()) {
      add(new TableModel(name));
    }
    return this;
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.rest.model;

import java.io.Serializable;

import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;

import org.apache.hadoop.hbase.classification.InterfaceAudience;

/**
 * Simple representation of a table name.
 *
 * <pre>
 * &lt;complexType name="Table"&gt;
 *   &lt;sequence&gt;
 *     &lt;element name="name" type="string"&gt;&lt;/element&gt;
 *   &lt;/sequence&gt;
 * &lt;/complexType&gt;
 * </pre>
 */
@XmlRootElement(name="table")
@InterfaceAudience.Private
public class TableModel implements Serializable {

  private static final long serialVersionUID = 1L;

  // The table name, surfaced as an XML attribute.
  private String name;

  /**
   * Default constructor, required for bean instantiation.
   */
  public TableModel() {}

  /**
   * Constructor
   * @param name the table name
   */
  public TableModel(String name) {
    this.name = name;
  }

  /**
   * @return the name
   */
  @XmlAttribute
  public String getName() {
    return name;
  }

  /**
   * @param name the name to set
   */
  public void setName(String name) {
    this.name = name;
  }

  /* (non-Javadoc)
   * @see java.lang.Object#toString()
   */
  @Override
  public String toString() {
    return name;
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.rest.model;

import java.io.Serializable;

import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Representation of a region of a table and its current location on the
 * storage cluster.
 *
 * <pre>
 * &lt;complexType name="TableRegion"&gt;
 *   &lt;attribute name="name" type="string"&gt;&lt;/attribute&gt;
 *   &lt;attribute name="id" type="int"&gt;&lt;/attribute&gt;
 *   &lt;attribute name="startKey" type="base64Binary"&gt;&lt;/attribute&gt;
 *   &lt;attribute name="endKey" type="base64Binary"&gt;&lt;/attribute&gt;
 *   &lt;attribute name="location" type="string"&gt;&lt;/attribute&gt;
 *  &lt;/complexType&gt;
 * </pre>
 */
@XmlRootElement(name="Region")
@InterfaceAudience.Private
public class TableRegionModel implements Serializable {

  private static final long serialVersionUID = 1L;

  private String table;
  private long id;
  private byte[] startKey;
  private byte[] endKey;
  private String location;

  /**
   * Default constructor, required for bean instantiation.
   */
  public TableRegionModel() {}

  /**
   * Constructor
   * @param table the table name
   * @param id the encoded id of the region
   * @param startKey the start key of the region
   * @param endKey the end key of the region
   */
  public TableRegionModel(String table, long id, byte[] startKey,
      byte[] endKey) {
    this(table, id, startKey, endKey, null);
  }

  /**
   * Constructor
   * @param table the table name
   * @param id the encoded id of the region
   * @param startKey the start key of the region
   * @param endKey the end key of the region
   * @param location the name and port of the region server hosting the region
   */
  public TableRegionModel(String table, long id, byte[] startKey,
      byte[] endKey, String location) {
    this.table = table;
    this.id = id;
    this.startKey = startKey;
    this.endKey = endKey;
    this.location = location;
  }

  /**
   * @return the region name, rebuilt from the table name, start key and id
   *   via {@link HRegionInfo#createRegionName}
   */
  @XmlAttribute
  public String getName() {
    byte[] tableNameAsBytes = Bytes.toBytes(this.table);
    TableName tableName = TableName.valueOf(tableNameAsBytes);
    byte[] nameAsBytes = HRegionInfo.createRegionName(
      tableName, this.startKey, this.id, !tableName.isSystemTable());
    return Bytes.toString(nameAsBytes);
  }

  /**
   * @return the encoded region id
   */
  @XmlAttribute
  public long getId() {
    return id;
  }

  /**
   * @return the start key
   */
  @XmlAttribute
  public byte[] getStartKey() {
    return startKey;
  }

  /**
   * @return the end key
   */
  @XmlAttribute
  public byte[] getEndKey() {
    return endKey;
  }

  /**
   * @return the name and port of the region server hosting the region
   */
  @XmlAttribute
  public String getLocation() {
    return location;
  }

  /**
   * Set table, start key and id from a printable region name of the form
   * <code>&lt;table&gt;,&lt;startKey&gt;,&lt;id&gt;.&lt;encodedName&gt;.</code>
   * <p>
   * The start key may itself contain comma bytes, so only the first and the
   * last comma are treated as separators (a plain <code>split(",")</code>
   * would corrupt such names).
   * @param name region printable name
   */
  public void setName(String name) {
    int tableEnd = name.indexOf(',');
    int idStart = name.lastIndexOf(',');
    this.table = name.substring(0, tableEnd);
    this.startKey = Bytes.toBytes(name.substring(tableEnd + 1, idStart));
    String tail = name.substring(idStart + 1);
    int dot = tail.indexOf('.');
    // Strip the trailing ".<encodedName>." suffix, if present, before
    // parsing the numeric region id; parseLong avoids needless boxing.
    this.id = Long.parseLong(dot >= 0 ? tail.substring(0, dot) : tail);
  }

  /**
   * @param id the region's encoded id
   */
  public void setId(long id) {
    this.id = id;
  }

  /**
   * @param startKey the start key
   */
  public void setStartKey(byte[] startKey) {
    this.startKey = startKey;
  }

  /**
   * @param endKey the end key
   */
  public void setEndKey(byte[] endKey) {
    this.endKey = endKey;
  }

  /**
   * @param location the name and port of the region server hosting the region
   */
  public void setLocation(String location) {
    this.location = location;
  }

  /* (non-Javadoc)
   * @see java.lang.Object#toString()
   */
  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append(getName());
    sb.append(" [\n  id=");
    sb.append(id);
    sb.append("\n  startKey='");
    sb.append(Bytes.toString(startKey));
    sb.append("'\n  endKey='");
    sb.append(Bytes.toString(endKey));
    if (location != null) {
      sb.append("'\n  location='");
      sb.append(location);
    }
    sb.append("'\n]\n");
    return sb.toString();
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.rest.model;

import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import javax.xml.bind.annotation.XmlAnyAttribute;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.namespace.QName;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
import org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema;
import org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema;
import org.apache.hadoop.hbase.util.Bytes;
import org.codehaus.jackson.annotate.JsonAnyGetter;
import org.codehaus.jackson.annotate.JsonAnySetter;
import org.codehaus.jackson.annotate.JsonIgnore;

/**
 * A representation of HBase table descriptors.
 *
 * <pre>
 * &lt;complexType name="TableSchema"&gt;
 *   &lt;sequence&gt;
 *     &lt;element name="column" type="tns:ColumnSchema"
 *       maxOccurs="unbounded" minOccurs="1"&gt;&lt;/element&gt;
 *   &lt;/sequence&gt;
 *   &lt;attribute name="name" type="string"&gt;&lt;/attribute&gt;
 *   &lt;anyAttribute&gt;&lt;/anyAttribute&gt;
 * &lt;/complexType&gt;
 * </pre>
 */
@XmlRootElement(name="TableSchema")
@InterfaceAudience.Private
public class TableSchemaModel implements Serializable, ProtobufMessageHandler {
  private static final long serialVersionUID = 1L;
  private static final QName IS_META = new QName(HTableDescriptor.IS_META);
  private static final QName IS_ROOT = new QName(HTableDescriptor.IS_ROOT);
  private static final QName READONLY = new QName(HTableDescriptor.READONLY);
  private static final QName TTL = new QName(HColumnDescriptor.TTL);
  private static final QName VERSIONS = new QName(HConstants.VERSIONS);
  private static final QName COMPRESSION =
    new QName(HColumnDescriptor.COMPRESSION);

  private String name;
  // Insertion-ordered so attributes round-trip in a stable order.
  private Map<QName,Object> attrs = new LinkedHashMap<QName,Object>();
  private List<ColumnSchemaModel> columns = new ArrayList<ColumnSchemaModel>();

  /**
   * Default constructor.
   */
  public TableSchemaModel() {}

  /**
   * Constructor
   * @param htd the table descriptor
   */
  public TableSchemaModel(HTableDescriptor htd) {
    setName(htd.getTableName().getNameAsString());
    for (Map.Entry<Bytes, Bytes> e:
        htd.getValues().entrySet()) {
      addAttribute(Bytes.toString(e.getKey().get()),
        Bytes.toString(e.getValue().get()));
    }
    for (HColumnDescriptor hcd: htd.getFamilies()) {
      ColumnSchemaModel columnModel = new ColumnSchemaModel();
      columnModel.setName(hcd.getNameAsString());
      for (Map.Entry<Bytes, Bytes> e:
          hcd.getValues().entrySet()) {
        columnModel.addAttribute(Bytes.toString(e.getKey().get()),
          Bytes.toString(e.getValue().get()));
      }
      addColumnFamily(columnModel);
    }
  }

  /**
   * Add an attribute to the table descriptor
   * @param name attribute name
   * @param value attribute value
   */
  @JsonAnySetter
  public void addAttribute(String name, Object value) {
    attrs.put(new QName(name), value);
  }

  /**
   * Return a table descriptor value as a string. Calls toString() on the
   * object stored in the descriptor value map.
   * @param name the attribute name
   * @return the attribute value, or null if not present
   */
  public String getAttribute(String name) {
    Object o = attrs.get(new QName(name));
    return o != null ? o.toString() : null;
  }

  /**
   * Add a column family to the table descriptor
   * @param family the column family model
   */
  public void addColumnFamily(ColumnSchemaModel family) {
    columns.add(family);
  }

  /**
   * Retrieve the column family at the given index from the table descriptor
   * @param index the index
   * @return the column family model
   */
  public ColumnSchemaModel getColumnFamily(int index) {
    return columns.get(index);
  }

  /**
   * @return the table name
   */
  @XmlAttribute
  public String getName() {
    return name;
  }

  /**
   * @return the map for holding unspecified (user) attributes
   */
  @XmlAnyAttribute
  @JsonAnyGetter
  public Map<QName,Object> getAny() {
    return attrs;
  }

  /**
   * @return the columns
   */
  @XmlElement(name="ColumnSchema")
  public List<ColumnSchemaModel> getColumns() {
    return columns;
  }

  /**
   * @param name the table name
   */
  public void setName(String name) {
    this.name = name;
  }

  /**
   * @param columns the columns to set
   */
  public void setColumns(List<ColumnSchemaModel> columns) {
    this.columns = columns;
  }

  /* (non-Javadoc)
   * @see java.lang.Object#toString()
   */
  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("{ NAME=> '");
    sb.append(name);
    sb.append('\'');
    for (Map.Entry<QName,Object> e: attrs.entrySet()) {
      sb.append(", ");
      sb.append(e.getKey().getLocalPart());
      sb.append(" => '");
      sb.append(e.getValue().toString());
      sb.append('\'');
    }
    sb.append(", COLUMNS => [ ");
    Iterator<ColumnSchemaModel> i = columns.iterator();
    while (i.hasNext()) {
      ColumnSchemaModel family = i.next();
      sb.append(family.toString());
      if (i.hasNext()) {
        sb.append(',');
      }
      sb.append(' ');
    }
    sb.append("] }");
    return sb.toString();
  }

  // getters and setters for common schema attributes

  // cannot be standard bean type getters and setters, otherwise this would
  // confuse JAXB

  /**
   * @return true if IS_META attribute exists and is true
   */
  public boolean __getIsMeta() {
    Object o = attrs.get(IS_META);
    // parseBoolean avoids the needless Boolean boxing of Boolean.valueOf
    return o != null ? Boolean.parseBoolean(o.toString()) : false;
  }

  /**
   * @return true if IS_ROOT attribute exists and is true
   */
  public boolean __getIsRoot() {
    Object o = attrs.get(IS_ROOT);
    return o != null ? Boolean.parseBoolean(o.toString()) : false;
  }

  /**
   * @return true if READONLY attribute exists and is true
   */
  public boolean __getReadOnly() {
    Object o = attrs.get(READONLY);
    return o != null ?
      Boolean.parseBoolean(o.toString()) : HTableDescriptor.DEFAULT_READONLY;
  }

  /**
   * @param value desired value of IS_META attribute
   */
  public void __setIsMeta(boolean value) {
    attrs.put(IS_META, Boolean.toString(value));
  }

  /**
   * @param value desired value of IS_ROOT attribute
   */
  public void __setIsRoot(boolean value) {
    attrs.put(IS_ROOT, Boolean.toString(value));
  }

  /**
   * @param value desired value of READONLY attribute
   */
  public void __setReadOnly(boolean value) {
    attrs.put(READONLY, Boolean.toString(value));
  }

  @Override
  public byte[] createProtobufOutput() {
    TableSchema.Builder builder = TableSchema.newBuilder();
    builder.setName(name);
    for (Map.Entry<QName, Object> e: attrs.entrySet()) {
      TableSchema.Attribute.Builder attrBuilder =
        TableSchema.Attribute.newBuilder();
      attrBuilder.setName(e.getKey().getLocalPart());
      attrBuilder.setValue(e.getValue().toString());
      builder.addAttrs(attrBuilder);
    }
    for (ColumnSchemaModel family: columns) {
      Map<QName, Object> familyAttrs = family.getAny();
      ColumnSchema.Builder familyBuilder = ColumnSchema.newBuilder();
      familyBuilder.setName(family.getName());
      for (Map.Entry<QName, Object> e: familyAttrs.entrySet()) {
        ColumnSchema.Attribute.Builder attrBuilder =
          ColumnSchema.Attribute.newBuilder();
        attrBuilder.setName(e.getKey().getLocalPart());
        attrBuilder.setValue(e.getValue().toString());
        familyBuilder.addAttrs(attrBuilder);
      }
      // Well-known family attributes are additionally mirrored into the
      // typed protobuf fields.
      if (familyAttrs.containsKey(TTL)) {
        familyBuilder.setTtl(
          Integer.parseInt(familyAttrs.get(TTL).toString()));
      }
      if (familyAttrs.containsKey(VERSIONS)) {
        familyBuilder.setMaxVersions(
          Integer.parseInt(familyAttrs.get(VERSIONS).toString()));
      }
      if (familyAttrs.containsKey(COMPRESSION)) {
        familyBuilder.setCompression(familyAttrs.get(COMPRESSION).toString());
      }
      builder.addColumns(familyBuilder);
    }
    if (attrs.containsKey(READONLY)) {
      builder.setReadOnly(
        Boolean.parseBoolean(attrs.get(READONLY).toString()));
    }
    return builder.build().toByteArray();
  }

  @Override
  public ProtobufMessageHandler getObjectFromMessage(byte[] message)
      throws IOException {
    TableSchema.Builder builder = TableSchema.newBuilder();
    builder.mergeFrom(message);
    this.setName(builder.getName());
    for (TableSchema.Attribute attr: builder.getAttrsList()) {
      this.addAttribute(attr.getName(), attr.getValue());
    }
    if (builder.hasReadOnly()) {
      this.addAttribute(HTableDescriptor.READONLY, builder.getReadOnly());
    }
    for (ColumnSchema family: builder.getColumnsList()) {
      ColumnSchemaModel familyModel = new ColumnSchemaModel();
      familyModel.setName(family.getName());
      for (ColumnSchema.Attribute attr: family.getAttrsList()) {
        familyModel.addAttribute(attr.getName(), attr.getValue());
      }
      if (family.hasTtl()) {
        familyModel.addAttribute(HColumnDescriptor.TTL, family.getTtl());
      }
      if (family.hasMaxVersions()) {
        familyModel.addAttribute(HConstants.VERSIONS,
          family.getMaxVersions());
      }
      if (family.hasCompression()) {
        familyModel.addAttribute(HColumnDescriptor.COMPRESSION,
          family.getCompression());
      }
      this.addColumnFamily(familyModel);
    }
    return this;
  }

  /**
   * @return a table descriptor built from this model's name, attributes
   *   and column families
   */
  @JsonIgnore
  public HTableDescriptor getTableDescriptor() {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(getName()));
    for (Map.Entry<QName, Object> e: getAny().entrySet()) {
      htd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
    }
    for (ColumnSchemaModel column: getColumns()) {
      HColumnDescriptor hcd = new HColumnDescriptor(column.getName());
      for (Map.Entry<QName, Object> e: column.getAny().entrySet()) {
        hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
      }
      htd.addFamily(hcd);
    }
    return htd;
  }

}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.rest.model;

import java.io.IOException;
import java.io.Serializable;

import javax.servlet.ServletContext;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
import org.apache.hadoop.hbase.rest.RESTServlet;
import org.apache.hadoop.hbase.rest.protobuf.generated.VersionMessage.Version;

import com.sun.jersey.spi.container.servlet.ServletContainer;

/**
 * A representation of the collection of versions of the REST gateway software
 * components.
 * <ul>
 * <li>restVersion: REST gateway revision</li>
 * <li>jvmVersion: the JVM vendor and version information</li>
 * <li>osVersion: the OS type, version, and hardware architecture</li>
 * <li>serverVersion: the name and version of the servlet container</li>
 * <li>jerseyVersion: the version of the embedded Jersey framework</li>
 * </ul>
 */
@XmlRootElement(name="Version")
@InterfaceAudience.Private
public class VersionModel implements Serializable, ProtobufMessageHandler {

  private static final long serialVersionUID = 1L;

  private String restVersion;
  private String jvmVersion;
  private String osVersion;
  private String serverVersion;
  private String jerseyVersion;

  /**
   * Default constructor. Do not use.
   */
  public VersionModel() {}

  /**
   * Constructor
   * @param context the servlet context
   */
  public VersionModel(ServletContext context) {
    restVersion = RESTServlet.VERSION_STRING;
    jvmVersion = String.format("%s %s-%s",
      System.getProperty("java.vm.vendor"),
      System.getProperty("java.version"),
      System.getProperty("java.vm.version"));
    osVersion = String.format("%s %s %s",
      System.getProperty("os.name"),
      System.getProperty("os.version"),
      System.getProperty("os.arch"));
    serverVersion = context.getServerInfo();
    jerseyVersion = ServletContainer.class.getPackage()
      .getImplementationVersion();
  }

  /**
   * @return the REST gateway version
   */
  @XmlAttribute(name="REST")
  public String getRESTVersion() {
    return restVersion;
  }

  /**
   * @return the JVM vendor and version
   */
  @XmlAttribute(name="JVM")
  public String getJVMVersion() {
    return jvmVersion;
  }

  /**
   * @return the OS name, version, and hardware architecture
   */
  @XmlAttribute(name="OS")
  public String getOSVersion() {
    return osVersion;
  }

  /**
   * @return the servlet container version
   */
  @XmlAttribute(name="Server")
  public String getServerVersion() {
    return serverVersion;
  }

  /**
   * @return the version of the embedded Jersey framework
   */
  @XmlAttribute(name="Jersey")
  public String getJerseyVersion() {
    return jerseyVersion;
  }

  /**
   * @param version the REST gateway version string
   */
  public void setRESTVersion(String version) {
    this.restVersion = version;
  }

  /**
   * @param version the OS version string
   */
  public void setOSVersion(String version) {
    this.osVersion = version;
  }

  /**
   * @param version the JVM version string
   */
  public void setJVMVersion(String version) {
    this.jvmVersion = version;
  }

  /**
   * @param version the servlet container version string
   */
  public void setServerVersion(String version) {
    this.serverVersion = version;
  }

  /**
   * @param version the Jersey framework version string
   */
  public void setJerseyVersion(String version) {
    this.jerseyVersion = version;
  }

  /* (non-Javadoc)
   * @see java.lang.Object#toString()
   */
  @Override
  public String toString() {
    // Single-line summary, newline-terminated, e.g.
    // "rest 0.0.x [JVM: ...] [OS: ...] [Server: ...] [Jersey: ...]\n"
    return String.format("rest %s [JVM: %s] [OS: %s] [Server: %s] [Jersey: %s]\n",
      restVersion, jvmVersion, osVersion, serverVersion, jerseyVersion);
  }

  @Override
  public byte[] createProtobufOutput() {
    return Version.newBuilder()
      .setRestVersion(restVersion)
      .setJvmVersion(jvmVersion)
      .setOsVersion(osVersion)
      .setServerVersion(serverVersion)
      .setJerseyVersion(jerseyVersion)
      .build().toByteArray();
  }

  @Override
  public ProtobufMessageHandler getObjectFromMessage(byte[] message)
      throws IOException {
    Version.Builder parsed = Version.newBuilder();
    parsed.mergeFrom(message);
    // Only overwrite fields actually present in the message.
    if (parsed.hasRestVersion()) {
      restVersion = parsed.getRestVersion();
    }
    if (parsed.hasJvmVersion()) {
      jvmVersion = parsed.getJvmVersion();
    }
    if (parsed.hasOsVersion()) {
      osVersion = parsed.getOsVersion();
    }
    if (parsed.hasServerVersion()) {
      serverVersion = parsed.getServerVersion();
    }
    if (parsed.hasJerseyVersion()) {
      jerseyVersion = parsed.getJerseyVersion();
    }
    return this;
  }
}
