/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.timelineservice.storage.entity;

import java.io.IOException;
import java.util.Map;
import java.util.NavigableMap;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;

/**
 * Identifies partially qualified columns for the entity table. Each constant
 * names a column family plus an optional single-character column-name prefix
 * under which dynamically named qualifiers (event ids, metric names, config
 * keys, ...) are stored.
 */
public enum EntityColumnPrefix implements ColumnPrefix<EntityTable> {

  /**
   * To store TimelineEntity getIsRelatedToEntities values.
   */
  IS_RELATED_TO(EntityColumnFamily.INFO, "s"),

  /**
   * To store TimelineEntity getRelatesToEntities values.
   */
  RELATES_TO(EntityColumnFamily.INFO, "r"),

  /**
   * To store TimelineEntity info values.
   */
  INFO(EntityColumnFamily.INFO, "i"),

  /**
   * Lifecycle events for an entity.
   */
  EVENT(EntityColumnFamily.INFO, "e", true),

  /**
   * Config column stores configuration with config key as the column name.
   */
  CONFIG(EntityColumnFamily.CONFIGS, null),

  /**
   * Metrics are stored with the metric name as the column name. Values are
   * longs, hence the {@link LongConverter}.
   */
  METRIC(EntityColumnFamily.METRICS, null, new LongConverter());

  /** Helper that performs the actual HBase reads/writes for this prefix. */
  private final ColumnHelper<EntityTable> column;
  private final ColumnFamily<EntityTable> columnFamily;

  /**
   * Can be null for those cases where the provided column qualifier is the
   * entire column name.
   */
  private final String columnPrefix;
  private final byte[] columnPrefixBytes;

  /**
   * Private constructor, meant to be used by the enum definition.
   *
   * @param columnFamily that this column is stored in.
   * @param columnPrefix for this column.
   */
  EntityColumnPrefix(ColumnFamily<EntityTable> columnFamily,
      String columnPrefix) {
    this(columnFamily, columnPrefix, false, GenericConverter.getInstance());
  }

  /**
   * Private constructor, meant to be used by the enum definition.
   *
   * @param columnFamily that this column is stored in.
   * @param columnPrefix for this column.
   * @param compoundColQual whether the qualifier is a compound qualifier.
   */
  EntityColumnPrefix(ColumnFamily<EntityTable> columnFamily,
      String columnPrefix, boolean compoundColQual) {
    this(columnFamily, columnPrefix, compoundColQual,
        GenericConverter.getInstance());
  }

  /**
   * Private constructor, meant to be used by the enum definition.
   *
   * @param columnFamily that this column is stored in.
   * @param columnPrefix for this column.
   * @param converter used to encode/decode values stored for this prefix.
   */
  EntityColumnPrefix(ColumnFamily<EntityTable> columnFamily,
      String columnPrefix, ValueConverter converter) {
    this(columnFamily, columnPrefix, false, converter);
  }

  /**
   * Private constructor, meant to be used by the enum definition.
   *
   * @param columnFamily that this column is stored in.
   * @param columnPrefix for this column.
   * @param compoundColQual whether the qualifier is a compound qualifier.
   *          NOTE(review): this flag is currently accepted but not stored or
   *          read anywhere in this class — confirm whether it is still needed.
   * @param converter used to encode/decode values to be stored in HBase for
   *          this column prefix.
   */
  EntityColumnPrefix(ColumnFamily<EntityTable> columnFamily,
      String columnPrefix, boolean compoundColQual, ValueConverter converter) {
    column = new ColumnHelper<EntityTable>(columnFamily, converter);
    this.columnFamily = columnFamily;
    this.columnPrefix = columnPrefix;
    if (columnPrefix == null) {
      this.columnPrefixBytes = null;
    } else {
      // Future-proof by ensuring the right column prefix hygiene.
      this.columnPrefixBytes =
          Bytes.toBytes(Separator.SPACE.encode(columnPrefix));
    }
  }

  /**
   * @return the column prefix string, or null when the provided qualifier is
   *         the entire column name.
   */
  public String getColumnPrefix() {
    return columnPrefix;
  }

  @Override
  public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
    return ColumnHelper.getColumnQualifier(
        this.columnPrefixBytes, qualifierPrefix);
  }

  @Override
  public byte[] getColumnPrefixBytes(String qualifierPrefix) {
    return ColumnHelper.getColumnQualifier(
        this.columnPrefixBytes, qualifierPrefix);
  }

  @Override
  public byte[] getColumnFamilyBytes() {
    return columnFamily.getBytes();
  }

  @Override
  public ValueConverter getValueConverter() {
    return column.getValueConverter();
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
   * #store(byte[],
   * org.apache.hadoop.yarn.server.timelineservice.storage.common.
   * TypedBufferedMutator, java.lang.String, java.lang.Long, java.lang.Object,
   * org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute[])
   */
  public void store(byte[] rowKey,
      TypedBufferedMutator<EntityTable> tableMutator, String qualifier,
      Long timestamp, Object inputValue, Attribute... attributes)
      throws IOException {

    // A null qualifier would silently produce an unusable column name.
    if (qualifier == null) {
      throw new IOException("Cannot store column with null qualifier in "
          + tableMutator.getName().getNameAsString());
    }

    byte[] columnQualifier = getColumnPrefixBytes(qualifier);

    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
        attributes);
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
   * #store(byte[],
   * org.apache.hadoop.yarn.server.timelineservice.storage.common.
   * TypedBufferedMutator, java.lang.String, java.lang.Long, java.lang.Object)
   */
  public void store(byte[] rowKey,
      TypedBufferedMutator<EntityTable> tableMutator, byte[] qualifier,
      Long timestamp, Object inputValue, Attribute... attributes)
      throws IOException {

    // A null qualifier would silently produce an unusable column name.
    if (qualifier == null) {
      throw new IOException("Cannot store column with null qualifier in "
          + tableMutator.getName().getNameAsString());
    }

    byte[] columnQualifier = getColumnPrefixBytes(qualifier);

    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
        attributes);
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
   * #readResult(org.apache.hadoop.hbase.client.Result, java.lang.String)
   */
  public Object readResult(Result result, String qualifier) throws IOException {
    byte[] columnQualifier =
        ColumnHelper.getColumnQualifier(this.columnPrefixBytes, qualifier);
    return column.readResult(result, columnQualifier);
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
   * #readResults(org.apache.hadoop.hbase.client.Result,
   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter)
   */
  public <K> Map<K, Object> readResults(Result result,
      KeyConverter<K> keyConverter) throws IOException {
    return column.readResults(result, columnPrefixBytes, keyConverter);
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
   * #readResultsWithTimestamps(org.apache.hadoop.hbase.client.Result,
   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter)
   */
  public <K, V> NavigableMap<K, NavigableMap<Long, V>>
      readResultsWithTimestamps(Result result, KeyConverter<K> keyConverter)
      throws IOException {
    return column.readResultsWithTimestamps(result, columnPrefixBytes,
        keyConverter);
  }

}
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.timelineservice.storage.entity;

import java.util.List;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;

/**
 * Represents a rowkey for the entity table. The key is composed of
 * {@code userName!clusterId!flowName!flowRunId!AppId!entityType!
 * entityIdPrefix!entityId}.
 */
public class EntityRowKey {
  private final String clusterId;
  private final String userId;
  private final String flowName;
  private final Long flowRunId;
  private final String appId;
  private final String entityType;
  private final Long entityIdPrefix;
  private final String entityId;
  private final EntityRowKeyConverter entityRowKeyConverter =
      new EntityRowKeyConverter();

  public EntityRowKey(String clusterId, String userId, String flowName,
      Long flowRunId, String appId, String entityType, Long entityIdPrefix,
      String entityId) {
    this.clusterId = clusterId;
    this.userId = userId;
    this.flowName = flowName;
    this.flowRunId = flowRunId;
    this.appId = appId;
    this.entityType = entityType;
    this.entityIdPrefix = entityIdPrefix;
    this.entityId = entityId;
  }

  public String getClusterId() {
    return clusterId;
  }

  public String getUserId() {
    return userId;
  }

  public String getFlowName() {
    return flowName;
  }

  public Long getFlowRunId() {
    return flowRunId;
  }

  public String getAppId() {
    return appId;
  }

  public String getEntityType() {
    return entityType;
  }

  public String getEntityId() {
    return entityId;
  }

  public Long getEntityIdPrefix() {
    return entityIdPrefix;
  }

  /**
   * Constructs a row key for the entity table as follows:
   * {@code userName!clusterId!flowName!flowRunId!AppId!entityType!entityId}.
   * Typically used while querying a specific entity.
   *
   * @return byte array with the row key.
   */
  public byte[] getRowKey() {
    return entityRowKeyConverter.encode(this);
  }

  /**
   * Given the raw row key as bytes, returns the row key as an object.
   * @param rowKey byte representation of row key.
   * @return An <cite>EntityRowKey</cite> object.
   */
  public static EntityRowKey parseRowKey(byte[] rowKey) {
    return new EntityRowKeyConverter().decode(rowKey);
  }

  /**
   * Constructs a row key for the entity table as follows:
   * <p>
   * {@code userName!clusterId!flowName!flowRunId!AppId!
   * entityType!entityIdPrefix!entityId}.
   * </p>
   * @return String representation of row key.
   */
  public String getRowKeyAsString() {
    return entityRowKeyConverter.encodeAsString(this);
  }

  /**
   * Given the encoded row key as string, returns the row key as an object.
   * @param encodedRowKey String representation of row key.
   * @return A <cite>EntityRowKey</cite> object.
   */
  public static EntityRowKey parseRowKeyFromString(String encodedRowKey) {
    return new EntityRowKeyConverter().decodeFromString(encodedRowKey);
  }

  /**
   * Encodes and decodes row key for entity table. The row key is of the form :
   * userName!clusterId!flowName!flowRunId!appId!entityType!entityId. flowRunId
   * is a long, appId is encoded/decoded using {@link AppIdKeyConverter} and
   * rest are strings.
   * <p>
   */
  final private static class EntityRowKeyConverter implements
      KeyConverter<EntityRowKey>, KeyConverterToString<EntityRowKey> {

    private final AppIdKeyConverter appIDKeyConverter = new AppIdKeyConverter();

    private EntityRowKeyConverter() {
    }

    /**
     * Entity row key is of the form
     * userName!clusterId!flowName!flowRunId!appId!entityType!entityId w. each
     * segment separated by !. The sizes below indicate sizes of each one of
     * these segments in sequence. clusterId, userName, flowName, entityType and
     * entityId are strings. flowrunId is a long hence 8 bytes in size. app id
     * is represented as 12 bytes with cluster timestamp part of appid being 8
     * bytes (long) and seq id being 4 bytes(int). Strings are variable in size
     * (i.e. end whenever separator is encountered). This is used while decoding
     * and helps in determining where to split.
     */
    private static final int[] SEGMENT_SIZES = {Separator.VARIABLE_SIZE,
        Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
        AppIdKeyConverter.getKeySize(), Separator.VARIABLE_SIZE,
        Bytes.SIZEOF_LONG, Separator.VARIABLE_SIZE };

    /*
     * (non-Javadoc)
     *
     * Encodes EntityRowKey object into a byte array with each component/field
     * in EntityRowKey separated by Separator#QUALIFIERS. This leads to an
     * entity table row key of the form
     * userName!clusterId!flowName!flowRunId!appId!entityType!entityId If
     * entityType in passed EntityRowKey object is null (and the fields
     * preceding it i.e. clusterId, userId and flowName, flowRunId and appId
     * are not null), this returns a row key prefix of the form
     * userName!clusterId!flowName!flowRunId!appId! and if entityId in
     * EntityRowKey is null (other 6 components are not null), this returns a
     * row key prefix of the form
     * userName!clusterId!flowName!flowRunId!appId!entityType! flowRunId is
     * inverted while encoding as it helps maintain a descending order for row
     * keys in entity table.
     *
     * @see org.apache.hadoop.yarn.server.timelineservice.storage.common
     * .KeyConverter#encode(java.lang.Object)
     */
    @Override
    public byte[] encode(EntityRowKey rowKey) {
      byte[] user =
          Separator.encode(rowKey.getUserId(), Separator.SPACE, Separator.TAB,
              Separator.QUALIFIERS);
      byte[] cluster =
          Separator.encode(rowKey.getClusterId(), Separator.SPACE,
              Separator.TAB, Separator.QUALIFIERS);
      byte[] flow =
          Separator.encode(rowKey.getFlowName(), Separator.SPACE,
              Separator.TAB, Separator.QUALIFIERS);
      byte[] first = Separator.QUALIFIERS.join(user, cluster, flow);
      // Note that flowRunId is a long, so we can't encode them all at the same
      // time.
      byte[] second =
          Bytes.toBytes(LongConverter.invertLong(rowKey.getFlowRunId()));
      byte[] third = appIDKeyConverter.encode(rowKey.getAppId());
      if (rowKey.getEntityType() == null) {
        return Separator.QUALIFIERS.join(first, second, third,
            Separator.EMPTY_BYTES);
      }
      byte[] entityType =
          Separator.encode(rowKey.getEntityType(), Separator.SPACE,
              Separator.TAB, Separator.QUALIFIERS);

      if (rowKey.getEntityIdPrefix() == null) {
        return Separator.QUALIFIERS.join(first, second, third, entityType,
            Separator.EMPTY_BYTES);
      }

      byte[] entityIdPrefix = Bytes.toBytes(rowKey.getEntityIdPrefix());

      if (rowKey.getEntityId() == null) {
        return Separator.QUALIFIERS.join(first, second, third, entityType,
            entityIdPrefix, Separator.EMPTY_BYTES);
      }

      byte[] entityId = Separator.encode(rowKey.getEntityId(), Separator.SPACE,
          Separator.TAB, Separator.QUALIFIERS);

      byte[] fourth =
          Separator.QUALIFIERS.join(entityType, entityIdPrefix, entityId);

      return Separator.QUALIFIERS.join(first, second, third, fourth);
    }

    /*
     * (non-Javadoc)
     *
     * Decodes an application row key of the form
     * userName!clusterId!flowName!flowRunId!appId!entityType!entityId
     * represented in byte format and converts it into an EntityRowKey object.
     * flowRunId is inverted while decoding as it was inverted while encoding.
     *
     * @see
     * org.apache.hadoop.yarn.server.timelineservice.storage.common
     * .KeyConverter#decode(byte[])
     */
    @Override
    public EntityRowKey decode(byte[] rowKey) {
      byte[][] rowKeyComponents =
          Separator.QUALIFIERS.split(rowKey, SEGMENT_SIZES);
      if (rowKeyComponents.length != 8) {
        throw new IllegalArgumentException("the row key is not valid for "
            + "an entity");
      }
      String userId =
          Separator.decode(Bytes.toString(rowKeyComponents[0]),
              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
      String clusterId =
          Separator.decode(Bytes.toString(rowKeyComponents[1]),
              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
      String flowName =
          Separator.decode(Bytes.toString(rowKeyComponents[2]),
              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
      Long flowRunId =
          LongConverter.invertLong(Bytes.toLong(rowKeyComponents[3]));
      String appId = appIDKeyConverter.decode(rowKeyComponents[4]);
      String entityType =
          Separator.decode(Bytes.toString(rowKeyComponents[5]),
              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);

      Long entityIdPrefix = Bytes.toLong(rowKeyComponents[6]);

      String entityId =
          Separator.decode(Bytes.toString(rowKeyComponents[7]),
              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
      return new EntityRowKey(clusterId, userId, flowName, flowRunId, appId,
          entityType, entityIdPrefix, entityId);
    }

    @Override
    public String encodeAsString(EntityRowKey key) {
      // Unlike encode(), the string form does not support prefix keys, so
      // every component must be present.
      if (key.clusterId == null || key.userId == null || key.flowName == null
          || key.flowRunId == null || key.appId == null
          || key.entityType == null || key.entityIdPrefix == null
          || key.entityId == null) {
        throw new IllegalArgumentException(
            "Entity row key cannot be encoded as a string: all components "
                + "must be non-null");
      }
      return TimelineReaderUtils
          .joinAndEscapeStrings(new String[] {key.clusterId, key.userId,
              key.flowName, key.flowRunId.toString(), key.appId, key.entityType,
              key.entityIdPrefix.toString(), key.entityId});
    }

    @Override
    public EntityRowKey decodeFromString(String encodedRowKey) {
      List<String> split = TimelineReaderUtils.split(encodedRowKey);
      if (split == null || split.size() != 8) {
        throw new IllegalArgumentException("Invalid row key for entity table.");
      }
      Long flowRunId = Long.valueOf(split.get(3));
      Long entityIdPrefix = Long.valueOf(split.get(6));
      return new EntityRowKey(split.get(0), split.get(1), split.get(2),
          flowRunId, split.get(4), split.get(5), entityIdPrefix, split.get(7));
    }
  }
}
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.yarn.server.timelineservice.storage.entity; - -import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix; - -/** - * Represents a partial rowkey without the entityId or without entityType and - * entityId for the entity table. - * - */ -public class EntityRowKeyPrefix extends EntityRowKey implements - RowKeyPrefix<EntityRowKey> { - - /** - * Creates a prefix which generates the following rowKeyPrefixes for the - * entity table: - * {@code userName!clusterId!flowName!flowRunId!AppId!entityType!}. - * @param clusterId identifying the cluster - * @param userId identifying the user - * @param flowName identifying the flow - * @param flowRunId identifying the individual run of this flow - * @param appId identifying the application - * @param entityType which entity type - * @param entityIdPrefix for entityId - * @param entityId for an entity - */ - public EntityRowKeyPrefix(String clusterId, String userId, String flowName, - Long flowRunId, String appId, String entityType, Long entityIdPrefix, - String entityId) { - super(clusterId, userId, flowName, flowRunId, appId, entityType, - entityIdPrefix, entityId); - } - - /** - * Creates a prefix which generates the following rowKeyPrefixes for the - * entity table: - * {@code userName!clusterId!flowName!flowRunId!AppId!entityType!entityId}. 
- * - * @param clusterId identifying the cluster - * @param userId identifying the user - * @param flowName identifying the flow - * @param flowRunId identifying the individual run of this flow - * @param appId identifying the application - */ - public EntityRowKeyPrefix(String clusterId, String userId, String flowName, - Long flowRunId, String appId) { - this(clusterId, userId, flowName, flowRunId, appId, null, null, null); - } - - /* - * (non-Javadoc) - * - * @see - * org.apache.hadoop.yarn.server.timelineservice.storage.application. - * RowKeyPrefix#getRowKeyPrefix() - */ - public byte[] getRowKeyPrefix() { - return super.getRowKey(); - } - -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java deleted file mode 100644 index 988bba2..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java +++ /dev/null @@ -1,170 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.timelineservice.storage.entity;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * The entity table has column families info, config and metrics. Info stores
 * information about a timeline entity object, config stores configuration data
 * of a timeline entity object and metrics stores the metrics of a timeline
 * entity object.
 *
 * Example entity table record:
 *
 * <pre>
 * |-------------------------------------------------------------------------|
 * | Row       | Column Family                | Column Family| Column Family|
 * | key       | info                         | metrics      | config       |
 * |-------------------------------------------------------------------------|
 * | userName! | id:entityId                  | metricId1:   | configKey1:  |
 * | clusterId!|                              | metricValue1 | configValue1 |
 * | flowName! | type:entityType              | @timestamp1  |              |
 * | flowRunId!|                              |              | configKey2:  |
 * | AppId!    | created_time:                | metricId1:   | configValue2 |
 * | entityType| 1392993084018                | metricValue2 |              |
 * | idPrefix! |                              | @timestamp2  |              |
 * | entityId  | i!infoKey:                   |              |              |
 * |           | infoValue                    | metricId1:   |              |
 * |           |                              | metricValue1 |              |
 * |           | r!relatesToKey:              | @timestamp2  |              |
 * |           | id3=id4=id5                  |              |              |
 * |           |                              |              |              |
 * |           | s!isRelatedToKey             |              |              |
 * |           | id7=id9=id6                  |              |              |
 * |           |                              |              |              |
 * |           | e!eventId=timestamp=infoKey: |              |              |
 * |           | eventInfoValue               |              |              |
 * |           |                              |              |              |
 * |           | flowVersion:                 |              |              |
 * |           | versionValue                 |              |              |
 * |-------------------------------------------------------------------------|
 * </pre>
 */
public class EntityTable extends BaseTable<EntityTable> {
  /** entity prefix. */
  private static final String PREFIX =
      YarnConfiguration.TIMELINE_SERVICE_PREFIX + "entity";

  /** config param name that specifies the entity table name. */
  public static final String TABLE_NAME_CONF_NAME = PREFIX + ".table.name";

  /**
   * config param name that specifies the TTL for metrics column family in
   * entity table.
   */
  private static final String METRICS_TTL_CONF_NAME = PREFIX
      + ".table.metrics.ttl";

  /**
   * config param name that specifies max-versions for metrics column family in
   * entity table.
   */
  private static final String METRICS_MAX_VERSIONS =
      PREFIX + ".table.metrics.max-versions";

  /** default value for entity table name. */
  public static final String DEFAULT_TABLE_NAME = "timelineservice.entity";

  /** default TTL is 30 days for metrics timeseries. */
  private static final int DEFAULT_METRICS_TTL = 2592000;

  /** default max number of versions. */
  private static final int DEFAULT_METRICS_MAX_VERSIONS = 10000;

  private static final Logger LOG =
      LoggerFactory.getLogger(EntityTable.class);

  public EntityTable() {
    super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
  }

  /*
   * (non-Javadoc)
   *
   * Creates the entity table with its three column families, splitting
   * regions by username prefix. Fails if the table already exists.
   *
   * @see
   * org.apache.hadoop.yarn.server.timelineservice.storage.BaseTable#createTable
   * (org.apache.hadoop.hbase.client.Admin,
   * org.apache.hadoop.conf.Configuration)
   */
  public void createTable(Admin admin, Configuration hbaseConf)
      throws IOException {

    TableName table = getTableName(hbaseConf);
    if (admin.tableExists(table)) {
      // do not disable / delete existing table
      // similar to the approach taken by map-reduce jobs when
      // output directory exists
      throw new IOException("Table " + table.getNameAsString()
          + " already exists.");
    }

    HTableDescriptor entityTableDescp = new HTableDescriptor(table);
    HColumnDescriptor infoCF =
        new HColumnDescriptor(EntityColumnFamily.INFO.getBytes());
    infoCF.setBloomFilterType(BloomType.ROWCOL);
    entityTableDescp.addFamily(infoCF);

    HColumnDescriptor configCF =
        new HColumnDescriptor(EntityColumnFamily.CONFIGS.getBytes());
    configCF.setBloomFilterType(BloomType.ROWCOL);
    configCF.setBlockCacheEnabled(true);
    entityTableDescp.addFamily(configCF);

    HColumnDescriptor metricsCF =
        new HColumnDescriptor(EntityColumnFamily.METRICS.getBytes());
    metricsCF.setBlockCacheEnabled(true);
    // always keep 1 version (the latest)
    metricsCF.setMinVersions(1);
    metricsCF.setMaxVersions(
        hbaseConf.getInt(METRICS_MAX_VERSIONS, DEFAULT_METRICS_MAX_VERSIONS));
    metricsCF.setTimeToLive(hbaseConf.getInt(METRICS_TTL_CONF_NAME,
        DEFAULT_METRICS_TTL));
    entityTableDescp.addFamily(metricsCF);
    entityTableDescp.setRegionSplitPolicyClassName(
        "org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy");
    entityTableDescp.setValue("KeyPrefixRegionSplitPolicy.prefix_length",
        TimelineHBaseSchemaConstants.USERNAME_SPLIT_KEY_PREFIX_LENGTH);
    admin.createTable(entityTableDescp,
        TimelineHBaseSchemaConstants.getUsernameSplits());
    LOG.info("Status of table creation for {}={}", table.getNameAsString(),
        admin.tableExists(table));
  }

  /**
   * @param metricsTTL time to live parameter for the metrics in this table.
   * @param hbaseConf configuration in which to set the metrics TTL config
   *          variable.
   */
  public void setMetricsTTL(int metricsTTL, Configuration hbaseConf) {
    hbaseConf.setInt(METRICS_TTL_CONF_NAME, metricsTTL);
  }

}
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Package org.apache.hadoop.yarn.server.timelineservice.storage.entity - * contains classes related to implementation for entity table. - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -package org.apache.hadoop.yarn.server.timelineservice.storage.entity; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationCompactionDimension.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationCompactionDimension.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationCompactionDimension.java deleted file mode 100644 index 4e2cf2d..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationCompactionDimension.java +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.yarn.server.timelineservice.storage.flow; - -import org.apache.hadoop.hbase.util.Bytes; - -/** - * Identifies the compaction dimensions for the data in the {@link FlowRunTable} - * . - */ -public enum AggregationCompactionDimension { - - /** - * the application id. - */ - APPLICATION_ID((byte) 101); - - private byte tagType; - private byte[] inBytes; - - private AggregationCompactionDimension(byte tagType) { - this.tagType = tagType; - this.inBytes = Bytes.toBytes(this.name()); - } - - public Attribute getAttribute(String attributeValue) { - return new Attribute(this.name(), Bytes.toBytes(attributeValue)); - } - - public byte getTagType() { - return tagType; - } - - public byte[] getInBytes() { - return this.inBytes.clone(); - } - - public static AggregationCompactionDimension - getAggregationCompactionDimension(String aggCompactDimStr) { - for (AggregationCompactionDimension aggDim : AggregationCompactionDimension - .values()) { - if (aggDim.name().equals(aggCompactDimStr)) { - return aggDim; - } - } - return null; - } -} 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationOperation.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationOperation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationOperation.java deleted file mode 100644 index 40cdd2c..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationOperation.java +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.yarn.server.timelineservice.storage.flow; - -import org.apache.hadoop.hbase.util.Bytes; - -/** - * Identifies the attributes to be set for puts into the {@link FlowRunTable}. - * The numbers used for tagType are prime numbers. - */ -public enum AggregationOperation { - - /** - * When the flow was started. - */ - GLOBAL_MIN((byte) 71), - - /** - * When it ended. - */ - GLOBAL_MAX((byte) 73), - - /** - * The metrics of the flow. - */ - SUM((byte) 79), - - /** - * application running. - */ - SUM_FINAL((byte) 83), - - /** - * Min value as per the latest timestamp - * seen for a given app. - */ - LATEST_MIN((byte) 89), - - /** - * Max value as per the latest timestamp - * seen for a given app. - */ - LATEST_MAX((byte) 97); - - private byte tagType; - private byte[] inBytes; - - private AggregationOperation(byte tagType) { - this.tagType = tagType; - this.inBytes = Bytes.toBytes(this.name()); - } - - public Attribute getAttribute() { - return new Attribute(this.name(), this.inBytes); - } - - public byte getTagType() { - return tagType; - } - - public byte[] getInBytes() { - return this.inBytes.clone(); - } - - /** - * returns the AggregationOperation enum that represents that string. - * @param aggOpStr Aggregation operation. 
- * @return the AggregationOperation enum that represents that string - */ - public static AggregationOperation getAggregationOperation(String aggOpStr) { - for (AggregationOperation aggOp : AggregationOperation.values()) { - if (aggOp.name().equals(aggOpStr)) { - return aggOp; - } - } - return null; - } - -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/Attribute.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/Attribute.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/Attribute.java deleted file mode 100644 index d3de518..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/Attribute.java +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.yarn.server.timelineservice.storage.flow; - -/** - * Defines the attribute tuple to be set for puts into the {@link FlowRunTable}. - */ -public class Attribute { - private final String name; - private final byte[] value; - - public Attribute(String name, byte[] value) { - this.name = name; - this.value = value.clone(); - } - - public String getName() { - return name; - } - - public byte[] getValue() { - return value.clone(); - } -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnFamily.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnFamily.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnFamily.java deleted file mode 100644 index f9eb5b4..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnFamily.java +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.yarn.server.timelineservice.storage.flow; - -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator; - -/** - * Represents the flow run table column families. - */ -public enum FlowActivityColumnFamily - implements ColumnFamily<FlowActivityTable> { - - /** - * Info column family houses known columns, specifically ones included in - * columnfamily filters. - */ - INFO("i"); - - /** - * Byte representation of this column family. - */ - private final byte[] bytes; - - /** - * @param value - * create a column family with this name. Must be lower case and - * without spaces. - */ - private FlowActivityColumnFamily(String value) { - // column families should be lower case and not contain any spaces. 
- this.bytes = Bytes.toBytes(Separator.SPACE.encode(value)); - } - - public byte[] getBytes() { - return Bytes.copy(bytes); - } - -} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java deleted file mode 100644 index 706b002..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java +++ /dev/null @@ -1,221 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.yarn.server.timelineservice.storage.flow; - -import java.io.IOException; -import java.util.Map; -import java.util.NavigableMap; - -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter; - -/** - * Identifies partially qualified columns for the {@link FlowActivityTable}. - */ -public enum FlowActivityColumnPrefix - implements ColumnPrefix<FlowActivityTable> { - - /** - * To store run ids of the flows. - */ - RUN_ID(FlowActivityColumnFamily.INFO, "r", null); - - private final ColumnHelper<FlowActivityTable> column; - private final ColumnFamily<FlowActivityTable> columnFamily; - - /** - * Can be null for those cases where the provided column qualifier is the - * entire column name. - */ - private final String columnPrefix; - private final byte[] columnPrefixBytes; - - private final AggregationOperation aggOp; - - /** - * Private constructor, meant to be used by the enum definition. - * - * @param columnFamily - * that this column is stored in. - * @param columnPrefix - * for this column. 
- */ - private FlowActivityColumnPrefix( - ColumnFamily<FlowActivityTable> columnFamily, String columnPrefix, - AggregationOperation aggOp) { - this(columnFamily, columnPrefix, aggOp, false); - } - - private FlowActivityColumnPrefix( - ColumnFamily<FlowActivityTable> columnFamily, String columnPrefix, - AggregationOperation aggOp, boolean compoundColQual) { - column = new ColumnHelper<FlowActivityTable>(columnFamily); - this.columnFamily = columnFamily; - this.columnPrefix = columnPrefix; - if (columnPrefix == null) { - this.columnPrefixBytes = null; - } else { - // Future-proof by ensuring the right column prefix hygiene. - this.columnPrefixBytes = Bytes.toBytes(Separator.SPACE - .encode(columnPrefix)); - } - this.aggOp = aggOp; - } - - /** - * @return the column name value - */ - public String getColumnPrefix() { - return columnPrefix; - } - - @Override - public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) { - return ColumnHelper.getColumnQualifier( - this.columnPrefixBytes, qualifierPrefix); - } - - @Override - public byte[] getColumnPrefixBytes(String qualifierPrefix) { - return ColumnHelper.getColumnQualifier( - this.columnPrefixBytes, qualifierPrefix); - } - - public byte[] getColumnPrefixBytes() { - return columnPrefixBytes.clone(); - } - - @Override - public byte[] getColumnFamilyBytes() { - return columnFamily.getBytes(); - } - - @Override - public ValueConverter getValueConverter() { - return column.getValueConverter(); - } - - public AggregationOperation getAttribute() { - return aggOp; - } - - /* - * (non-Javadoc) - * - * @see - * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix - * #store(byte[], - * org.apache.hadoop.yarn.server.timelineservice.storage.common. 
- * TypedBufferedMutator, byte[], java.lang.Long, java.lang.Object, - * org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute[]) - */ - @Override - public void store(byte[] rowKey, - TypedBufferedMutator<FlowActivityTable> tableMutator, byte[] qualifier, - Long timestamp, Object inputValue, Attribute... attributes) - throws IOException { - // Null check - if (qualifier == null) { - throw new IOException("Cannot store column with null qualifier in " - + tableMutator.getName().getNameAsString()); - } - - byte[] columnQualifier = getColumnPrefixBytes(qualifier); - Attribute[] combinedAttributes = - HBaseTimelineStorageUtils.combineAttributes(attributes, this.aggOp); - column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue, - combinedAttributes); - } - - /* - * (non-Javadoc) - * - * @see - * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix - * #readResult(org.apache.hadoop.hbase.client.Result, java.lang.String) - */ - public Object readResult(Result result, String qualifier) throws IOException { - byte[] columnQualifier = ColumnHelper.getColumnQualifier( - this.columnPrefixBytes, qualifier); - return column.readResult(result, columnQualifier); - } - - /* - * (non-Javadoc) - * - * @see - * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix - * #readResults(org.apache.hadoop.hbase.client.Result, - * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter) - */ - public <K> Map<K, Object> readResults(Result result, - KeyConverter<K> keyConverter) throws IOException { - return column.readResults(result, columnPrefixBytes, keyConverter); - } - - /* - * (non-Javadoc) - * - * @see - * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix - * #readResultsWithTimestamps(org.apache.hadoop.hbase.client.Result, - * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter) - */ - public <K, V> NavigableMap<K, NavigableMap<Long, V>> - 
readResultsWithTimestamps(Result result, KeyConverter<K> keyConverter) - throws IOException { - return column.readResultsWithTimestamps(result, columnPrefixBytes, - keyConverter); - } - - /* - * (non-Javadoc) - * - * @see - * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix - * #store(byte[], - * org.apache.hadoop.yarn.server.timelineservice.storage.common. - * TypedBufferedMutator, java.lang.String, java.lang.Long, java.lang.Object, - * org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute[]) - */ - @Override - public void store(byte[] rowKey, - TypedBufferedMutator<FlowActivityTable> tableMutator, String qualifier, - Long timestamp, Object inputValue, Attribute... attributes) - throws IOException { - // Null check - if (qualifier == null) { - throw new IOException("Cannot store column with null qualifier in " - + tableMutator.getName().getNameAsString()); - } - - byte[] columnQualifier = getColumnPrefixBytes(qualifier); - Attribute[] combinedAttributes = - HBaseTimelineStorageUtils.combineAttributes(attributes, this.aggOp); - column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue, - combinedAttributes); - } -} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java deleted file mode 100644 index b8a5dba..0000000 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java +++ /dev/null @@ -1,247 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.yarn.server.timelineservice.storage.flow; - -import java.util.List; - -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator; - -/** - * Represents a rowkey for the flow activity table. 
- */ -public class FlowActivityRowKey { - - private final String clusterId; - private final Long dayTs; - private final String userId; - private final String flowName; - private final FlowActivityRowKeyConverter - flowActivityRowKeyConverter = new FlowActivityRowKeyConverter(); - - /** - * @param clusterId identifying the cluster - * @param dayTs to be converted to the top of the day timestamp - * @param userId identifying user - * @param flowName identifying the flow - */ - public FlowActivityRowKey(String clusterId, Long dayTs, String userId, - String flowName) { - this(clusterId, dayTs, userId, flowName, true); - } - - /** - * @param clusterId identifying the cluster - * @param timestamp when the flow activity happened. May be converted to the - * top of the day depending on the convertDayTsToTopOfDay argument. - * @param userId identifying user - * @param flowName identifying the flow - * @param convertDayTsToTopOfDay if true and timestamp isn't null, then - * timestamp will be converted to the top-of-the day timestamp - */ - protected FlowActivityRowKey(String clusterId, Long timestamp, String userId, - String flowName, boolean convertDayTsToTopOfDay) { - this.clusterId = clusterId; - if (convertDayTsToTopOfDay && (timestamp != null)) { - this.dayTs = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(timestamp); - } else { - this.dayTs = timestamp; - } - this.userId = userId; - this.flowName = flowName; - } - - public String getClusterId() { - return clusterId; - } - - public Long getDayTimestamp() { - return dayTs; - } - - public String getUserId() { - return userId; - } - - public String getFlowName() { - return flowName; - } - - /** - * Constructs a row key for the flow activity table as follows: - * {@code clusterId!dayTimestamp!user!flowName}. - * - * @return byte array for the row key - */ - public byte[] getRowKey() { - return flowActivityRowKeyConverter.encode(this); - } - - /** - * Given the raw row key as bytes, returns the row key as an object. 
- * - * @param rowKey Byte representation of row key. - * @return A <cite>FlowActivityRowKey</cite> object. - */ - public static FlowActivityRowKey parseRowKey(byte[] rowKey) { - return new FlowActivityRowKeyConverter().decode(rowKey); - } - - /** - * Constructs a row key for the flow activity table as follows: - * {@code clusterId!dayTimestamp!user!flowName}. - * @return String representation of row key - */ - public String getRowKeyAsString() { - return flowActivityRowKeyConverter.encodeAsString(this); - } - - /** - * Given the raw row key as string, returns the row key as an object. - * @param encodedRowKey String representation of row key. - * @return A <cite>FlowActivityRowKey</cite> object. - */ - public static FlowActivityRowKey parseRowKeyFromString(String encodedRowKey) { - return new FlowActivityRowKeyConverter().decodeFromString(encodedRowKey); - } - - /** - * Encodes and decodes row key for flow activity table. The row key is of the - * form : clusterId!dayTimestamp!user!flowName. dayTimestamp(top of the day - * timestamp) is a long and rest are strings. - * <p> - */ - final private static class FlowActivityRowKeyConverter - implements KeyConverter<FlowActivityRowKey>, - KeyConverterToString<FlowActivityRowKey> { - - private FlowActivityRowKeyConverter() { - } - - /** - * The flow activity row key is of the form - * clusterId!dayTimestamp!user!flowName with each segment separated by !. - * The sizes below indicate sizes of each one of these segements in - * sequence. clusterId, user and flowName are strings. Top of the day - * timestamp is a long hence 8 bytes in size. Strings are variable in size - * (i.e. they end whenever separator is encountered). This is used while - * decoding and helps in determining where to split. 
- */ - private static final int[] SEGMENT_SIZES = {Separator.VARIABLE_SIZE, - Bytes.SIZEOF_LONG, Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE }; - - /* - * (non-Javadoc) - * - * Encodes FlowActivityRowKey object into a byte array with each - * component/field in FlowActivityRowKey separated by Separator#QUALIFIERS. - * This leads to an flow activity table row key of the form - * clusterId!dayTimestamp!user!flowName. If dayTimestamp in passed - * FlowActivityRowKey object is null and clusterId is not null, then this - * returns a row key prefix as clusterId! and if userId in - * FlowActivityRowKey is null (and the fields preceding it i.e. clusterId - * and dayTimestamp are not null), this returns a row key prefix as - * clusterId!dayTimeStamp! dayTimestamp is inverted while encoding as it - * helps maintain a descending order for row keys in flow activity table. - * - * @see org.apache.hadoop.yarn.server.timelineservice.storage.common - * .KeyConverter#encode(java.lang.Object) - */ - @Override - public byte[] encode(FlowActivityRowKey rowKey) { - if (rowKey.getDayTimestamp() == null) { - return Separator.QUALIFIERS.join(Separator.encode( - rowKey.getClusterId(), Separator.SPACE, Separator.TAB, - Separator.QUALIFIERS), Separator.EMPTY_BYTES); - } - if (rowKey.getUserId() == null) { - return Separator.QUALIFIERS.join(Separator.encode( - rowKey.getClusterId(), Separator.SPACE, Separator.TAB, - Separator.QUALIFIERS), Bytes.toBytes(LongConverter - .invertLong(rowKey.getDayTimestamp())), Separator.EMPTY_BYTES); - } - return Separator.QUALIFIERS.join(Separator.encode(rowKey.getClusterId(), - Separator.SPACE, Separator.TAB, Separator.QUALIFIERS), Bytes - .toBytes(LongConverter.invertLong(rowKey.getDayTimestamp())), - Separator.encode(rowKey.getUserId(), Separator.SPACE, Separator.TAB, - Separator.QUALIFIERS), Separator.encode(rowKey.getFlowName(), - Separator.SPACE, Separator.TAB, Separator.QUALIFIERS)); - } - - /* - * (non-Javadoc) - * - * @see - * 
org.apache.hadoop.yarn.server.timelineservice.storage.common - * .KeyConverter#decode(byte[]) - */ - @Override - public FlowActivityRowKey decode(byte[] rowKey) { - byte[][] rowKeyComponents = - Separator.QUALIFIERS.split(rowKey, SEGMENT_SIZES); - if (rowKeyComponents.length != 4) { - throw new IllegalArgumentException("the row key is not valid for " - + "a flow activity"); - } - String clusterId = - Separator.decode(Bytes.toString(rowKeyComponents[0]), - Separator.QUALIFIERS, Separator.TAB, Separator.SPACE); - Long dayTs = LongConverter.invertLong(Bytes.toLong(rowKeyComponents[1])); - String userId = - Separator.decode(Bytes.toString(rowKeyComponents[2]), - Separator.QUALIFIERS, Separator.TAB, Separator.SPACE); - String flowName = - Separator.decode(Bytes.toString(rowKeyComponents[3]), - Separator.QUALIFIERS, Separator.TAB, Separator.SPACE); - return new FlowActivityRowKey(clusterId, dayTs, userId, flowName); - } - - @Override - public String encodeAsString(FlowActivityRowKey key) { - if (key.getDayTimestamp() == null) { - return TimelineReaderUtils - .joinAndEscapeStrings(new String[] {key.clusterId}); - } else if (key.getUserId() == null) { - return TimelineReaderUtils.joinAndEscapeStrings( - new String[] {key.clusterId, key.dayTs.toString()}); - } else if (key.getFlowName() == null) { - return TimelineReaderUtils.joinAndEscapeStrings( - new String[] {key.clusterId, key.dayTs.toString(), key.userId}); - } - return TimelineReaderUtils.joinAndEscapeStrings(new String[] { - key.clusterId, key.dayTs.toString(), key.userId, key.flowName}); - } - - @Override - public FlowActivityRowKey decodeFromString(String encodedRowKey) { - List<String> split = TimelineReaderUtils.split(encodedRowKey); - if (split == null || split.size() != 4) { - throw new IllegalArgumentException( - "Invalid row key for flow activity."); - } - Long dayTs = Long.valueOf(split.get(1)); - return new FlowActivityRowKey(split.get(0), dayTs, split.get(2), - split.get(3)); - } - } -} 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKeyPrefix.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKeyPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKeyPrefix.java deleted file mode 100644 index eb88e54..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKeyPrefix.java +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.yarn.server.timelineservice.storage.flow; - -import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix; - -/** - * A prefix partial rowkey for flow activities. - */ -public class FlowActivityRowKeyPrefix extends FlowActivityRowKey implements - RowKeyPrefix<FlowActivityRowKey> { - - /** - * Constructs a row key prefix for the flow activity table as follows: - * {@code clusterId!dayTimestamp!}. - * - * @param clusterId Cluster Id. - * @param dayTs Start of the day timestamp. - */ - public FlowActivityRowKeyPrefix(String clusterId, Long dayTs) { - super(clusterId, dayTs, null, null, false); - } - - /** - * Constructs a row key prefix for the flow activity table as follows: - * {@code clusterId!}. - * - * @param clusterId identifying the cluster - */ - public FlowActivityRowKeyPrefix(String clusterId) { - super(clusterId, null, null, null, false); - } - - /* - * (non-Javadoc) - * - * @see - * org.apache.hadoop.yarn.server.timelineservice.storage.application. 
- * RowKeyPrefix#getRowKeyPrefix() - */ - public byte[] getRowKeyPrefix() { - return super.getRowKey(); - } - -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityTable.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityTable.java deleted file mode 100644 index e646eb2..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityTable.java +++ /dev/null @@ -1,109 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.yarn.server.timelineservice.storage.flow; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The flow activity table has column family info - * Stores the daily activity record for flows - * Useful as a quick lookup of what flows were - * running on a given day - * - * Example flow activity table record: - * - * <pre> - * |-------------------------------------------| - * | Row key | Column Family | - * | | info | - * |-------------------------------------------| - * | clusterId! | r!runid1:version1 | - * | inv Top of | | - * | Day! | r!runid2:version7 | - * | userName! | | - * | flowName | | - * |-------------------------------------------| - * </pre> - */ -public class FlowActivityTable extends BaseTable<FlowActivityTable> { - /** flow activity table prefix. */ - private static final String PREFIX = - YarnConfiguration.TIMELINE_SERVICE_PREFIX + ".flowactivity"; - - /** config param name that specifies the flowactivity table name. */ - public static final String TABLE_NAME_CONF_NAME = PREFIX + ".table.name"; - - /** default value for flowactivity table name. */ - public static final String DEFAULT_TABLE_NAME = - "timelineservice.flowactivity"; - - private static final Logger LOG = - LoggerFactory.getLogger(FlowActivityTable.class); - - /** default max number of versions. 
*/ - public static final int DEFAULT_METRICS_MAX_VERSIONS = Integer.MAX_VALUE; - - public FlowActivityTable() { - super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME); - } - - /* - * (non-Javadoc) - * - * @see - * org.apache.hadoop.yarn.server.timelineservice.storage.BaseTable#createTable - * (org.apache.hadoop.hbase.client.Admin, - * org.apache.hadoop.conf.Configuration) - */ - public void createTable(Admin admin, Configuration hbaseConf) - throws IOException { - - TableName table = getTableName(hbaseConf); - if (admin.tableExists(table)) { - // do not disable / delete existing table - // similar to the approach taken by map-reduce jobs when - // output directory exists - throw new IOException("Table " + table.getNameAsString() - + " already exists."); - } - - HTableDescriptor flowActivityTableDescp = new HTableDescriptor(table); - HColumnDescriptor infoCF = - new HColumnDescriptor(FlowActivityColumnFamily.INFO.getBytes()); - infoCF.setBloomFilterType(BloomType.ROWCOL); - flowActivityTableDescp.addFamily(infoCF); - infoCF.setMinVersions(1); - infoCF.setMaxVersions(DEFAULT_METRICS_MAX_VERSIONS); - - // TODO: figure the split policy before running in production - admin.createTable(flowActivityTableDescp); - LOG.info("Status of table creation for " + table.getNameAsString() + "=" - + admin.tableExists(table)); - } -} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java deleted file mode 100644 index 3797faf..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java +++ /dev/null @@ -1,131 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.yarn.server.timelineservice.storage.flow; - -import java.io.IOException; - -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter; - -/** - * Identifies fully qualified columns for the {@link FlowRunTable}. - */ -public enum FlowRunColumn implements Column<FlowRunTable> { - - /** - * When the flow was started. This is the minimum of currently known - * application start times. - */ - MIN_START_TIME(FlowRunColumnFamily.INFO, "min_start_time", - AggregationOperation.GLOBAL_MIN, new LongConverter()), - - /** - * When the flow ended. This is the maximum of currently known application end - * times. - */ - MAX_END_TIME(FlowRunColumnFamily.INFO, "max_end_time", - AggregationOperation.GLOBAL_MAX, new LongConverter()), - - /** - * The version of the flow that this flow belongs to. 
- */ - FLOW_VERSION(FlowRunColumnFamily.INFO, "flow_version", null); - - private final ColumnHelper<FlowRunTable> column; - private final ColumnFamily<FlowRunTable> columnFamily; - private final String columnQualifier; - private final byte[] columnQualifierBytes; - private final AggregationOperation aggOp; - - private FlowRunColumn(ColumnFamily<FlowRunTable> columnFamily, - String columnQualifier, AggregationOperation aggOp) { - this(columnFamily, columnQualifier, aggOp, - GenericConverter.getInstance()); - } - - private FlowRunColumn(ColumnFamily<FlowRunTable> columnFamily, - String columnQualifier, AggregationOperation aggOp, - ValueConverter converter) { - this.columnFamily = columnFamily; - this.columnQualifier = columnQualifier; - this.aggOp = aggOp; - // Future-proof by ensuring the right column prefix hygiene. - this.columnQualifierBytes = Bytes.toBytes(Separator.SPACE - .encode(columnQualifier)); - this.column = new ColumnHelper<FlowRunTable>(columnFamily, converter, true); - } - - /** - * @return the column name value - */ - private String getColumnQualifier() { - return columnQualifier; - } - - @Override - public byte[] getColumnQualifierBytes() { - return columnQualifierBytes.clone(); - } - - @Override - public byte[] getColumnFamilyBytes() { - return columnFamily.getBytes(); - } - - public AggregationOperation getAggregationOperation() { - return aggOp; - } - - /* - * (non-Javadoc) - * - * @see - * org.apache.hadoop.yarn.server.timelineservice.storage.common.Column#store - * (byte[], org.apache.hadoop.yarn.server.timelineservice.storage.common. - * TypedBufferedMutator, java.lang.Long, java.lang.Object, - * org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute[]) - */ - public void store(byte[] rowKey, - TypedBufferedMutator<FlowRunTable> tableMutator, Long timestamp, - Object inputValue, Attribute... 
attributes) throws IOException { - - Attribute[] combinedAttributes = - HBaseTimelineStorageUtils.combineAttributes(attributes, aggOp); - column.store(rowKey, tableMutator, columnQualifierBytes, timestamp, - inputValue, combinedAttributes); - } - - public Object readResult(Result result) throws IOException { - return column.readResult(result, columnQualifierBytes); - } - - @Override - public ValueConverter getValueConverter() { - return column.getValueConverter(); - } - -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnFamily.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnFamily.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnFamily.java deleted file mode 100644 index 8faf5f8..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnFamily.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.yarn.server.timelineservice.storage.flow; - -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily; -import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator; - -/** - * Represents the flow run table column families. - */ -public enum FlowRunColumnFamily implements ColumnFamily<FlowRunTable> { - - /** - * Info column family houses known columns, specifically ones included in - * columnfamily filters. - */ - INFO("i"); - - /** - * Byte representation of this column family. - */ - private final byte[] bytes; - - /** - * @param value - * create a column family with this name. Must be lower case and - * without spaces. - */ - private FlowRunColumnFamily(String value) { - // column families should be lower case and not contain any spaces. - this.bytes = Bytes.toBytes(Separator.SPACE.encode(value)); - } - - public byte[] getBytes() { - return Bytes.copy(bytes); - } - -} --------------------------------------------------------------------- To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org