YARN-3662. Federation Membership State Store internal APIs.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/22db8fda Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/22db8fda Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/22db8fda Branch: refs/heads/YARN-2915 Commit: 22db8fda42a3d41d89ad0f530196f6ab6533712b Parents: 85eda58 Author: Subru Krishnan <su...@apache.org> Authored: Fri Jul 29 16:53:40 2016 -0700 Committer: Subru Krishnan <su...@apache.org> Committed: Fri Jul 29 16:53:40 2016 -0700 ---------------------------------------------------------------------- .../hadoop/yarn/api/BasePBImplRecordsTest.java | 263 +++++++++ .../hadoop/yarn/api/TestPBImplRecords.java | 556 +------------------ .../hadoop-yarn-server-common/pom.xml | 8 + .../store/FederationMembershipStateStore.java | 126 +++++ .../server/federation/store/package-info.java | 17 + .../store/records/GetSubClusterInfoRequest.java | 62 +++ .../records/GetSubClusterInfoResponse.java | 62 +++ .../records/GetSubClustersInfoRequest.java | 66 +++ .../records/GetSubClustersInfoResponse.java | 66 +++ .../records/SubClusterDeregisterRequest.java | 89 +++ .../records/SubClusterDeregisterResponse.java | 42 ++ .../records/SubClusterHeartbeatRequest.java | 149 +++++ .../records/SubClusterHeartbeatResponse.java | 45 ++ .../federation/store/records/SubClusterId.java | 100 ++++ .../store/records/SubClusterInfo.java | 263 +++++++++ .../records/SubClusterRegisterRequest.java | 74 +++ .../records/SubClusterRegisterResponse.java | 44 ++ .../store/records/SubClusterState.java | 60 ++ .../impl/pb/GetSubClusterInfoRequestPBImpl.java | 125 +++++ .../pb/GetSubClusterInfoResponsePBImpl.java | 134 +++++ .../pb/GetSubClustersInfoRequestPBImpl.java | 108 ++++ .../pb/GetSubClustersInfoResponsePBImpl.java | 184 ++++++ .../pb/SubClusterDeregisterRequestPBImpl.java | 156 ++++++ .../pb/SubClusterDeregisterResponsePBImpl.java | 77 +++ .../pb/SubClusterHeartbeatRequestPBImpl.java | 192 +++++++ 
.../pb/SubClusterHeartbeatResponsePBImpl.java | 77 +++ .../records/impl/pb/SubClusterIdPBImpl.java | 75 +++ .../records/impl/pb/SubClusterInfoPBImpl.java | 267 +++++++++ .../pb/SubClusterRegisterRequestPBImpl.java | 134 +++++ .../pb/SubClusterRegisterResponsePBImpl.java | 77 +++ .../store/records/impl/pb/package-info.java | 17 + .../federation/store/records/package-info.java | 17 + .../proto/yarn_server_federation_protos.proto | 93 ++++ .../records/TestFederationProtocolRecords.java | 133 +++++ 34 files changed, 3418 insertions(+), 540 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/hadoop/blob/22db8fda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java new file mode 100644 index 0000000..98d8222 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java @@ -0,0 +1,263 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.api; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.junit.Assert; + +import java.lang.reflect.*; +import java.nio.ByteBuffer; +import java.util.*; + +/** + * Generic helper class to validate protocol records. + */ +public class BasePBImplRecordsTest { + static final Log LOG = LogFactory.getLog(BasePBImplRecordsTest.class); + + protected static HashMap<Type, Object> typeValueCache = + new HashMap<Type, Object>(); + private static Random rand = new Random(); + private static byte [] bytes = new byte[] {'1', '2', '3', '4'}; + + @SuppressWarnings({"rawtypes", "unchecked"}) + private static Object genTypeValue(Type type) { + Object ret = typeValueCache.get(type); + if (ret != null) { + return ret; + } + // only use positive primitive values + if (type.equals(boolean.class)) { + return rand.nextBoolean(); + } else if (type.equals(byte.class)) { + return bytes[rand.nextInt(4)]; + } else if (type.equals(int.class) || type.equals(Integer.class)) { + return rand.nextInt(1000000); + } else if (type.equals(long.class)) { + return Long.valueOf(rand.nextInt(1000000)); + } else if (type.equals(float.class)) { + return rand.nextFloat(); + } else if (type.equals(double.class)) { + return rand.nextDouble(); + } else if (type.equals(String.class)) { + return String.format("%c%c%c", + 'a' + rand.nextInt(26), + 'a' + rand.nextInt(26), + 'a' + rand.nextInt(26)); + } 
else if (type instanceof Class) { + Class clazz = (Class)type; + if (clazz.isArray()) { + Class compClass = clazz.getComponentType(); + if (compClass != null) { + ret = Array.newInstance(compClass, 2); + Array.set(ret, 0, genTypeValue(compClass)); + Array.set(ret, 1, genTypeValue(compClass)); + } + } else if (clazz.isEnum()) { + Object [] values = clazz.getEnumConstants(); + ret = values[rand.nextInt(values.length)]; + } else if (clazz.equals(ByteBuffer.class)) { + // return new ByteBuffer every time + // to prevent potential side effects + ByteBuffer buff = ByteBuffer.allocate(4); + rand.nextBytes(buff.array()); + return buff; + } + } else if (type instanceof ParameterizedType) { + ParameterizedType pt = (ParameterizedType)type; + Type rawType = pt.getRawType(); + Type [] params = pt.getActualTypeArguments(); + // only support EnumSet<T>, List<T>, Set<T>, Map<K,V> + if (rawType.equals(EnumSet.class)) { + if (params[0] instanceof Class) { + Class c = (Class)(params[0]); + return EnumSet.allOf(c); + } + } if (rawType.equals(List.class)) { + ret = Lists.newArrayList(genTypeValue(params[0])); + } else if (rawType.equals(Set.class)) { + ret = Sets.newHashSet(genTypeValue(params[0])); + } else if (rawType.equals(Map.class)) { + Map<Object, Object> map = Maps.newHashMap(); + map.put(genTypeValue(params[0]), genTypeValue(params[1])); + ret = map; + } + } + if (ret == null) { + throw new IllegalArgumentException("type " + type + " is not supported"); + } + typeValueCache.put(type, ret); + return ret; + } + + /** + * this method generate record instance by calling newIntance + * using reflection, add register the generated value to typeValueCache + */ + @SuppressWarnings("rawtypes") + protected static Object generateByNewInstance(Class clazz) throws Exception { + Object ret = typeValueCache.get(clazz); + if (ret != null) { + return ret; + } + Method newInstance = null; + Type [] paramTypes = new Type[0]; + // get newInstance method with most parameters + for (Method m : 
clazz.getMethods()) { + int mod = m.getModifiers(); + if (m.getDeclaringClass().equals(clazz) && + Modifier.isPublic(mod) && + Modifier.isStatic(mod) && + m.getName().equals("newInstance")) { + Type [] pts = m.getGenericParameterTypes(); + if (newInstance == null + || (pts.length > paramTypes.length)) { + newInstance = m; + paramTypes = pts; + } + } + } + if (newInstance == null) { + throw new IllegalArgumentException("type " + clazz.getName() + + " does not have newInstance method"); + } + Object [] args = new Object[paramTypes.length]; + for (int i=0;i<args.length;i++) { + args[i] = genTypeValue(paramTypes[i]); + } + ret = newInstance.invoke(null, args); + typeValueCache.put(clazz, ret); + return ret; + } + + private class GetSetPair { + public String propertyName; + public Method getMethod; + public Method setMethod; + public Type type; + public Object testValue; + + @Override + public String toString() { + return String.format("{ name=%s, class=%s, value=%s }", propertyName, + type, testValue); + } + } + + private <R> Map<String, GetSetPair> getGetSetPairs(Class<R> recordClass) + throws Exception { + Map<String, GetSetPair> ret = new HashMap<String, GetSetPair>(); + Method [] methods = recordClass.getDeclaredMethods(); + // get all get methods + for (int i = 0; i < methods.length; i++) { + Method m = methods[i]; + int mod = m.getModifiers(); + if (m.getDeclaringClass().equals(recordClass) && + Modifier.isPublic(mod) && + (!Modifier.isStatic(mod))) { + String name = m.getName(); + if (name.equals("getProto")) { + continue; + } + if ((name.length() > 3) && name.startsWith("get") && + (m.getParameterTypes().length == 0)) { + String propertyName = name.substring(3); + Type valueType = m.getGenericReturnType(); + GetSetPair p = ret.get(propertyName); + if (p == null) { + p = new GetSetPair(); + p.propertyName = propertyName; + p.type = valueType; + p.getMethod = m; + ret.put(propertyName, p); + } else { + Assert.fail("Multiple get method with same name: " + 
recordClass + + p.propertyName); + } + } + } + } + // match get methods with set methods + for (int i = 0; i < methods.length; i++) { + Method m = methods[i]; + int mod = m.getModifiers(); + if (m.getDeclaringClass().equals(recordClass) && + Modifier.isPublic(mod) && + (!Modifier.isStatic(mod))) { + String name = m.getName(); + if (name.startsWith("set") && (m.getParameterTypes().length == 1)) { + String propertyName = name.substring(3); + Type valueType = m.getGenericParameterTypes()[0]; + GetSetPair p = ret.get(propertyName); + if (p != null && p.type.equals(valueType)) { + p.setMethod = m; + } + } + } + } + // exclude incomplete get/set pair, and generate test value + Iterator<Map.Entry<String, GetSetPair>> itr = ret.entrySet().iterator(); + while (itr.hasNext()) { + Map.Entry<String, GetSetPair> cur = itr.next(); + GetSetPair gsp = cur.getValue(); + if ((gsp.getMethod == null) || + (gsp.setMethod == null)) { + LOG.info(String.format("Exclude protential property: %s\n", gsp.propertyName)); + itr.remove(); + } else { + LOG.info(String.format("New property: %s type: %s", gsp.toString(), gsp.type)); + gsp.testValue = genTypeValue(gsp.type); + LOG.info(String.format(" testValue: %s\n", gsp.testValue)); + } + } + return ret; + } + + protected <R, P> void validatePBImplRecord(Class<R> recordClass, + Class<P> protoClass) + throws Exception { + LOG.info(String.format("Validate %s %s\n", recordClass.getName(), + protoClass.getName())); + Constructor<R> emptyConstructor = recordClass.getConstructor(); + Constructor<R> pbConstructor = recordClass.getConstructor(protoClass); + Method getProto = recordClass.getDeclaredMethod("getProto"); + Map<String, GetSetPair> getSetPairs = getGetSetPairs(recordClass); + R origRecord = emptyConstructor.newInstance(); + for (GetSetPair gsp : getSetPairs.values()) { + gsp.setMethod.invoke(origRecord, gsp.testValue); + } + Object ret = getProto.invoke(origRecord); + Assert.assertNotNull(recordClass.getName() + "#getProto returns null", ret); 
+ if (!(protoClass.isAssignableFrom(ret.getClass()))) { + Assert.fail("Illegal getProto method return type: " + ret.getClass()); + } + R deserRecord = pbConstructor.newInstance(ret); + Assert.assertEquals("whole " + recordClass + " records should be equal", + origRecord, deserRecord); + for (GetSetPair gsp : getSetPairs.values()) { + Object origValue = gsp.getMethod.invoke(origRecord); + Object deserValue = gsp.getMethod.invoke(deserRecord); + Assert.assertEquals("property " + recordClass.getName() + "#" + + gsp.propertyName + " should be equal", origValue, deserValue); + } + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/22db8fda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java index 55b1233..a1480e9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java @@ -16,443 +16,31 @@ * limitations under the License. 
*/ package org.apache.hadoop.yarn.api; -import java.io.IOException; -import java.lang.reflect.Array; -import java.lang.reflect.Constructor; -import java.lang.reflect.Method; -import java.lang.reflect.Modifier; -import java.lang.reflect.ParameterizedType; -import java.lang.reflect.Type; -import java.nio.ByteBuffer; -import java.util.EnumSet; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Random; -import java.util.Set; - +import com.google.common.collect.ImmutableSet; import org.apache.commons.lang.math.LongRange; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; -import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto; -import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; -import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto; -import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; -import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto; -import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; -import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; +import org.apache.hadoop.security.proto.SecurityProtos.*; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenResponsePBImpl; -import 
org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationMasterRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationMasterResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptReportRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptReportResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptsRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptsResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodeLabelsRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodeLabelsResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerReportRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerReportResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerStatusesRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerStatusesResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainersRequestPBImpl; -import 
org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainersResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetDelegationTokenRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetDelegationTokenResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetLabelsToNodesRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetLabelsToNodesResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNodesToLabelsRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNodesToLabelsResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.KillApplicationRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.KillApplicationResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.MoveApplicationAcrossQueuesRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.MoveApplicationAcrossQueuesResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RenewDelegationTokenRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RenewDelegationTokenResponsePBImpl; -import 
org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationDeleteRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationDeleteResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationListRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationListResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationSubmissionRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationSubmissionResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationUpdateRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationUpdateResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainerRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainersRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainersResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainersRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainersResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.IncreaseContainersResourceRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.IncreaseContainersResourceResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl; -import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; -import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; -import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.api.records.ApplicationReport; -import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; -import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; -import 
org.apache.hadoop.yarn.api.records.Container; -import org.apache.hadoop.yarn.api.records.ContainerId; -import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; -import org.apache.hadoop.yarn.api.records.ContainerReport; -import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest; -import org.apache.hadoop.yarn.api.records.ContainerRetryContext; -import org.apache.hadoop.yarn.api.records.ContainerStatus; -import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest; -import org.apache.hadoop.yarn.api.records.LocalResource; -import org.apache.hadoop.yarn.api.records.LogAggregationContext; -import org.apache.hadoop.yarn.api.records.NMToken; -import org.apache.hadoop.yarn.api.records.NodeId; -import org.apache.hadoop.yarn.api.records.NodeLabel; -import org.apache.hadoop.yarn.api.records.NodeReport; -import org.apache.hadoop.yarn.api.records.PreemptionContainer; -import org.apache.hadoop.yarn.api.records.PreemptionContract; -import org.apache.hadoop.yarn.api.records.PreemptionMessage; -import org.apache.hadoop.yarn.api.records.PreemptionResourceRequest; -import org.apache.hadoop.yarn.api.records.Priority; -import org.apache.hadoop.yarn.api.records.QueueInfo; -import org.apache.hadoop.yarn.api.records.QueueState; -import org.apache.hadoop.yarn.api.records.QueueStatistics; -import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; -import org.apache.hadoop.yarn.api.records.ReservationAllocationState; -import org.apache.hadoop.yarn.api.records.ReservationDefinition; -import org.apache.hadoop.yarn.api.records.ReservationId; -import org.apache.hadoop.yarn.api.records.ReservationRequest; -import org.apache.hadoop.yarn.api.records.ReservationRequests; -import org.apache.hadoop.yarn.api.records.ResourceAllocationRequest; -import org.apache.hadoop.yarn.api.records.Resource; -import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; -import org.apache.hadoop.yarn.api.records.ResourceOption; -import 
org.apache.hadoop.yarn.api.records.ResourceRequest; -import org.apache.hadoop.yarn.api.records.ResourceUtilization; -import org.apache.hadoop.yarn.api.records.SerializedException; -import org.apache.hadoop.yarn.api.records.StrictPreemptionContract; -import org.apache.hadoop.yarn.api.records.Token; -import org.apache.hadoop.yarn.api.records.URL; -import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; -import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptReportPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationReportPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationResourceUsageReportPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.ContainerLaunchContextPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.ContainerReportPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.ContainerResourceChangeRequestPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.ContainerRetryContextPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.ContainerStatusPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.LocalResourcePBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.NMTokenPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.NodeReportPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionContainerPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionContractPBImpl; -import 
org.apache.hadoop.yarn.api.records.impl.pb.PreemptionMessagePBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionResourceRequestPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.PriorityPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.QueueInfoPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.QueueUserACLInfoPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.ResourceBlacklistRequestPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.ResourceOptionPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.ResourceRequestPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.SerializedExceptionPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.StrictPreemptionContractPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.URLPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.YarnClusterMetricsPBImpl; -import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptReportProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ContainerLaunchContextProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ContainerReportProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ContainerResourceChangeRequestProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ContainerRetryContextProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto; 
-import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; -import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto; -import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto; -import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto; -import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContainerProto; -import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContractProto; -import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionMessageProto; -import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionResourceRequestProto; -import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto; -import org.apache.hadoop.yarn.proto.YarnProtos.QueueInfoProto; -import org.apache.hadoop.yarn.proto.YarnProtos.QueueUserACLInfoProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ResourceBlacklistRequestProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ResourceOptionProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto; -import org.apache.hadoop.yarn.proto.YarnProtos.SerializedExceptionProto; -import org.apache.hadoop.yarn.proto.YarnProtos.StrictPreemptionContractProto; -import org.apache.hadoop.yarn.proto.YarnProtos.URLProto; -import org.apache.hadoop.yarn.proto.YarnProtos.YarnClusterMetricsProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsResponseProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesRequestProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesResponseProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsResponseProto; 
-import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesRequestProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesResponseProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResponseProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesResponseProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsResponseProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationResponseProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsResponseProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsResponseProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeResponseProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto; -import 
org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptReportRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptReportResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptsRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptsResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodeLabelsRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodeLabelsResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerReportRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerReportResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusesRequestProto; -import 
org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusesResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainersRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainersResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetLabelsToNodesRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetLabelsToNodesResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNodesToLabelsRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNodesToLabelsResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.MoveApplicationAcrossQueuesRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.MoveApplicationAcrossQueuesResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.NMTokenProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationDeleteRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationDeleteResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationListRequestProto; -import 
org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationListResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationSubmissionRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationSubmissionResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationUpdateRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationUpdateResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.IncreaseContainersResourceRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.IncreaseContainersResourceResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainersRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainersResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationResponseProto; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesRequestPBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesResponsePBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl; -import 
org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesRequestPBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesResponsePBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesResponsePBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshServiceAclsRequestPBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshServiceAclsResponsePBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationRequestPBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsRequestPBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsResponsePBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsRequestPBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsResponsePBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeRequestPBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeResponsePBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.*; +import org.apache.hadoop.yarn.api.records.*; +import org.apache.hadoop.yarn.api.records.impl.pb.*; +import 
org.apache.hadoop.yarn.proto.YarnProtos.*; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.*; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.*; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.*; import org.apache.hadoop.yarn.util.resource.Resources; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Ignore; import org.junit.Test; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; - -public class TestPBImplRecords { - static final Log LOG = LogFactory.getLog(TestPBImplRecords.class); - - private static HashMap<Type, Object> typeValueCache = new HashMap<Type, Object>(); - private static Random rand = new Random(); - private static byte [] bytes = new byte[] {'1', '2', '3', '4'}; - - @SuppressWarnings({"rawtypes", "unchecked"}) - private static Object genTypeValue(Type type) { - Object ret = typeValueCache.get(type); - if (ret != null) { - return ret; - } - // only use positive primitive values - if (type.equals(boolean.class)) { - return rand.nextBoolean(); - } else if (type.equals(byte.class)) { - return bytes[rand.nextInt(4)]; - } else if (type.equals(int.class) || type.equals(Integer.class)) { - return rand.nextInt(1000000); - } else if (type.equals(long.class)) { - return Long.valueOf(rand.nextInt(1000000)); - } else if (type.equals(float.class)) { - return rand.nextFloat(); - } else if (type.equals(double.class)) { - return rand.nextDouble(); - } else if (type.equals(String.class)) { - return String.format("%c%c%c", - 'a' + rand.nextInt(26), - 'a' + rand.nextInt(26), - 'a' + rand.nextInt(26)); - } else if (type instanceof Class) { - Class clazz = (Class)type; - if (clazz.isArray()) { - Class compClass = clazz.getComponentType(); - if (compClass != null) { - ret = Array.newInstance(compClass, 2); - Array.set(ret, 0, genTypeValue(compClass)); - Array.set(ret, 1, 
genTypeValue(compClass)); - } - } else if (clazz.isEnum()) { - Object [] values = clazz.getEnumConstants(); - ret = values[rand.nextInt(values.length)]; - } else if (clazz.equals(ByteBuffer.class)) { - // return new ByteBuffer every time - // to prevent potential side effects - ByteBuffer buff = ByteBuffer.allocate(4); - rand.nextBytes(buff.array()); - return buff; - } - } else if (type instanceof ParameterizedType) { - ParameterizedType pt = (ParameterizedType)type; - Type rawType = pt.getRawType(); - Type [] params = pt.getActualTypeArguments(); - // only support EnumSet<T>, List<T>, Set<T>, Map<K,V> - if (rawType.equals(EnumSet.class)) { - if (params[0] instanceof Class) { - Class c = (Class)(params[0]); - return EnumSet.allOf(c); - } - } if (rawType.equals(List.class)) { - ret = Lists.newArrayList(genTypeValue(params[0])); - } else if (rawType.equals(Set.class)) { - ret = Sets.newHashSet(genTypeValue(params[0])); - } else if (rawType.equals(Map.class)) { - Map<Object, Object> map = Maps.newHashMap(); - map.put(genTypeValue(params[0]), genTypeValue(params[1])); - ret = map; - } - } - if (ret == null) { - throw new IllegalArgumentException("type " + type + " is not supported"); - } - typeValueCache.put(type, ret); - return ret; - } - - /** - * this method generate record instance by calling newIntance - * using reflection, add register the generated value to typeValueCache - */ - @SuppressWarnings("rawtypes") - private static Object generateByNewInstance(Class clazz) throws Exception { - Object ret = typeValueCache.get(clazz); - if (ret != null) { - return ret; - } - Method newInstance = null; - Type [] paramTypes = new Type[0]; - // get newInstance method with most parameters - for (Method m : clazz.getMethods()) { - int mod = m.getModifiers(); - if (m.getDeclaringClass().equals(clazz) && - Modifier.isPublic(mod) && - Modifier.isStatic(mod) && - m.getName().equals("newInstance")) { - Type [] pts = m.getGenericParameterTypes(); - if (newInstance == null - || 
(pts.length > paramTypes.length)) { - newInstance = m; - paramTypes = pts; - } - } - } - if (newInstance == null) { - throw new IllegalArgumentException("type " + clazz.getName() + - " does not have newInstance method"); - } - Object [] args = new Object[paramTypes.length]; - for (int i=0;i<args.length;i++) { - args[i] = genTypeValue(paramTypes[i]); - } - ret = newInstance.invoke(null, args); - typeValueCache.put(clazz, ret); - return ret; - } +import java.io.IOException; + +/** + * Test class for YARN API protocol records. + */ +public class TestPBImplRecords extends BasePBImplRecordsTest { @BeforeClass public static void setup() throws Exception { @@ -514,118 +102,6 @@ public class TestPBImplRecords { generateByNewInstance(ResourceUtilization.class); } - private class GetSetPair { - public String propertyName; - public Method getMethod; - public Method setMethod; - public Type type; - public Object testValue; - - @Override - public String toString() { - return String.format("{ name=%s, class=%s, value=%s }", propertyName, - type, testValue); - } - } - - private <R> Map<String, GetSetPair> getGetSetPairs(Class<R> recordClass) - throws Exception { - Map<String, GetSetPair> ret = new HashMap<String, GetSetPair>(); - Method [] methods = recordClass.getDeclaredMethods(); - // get all get methods - for (int i = 0; i < methods.length; i++) { - Method m = methods[i]; - int mod = m.getModifiers(); - if (m.getDeclaringClass().equals(recordClass) && - Modifier.isPublic(mod) && - (!Modifier.isStatic(mod))) { - String name = m.getName(); - if (name.equals("getProto")) { - continue; - } - if ((name.length() > 3) && name.startsWith("get") && - (m.getParameterTypes().length == 0)) { - String propertyName = name.substring(3); - Type valueType = m.getGenericReturnType(); - GetSetPair p = ret.get(propertyName); - if (p == null) { - p = new GetSetPair(); - p.propertyName = propertyName; - p.type = valueType; - p.getMethod = m; - ret.put(propertyName, p); - } else { - 
Assert.fail("Multiple get method with same name: " + recordClass - + p.propertyName); - } - } - } - } - // match get methods with set methods - for (int i = 0; i < methods.length; i++) { - Method m = methods[i]; - int mod = m.getModifiers(); - if (m.getDeclaringClass().equals(recordClass) && - Modifier.isPublic(mod) && - (!Modifier.isStatic(mod))) { - String name = m.getName(); - if (name.startsWith("set") && (m.getParameterTypes().length == 1)) { - String propertyName = name.substring(3); - Type valueType = m.getGenericParameterTypes()[0]; - GetSetPair p = ret.get(propertyName); - if (p != null && p.type.equals(valueType)) { - p.setMethod = m; - } - } - } - } - // exclude incomplete get/set pair, and generate test value - Iterator<Entry<String, GetSetPair>> itr = ret.entrySet().iterator(); - while (itr.hasNext()) { - Entry<String, GetSetPair> cur = itr.next(); - GetSetPair gsp = cur.getValue(); - if ((gsp.getMethod == null) || - (gsp.setMethod == null)) { - LOG.info(String.format("Exclude protential property: %s\n", gsp.propertyName)); - itr.remove(); - } else { - LOG.info(String.format("New property: %s type: %s", gsp.toString(), gsp.type)); - gsp.testValue = genTypeValue(gsp.type); - LOG.info(String.format(" testValue: %s\n", gsp.testValue)); - } - } - return ret; - } - - private <R, P> void validatePBImplRecord(Class<R> recordClass, - Class<P> protoClass) - throws Exception { - LOG.info(String.format("Validate %s %s\n", recordClass.getName(), - protoClass.getName())); - Constructor<R> emptyConstructor = recordClass.getConstructor(); - Constructor<R> pbConstructor = recordClass.getConstructor(protoClass); - Method getProto = recordClass.getDeclaredMethod("getProto"); - Map<String, GetSetPair> getSetPairs = getGetSetPairs(recordClass); - R origRecord = emptyConstructor.newInstance(); - for (GetSetPair gsp : getSetPairs.values()) { - gsp.setMethod.invoke(origRecord, gsp.testValue); - } - Object ret = getProto.invoke(origRecord); - 
Assert.assertNotNull(recordClass.getName() + "#getProto returns null", ret); - if (!(protoClass.isAssignableFrom(ret.getClass()))) { - Assert.fail("Illegal getProto method return type: " + ret.getClass()); - } - R deserRecord = pbConstructor.newInstance(ret); - Assert.assertEquals("whole " + recordClass + " records should be equal", - origRecord, deserRecord); - for (GetSetPair gsp : getSetPairs.values()) { - Object origValue = gsp.getMethod.invoke(origRecord); - Object deserValue = gsp.getMethod.invoke(deserRecord); - Assert.assertEquals("property " + recordClass.getName() + "#" - + gsp.propertyName + " should be equal", origValue, deserValue); - } - } - @Test public void testAllocateRequestPBImpl() throws Exception { validatePBImplRecord(AllocateRequestPBImpl.class, AllocateRequestProto.class); http://git-wip-us.apache.org/repos/asf/hadoop/blob/22db8fda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml index f8330e3..0d59503 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml @@ -59,6 +59,13 @@ <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-yarn-common</artifactId> </dependency> + <!-- 'mvn dependency:analyze' fails to detect use of this dependency --> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-yarn-common</artifactId> + <type>test-jar</type> + <scope>test</scope> + </dependency> <dependency> <groupId>com.google.guava</groupId> @@ -148,6 +155,7 @@ <include>yarn_server_common_protos.proto</include> <include>yarn_server_common_service_protos.proto</include> 
<include>yarn_server_common_service_protos.proto</include> + <include>yarn_server_federation_protos.proto</include> <include>ResourceTracker.proto</include> <include>SCMUploader.proto</include> <include>collectornodemanager_protocol.proto</include> http://git-wip-us.apache.org/repos/asf/hadoop/blob/22db8fda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java new file mode 100644 index 0000000..378eadc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java @@ -0,0 +1,126 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.store; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterResponse; +import org.apache.hadoop.yarn.server.records.Version; + +/** + * FederationMembershipStateStore maintains the state of all + * <em>subcluster(s)</em> as encapsulated by {@code SubClusterInfo} for all the + * subcluster(s) that are participating in federation. + */ +@Private +@Unstable +public interface FederationMembershipStateStore { + + /** + * Get the {@link Version} of the underlying federation membership state + * store. + * + * @return the {@link Version} of the underlying federation membership state + * store + */ + Version getMembershipStateStoreVersion(); + + /** + * Register a <em>subcluster</em> by publishing capabilities as represented by + * {@code SubClusterInfo} to indicate participation in federation. 
This is + * typically done during initialization or restart/failover of the + * subcluster's <code>ResourceManager</code>. Upon successful registration, an + * identifier for the <em>subcluster</em> which is unique across the federated + * cluster is returned. The identifier is static, i.e. preserved across + * restarts and failover. + * + * @param registerSubClusterRequest the capabilities of the subcluster that + * wants to participate in federation. The subcluster id is also + * specified in case registration is triggered by restart/failover + * @return empty response if registration was successful + * @throws YarnException if the request is invalid/fails + */ + SubClusterRegisterResponse registerSubCluster( + SubClusterRegisterRequest registerSubClusterRequest) throws YarnException; + + /** + * Deregister a <em>subcluster</em> identified by {@code SubClusterId} to + * change state in federation. This can be done to mark the sub cluster lost, + * deregistered, or decommissioned. + * + * @param subClusterDeregisterRequest - the request to deregister the + * sub-cluster from federation. + * @return empty response on successfully deregistering the subcluster + * @throws YarnException if the request is invalid/fails + */ + SubClusterDeregisterResponse deregisterSubCluster( + SubClusterDeregisterRequest subClusterDeregisterRequest) + throws YarnException; + + /** + * Periodic heartbeat from a <code>ResourceManager</code> participating in + * federation to indicate liveliness. The heartbeat publishes the current + * capabilities as represented by {@code SubClusterInfo} of the subcluster. + * Currently response is empty if the operation was successful, if not an + * exception reporting reason for a failure. 
+ * + * @param subClusterHeartbeatRequest the capabilities of the subcluster that + * wants to keep alive its participation in federation + * @return response currently empty if the heartbeat was successfully processed + * @throws YarnException if the request is invalid/fails + */ + SubClusterHeartbeatResponse subClusterHeartbeat( + SubClusterHeartbeatRequest subClusterHeartbeatRequest) + throws YarnException; + + /** + * Get the membership information of <em>subcluster</em> as identified by + * {@code SubClusterId}. The membership information includes the cluster + * endpoint and current capabilities as represented by {@code SubClusterInfo}. + * + * @param subClusterRequest the subcluster whose information is required + * @return the {@code SubClusterInfo} + * @throws YarnException if the request is invalid/fails + */ + GetSubClusterInfoResponse getSubCluster( + GetSubClusterInfoRequest subClusterRequest) throws YarnException; + + /** + * Get the membership information of all the <em>subclusters</em> that are + * currently participating in federation. The membership information includes + * the cluster endpoint and current capabilities as represented by + * {@code SubClusterInfo}. 
+ * + * @param subClustersRequest request for sub-clusters information + * @return a map of {@code SubClusterInfo} keyed by the {@code SubClusterId} + * @throws YarnException if the request is invalid/fails + */ + GetSubClustersInfoResponse getSubClusters( + GetSubClustersInfoRequest subClustersRequest) throws YarnException; + +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/22db8fda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/package-info.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/package-info.java new file mode 100644 index 0000000..33179e9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/package-info.java @@ -0,0 +1,17 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +package org.apache.hadoop.yarn.server.federation.store; \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/22db8fda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterInfoRequest.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterInfoRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterInfoRequest.java new file mode 100644 index 0000000..656dea9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterInfoRequest.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * Request class to obtain information about a sub-cluster identified by its + * {@link SubClusterId}. + */ +@Private +@Unstable +public abstract class GetSubClusterInfoRequest { + + @Private + @Unstable + public static GetSubClusterInfoRequest newInstance( + SubClusterId subClusterId) { + GetSubClusterInfoRequest subClusterRequest = + Records.newRecord(GetSubClusterInfoRequest.class); + subClusterRequest.setSubClusterId(subClusterId); + return subClusterRequest; + } + + /** + * Get the {@link SubClusterId} representing the unique identifier of the + * subcluster. + * + * @return the subcluster identifier + */ + @Public + @Unstable + public abstract SubClusterId getSubClusterId(); + + /** + * Set the {@link SubClusterId} representing the unique identifier of the + * subcluster. 
+ * + * @param subClusterId the subcluster identifier + */ + @Public + @Unstable + public abstract void setSubClusterId(SubClusterId subClusterId); +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/22db8fda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterInfoResponse.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterInfoResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterInfoResponse.java new file mode 100644 index 0000000..f7bc74d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterInfoResponse.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * Response to a query with {@link SubClusterInfo} about a sub-cluster. + */ +@Private +@Unstable +public abstract class GetSubClusterInfoResponse { + + @Private + @Unstable + public static GetSubClusterInfoResponse newInstance( + SubClusterInfo subClusterInfo) { + GetSubClusterInfoResponse registerSubClusterRequest = + Records.newRecord(GetSubClusterInfoResponse.class); + registerSubClusterRequest.setSubClusterInfo(subClusterInfo); + return registerSubClusterRequest; + } + + /** + * Get the {@link SubClusterInfo} encapsulating the information about the + * sub-cluster. + * + * @return the information pertaining to the sub-cluster + */ + @Public + @Unstable + public abstract SubClusterInfo getSubClusterInfo(); + + /** + * Set the {@link SubClusterInfo} encapsulating the information about the + * sub-cluster. 
+ * + * @param subClusterInfo the information pertaining to the sub-cluster + */ + @Private + @Unstable + public abstract void setSubClusterInfo(SubClusterInfo subClusterInfo); + +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/22db8fda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClustersInfoRequest.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClustersInfoRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClustersInfoRequest.java new file mode 100644 index 0000000..3264d81 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClustersInfoRequest.java @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * Request class to obtain information about all sub-clusters that are + * participating in federation. + */ +@Private +@Unstable +public abstract class GetSubClustersInfoRequest { + + @Public + @Unstable + public static GetSubClustersInfoRequest newInstance( + boolean filterInactiveSubClusters) { + GetSubClustersInfoRequest request = + Records.newRecord(GetSubClustersInfoRequest.class); + request.setFilterInactiveSubClusters(filterInactiveSubClusters); + return request; + } + + /** + * Get the flag that indicates whether only active sub-clusters should be + * returned. + * + * @return whether to filter out inactive sub-clusters + */ + @Public + @Unstable + public abstract boolean getFilterInactiveSubClusters(); + + /** + * Set the flag that indicates whether only active sub-clusters should be + * returned. 
+ * + * @param filterInactiveSubClusters whether to filter out inactive + * sub-clusters + */ + @Public + @Unstable + public abstract void setFilterInactiveSubClusters( + boolean filterInactiveSubClusters); + +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/22db8fda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClustersInfoResponse.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClustersInfoResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClustersInfoResponse.java new file mode 100644 index 0000000..bcf75ab --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClustersInfoResponse.java @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * Response to a query with list of {@link SubClusterInfo} about all + * sub-clusters that are currently participating in Federation. + */ +@Private +@Unstable +public abstract class GetSubClustersInfoResponse { + + @Public + @Unstable + public static GetSubClustersInfoResponse newInstance( + List<SubClusterInfo> subClusters) { + GetSubClustersInfoResponse subClusterInfos = + Records.newRecord(GetSubClustersInfoResponse.class); + subClusterInfos.setSubClusters(subClusters); + return subClusterInfos; + } + + /** + * Get the list of {@link SubClusterInfo} representing the information about + * all sub-clusters that are currently participating in Federation. + * + * @return the list of {@link SubClusterInfo} + */ + @Public + @Unstable + public abstract List<SubClusterInfo> getSubClusters(); + + /** + * Set the list of {@link SubClusterInfo} representing the information about + * all sub-clusters that are currently participating in Federation. 
+ * + * @param subClusters the list of {@link SubClusterInfo} + */ + @Private + @Unstable + public abstract void setSubClusters(List<SubClusterInfo> subClusters); + +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/22db8fda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterDeregisterRequest.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterDeregisterRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterDeregisterRequest.java new file mode 100644 index 0000000..50a50a1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterDeregisterRequest.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * <p> + * The request sent to set the state of a subcluster to either + * SC_DECOMMISSIONED, SC_LOST, or SC_DEREGISTERED. + * + * <p> + * The update includes details such as: + * <ul> + * <li>{@link SubClusterId}</li> + * <li>{@link SubClusterState}</li> + * </ul> + */ +@Private +@Unstable +public abstract class SubClusterDeregisterRequest { + + @Private + @Unstable + public static SubClusterDeregisterRequest newInstance( + SubClusterId subClusterId, SubClusterState subClusterState) { + SubClusterDeregisterRequest registerRequest = + Records.newRecord(SubClusterDeregisterRequest.class); + registerRequest.setSubClusterId(subClusterId); + registerRequest.setState(subClusterState); + return registerRequest; + } + + /** + * Get the {@link SubClusterId} representing the unique identifier of the + * subcluster. + * + * @return the subcluster identifier + */ + @Public + @Unstable + public abstract SubClusterId getSubClusterId(); + + /** + * Set the {@link SubClusterId} representing the unique identifier of the + * subcluster. + * + * @param subClusterId the subcluster identifier + */ + @Private + @Unstable + public abstract void setSubClusterId(SubClusterId subClusterId); + + /** + * Get the {@link SubClusterState} of the subcluster. + * + * @return the state of the subcluster + */ + @Public + @Unstable + public abstract SubClusterState getState(); + + /** + * Set the {@link SubClusterState} of the subcluster. 
+ * + * @param state the state of the subCluster + */ + @Private + @Unstable + public abstract void setState(SubClusterState state); +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/22db8fda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterDeregisterResponse.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterDeregisterResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterDeregisterResponse.java new file mode 100644 index 0000000..74fe994 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterDeregisterResponse.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * SubClusterDeregisterResponse contains the answer from the {@code + * FederationMembershipStateStore} to a request to deregister the sub cluster. + * Currently response is empty if the operation was successful, if not an + * exception reporting reason for a failure. + */ +@Private +@Unstable +public abstract class SubClusterDeregisterResponse { + + @Private + @Unstable + public static SubClusterDeregisterResponse newInstance() { + SubClusterDeregisterResponse response = + Records.newRecord(SubClusterDeregisterResponse.class); + return response; + } + +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/22db8fda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterHeartbeatRequest.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterHeartbeatRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterHeartbeatRequest.java new file mode 100644 index 0000000..3a07c18 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterHeartbeatRequest.java @@ -0,0 +1,149 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * <p> + * SubClusterHeartbeatRequest is a report of the runtime information of the + * subcluster that is participating in federation. 
+ * + * <p> + * It includes information such as: + * <ul> + * <li>{@link SubClusterId}</li> + * <li>The URL of the subcluster</li> + * <li>The timestamp representing the last start time of the subCluster</li> + * <li>{@code FederationsubClusterState}</li> + * <li>The current capacity and utilization of the subCluster</li> + * </ul> + */ +@Private +@Unstable +public abstract class SubClusterHeartbeatRequest { + + @Private + @Unstable + public static SubClusterHeartbeatRequest newInstance( + SubClusterId subClusterId, SubClusterState state, String capability) { + return newInstance(subClusterId, 0, state, capability); + } + + @Private + @Unstable + public static SubClusterHeartbeatRequest newInstance( + SubClusterId subClusterId, long lastHeartBeat, SubClusterState state, + String capability) { + SubClusterHeartbeatRequest subClusterHeartbeatRequest = + Records.newRecord(SubClusterHeartbeatRequest.class); + subClusterHeartbeatRequest.setSubClusterId(subClusterId); + subClusterHeartbeatRequest.setLastHeartBeat(lastHeartBeat); + subClusterHeartbeatRequest.setState(state); + subClusterHeartbeatRequest.setCapability(capability); + return subClusterHeartbeatRequest; + } + + /** + * Get the {@link SubClusterId} representing the unique identifier of the + * subcluster. + * + * @return the subcluster identifier + */ + @Public + @Unstable + public abstract SubClusterId getSubClusterId(); + + /** + * Set the {@link SubClusterId} representing the unique identifier of the + * subCluster. + * + * @param subClusterId the subCluster identifier + */ + @Private + @Unstable + public abstract void setSubClusterId(SubClusterId subClusterId); + + /** + * Get the last heart beat time of the subcluster. + * + * @return the state of the subcluster + */ + @Public + @Unstable + public abstract long getLastHeartBeat(); + + /** + * Set the last heartbeat time of the subcluster. 
+ * + * @param time the last heartbeat time of the subcluster + */ + @Private + @Unstable + public abstract void setLastHeartBeat(long time); + + /** + * Get the {@link SubClusterState} of the subcluster. + * + * @return the state of the subcluster + */ + @Public + @Unstable + public abstract SubClusterState getState(); + + /** + * Set the {@link SubClusterState} of the subcluster. + * + * @param state the state of the subCluster + */ + @Private + @Unstable + public abstract void setState(SubClusterState state); + + /** + * Get the current capacity and utilization of the subcluster. This is the + * JAXB marshalled string representation of the <code>ClusterMetrics</code>. + * + * @return the current capacity and utilization of the subcluster + */ + @Public + @Unstable + public abstract String getCapability(); + + /** + * Set the current capacity and utilization of the subCluster. This is the + * JAXB marshalled string representation of the <code>ClusterMetrics</code>. + * + * @param capability the current capacity and utilization of the subcluster + */ + @Private + @Unstable + public abstract void setCapability(String capability); + + @Override + public String toString() { + return "SubClusterHeartbeatRequest [getSubClusterId() = " + + getSubClusterId() + ", getState() = " + getState() + + ", getLastHeartBeat = " + getLastHeartBeat() + ", getCapability() = " + + getCapability() + "]"; + } + +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/22db8fda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterHeartbeatResponse.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterHeartbeatResponse.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterHeartbeatResponse.java new file mode 100644 index 0000000..0b7fd8c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterHeartbeatResponse.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * SubClusterHeartbeatResponse contains the response from the {@code + * FederationMembershipStateStore} to a periodic heartbeat to indicate + * liveliness from a <code>ResourceManager</code> participating in federation. + * Currently response is empty if the operation was successful, if not an + * exception reporting reason for a failure. 
+ * <p> + * NOTE: This can be extended to push down policies in future + */ +@Private +@Unstable +public abstract class SubClusterHeartbeatResponse { + + @Private + @Unstable + public static SubClusterHeartbeatResponse newInstance() { + SubClusterHeartbeatResponse response = + Records.newRecord(SubClusterHeartbeatResponse.class); + return response; + } + +} --------------------------------------------------------------------- To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org