Hi,

The script was run and submitted here: https://reviews.apache.org/r/46075/
It was not committed along with the patch, though. I've sent a notice to @Jie.

Since the change is included in the patch as @haosdent mentioned, I guess we
could drop the one I created. Thanks for the reminder!

Cheers,
/J

haosdent <[email protected]> wrote on 04/22/2016 22:57:53:

> From: haosdent <[email protected]>
> To: dev <[email protected]>
> Date: 04/22/2016 22:59
> Subject: Re: mesos git commit: Add /containers endpoint.
>
> I noticed this new endpoint today. I think it has already been included in
> https://reviews.apache.org/r/46472/
>
> On Fri, Apr 22, 2016 at 10:54 PM, Neil Conway <[email protected]> wrote:
>
> > Folks,
> >
> > When adding a new endpoint or modifying the help text of an existing
> > endpoint, we should rerun the `support/generate-endpoint-help.py`
> > script.
> >
> > Neil
> >
> >
> > On Thu, Apr 21, 2016 at 8:49 PM, <[email protected]> wrote:
> > > Repository: mesos
> > > Updated Branches:
> > >   refs/heads/master fa55a69a2 -> 90f7645cc
> > >
> > >
> > > Add /containers endpoint.
> > >
> > > It returns both resource statistics and container status.
> > >
> > > Review: https://reviews.apache.org/r/45014/
> > >
> > >
> > > Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
> > > Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/90f7645c
> > > Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/90f7645c
> > > Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/90f7645c
> > >
> > > Branch: refs/heads/master
> > > Commit: 90f7645cc51d43b63990aec4d0c5e37423b584f0
> > > Parents: fa55a69
> > > Author: Jay Guo <[email protected]>
> > > Authored: Thu Apr 21 09:48:10 2016 -0700
> > > Committer: Jie Yu <[email protected]>
> > > Committed: Thu Apr 21 17:48:59 2016 -0700
> > >
> > > ----------------------------------------------------------------------
> > >  src/slave/http.cpp          | 135 +++++++++++++++++++++++++++++++++++++
> > >  src/slave/slave.cpp         |   6 ++
> > >  src/slave/slave.hpp         |   5 ++
> > >  src/tests/containerizer.cpp |   3 +
> > >  src/tests/containerizer.hpp |   4 ++
> > >  src/tests/slave_tests.cpp   | 139 +++++++++++++++++++++++++++++++++++++++
> > >  6 files changed, 292 insertions(+)
> > > ----------------------------------------------------------------------
> > >
> > >
> > > http://git-wip-us.apache.org/repos/asf/mesos/blob/90f7645c/src/slave/http.cpp
> > > ----------------------------------------------------------------------
> > > diff --git a/src/slave/http.cpp b/src/slave/http.cpp
> > > index 3908e33..f887a71 100644
> > > --- a/src/slave/http.cpp
> > > +++ b/src/slave/http.cpp
> > > @@ -14,9 +14,11 @@
> > >  // See the License for the specific language governing permissions and
> > >  // limitations under the License.
> > >
> > > +#include <list>
> > >  #include <memory>
> > >  #include <sstream>
> > >  #include <string>
> > > +#include <tuple>
> > >
> > >  #include <mesos/executor/executor.hpp>
> > >
> > > @@ -25,6 +27,7 @@
> > >  #include <mesos/attributes.hpp>
> > >  #include <mesos/type_utils.hpp>
> > >
> > > +#include <process/collect.hpp>
> > >  #include <process/help.hpp>
> > >  #include <process/owned.hpp>
> > >  #include <process/limiter.hpp>
> > >
> > > @@ -74,7 +77,9 @@ using process::http::UnsupportedMediaType;
> > >
> > >  using process::metrics::internal::MetricsProcess;
> > >
> > > +using std::list;
> > >  using std::string;
> > > +using std::tuple;
> > >
> > >
> > >  namespace mesos {
> > > @@ -626,6 +631,136 @@ Future<Response> Slave::Http::statistics(
> > >    });
> > >  }
> > >
> > > +
> > > +string Slave::Http::CONTAINERS_HELP()
> > > +{
> > > +  return HELP(
> > > +    TLDR(
> > > +        "Retrieve container status and usage information."),
> > > +    DESCRIPTION(
> > > +        "Returns the current resource consumption data and status for",
> > > +        "containers running under this slave.",
> > > +        "",
> > > +        "Example (**Note**: this is not exhaustive):",
> > > +        "",
> > > +        "```",
> > > +        "[{",
> > > +        "    \"container_id\":\"container\",",
> > > +        "    \"container_status\":",
> > > +        "    {",
> > > +        "        \"network_infos\":",
> > > +        "        [{\"ip_addresses\":[{\"ip_address\":\"192.168.1.1\"}]}]",
> > > +        "    }",
> > > +        "    \"executor_id\":\"executor\",",
> > > +        "    \"executor_name\":\"name\",",
> > > +        "    \"framework_id\":\"framework\",",
> > > +        "    \"source\":\"source\",",
> > > +        "    \"statistics\":",
> > > +        "    {",
> > > +        "        \"cpus_limit\":8.25,",
> > > +        "        \"cpus_nr_periods\":769021,",
> > > +        "        \"cpus_nr_throttled\":1046,",
> > > +        "        \"cpus_system_time_secs\":34501.45,",
> > > +        "        \"cpus_throttled_time_secs\":352.597023453,",
> > > +        "        \"cpus_user_time_secs\":96348.84,",
> > > +        "        \"mem_anon_bytes\":4845449216,",
> > > +        "        \"mem_file_bytes\":260165632,",
> > > +        "        \"mem_limit_bytes\":7650410496,",
> > > +        "        \"mem_mapped_file_bytes\":7159808,",
> > > +        "        \"mem_rss_bytes\":5105614848,",
> > > +        "        \"timestamp\":1388534400.0",
> > > +        "    }",
> > > +        "}]",
> > > +        "```"));
> > > +}
> > > +
> > > +
> > > +Future<Response> Slave::Http::containers(const Request& request) const
> > > +{
> > > +  Owned<list<JSON::Object>> metadata(new list<JSON::Object>());
> > > +  list<Future<ContainerStatus>> statusFutures;
> > > +  list<Future<ResourceStatistics>> statsFutures;
> > > +
> > > +  foreachvalue (const Framework* framework, slave->frameworks) {
> > > +    foreachvalue (const Executor* executor, framework->executors) {
> > > +      const ExecutorInfo& info = executor->info;
> > > +      const ContainerID& containerId = executor->containerId;
> > > +
> > > +      JSON::Object entry;
> > > +      entry.values["framework_id"] = info.framework_id().value();
> > > +      entry.values["executor_id"] = info.executor_id().value();
> > > +      entry.values["executor_name"] = info.name();
> > > +      entry.values["source"] = info.source();
> > > +      entry.values["container_id"] = containerId.value();
> > > +
> > > +      metadata->push_back(entry);
> > > +      statusFutures.push_back(slave->containerizer->status(containerId));
> > > +      statsFutures.push_back(slave->containerizer->usage(containerId));
> > > +    }
> > > +  }
> > > +
> > > +  return await(await(statusFutures), await(statsFutures)).then(
> > > +      [metadata, request](const tuple<
> > > +          Future<list<Future<ContainerStatus>>>,
> > > +          Future<list<Future<ResourceStatistics>>>>& t)
> > > +          -> Future<Response> {
> > > +        const list<Future<ContainerStatus>>& status = std::get<0>(t).get();
> > > +        const list<Future<ResourceStatistics>>& stats = std::get<1>(t).get();
> > > +        CHECK_EQ(status.size(), stats.size());
> > > +        CHECK_EQ(status.size(), metadata->size());
> > > +
> > > +        JSON::Array result;
> > > +
> > > +        auto statusIter = status.begin();
> > > +        auto statsIter = stats.begin();
> > > +        auto metadataIter = metadata->begin();
> > > +
> > > +        while (statusIter != status.end() &&
> > > +               statsIter != stats.end() &&
> > > +               metadataIter != metadata->end()) {
> > > +          JSON::Object& entry = *metadataIter;
> > > +
> > > +          if (statusIter->isReady()) {
> > > +            entry.values["status"] = JSON::protobuf(statusIter->get());
> > > +          } else {
> > > +            LOG(WARNING) << "Failed to get container status for executor '"
> > > +                         << entry.values["executor_id"] << "'"
> > > +                         << " of framework "
> > > +                         << entry.values["framework_id"] << ": "
> > > +                         << (statusIter->isFailed()
> > > +                              ? statusIter->failure()
> > > +                              : "discarded");
> > > +          }
> > > +
> > > +          if (statsIter->isReady()) {
> > > +            entry.values["statistics"] = JSON::protobuf(statsIter->get());
> > > +          } else {
> > > +            LOG(WARNING) << "Failed to get resource statistics for executor '"
> > > +                         << entry.values["executor_id"] << "'"
> > > +                         << " of framework "
> > > +                         << entry.values["framework_id"] << ": "
> > > +                         << (statsIter->isFailed()
> > > +                              ? statsIter->failure()
> > > +                              : "discarded");
> > > +          }
> > > +
> > > +          result.values.push_back(entry);
> > > +
> > > +          statusIter++;
> > > +          statsIter++;
> > > +          metadataIter++;
> > > +        }
> > > +
> > > +        return process::http::OK(result, request.url.query.get("jsonp"));
> > > +      })
> > > +      .repair([](const Future<Response>& future) {
> > > +        LOG(WARNING) << "Could not collect container status and statistics: "
> > > +                     << (future.isFailed() ? future.failure() : "discarded");
> > > +
> > > +        return process::http::InternalServerError();
> > > +      });
> > > +}
> > > +
> > >  } // namespace slave {
> > >  } // namespace internal {
> > >  } // namespace mesos {
> > >
> > >
> > > http://git-wip-us.apache.org/repos/asf/mesos/blob/90f7645c/src/slave/slave.cpp
> > > ----------------------------------------------------------------------
> > > diff --git a/src/slave/slave.cpp b/src/slave/slave.cpp
> > > index d82dec2..a365e8f 100644
> > > --- a/src/slave/slave.cpp
> > > +++ b/src/slave/slave.cpp
> > > @@ -104,6 +104,7 @@ using std::list;
> > >  using std::map;
> > >  using std::set;
> > >  using std::string;
> > > +using std::tuple;
> > >  using std::vector;
> > >
> > >  using process::async;
> > > @@ -757,6 +758,11 @@ void Slave::initialize()
> > >          const Option<string>& principal) {
> > >        return http.statistics(request, principal);
> > >      });
> > > +  route("/containers",
> > > +        Http::CONTAINERS_HELP(),
> > > +        [http](const process::http::Request& request) {
> > > +          return http.containers(request);
> > > +        });
> > >
> > >    // Expose the log file for the webui. Fall back to 'log_dir' if
> > >    // an explicit file was not specified.
> > >
> > >
> > > http://git-wip-us.apache.org/repos/asf/mesos/blob/90f7645c/src/slave/slave.hpp
> > > ----------------------------------------------------------------------
> > > diff --git a/src/slave/slave.hpp b/src/slave/slave.hpp
> > > index f78c1b4..20a4bcd 100644
> > > --- a/src/slave/slave.hpp
> > > +++ b/src/slave/slave.hpp
> > > @@ -451,11 +451,16 @@ private:
> > >        const process::http::Request& request,
> > >        const Option<std::string>& /* principal */) const;
> > >
> > > +  // /slave/containers
> > > +  process::Future<process::http::Response> containers(
> > > +      const process::http::Request& request) const;
> > > +
> > >    static std::string EXECUTOR_HELP();
> > >    static std::string FLAGS_HELP();
> > >    static std::string HEALTH_HELP();
> > >    static std::string STATE_HELP();
> > >    static std::string STATISTICS_HELP();
> > > +  static std::string CONTAINERS_HELP();
> > >
> > >  private:
> > >    Slave* slave;
> > >
> > >
> > > http://git-wip-us.apache.org/repos/asf/mesos/blob/90f7645c/src/tests/containerizer.cpp
> > > ----------------------------------------------------------------------
> > > diff --git a/src/tests/containerizer.cpp b/src/tests/containerizer.cpp
> > > index 4c7f5a2..105ca9c 100644
> > > --- a/src/tests/containerizer.cpp
> > > +++ b/src/tests/containerizer.cpp
> > > @@ -297,6 +297,9 @@ void TestContainerizer::setup()
> > >    EXPECT_CALL(*this, usage(_))
> > >      .WillRepeatedly(Return(ResourceStatistics()));
> > >
> > > +  EXPECT_CALL(*this, status(_))
> > > +    .WillRepeatedly(Return(ContainerStatus()));
> > > +
> > >    EXPECT_CALL(*this, update(_, _))
> > >      .WillRepeatedly(Return(Nothing()));
> > >
> > >
> > > http://git-wip-us.apache.org/repos/asf/mesos/blob/90f7645c/src/tests/containerizer.hpp
> > > ----------------------------------------------------------------------
> > > diff --git a/src/tests/containerizer.hpp b/src/tests/containerizer.hpp
> > > index efc1ca8..ded331b 100644
> > > --- a/src/tests/containerizer.hpp
> > > +++ b/src/tests/containerizer.hpp
> > > @@ -107,6 +107,10 @@ public:
> > >        process::Future<ResourceStatistics>(const ContainerID&));
> > >
> > >    MOCK_METHOD1(
> > > +      status,
> > > +      process::Future<ContainerStatus>(const ContainerID&));
> > > +
> > > +  MOCK_METHOD1(
> > >        wait,
> > >        process::Future<containerizer::Termination>(const ContainerID&));
> > >
> > >
> > > http://git-wip-us.apache.org/repos/asf/mesos/blob/90f7645c/src/tests/slave_tests.cpp
> > > ----------------------------------------------------------------------
> > > diff --git a/src/tests/slave_tests.cpp b/src/tests/slave_tests.cpp
> > > index ee58488..3f65335 100644
> > > --- a/src/tests/slave_tests.cpp
> > > +++ b/src/tests/slave_tests.cpp
> > > @@ -1890,6 +1890,145 @@ TEST_F(SlaveTest, StatisticsEndpointAuthentication)
> > >  }
> > >
> > >
> > > +// This test verifies correct handling of the containers endpoint when
> > > +// there is no executor running.
> > > +TEST_F(SlaveTest, ContainersEndpointNoExecutor)
> > > +{
> > > +  Try<Owned<cluster::Master>> master = StartMaster();
> > > +  ASSERT_SOME(master);
> > > +
> > > +  Owned<MasterDetector> detector = master.get()->createDetector();
> > > +  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get());
> > > +  ASSERT_SOME(slave);
> > > +
> > > +  Future<Response> response = process::http::get(
> > > +      slave.get()->pid,
> > > +      "containers",
> > > +      None(),
> > > +      createBasicAuthHeaders(DEFAULT_CREDENTIAL));
> > > +
> > > +  AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
> > > +  AWAIT_EXPECT_RESPONSE_HEADER_EQ(APPLICATION_JSON, "Content-Type", response);
> > > +  AWAIT_EXPECT_RESPONSE_BODY_EQ("[]", response);
> > > +}
> > > +
> > > +
> > > +// This is an end-to-end test that verifies that the slave returns the
> > > +// correct container status and resource statistics based on the
> > > +// currently running executors, and the values returned by the
> > > +// '/containers' endpoint are as expected.
> > > +TEST_F(SlaveTest, ContainersEndpoint)
> > > +{
> > > +  Try<Owned<cluster::Master>> master = StartMaster();
> > > +  ASSERT_SOME(master);
> > > +
> > > +  MockExecutor exec(DEFAULT_EXECUTOR_ID);
> > > +  TestContainerizer containerizer(&exec);
> > > +  StandaloneMasterDetector detector(master.get()->pid);
> > > +
> > > +  MockSlave slave(CreateSlaveFlags(), &detector, &containerizer);
> > > +  spawn(slave);
> > > +
> > > +  MockScheduler sched;
> > > +  MesosSchedulerDriver driver(
> > > +      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
> > > +
> > > +  EXPECT_CALL(sched, registered(_, _, _));
> > > +  EXPECT_CALL(exec, registered(_, _, _, _));
> > > +
> > > +  Future<vector<Offer>> offers;
> > > +
> > > +  EXPECT_CALL(sched, resourceOffers(&driver, _))
> > > +    .WillOnce(FutureArg<1>(&offers))
> > > +    .WillRepeatedly(Return()); // Ignore subsequent offers.
> > > +
> > > +  driver.start();
> > > +
> > > +  AWAIT_READY(offers);
> > > +  EXPECT_NE(0u, offers.get().size());
> > > +
> > > +  const Offer& offer = offers.get()[0];
> > > +
> > > +  TaskInfo task = createTask(
> > > +      offer.slave_id(),
> > > +      Resources::parse("cpus:0.1;mem:32").get(),
> > > +      "sleep 1000",
> > > +      exec.id);
> > > +
> > > +  EXPECT_CALL(exec, launchTask(_, _))
> > > +    .WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));
> > > +
> > > +  Future<TaskStatus> status;
> > > +  EXPECT_CALL(sched, statusUpdate(&driver, _))
> > > +    .WillOnce(FutureArg<1>(&status));
> > > +
> > > +  driver.launchTasks(offer.id(), {task});
> > > +
> > > +  AWAIT_READY(status);
> > > +  EXPECT_EQ(TASK_RUNNING, status.get().state());
> > > +
> > > +  ResourceStatistics statistics;
> > > +  statistics.set_mem_limit_bytes(2048);
> > > +
> > > +  EXPECT_CALL(containerizer, usage(_))
> > > +    .WillOnce(Return(statistics));
> > > +
> > > +  ContainerStatus containerStatus;
> > > +
> > > +  CgroupInfo* cgroupInfo = containerStatus.mutable_cgroup_info();
> > > +  CgroupInfo::NetCls* netCls = cgroupInfo->mutable_net_cls();
> > > +  netCls->set_classid(42);
> > > +
> > > +  NetworkInfo* networkInfo = containerStatus.add_network_infos();
> > > +  NetworkInfo::IPAddress* ipAddr = networkInfo->add_ip_addresses();
> > > +  ipAddr->set_ip_address("192.168.1.20");
> > > +
> > > +  EXPECT_CALL(containerizer, status(_))
> > > +    .WillOnce(Return(containerStatus));
> > > +
> > > +  Future<Response> response = process::http::get(
> > > +      slave.self(),
> > > +      "containers",
> > > +      None(),
> > > +      createBasicAuthHeaders(DEFAULT_CREDENTIAL));
> > > +
> > > +  AWAIT_READY(response);
> > > +  AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
> > > +  AWAIT_EXPECT_RESPONSE_HEADER_EQ(APPLICATION_JSON, "Content-Type", response);
> > > +
> > > +  Try<JSON::Value> value = JSON::parse(response.get().body);
> > > +  ASSERT_SOME(value);
> > > +
> > > +  Try<JSON::Value> expected = JSON::parse(
> > > +      "[{"
> > > +          "\"executor_id\":\"default\","
> > > +          "\"executor_name\":\"\","
> > > +          "\"source\":\"\","
> > > +          "\"statistics\":{"
> > > +              "\"mem_limit_bytes\":2048"
> > > +          "},"
> > > +          "\"status\":{"
> > > +              "\"cgroup_info\":{\"net_cls\":{\"classid\":42}},"
> > > +              "\"network_infos\":[{"
> > > +                  "\"ip_addresses\":[{\"ip_address\":\"192.168.1.20\"}]"
> > > +              "}]"
> > > +          "}"
> > > +      "}]");
> > > +
> > > +  ASSERT_SOME(expected);
> > > +  EXPECT_TRUE(value.get().contains(expected.get()));
> > > +
> > > +  EXPECT_CALL(exec, shutdown(_))
> > > +    .Times(AtMost(1));
> > > +
> > > +  driver.stop();
> > > +  driver.join();
> > > +
> > > +  terminate(slave);
> > > +  wait(slave);
> > > +}
> > > +
> > > +
> > >  // This test ensures that when a slave is shutting down, it will not
> > >  // try to re-register with the master.
> > >  TEST_F(SlaveTest, DISABLED_TerminatingSlaveDoesNotReregister)
> > >
> > >
>
>
> --
> Best Regards,
> Haosdent Huang
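For anyone who wants to query the new endpoint by hand rather than through the
test suite, below is a minimal sketch using libcurl. It is not part of the
patch above; the agent address "localhost:5051" (the default agent port) and
the absence of authentication on the endpoint are assumptions, so adjust both
for your setup.

    #include <curl/curl.h>

    #include <iostream>
    #include <string>

    // Appends each chunk of the response body into the std::string passed
    // via 'userdata'.
    static size_t appendToString(
        char* data, size_t size, size_t nmemb, void* userdata)
    {
      static_cast<std::string*>(userdata)->append(data, size * nmemb);
      return size * nmemb;
    }

    int main()
    {
      curl_global_init(CURL_GLOBAL_DEFAULT);

      CURL* curl = curl_easy_init();
      if (curl == nullptr) {
        return 1;
      }

      std::string body;

      // ASSUMPTION: an agent running locally on the default port 5051,
      // with no authentication required for '/containers'.
      curl_easy_setopt(curl, CURLOPT_URL, "http://localhost:5051/containers");
      curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, appendToString);
      curl_easy_setopt(curl, CURLOPT_WRITEDATA, &body);

      CURLcode result = curl_easy_perform(curl);
      if (result != CURLE_OK) {
        std::cerr << "Request failed: " << curl_easy_strerror(result)
                  << std::endl;
      } else {
        // The endpoint returns a JSON array with one entry per running
        // container, each carrying "statistics" and "status" fields as
        // described in CONTAINERS_HELP above.
        std::cout << body << std::endl;
      }

      curl_easy_cleanup(curl);
      curl_global_cleanup();
      return 0;
    }

The SlaveTest.ContainersEndpoint test in the patch performs the equivalent
check from inside the test suite via process::http::get and JSON::parse.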
