This is an automated email from the ASF dual-hosted git repository.

qianzhang pushed a commit to branch 1.7.x
in repository https://gitbox.apache.org/repos/asf/mesos.git
commit d8aeab2952e34ad43d15e4d4907cecacbd309aee
Author: Qian Zhang <[email protected]>
AuthorDate: Fri Oct 11 22:05:14 2019 +0800

    Supported destroying UCR container in `PROVISIONING` state.

    Previously, in MESOS-3736, we made the Docker store support pulling the
    same image simultaneously as a performance improvement. However, it may
    cause an issue: if a pull hangs for any reason, all subsequent pull
    requests for the same image will hang as well, and as a result the
    container destroy will also hang, since destroy has to wait for
    provisioning to finish; see MESOS-4985 for details.

    So in this patch we removed that performance improvement and made UCR
    able to destroy a container that is being provisioned, i.e., UCR will
    discard the container provisioning and then proceed with the container
    destroy. We also improved the Docker fetcher plugin so that when
    container provisioning is discarded, the `curl` process used to fetch
    the manifest or a blob is killed immediately.

    Review: https://reviews.apache.org/r/71608
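The discard chain this patch sets up can be summarized with the following
standalone libprocess sketch. It is illustrative only: `pullImage` and
`provision` below are hypothetical stand-ins for the store/backend work,
not Mesos APIs; only the `Future`/`Promise` primitives match the diff.

    #include <process/future.hpp>
    #include <process/owned.hpp>

    using process::Future;
    using process::Owned;
    using process::Promise;

    Future<int> pullImage()
    {
      // Hypothetical stand-in for the puller; stays pending forever,
      // like a hung `curl` fetch.
      static Promise<int>* pending = new Promise<int>();
      return pending->future();
    }

    Future<int> provision()
    {
      Owned<Promise<int>> promise(new Promise<int>());

      Future<int> future = pullImage()
        .onAny([=](const Future<int>& result) {
          // Bridge the internal result into the caller-facing promise.
          if (result.isReady()) {
            promise->set(result.get());
          } else if (result.isDiscarded()) {
            promise->discard();
          } else {
            promise->fail(result.failure());
          }
        });

      // If the caller discards (as `_destroy` now does for a container
      // in `PROVISIONING`), complete the caller-facing future as
      // discarded and propagate the discard inward, where an `onDiscard`
      // handler can kill the `curl` subprocess.
      return promise->future()
        .onDiscard([promise, future]() mutable {
          promise->discard();
          future.discard();
        });
    }

The intermediate promise is what lets the destroy path proceed immediately
while the underlying pull is torn down asynchronously.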
---
 src/slave/containerizer/mesos/containerizer.cpp    |  15 +-
 src/slave/containerizer/mesos/containerizer.hpp    |   4 +-
 .../mesos/provisioner/docker/store.cpp             |  69 +++------
 .../mesos/provisioner/provisioner.cpp              | 156 +++++++++++++++------
 .../mesos/provisioner/provisioner.hpp              |  30 ++--
 .../containerizer/mesos_containerizer_tests.cpp    |  95 ++++++++++---
 .../containerizer/provisioner_docker_tests.cpp     | 107 --------------
 src/uri/fetchers/docker.cpp                        |  22 ++-
 8 files changed, 253 insertions(+), 245 deletions(-)

diff --git a/src/slave/containerizer/mesos/containerizer.cpp b/src/slave/containerizer/mesos/containerizer.cpp
index 6087c0f..2e3445b 100644
--- a/src/slave/containerizer/mesos/containerizer.cpp
+++ b/src/slave/containerizer/mesos/containerizer.cpp
@@ -2577,20 +2577,11 @@ void MesosContainerizerProcess::_destroy(
   }
 
   if (previousState == PROVISIONING) {
-    VLOG(1) << "Waiting for the provisioner to complete provisioning "
-            << "before destroying container " << containerId;
+    VLOG(1) << "Discarding the provisioning for container " << containerId;
 
-    // Wait for the provisioner to finish provisioning before we
-    // start destroying the container.
-    container->provisioning
-      .onAny(defer(
-          self(),
-          &Self::_____destroy,
-          containerId,
-          termination,
-          vector<Future<Nothing>>()));
+    container->provisioning.discard();
 
-    return;
+    return _____destroy(containerId, termination, vector<Future<Nothing>>());
   }
 
   if (previousState == PREPARING) {
diff --git a/src/slave/containerizer/mesos/containerizer.hpp b/src/slave/containerizer/mesos/containerizer.hpp
index 263a177..9c02a1c 100644
--- a/src/slave/containerizer/mesos/containerizer.hpp
+++ b/src/slave/containerizer/mesos/containerizer.hpp
@@ -374,8 +374,8 @@ private:
     Option<process::Future<Option<int>>> status;
 
     // We keep track of the future for 'provisioner->provision' so
-    // that destroy will only start calling 'provisioner->destroy'
-    // after 'provisioner->provision' has finished.
+    // that we can discard the provisioning for the container which
+    // is destroyed when it is being provisioned.
     process::Future<ProvisionInfo> provisioning;
 
     // We keep track of the future that is waiting for all the
diff --git a/src/slave/containerizer/mesos/provisioner/docker/store.cpp b/src/slave/containerizer/mesos/provisioner/docker/store.cpp
index 286ee5f..5489936 100644
--- a/src/slave/containerizer/mesos/provisioner/docker/store.cpp
+++ b/src/slave/containerizer/mesos/provisioner/docker/store.cpp
@@ -142,7 +142,6 @@ private:
   Owned<MetadataManager> metadataManager;
   Owned<Puller> puller;
 
-  hashmap<string, Owned<Promise<Image>>> pulling;
 
   // For executing path removals in a separated actor.
   process::Executor executor;
@@ -338,51 +337,30 @@ Future<Image> StoreProcess::_get(
     }
   }
 
-  // If there is already a pulling going on for the given 'name', we
-  // will skip the additional pulling.
-  const string name = stringify(reference);
+  Try<string> staging =
+    os::mkdtemp(paths::getStagingTempDir(flags.docker_store_dir));
 
-  if (!pulling.contains(name)) {
-    Try<string> staging =
-      os::mkdtemp(paths::getStagingTempDir(flags.docker_store_dir));
-
-    if (staging.isError()) {
-      return Failure(
-          "Failed to create a staging directory: " + staging.error());
-    }
-
-    Owned<Promise<Image>> promise(new Promise<Image>());
-
-    Future<Image> future = metrics.image_pull.time(puller->pull(
-        reference,
-        staging.get(),
-        backend,
-        config)
-      .then(defer(self(),
-                  &Self::moveLayers,
-                  staging.get(),
-                  lambda::_1,
-                  backend))
-      .then(defer(self(), [=](const Image& image) {
-        return metadataManager->put(image);
-      }))
-      .onAny(defer(self(), [=](const Future<Image>&) {
-        pulling.erase(name);
-
-        Try<Nothing> rmdir = os::rmdir(staging.get());
-        if (rmdir.isError()) {
-          LOG(WARNING) << "Failed to remove staging directory: "
-                       << rmdir.error();
-        }
-      })));
-
-    promise->associate(future);
-    pulling[name] = promise;
-
-    return promise->future();
+  if (staging.isError()) {
+    return Failure(
+        "Failed to create a staging directory: " + staging.error());
   }
 
-  return pulling[name]->future();
+  return metrics.image_pull.time(puller->pull(
+      reference,
+      staging.get(),
+      backend,
+      config)
+    .then(defer(self(), &Self::moveLayers, staging.get(), lambda::_1, backend))
+    .then(defer(self(), [=](const Image& image) {
+      return metadataManager->put(image);
+    }))
+    .onAny(defer(self(), [=](const Future<Image>& image) {
+      Try<Nothing> rmdir = os::rmdir(staging.get());
+      if (rmdir.isError()) {
+        LOG(WARNING) << "Failed to remove staging directory: "
+                     << rmdir.error();
+      }
+    })));
 }
 
 
@@ -551,11 +529,6 @@ Future<Nothing> StoreProcess::prune(
     const vector<mesos::Image>& excludedImages,
     const hashset<string>& activeLayerPaths)
 {
-  // All existing pulling should have finished.
-  if (!pulling.empty()) {
-    return Failure("Cannot prune and pull at the same time");
-  }
-
   vector<spec::ImageReference> imageReferences;
   imageReferences.reserve(excludedImages.size());
diff --git a/src/slave/containerizer/mesos/provisioner/provisioner.cpp b/src/slave/containerizer/mesos/provisioner/provisioner.cpp
index 9b0ea38..84d9890 100644
--- a/src/slave/containerizer/mesos/provisioner/provisioner.cpp
+++ b/src/slave/containerizer/mesos/provisioner/provisioner.cpp
@@ -58,6 +58,7 @@ using std::vector;
 using process::Failure;
 using process::Future;
 using process::Owned;
+using process::Promise;
 using process::ReadWriteLock;
 
 using mesos::internal::slave::AUFS_BACKEND;
@@ -210,13 +211,6 @@ Try<Owned<Provisioner>> Provisioner::create(
 
   CHECK_SOME(rootDir); // Can't be None since we just created it.
-  Try<hashmap<Image::Type, Owned<Store>>> stores =
-    Store::create(flags, secretResolver);
-
-  if (stores.isError()) {
-    return Error("Failed to create image stores: " + stores.error());
-  }
-
   hashmap<string, Owned<Backend>> backends = Backend::create(flags);
   if (backends.empty()) {
     return Error("No usable provisioner backend created");
@@ -297,10 +291,33 @@ Try<Owned<Provisioner>> Provisioner::create(
 
   LOG(INFO) << "Using default backend '" << defaultBackend.get() << "'";
 
+  return Provisioner::create(
+      flags,
+      rootDir.get(),
+      defaultBackend.get(),
+      backends,
+      secretResolver);
+}
+
+
+Try<Owned<Provisioner>> Provisioner::create(
+    const Flags& flags,
+    const string& rootDir,
+    const string& defaultBackend,
+    const hashmap<string, Owned<Backend>>& backends,
+    SecretResolver* secretResolver)
+{
+  Try<hashmap<Image::Type, Owned<Store>>> stores =
+    Store::create(flags, secretResolver);
+
+  if (stores.isError()) {
+    return Error("Failed to create image stores: " + stores.error());
+  }
+
   return Owned<Provisioner>(new Provisioner(
       Owned<ProvisionerProcess>(new ProvisionerProcess(
-          rootDir.get(),
-          defaultBackend.get(),
+          rootDir,
+          defaultBackend,
           stores.get(),
           backends))));
 }
@@ -400,6 +417,7 @@ Future<Nothing> ProvisionerProcess::recover(
   foreach (const ContainerID& containerId, containers.get()) {
     Owned<Info> info = Owned<Info>(new Info());
+    info->provisioning = ProvisionInfo{};
 
     Try<hashmap<string, hashset<string>>> rootfses =
       provisioner::paths::listContainerRootfses(rootDir, containerId);
@@ -514,15 +532,35 @@ Future<ProvisionInfo> ProvisionerProcess::provision(
           "Unsupported container image type: " + stringify(image.type()));
     }
 
+    Owned<Promise<ProvisionInfo>> promise(new Promise<ProvisionInfo>());
+
     // Get and then provision image layers from the store.
-    return stores.get(image.type()).get()->get(image, defaultBackend)
-      .then(defer(
-          self(),
-          &Self::_provision,
-          containerId,
-          image,
-          defaultBackend,
-          lambda::_1));
+    Future<ProvisionInfo> future =
+      stores.at(image.type())->get(image, defaultBackend)
+        .then(defer(
+            self(),
+            &Self::_provision,
+            containerId,
+            image,
+            defaultBackend,
+            lambda::_1))
+        .onAny(defer(self(), [=](const Future<ProvisionInfo>& provisionInfo) {
+          CHECK(!provisionInfo.isPending());
+
+          if (provisionInfo.isReady()) {
+            promise->set(provisionInfo);
+          } else if (provisionInfo.isDiscarded()) {
+            promise->discard();
+          } else {
+            promise->fail(provisionInfo.failure());
+          }
+        }));
+
+    return promise->future()
+      .onDiscard([promise, future]() mutable {
+        promise->discard();
+        future.discard();
+      });
   }))
   .onAny(defer(self(), [this](const Future<ProvisionInfo>&) {
     rwLock.read_unlock();
@@ -568,7 +606,7 @@ Future<ProvisionInfo> ProvisionerProcess::_provision(
       containerId,
       backend);
 
-  return backends.get(backend).get()->provision(
+  infos[containerId]->provisioning = backends.at(backend)->provision(
       imageInfo.layers,
       rootfs,
       backendDir)
@@ -596,6 +634,8 @@ Future<ProvisionInfo> ProvisionerProcess::_provision(
       return ProvisionInfo{
          rootfs, imageInfo.dockerManifest, imageInfo.appcManifest};
     }));
+
+  return infos[containerId]->provisioning;
 }
 
 
@@ -679,43 +719,71 @@ Future<bool> ProvisionerProcess::_destroy(
 
   const Owned<Info>& info = infos[containerId];
 
-  vector<Future<bool>> futures;
-  foreachkey (const string& backend, info->rootfses) {
-    if (!backends.contains(backend)) {
-      return Failure("Unknown backend '" + backend + "'");
-    }
+  info->provisioning
+    .onAny(defer(self(), [=](const Future<ProvisionInfo>&) -> void {
+      vector<Future<bool>> futures;
+      foreachkey (const string& backend, info->rootfses) {
+        if (!backends.contains(backend)) {
+          infos[containerId]->termination.fail(
+              "Unknown backend '" + backend + "'");
 
-    foreach (const string& rootfsId, info->rootfses[backend]) {
-      string rootfs = provisioner::paths::getContainerRootfsDir(
-          rootDir,
-          containerId,
-          backend,
-          rootfsId);
+          return;
+        }
 
-      string backendDir = provisioner::paths::getBackendDir(
-          rootDir,
-          containerId,
-          backend);
+        foreach (const string& rootfsId, info->rootfses[backend]) {
+          string rootfs = provisioner::paths::getContainerRootfsDir(
+              rootDir,
+              containerId,
+              backend,
+              rootfsId);
 
-      LOG(INFO) << "Destroying container rootfs at '" << rootfs
-                << "' for container " << containerId;
+          string backendDir = provisioner::paths::getBackendDir(
+              rootDir,
+              containerId,
+              backend);
 
-      futures.push_back(
-          backends.get(backend).get()->destroy(rootfs, backendDir));
-    }
-  }
+          LOG(INFO) << "Destroying container rootfs at '" << rootfs
+                    << "' for container " << containerId;
+
+          futures.push_back(
+              backends.at(backend)->destroy(rootfs, backendDir));
+        }
+      }
 
-  // TODO(xujyan): Revisit the usefulness of this return value.
-  return collect(futures)
-    .then(defer(self(), &ProvisionerProcess::__destroy, containerId));
+      await(futures)
+        .onAny(defer(
+            self(),
+            &ProvisionerProcess::__destroy,
+            containerId,
+            lambda::_1));
+    }));
+
+  return info->termination.future();
 }
 
 
-Future<bool> ProvisionerProcess::__destroy(const ContainerID& containerId)
+void ProvisionerProcess::__destroy(
+    const ContainerID& containerId,
+    const Future<vector<Future<bool>>>& futures)
 {
   CHECK(infos.contains(containerId));
   CHECK(infos[containerId]->destroying);
 
+  CHECK_READY(futures);
+
+  vector<string> messages;
+  foreach (const Future<bool>& future, futures.get()) {
+    if (!future.isReady()) {
+      messages.push_back(
+          future.isFailed() ? future.failure() : "discarded");
+    }
+  }
+
+  if (!messages.empty()) {
+    infos[containerId]->termination.fail(strings::join("\n", messages));
+    return;
+  }
+
   // This should be fairly cheap as the directory should only
   // contain a few empty sub-directories at this point.
   //
@@ -737,8 +805,6 @@ Future<bool> ProvisionerProcess::__destroy(const ContainerID& containerId)
 
   infos[containerId]->termination.set(true);
   infos.erase(containerId);
-
-  return true;
 }
 
diff --git a/src/slave/containerizer/mesos/provisioner/provisioner.hpp b/src/slave/containerizer/mesos/provisioner/provisioner.hpp
index 7f84aa4..a56d23d 100644
--- a/src/slave/containerizer/mesos/provisioner/provisioner.hpp
+++ b/src/slave/containerizer/mesos/provisioner/provisioner.hpp
@@ -77,6 +77,14 @@ public:
       const Flags& flags,
       SecretResolver* secretResolver = nullptr);
 
+  // This allows the backend to be mocked for testing.
+  static Try<process::Owned<Provisioner>> create(
+      const Flags& flags,
+      const std::string& rootDir,
+      const std::string& defaultBackend,
+      const hashmap<std::string, process::Owned<Backend>>& backends,
+      SecretResolver* secretResolver = nullptr);
+
   // Available only for testing.
   explicit Provisioner(process::Owned<ProvisionerProcess> process);
@@ -153,7 +161,9 @@ private:
       const ContainerID& containerId,
       const std::vector<process::Future<bool>>& destroys);
 
-  process::Future<bool> __destroy(const ContainerID& containerId);
+  void __destroy(
+      const ContainerID& containerId,
+      const process::Future<std::vector<process::Future<bool>>>& futures);
 
   // Absolute path to the provisioner root directory. It can be
   // derived from '--work_dir' but we keep a separate copy here
@@ -181,6 +191,11 @@ private:
     // started in 1.5.
     Option<std::vector<std::string>> layers;
 
+    // We keep track of the future for 'backend->provision' so
+    // that destroy will only start calling 'backend->destroy'
+    // after 'backend->provision' has finished.
+    process::Future<ProvisionInfo> provisioning;
+
     process::Promise<bool> termination;
 
     // The container status in provisioner.
@@ -197,17 +212,14 @@ private:
     process::metrics::Counter remove_container_errors;
   } metrics;
 
-  // This `ReadWriteLock` instance is used to protect the critical
-  // section, which includes store directory and provision directory.
-  // Because `provision` and `destroy` are scoped by `containerId`,
-  // they are not expected to touch the same critical section
-  // simultaneously, so any `provision` and `destroy` can happen concurrently.
-  // This is guaranteed by Mesos containerizer, e.g., a `destroy` will always
-  // wait for a container's `provision` to finish, then do the cleanup.
+  // This `ReadWriteLock` instance is used to protect the critical section which
+  // is the layers in the store directory (i.e. `--docker_store_dir`/layers/).
+  // Any `provision` and `destroy` can happen concurrently since they are not
+  // expected to touch the critical section simultaneously.
   //
   // On the other hand, `pruneImages` needs to know all active layers from all
   // containers, therefore it must be exclusive to other `provision`, `destroy`
-  // and `pruneImages` so that we do not prune image layers which is used by an
+  // and `pruneImages` so that we do not prune image layers which are used by an
   // active `provision` or `destroy`.
   process::ReadWriteLock rwLock;
 };
diff --git a/src/tests/containerizer/mesos_containerizer_tests.cpp b/src/tests/containerizer/mesos_containerizer_tests.cpp
index 449928c..cad3b73 100644
--- a/src/tests/containerizer/mesos_containerizer_tests.cpp
+++ b/src/tests/containerizer/mesos_containerizer_tests.cpp
@@ -31,6 +31,7 @@
 #include <process/shared.hpp>
 
 #include <stout/net.hpp>
+#include <stout/path.hpp>
 #include <stout/strings.hpp>
 #include <stout/uuid.hpp>
@@ -43,6 +44,7 @@
 #include "slave/containerizer/mesos/containerizer.hpp"
 #include "slave/containerizer/mesos/launcher.hpp"
+#include "slave/containerizer/mesos/provisioner/backend.hpp"
 #include "slave/containerizer/mesos/provisioner/provisioner.hpp"
 
 #include "tests/environment.hpp"
@@ -58,6 +60,7 @@ using namespace process;
 
 using mesos::internal::master::Master;
 
+using mesos::internal::slave::Backend;
 using mesos::internal::slave::Containerizer;
 using mesos::internal::slave::executorEnvironment;
 using mesos::internal::slave::Fetcher;
@@ -986,30 +989,71 @@ TEST_F(MesosContainerizerProvisionerTest, ProvisionFailed)
 }
 
 
-// This test verifies that there is no race (or leaked provisioned
-// directories) if the containerizer destroy a container while it
-// is provisioning an image.
-TEST_F(MesosContainerizerProvisionerTest, DestroyWhileProvisioning)
+class MockBackend : public Backend
+{
+public:
+  MockBackend() {}
+  ~MockBackend() override {}
+
+  MOCK_METHOD3(
+      provision,
+      Future<Option<vector<Path>>>(
+          const vector<string>&,
+          const string&,
+          const string&));
+
+  MOCK_METHOD2(
+      destroy,
+      Future<bool>(
+          const string&,
+          const string&));
+};
+
+
+// This test verifies that when the containerizer destroys a container while
+// provisioner backend is provisioning rootfs for the container, backend
+// destroy will not be invoked until backend provision finishes.
+TEST_F(
+    MesosContainerizerProvisionerTest,
+    ROOT_INTERNET_CURL_DestroyWhileProvisioning)
 {
   slave::Flags flags = CreateSlaveFlags();
   flags.launcher = "posix";
+  flags.isolation = "docker/runtime";
+  flags.image_providers = "docker";
 
   Try<Launcher*> launcher_ = SubprocessLauncher::create(flags);
   ASSERT_SOME(launcher_);
 
   Owned<Launcher> launcher(new TestLauncher(Owned<Launcher>(launcher_.get())));
 
-  MockProvisioner* provisioner = new MockProvisioner();
+  const string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);
+  CHECK_SOME(os::mkdir(provisionerDir));
+  CHECK_SOME(os::realpath(provisionerDir));
 
   Future<Nothing> provision;
-  Promise<ProvisionInfo> promise;
+  Future<Nothing> destroy;
+  Promise<Option<vector<Path>>> promise;
 
-  EXPECT_CALL(*provisioner, provision(_, _))
+  MockBackend* backend = new MockBackend();
+  EXPECT_CALL(*backend, provision(_, _, _))
     .WillOnce(DoAll(FutureSatisfy(&provision),
                     Return(promise.future())));
 
-  EXPECT_CALL(*provisioner, destroy(_))
-    .WillOnce(Return(true));
+  EXPECT_CALL(*backend, destroy(_, _))
+    .WillOnce(DoAll(FutureSatisfy(&destroy),
+                    Return(true)));
+
+  hashmap<string, Owned<Backend>> backends =
+    {{"MockBackend", Owned<Backend>(backend)}};
+
+  Try<Owned<Provisioner>> provisioner = Provisioner::create(
+      flags,
+      provisionerDir,
+      "MockBackend",
+      backends);
+
+  ASSERT_SOME(provisioner);
 
   Fetcher fetcher(flags);
 
@@ -1019,7 +1063,7 @@ TEST_F(MesosContainerizerProvisionerTest, DestroyWhileProvisioning)
       &fetcher,
       nullptr,
      launcher,
-      Shared<Provisioner>(provisioner),
+      provisioner->share(),
       vector<Owned<Isolator>>());
 
   Owned<MesosContainerizer> containerizer(_containerizer.get());
@@ -1030,7 +1074,7 @@ TEST_F(MesosContainerizerProvisionerTest, DestroyWhileProvisioning)
   Image image;
   image.set_type(Image::DOCKER);
   Image::Docker dockerImage;
-  dockerImage.set_name(id::UUID::random().toString());
+  dockerImage.set_name("alpine");
   image.mutable_docker()->CopyFrom(dockerImage);
 
   ContainerInfo::MesosInfo mesosInfo;
@@ -1059,12 +1103,18 @@ TEST_F(MesosContainerizerProvisionerTest, DestroyWhileProvisioning)
 
   AWAIT_READY(provision);
 
+  // Destroying container in `PROVISIONING` state will discard the container
+  // launch immediately.
   containerizer->destroy(containerId);
 
+  AWAIT_DISCARDED(launch);
+
+  ASSERT_TRUE(destroy.isPending());
   ASSERT_TRUE(wait.isPending());
 
-  promise.set(ProvisionInfo{"rootfs", None()});
-  AWAIT_FAILED(launch);
+  promise.set(Option<vector<Path>>::none());
+
+  AWAIT_READY(destroy);
   AWAIT_READY(wait);
   ASSERT_SOME(wait.get());
@@ -1090,14 +1140,17 @@ TEST_F(MesosContainerizerProvisionerTest, IsolatorCleanupBeforePrepare)
   MockProvisioner* provisioner = new MockProvisioner();
 
   Future<Nothing> provision;
-  Promise<ProvisionInfo> promise;
+  Future<Nothing> destroy;
+  Promise<ProvisionInfo> provisionPromise;
+  Promise<bool> destroyPromise;
 
   EXPECT_CALL(*provisioner, provision(_, _))
     .WillOnce(DoAll(FutureSatisfy(&provision),
-                    Return(promise.future())));
+                    Return(provisionPromise.future())));
 
   EXPECT_CALL(*provisioner, destroy(_))
-    .WillOnce(Return(true));
+    .WillOnce(DoAll(FutureSatisfy(&destroy),
+                    Return(destroyPromise.future())));
 
   MockIsolator* isolator = new MockIsolator();
@@ -1153,10 +1206,16 @@ TEST_F(MesosContainerizerProvisionerTest, IsolatorCleanupBeforePrepare)
 
   containerizer->destroy(containerId);
 
+  AWAIT_READY(destroy);
+
   ASSERT_TRUE(wait.isPending());
 
-  promise.set(ProvisionInfo{"rootfs", None()});
-  AWAIT_FAILED(launch);
+  provisionPromise.set(ProvisionInfo{"rootfs", None()});
+
+  AWAIT_DISCARDED(launch);
+
+  destroyPromise.set(true);
+
   AWAIT_READY(wait);
   ASSERT_SOME(wait.get());
diff --git a/src/tests/containerizer/provisioner_docker_tests.cpp b/src/tests/containerizer/provisioner_docker_tests.cpp
index 76ba5f1..caac470 100644
--- a/src/tests/containerizer/provisioner_docker_tests.cpp
+++ b/src/tests/containerizer/provisioner_docker_tests.cpp
@@ -39,8 +39,6 @@
 #include "slave/containerizer/mesos/provisioner/docker/message.hpp"
 #include "slave/containerizer/mesos/provisioner/docker/metadata_manager.hpp"
 #include "slave/containerizer/mesos/provisioner/docker/paths.hpp"
-#include "slave/containerizer/mesos/provisioner/docker/puller.hpp"
-#include "slave/containerizer/mesos/provisioner/docker/registry_puller.hpp"
 #include "slave/containerizer/mesos/provisioner/docker/store.hpp"
 
 #include "tests/environment.hpp"
@@ -76,8 +74,6 @@ using mesos::master::detector::MasterDetector;
 using slave::ImageInfo;
 using slave::Slave;
 
-using slave::docker::Puller;
-using slave::docker::RegistryPuller;
 using slave::docker::Store;
 
 using testing::WithParamInterface;
@@ -296,109 +292,6 @@ TEST_F(ProvisionerDockerLocalStoreTest, MissingLayer)
 }
 
 
-class MockPuller : public Puller
-{
-public:
-  MockPuller()
-  {
-    EXPECT_CALL(*this, pull(_, _, _, _))
-      .WillRepeatedly(Invoke(this, &MockPuller::unmocked_pull));
-  }
-
-  ~MockPuller() override {}
-
-  MOCK_METHOD4(
-      pull,
-      Future<slave::docker::Image>(
-          const spec::ImageReference&,
-          const string&,
-          const string&,
-          const Option<Secret>&));
-
-  Future<slave::docker::Image> unmocked_pull(
-      const spec::ImageReference& reference,
-      const string& directory,
-      const string& backend,
-      const Option<Secret>& config)
-  {
-    // TODO(gilbert): Allow return Image to be overridden.
-    return slave::docker::Image();
-  }
-};
-
-
-// This tests the store to pull the same image simultaneously.
-// This test verifies that the store only calls the puller once
-// when multiple requests for the same image is in flight.
-TEST_F(ProvisionerDockerLocalStoreTest, PullingSameImageSimutanuously)
-{
-  slave::Flags flags;
-  flags.docker_registry = path::join(os::getcwd(), "images");
-  flags.docker_store_dir = path::join(os::getcwd(), "store");
-
-  MockPuller* puller = new MockPuller();
-  Future<Nothing> pull;
-  Future<string> directory;
-  Promise<slave::docker::Image> promise;
-
-  EXPECT_CALL(*puller, pull(_, _, _, _))
-    .WillOnce(testing::DoAll(FutureSatisfy(&pull),
-                             FutureArg<1>(&directory),
-                             Return(promise.future())));
-
-  Try<Owned<slave::Store>> store =
-    slave::docker::Store::create(flags, Owned<Puller>(puller));
-  ASSERT_SOME(store);
-
-  Image mesosImage;
-  mesosImage.set_type(Image::DOCKER);
-  mesosImage.mutable_docker()->set_name("abc");
-
-  Future<slave::ImageInfo> imageInfo1 =
-    store.get()->get(mesosImage, COPY_BACKEND);
-
-  AWAIT_READY(pull);
-  AWAIT_READY(directory);
-
-  // TODO(gilbert): Need a helper method to create test layers
-  // which will allow us to set manifest so that we can add
-  // checks here.
-  const string layerPath = path::join(directory.get(), "456");
-
-  Try<Nothing> mkdir = os::mkdir(layerPath);
-  ASSERT_SOME(mkdir);
-
-  JSON::Value manifest = JSON::parse(
-      "{"
-      "  \"parent\": \"\""
-      "}").get();
-
-  ASSERT_SOME(
-      os::write(path::join(layerPath, "json"), stringify(manifest)));
-
-  ASSERT_TRUE(imageInfo1.isPending());
-  Future<slave::ImageInfo> imageInfo2 =
-    store.get()->get(mesosImage, COPY_BACKEND);
-
-  Try<spec::ImageReference> reference =
-    spec::parseImageReference(mesosImage.docker().name());
-
-  ASSERT_SOME(reference);
-
-  slave::docker::Image result;
-  result.mutable_reference()->CopyFrom(reference.get());
-  result.add_layer_ids("456");
-
-  ASSERT_TRUE(imageInfo2.isPending());
-  promise.set(result);
-
-  AWAIT_READY(imageInfo1);
-  AWAIT_READY(imageInfo2);
-
-  EXPECT_EQ(imageInfo1->layers, imageInfo2->layers);
-}
-
-
 #ifdef __linux__
 
 class ProvisionerDockerTest
   : public MesosTest,
diff --git a/src/uri/fetchers/docker.cpp b/src/uri/fetchers/docker.cpp
index 631db2d..0ca980c 100644
--- a/src/uri/fetchers/docker.cpp
+++ b/src/uri/fetchers/docker.cpp
@@ -33,6 +33,7 @@
 #include <stout/os/constants.hpp>
 #include <stout/os/getenv.hpp>
+#include <stout/os/kill.hpp>
 #include <stout/os/mkdir.hpp>
 #include <stout/os/write.hpp>
@@ -82,6 +83,15 @@ static set<string> schemes()
   };
 }
 
+
+void commandDiscarded(const Subprocess& s, const string& cmd)
+{
+  if (s.status().isPending()) {
+    VLOG(1) << "'" << cmd << "' is being discarded";
+    os::kill(s.pid(), SIGKILL);
+  }
+}
+
 
 // TODO(jieyu): Move the following curl based utility functions to a
 // command utils common directory.
@@ -142,7 +152,8 @@ static Future<http::Response> curl(
 
   argv.push_back(strings::trim(uri));
 
-  // TODO(jieyu): Kill the process if discard is called.
+  string cmd = strings::join(" ", argv);
+
   Try<Subprocess> s = subprocess(
       "curl",
       argv,
@@ -223,7 +234,8 @@ static Future<http::Response> curl(
       // NOTE: We always return the last response because there might
       // be a '307 Temporary Redirect' response before that.
       return responses->back();
-    });
+    })
+    .onDiscard(lambda::bind(&commandDiscarded, s.get(), cmd));
 }
 
 
@@ -266,7 +278,8 @@ static Future<int> download(
 
   argv.push_back(uri);
 
-  // TODO(jieyu): Kill the process if discard is called.
+  string cmd = strings::join(" ", argv);
+
   Try<Subprocess> s = subprocess(
       "curl",
       argv,
@@ -341,7 +354,8 @@ static Future<int> download(
       }
 
       return code.get();
-    });
+    })
+    .onDiscard(lambda::bind(&commandDiscarded, s.get(), cmd));
 }
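One design note on the provisioner change above: `_destroy` switched from
`collect` to `await` on the backend destroy futures. A minimal sketch of the
difference, assuming libprocess's `<process/collect.hpp>` (the `example`
wrapper below is illustrative only, not Mesos code):

    #include <vector>

    #include <process/collect.hpp>
    #include <process/future.hpp>

    using process::await;
    using process::collect;
    using process::Future;

    void example(const std::vector<Future<bool>>& destroys)
    {
      // `collect` fails as soon as any destroy fails, so it can complete
      // while other destroys are still in flight.
      Future<std::vector<bool>> failFast = collect(destroys);

      // `await` transitions only once every destroy is terminal (ready,
      // failed, or discarded); the caller then inspects each result, as
      // the new `__destroy` does when accumulating failure messages.
      Future<std::vector<Future<bool>>> waitAll = await(destroys);
    }

Since a provisioning that was just discarded may leave some backend destroys
failing, waiting for all of them before inspecting results avoids tearing
down the container's bookkeeping while destroys are still running.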
