The following pull request was submitted through Github. It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/1632
This e-mail was sent by the LXC bot; direct replies will not reach the author unless they happen to be subscribed to this list. === Description (from pull-request) ===
From 7cc21c04807b92a4bcfe2caee4dbe2f480035f16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgra...@ubuntu.com> Date: Mon, 22 Feb 2016 21:27:52 -0500 Subject: [PATCH 1/2] Add process limit (pids cgroup) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stéphane Graber <stgra...@ubuntu.com> --- lxd/container.go | 2 ++ lxd/container_lxc.go | 40 ++++++++++++++++++++++++++++++++++++++++ lxd/daemon.go | 6 ++++++ specs/configuration.md | 1 + 4 files changed, 49 insertions(+) diff --git a/lxd/container.go b/lxd/container.go index 0792c17..4ded43f 100644 --- a/lxd/container.go +++ b/lxd/container.go @@ -63,6 +63,8 @@ func containerValidConfigKey(k string) bool { return true case "limits.network.priority": return true + case "limits.processes": + return true case "linux.kernel_modules": return true case "security.privileged": diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go index c3afead..a09ee56 100644 --- a/lxd/container_lxc.go +++ b/lxd/container_lxc.go @@ -616,6 +616,22 @@ func (c *containerLXC) initLXC() error { } } + // Processes + if cgPidsController { + processes := c.expandedConfig["limits.processes"] + if processes != "" { + valueInt, err := strconv.ParseInt(processes, 10, 64) + if err != nil { + return err + } + + err = lxcSetConfigItem(cc, "lxc.cgroup.pids.max", fmt.Sprintf("%d", valueInt)) + if err != nil { + return err + } + } + } + // Setup devices for k, m := range c.expandedDevices { if shared.StringInSlice(m["type"], []string{"unix-char", "unix-block"}) { @@ -2065,6 +2081,30 @@ func (c *containerLXC) Update(args containerArgs, userRequested bool) error { undoChanges() return err } + } else if key == "limits.processes" { + if !cgPidsController { + continue + } + + if value == "" { + err = c.CGroupSet("pids.max", "max") + if err != nil { + undoChanges() + return err + } + } else { + valueInt, err := strconv.ParseInt(value, 10, 64) + if err != nil { + undoChanges() + return 
err + } + + err = c.CGroupSet("pids.max", fmt.Sprintf("%d", valueInt)) + if err != nil { + undoChanges() + return err + } + } } } diff --git a/lxd/daemon.go b/lxd/daemon.go index 5b1546c..bbb34a8 100644 --- a/lxd/daemon.go +++ b/lxd/daemon.go @@ -48,6 +48,7 @@ var cgCpusetController = false var cgDevicesController = false var cgMemoryController = false var cgNetPrioController = false +var cgPidsController = false var cgSwapAccounting = false // UserNS @@ -744,6 +745,11 @@ func (d *Daemon) Init() error { shared.Log.Warn("Couldn't find the CGroup network class controller, network limits will be ignored.") } + cgPidsController = shared.PathExists("/sys/fs/cgroup/pids/") + if !cgPidsController { + shared.Log.Warn("Couldn't find the CGroup pids controller, process limits will be ignored.") + } + cgSwapAccounting = shared.PathExists("/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes") if !cgSwapAccounting { shared.Log.Warn("CGroup memory swap accounting is disabled, swap limits will be ignored.") diff --git a/specs/configuration.md b/specs/configuration.md index fe6d7f0..fe7e6f7 100644 --- a/specs/configuration.md +++ b/specs/configuration.md @@ -70,6 +70,7 @@ limits.memory.enforce | string | hard | yes | If har limits.memory.swap | boolean | true | yes | Whether to allow some of the container's memory to be swapped out to disk limits.memory.swap.priority | integer | 10 (maximum) | yes | The higher this is set, the least likely the container is to be swapped to disk limits.network.priority | integer | 0 (minimum) | yes | When under load, how much priority to give to the container's network requests +limits.processes | integer | - (max) | yes | Maximum number of processes that can run in the container linux.kernel\_modules | string | - | yes | Comma separated list of kernel modules to load before starting the container raw.apparmor | blob | - | yes | Apparmor profile entries to be appended to the generated profile raw.lxc | blob | - | no | Raw LXC configuration to be 
appended to the generated one From eef45154892ab079dbc9748710bbf450a8b1b4db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgra...@ubuntu.com> Date: Mon, 22 Feb 2016 21:37:29 -0500 Subject: [PATCH 2/2] Optimize container process count (use pid cgroup) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stéphane Graber <stgra...@ubuntu.com> --- lxd/container.go | 1 + lxd/container_lxc.go | 26 ++++++++++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/lxd/container.go b/lxd/container.go index 4ded43f..9dc714e 100644 --- a/lxd/container.go +++ b/lxd/container.go @@ -329,6 +329,7 @@ type container interface { Export(w io.Writer) error // Live configuration + CGroupGet(key string) (string, error) CGroupSet(key string, value string) error ConfigKeySet(key string, value string) error diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go index a09ee56..bcff879 100644 --- a/lxd/container_lxc.go +++ b/lxd/container_lxc.go @@ -1617,6 +1617,22 @@ func (c *containerLXC) Rename(newName string) error { return nil } +func (c *containerLXC) CGroupGet(key string) (string, error) { + // Load the go-lxc struct + err := c.initLXC() + if err != nil { + return "", err + } + + // Make sure the container is running + if !c.IsRunning() { + return "", fmt.Errorf("Can't get cgroups on a stopped container") + } + + value := c.c.CgroupItem(key) + return strings.Join(value, "\n"), nil +} + func (c *containerLXC) CGroupSet(key string, value string) error { // Load the go-lxc struct err := c.initLXC() @@ -2703,6 +2719,16 @@ func (c *containerLXC) processcountGet() int { return 0 } + if cgPidsController { + value, err := c.CGroupGet("pids.current") + valueInt, err := strconv.Atoi(value) + if err != nil { + return 0 + } + + return valueInt + } + pids := []int{pid} // Go through the pid list, adding new pids at the end so we go through them all
_______________________________________________ lxc-devel mailing list lxc-devel@lists.linuxcontainers.org http://lists.linuxcontainers.org/listinfo/lxc-devel