The following pull request was submitted through GitHub.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/6876

This e-mail was sent by the LXC bot; direct replies will not reach the author
unless they happen to be subscribed to this list.

=== Description (from pull-request) ===
This adds support for comparing the requested CPU pinning with the physical hardware layout and, if consistent, configuring qemu to mimic that setup (sockets, cores, threads) and pinning the resulting vCPU threads to the matching cores.

The logic also sorts the provided pinning data into hardware order (socket, core, thread), which matches the ordering qemu itself uses and is how the pinning is lined up with the qemu vCPU threads later on.
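
As a rough illustration of that ordering (a minimal sketch only; the
pinnedCPU type and sortByHardwareOrder helper are hypothetical, the branch
itself walks the topology reported by lxd/resources), sorting a pin set by
(socket, core, thread) could look like:

package main

import (
    "fmt"
    "sort"
)

// pinnedCPU describes one pinned host CPU by its position in the topology.
type pinnedCPU struct {
    Socket uint64
    Core   uint64
    Thread uint64
    ID     uint64 // logical CPU number used for the affinity call
}

// sortByHardwareOrder orders pins by socket, then core, then thread, matching
// the order in which qemu brings up its vCPU threads.
func sortByHardwareOrder(pins []pinnedCPU) {
    sort.Slice(pins, func(i, j int) bool {
        if pins[i].Socket != pins[j].Socket {
            return pins[i].Socket < pins[j].Socket
        }
        if pins[i].Core != pins[j].Core {
            return pins[i].Core < pins[j].Core
        }
        return pins[i].Thread < pins[j].Thread
    })
}

func main() {
    pins := []pinnedCPU{
        {Socket: 1, Core: 0, Thread: 0, ID: 4},
        {Socket: 0, Core: 1, Thread: 1, ID: 3},
        {Socket: 0, Core: 0, Thread: 0, ID: 0},
    }
    sortByHardwareOrder(pins)

    // After sorting, vCPU index i gets pinned to pins[i].ID.
    fmt.Println(pins)
}
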
From 3abd3fe98624f1c81154a9aa7edbda84511cbf29 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgra...@ubuntu.com>
Date: Wed, 12 Feb 2020 13:47:08 -0800
Subject: [PATCH 1/3] shared: Add Uint64InSlice
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgra...@ubuntu.com>
---
 shared/util.go | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/shared/util.go b/shared/util.go
index 184a6e9cd7..0db130129f 100644
--- a/shared/util.go
+++ b/shared/util.go
@@ -586,6 +586,15 @@ func Int64InSlice(key int64, list []int64) bool {
        return false
 }
 
+func Uint64InSlice(key uint64, list []uint64) bool {
+       for _, entry := range list {
+               if entry == key {
+                       return true
+               }
+       }
+       return false
+}
+
 func IsTrue(value string) bool {
        if StringInSlice(strings.ToLower(value), []string{"true", "1", "yes", "on"}) {
                return true
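
For context, a quick usage sketch of the new helper (the main function below
is illustrative only; in the tree the helper is called as
shared.Uint64InSlice):

package main

import "fmt"

// Uint64InSlice reports whether key is present in list.
func Uint64InSlice(key uint64, list []uint64) bool {
    for _, entry := range list {
        if entry == key {
            return true
        }
    }
    return false
}

func main() {
    cores := []uint64{0, 2, 4}
    fmt.Println(Uint64InSlice(2, cores)) // true
    fmt.Println(Uint64InSlice(3, cores)) // false
}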

From a0327bb4d9837c25a4fab136e854f13b86eba492 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgra...@ubuntu.com>
Date: Wed, 12 Feb 2020 13:53:16 -0800
Subject: [PATCH 2/3] lxd/vm: Template sockets/cores/threads config
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgra...@ubuntu.com>
---
 lxd/instance/drivers/driver_qemu.go           | 3 +++
 lxd/instance/drivers/driver_qemu_templates.go | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go
index 28c6f3256d..1068b4bd85 100644
--- a/lxd/instance/drivers/driver_qemu.go
+++ b/lxd/instance/drivers/driver_qemu.go
@@ -1437,6 +1437,9 @@ func (vm *qemu) addCPUConfig(sb *strings.Builder) error {
        return qemuCPU.Execute(sb, map[string]interface{}{
                "architecture": vm.architectureName,
                "cpuCount":     cpuCount,
+               "cpuSockets":   1,
+               "cpuCores":     cpuCount,
+               "cpuThreads":   1,
        })
 }
 
diff --git a/lxd/instance/drivers/driver_qemu_templates.go b/lxd/instance/drivers/driver_qemu_templates.go
index 2c0700c753..63e525d19e 100644
--- a/lxd/instance/drivers/driver_qemu_templates.go
+++ b/lxd/instance/drivers/driver_qemu_templates.go
@@ -150,6 +150,9 @@ var qemuCPU = template.Must(template.New("qemuCPU").Parse(`
 # CPU
 [smp-opts]
 cpus = "{{.cpuCount}}"
+sockets = "{{.cpuSockets}}"
+cores = "{{.cpuCores}}"
+threads = "{{.cpuThreads}}"
 `))
 
 var qemuControlSocket = template.Must(template.New("qemuControlSocket").Parse(`
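
For reference, with the defaults added above (a single socket and a single
thread per core), an instance with limits.cpu=4 would render an [smp-opts]
section roughly like this (illustrative output, not captured from a real
run):

# CPU
[smp-opts]
cpus = "4"
sockets = "1"
cores = "4"
threads = "1"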

From b3cf68511442fbad1767e152a5b72c0439f14109 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgra...@ubuntu.com>
Date: Wed, 12 Feb 2020 14:42:45 -0800
Subject: [PATCH 3/3] lxd/vm: Attempt to line up CPU topology
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stéphane Graber <stgra...@ubuntu.com>
---
 lxd/instance/drivers/driver_qemu.go | 150 ++++++++++++++++++++++++----
 1 file changed, 132 insertions(+), 18 deletions(-)

diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go
index 1068b4bd85..160b8d5f15 100644
--- a/lxd/instance/drivers/driver_qemu.go
+++ b/lxd/instance/drivers/driver_qemu.go
@@ -36,6 +36,7 @@ import (
        "github.com/lxc/lxd/lxd/network"
        "github.com/lxc/lxd/lxd/operations"
        "github.com/lxc/lxd/lxd/project"
+       "github.com/lxc/lxd/lxd/resources"
        "github.com/lxc/lxd/lxd/revert"
        "github.com/lxc/lxd/lxd/state"
        storagePools "github.com/lxc/lxd/lxd/storage"
@@ -824,8 +825,8 @@ func (vm *qemu) Start(stateful bool) error {
        if ok && cpuLimit != "" {
                _, err := strconv.Atoi(cpuLimit)
                if err != nil {
-                       // Expand to a set of CPU identifiers.
-                       pins, err := instance.ParseCpuset(cpuLimit)
+                       // Expand to a set of CPU identifiers and get the pinning map.
+                       _, _, _, pins, err := vm.cpuTopology(cpuLimit)
                        if err != nil {
                                op.Done(err)
                                return err
@@ -843,11 +844,9 @@ func (vm *qemu) Start(stateful bool) error {
                                return fmt.Errorf("QEMU has less vCPUs than configured")
                        }
 
-                       for i, pin := range pins {
-                               pid := pids[i]
-
+                       for i, pid := range pids {
                                set := unix.CPUSet{}
-                               set.Set(pin)
+                               set.Set(int(pins[uint64(i)]))
 
                                // Apply the pin.
                                err := unix.SchedSetaffinity(pid, &set)
@@ -1418,29 +1417,37 @@ func (vm *qemu) addVsockConfig(sb *strings.Builder) error {
 
 // addCPUConfig adds the qemu config required for setting the number of virtualised CPUs.
 func (vm *qemu) addCPUConfig(sb *strings.Builder) error {
-       // Configure CPU limit. TODO add control of sockets, cores and threads.
+       // Default to a single core.
        cpus := vm.expandedConfig["limits.cpu"]
        if cpus == "" {
                cpus = "1"
        }
 
+       ctx := map[string]interface{}{
+               "architecture": vm.architectureName,
+       }
+
        cpuCount, err := strconv.Atoi(cpus)
-       if err != nil {
-               pins, err := instance.ParseCpuset(cpus)
+       if err == nil {
+               // If not pinning, default to exposing cores.
+               ctx["cpuCount"] = cpuCount
+               ctx["cpuSockets"] = 1
+               ctx["cpuCores"] = cpuCount
+               ctx["cpuThreads"] = 1
+       } else {
+               // Expand to a set of CPU identifiers and get the pinning map.
+               nrSockets, nrCores, nrThreads, vcpus, err := vm.cpuTopology(cpus)
                if err != nil {
-                       return fmt.Errorf("limits.cpu invalid: %v", err)
+                       return err
                }
 
-               cpuCount = len(pins)
+               ctx["cpuCount"] = len(vcpus)
+               ctx["cpuSockets"] = nrSockets
+               ctx["cpuCores"] = nrCores
+               ctx["cpuThreads"] = nrThreads
        }
 
-       return qemuCPU.Execute(sb, map[string]interface{}{
-               "architecture": vm.architectureName,
-               "cpuCount":     cpuCount,
-               "cpuSockets":   1,
-               "cpuCores":     cpuCount,
-               "cpuThreads":   1,
-       })
+       return qemuCPU.Execute(sb, ctx)
 }
 
 // addMonitorConfig adds the qemu config required for setting up the host side VM monitor device.
@@ -3718,3 +3725,110 @@ func (vm *qemu) UpdateBackupFile() error {
 
        return pool.UpdateInstanceBackupFile(vm, nil)
 }
+
+func (vm *qemu) cpuTopology(limit string) (int, int, int, map[uint64]uint64, error) {
+       // Get CPU topology.
+       cpus, err := resources.GetCPU()
+       if err != nil {
+               return -1, -1, -1, nil, err
+       }
+
+       // Expand the pins.
+       pins, err := instance.ParseCpuset(limit)
+       if err != nil {
+               return -1, -1, -1, nil, err
+       }
+
+       // Match tracking.
+       vcpus := map[uint64]uint64{}
+       sockets := map[uint64][]uint64{}
+       cores := map[uint64][]uint64{}
+
+       // Go through the physical CPUs looking for matches.
+       i := uint64(0)
+       for _, cpu := range cpus.Sockets {
+               for _, core := range cpu.Cores {
+                       for _, thread := range core.Threads {
+                               for _, pin := range pins {
+                                       if thread.ID == int64(pin) {
+                                               // Found a matching CPU.
+                                               vcpus[i] = uint64(pin)
+                                               i++
+
+                                               // Track cores per socket.
+                                               _, ok := sockets[cpu.Socket]
+                                               if !ok {
+                                                       sockets[cpu.Socket] = []uint64{}
+                                               }
+                                               if !shared.Uint64InSlice(core.Core, sockets[cpu.Socket]) {
+                                                       sockets[cpu.Socket] = append(sockets[cpu.Socket], core.Core)
+                                               }
+
+                                               // Track threads per core.
+                                               _, ok = cores[core.Core]
+                                               if !ok {
+                                                       cores[core.Core] = []uint64{}
+                                               }
+                                               if !shared.Uint64InSlice(thread.Thread, cores[core.Core]) {
+                                                       cores[core.Core] = append(cores[core.Core], thread.Thread)
+                                               }
+                                       }
+                               }
+                       }
+               }
+       }
+
+       // Confirm we're getting the expected number of CPUs.
+       if len(pins) != len(vcpus) {
+               return -1, -1, -1, nil, fmt.Errorf("Unavailable CPUs requested: %s", limit)
+       }
+
+       // Validate the topology.
+       valid := true
+       nrSockets := 0
+       nrCores := 0
+       nrThreads := 0
+
+       // Confirm that there are no balancing inconsistencies.
+       countCores := -1
+       for _, cores := range sockets {
+               if countCores != -1 && len(cores) != countCores {
+                       valid = false
+                       break
+               }
+
+               countCores = len(cores)
+       }
+
+       countThreads := -1
+       for _, threads := range cores {
+               if countThreads != -1 && len(threads) != countThreads {
+                       valid = false
+                       break
+               }
+
+               countThreads = len(threads)
+       }
+
+       // Check against double listing of CPU.
+       if len(sockets)*countCores*countThreads != len(vcpus) {
+               valid = false
+       }
+
+       // Build up the topology.
+       if valid {
+               // Valid topology.
+               nrSockets = len(sockets)
+               nrCores = countCores
+               nrThreads = countThreads
+       } else {
+               logger.Warnf("Instance '%s' uses a CPU pinning profile which doesn't match hardware layout", project.Prefix(vm.Project(), vm.Name()))
+
+               // Fall back to treating everything as cores.
+               nrSockets = 1
+               nrCores = len(vcpus)
+               nrThreads = 1
+       }
+
+       return nrSockets, nrCores, nrThreads, vcpus, nil
+}
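
To make the validation above concrete, here is a small self-contained sketch
of the same consistency check (the maps are hand-built for illustration; the
real code fills them while walking lxd/resources): a balanced pin set
covering 2 sockets x 2 cores x 1 thread is kept as-is, while an unbalanced
one falls back to 1 socket / N cores / 1 thread.

package main

import "fmt"

// checkTopology mirrors the validation in cpuTopology: the layout is only
// kept if every socket exposes the same number of cores, every core the same
// number of threads, and sockets*cores*threads covers exactly the pinned
// vCPUs. Otherwise everything is exposed as cores on a single socket.
func checkTopology(sockets map[uint64][]uint64, cores map[uint64][]uint64, nrVCPUs int) (int, int, int) {
    valid := true

    countCores := -1
    for _, c := range sockets {
        if countCores != -1 && len(c) != countCores {
            valid = false
            break
        }
        countCores = len(c)
    }

    countThreads := -1
    for _, t := range cores {
        if countThreads != -1 && len(t) != countThreads {
            valid = false
            break
        }
        countThreads = len(t)
    }

    if len(sockets)*countCores*countThreads != nrVCPUs {
        valid = false
    }

    if !valid {
        return 1, nrVCPUs, 1
    }

    return len(sockets), countCores, countThreads
}

func main() {
    // Balanced: 2 sockets, 2 cores each, 1 thread per core.
    sockets := map[uint64][]uint64{0: {0, 1}, 1: {2, 3}}
    cores := map[uint64][]uint64{0: {0}, 1: {1}, 2: {2}, 3: {3}}
    fmt.Println(checkTopology(sockets, cores, 4)) // 2 2 1

    // Unbalanced: one socket contributes two cores, the other only one.
    sockets = map[uint64][]uint64{0: {0, 1}, 1: {2}}
    cores = map[uint64][]uint64{0: {0}, 1: {1}, 2: {2}}
    fmt.Println(checkTopology(sockets, cores, 3)) // 1 3 1
}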