The following pull request was submitted through Github.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/7243

This e-mail was sent by the LXC bot; direct replies will not reach the author
unless they happen to be subscribed to this list.

=== Description (from pull-request) ===
This now mimics the NUMA layout of the host in the guest for cases where
specific CPUs have been pinned.

Signed-off-by: Stéphane Graber <stgra...@ubuntu.com>
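
For reference, the heart of the change is grouping the pinned host CPUs by the
NUMA node they live on and replaying that grouping as guest NUMA nodes. The
standalone Go sketch below illustrates just that grouping step; the hostThread
type and the sample host layout are invented for the example and are not the
actual LXD resources API:

package main

import (
        "fmt"
        "sort"
)

// hostThread is a hypothetical, simplified stand-in for the per-CPU data
// exposed by lxd/resources: the logical CPU ID and the NUMA node it sits on.
type hostThread struct {
        ID       uint64
        NUMANode uint64
}

// groupByNUMANode assigns vCPU numbers to the pinned host CPUs in pin order
// and records which vCPUs end up on which host NUMA node.
func groupByNUMANode(pins []uint64, threads map[uint64]hostThread) map[uint64][]uint64 {
        numaNodes := map[uint64][]uint64{}

        for vcpu, pin := range pins {
                t, ok := threads[pin]
                if !ok {
                        // The real code returns an "Unavailable CPUs requested" error instead.
                        continue
                }

                numaNodes[t.NUMANode] = append(numaNodes[t.NUMANode], uint64(vcpu))
        }

        return numaNodes
}

func main() {
        // Invented host layout: CPUs 0-3 on NUMA node 0, CPUs 4-7 on NUMA node 1.
        threads := map[uint64]hostThread{}
        for id := uint64(0); id < 8; id++ {
                threads[id] = hostThread{ID: id, NUMANode: id / 4}
        }

        // Pinning to CPUs 2,3,6,7 puts vCPUs 0,1 on node 0 and vCPUs 2,3 on node 1.
        numa := groupByNUMANode([]uint64{2, 3, 6, 7}, threads)

        nodes := make([]uint64, 0, len(numa))
        for node := range numa {
                nodes = append(nodes, node)
        }
        sort.Slice(nodes, func(i, j int) bool { return nodes[i] < nodes[j] })

        for _, node := range nodes {
                fmt.Printf("guest NUMA node %d -> vCPUs %v\n", node, numa[node])
        }
}
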
From 3d1179b3d7dc21febf691cf3d775b7c315ac9278 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?St=C3=A9phane=20Graber?= <stgra...@ubuntu.com>
Date: Wed, 22 Apr 2020 21:28:28 -0400
Subject: [PATCH] lxd/qemu: Match basic NUMA layout
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This now mimics the NUMA layout of the host in the guest for cases where
specific CPUs have been pinned.

Signed-off-by: Stéphane Graber <stgra...@ubuntu.com>
---
 lxd/instance/drivers/driver_qemu.go           | 62 ++++++++++++++++---
 lxd/instance/drivers/driver_qemu_templates.go | 15 +++++
 2 files changed, 69 insertions(+), 8 deletions(-)

diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go
index 0848fa9e02..3c3eb28dc2 100644
--- a/lxd/instance/drivers/driver_qemu.go
+++ b/lxd/instance/drivers/driver_qemu.go
@@ -870,7 +870,7 @@ func (vm *qemu) Start(stateful bool) error {
                _, err := strconv.Atoi(cpuLimit)
                if err != nil {
                        // Expand to a set of CPU identifiers and get the pinning map.
-                       _, _, _, pins, err := vm.cpuTopology(cpuLimit)
+                       _, _, _, pins, _, err := vm.cpuTopology(cpuLimit)
                        if err != nil {
                                op.Done(err)
                                return err
@@ -1684,15 +1684,52 @@ func (vm *qemu) addCPUConfig(sb *strings.Builder) error {
                ctx["cpuThreads"] = 1
        } else {
                // Expand to a set of CPU identifiers and get the pinning map.
-               nrSockets, nrCores, nrThreads, vcpus, err := vm.cpuTopology(cpus)
+               nrSockets, nrCores, nrThreads, vcpus, numaNodes, err := vm.cpuTopology(cpus)
                if err != nil {
                        return err
                }
 
+               // Figure out socket-id/core-id/thread-id for all vcpus.
+               vcpuSocket := map[uint64]uint64{}
+               vcpuCore := map[uint64]uint64{}
+               vcpuThread := map[uint64]uint64{}
+               vcpu := uint64(0)
+               for i := 0; i < nrSockets; i++ {
+                       for j := 0; j < nrCores; j++ {
+                               for k := 0; k < nrThreads; k++ {
+                                       vcpuSocket[vcpu] = uint64(i)
+                                       vcpuCore[vcpu] = uint64(j)
+                                       vcpuThread[vcpu] = uint64(k)
+                                       vcpu++
+                               }
+                       }
+               }
+
+               // Prepare the NUMA map.
+               numa := []map[string]uint64{}
+               numaIDs := []uint64{}
+               numaNode := uint64(0)
+               for _, entry := range numaNodes {
+                       numaIDs = append(numaIDs, numaNode)
+                       for _, vcpu := range entry {
+                               numa = append(numa, map[string]uint64{
+                                       "node":   numaNode,
+                                       "socket": vcpuSocket[vcpu],
+                                       "core":   vcpuCore[vcpu],
+                                       "thread": vcpuThread[vcpu],
+                               })
+                       }
+
+                       numaNode++
+               }
+
+               // Prepare context.
                ctx["cpuCount"] = len(vcpus)
                ctx["cpuSockets"] = nrSockets
                ctx["cpuCores"] = nrCores
                ctx["cpuThreads"] = nrThreads
+               ctx["cpuNumaNodes"] = numaIDs
+               ctx["cpuNumaMapping"] = numa
        }
 
        return qemuCPU.Execute(sb, ctx)
@@ -4254,23 +4291,24 @@ func (vm *qemu) UpdateBackupFile() error {
        return pool.UpdateInstanceBackupFile(vm, nil)
 }
 
-func (vm *qemu) cpuTopology(limit string) (int, int, int, map[uint64]uint64, error) {
+func (vm *qemu) cpuTopology(limit string) (int, int, int, map[uint64]uint64, map[uint64][]uint64, error) {
        // Get CPU topology.
        cpus, err := resources.GetCPU()
        if err != nil {
-               return -1, -1, -1, nil, err
+               return -1, -1, -1, nil, nil, err
        }
 
        // Expand the pins.
        pins, err := instance.ParseCpuset(limit)
        if err != nil {
-               return -1, -1, -1, nil, err
+               return -1, -1, -1, nil, nil, err
        }
 
        // Match tracking.
        vcpus := map[uint64]uint64{}
        sockets := map[uint64][]uint64{}
        cores := map[uint64][]uint64{}
+       numaNodes := map[uint64][]uint64{}
 
        // Go through the physical CPUs looking for matches.
        i := uint64(0)
@@ -4281,7 +4319,6 @@ func (vm *qemu) cpuTopology(limit string) (int, int, int, map[uint64]uint64, err
                                        if thread.ID == int64(pin) {
                                                // Found a matching CPU.
                                                vcpus[i] = uint64(pin)
-                                               i++
 
                                                // Track cores per socket.
                                                _, ok := sockets[cpu.Socket]
@@ -4300,6 +4337,15 @@ func (vm *qemu) cpuTopology(limit string) (int, int, int, map[uint64]uint64, err
                                                if !shared.Uint64InSlice(thread.Thread, cores[core.Core]) {
                                                        cores[core.Core] = append(cores[core.Core], thread.Thread)
                                                }
+
+                                               // Record NUMA node for thread.
+                                               _, ok = numaNodes[thread.NUMANode]
+                                               if !ok {
+                                                       numaNodes[thread.NUMANode] = []uint64{}
+                                               }
+                                               numaNodes[thread.NUMANode] = append(numaNodes[thread.NUMANode], i)
+
+                                               i++
                                        }
                                }
                        }
@@ -4308,7 +4354,7 @@ func (vm *qemu) cpuTopology(limit string) (int, int, int, map[uint64]uint64, err
 
        // Confirm we're getting the expected number of CPUs.
        if len(pins) != len(vcpus) {
-               return -1, -1, -1, nil, fmt.Errorf("Unavailable CPUs requested: %s", limit)
+               return -1, -1, -1, nil, nil, fmt.Errorf("Unavailable CPUs requested: %s", limit)
        }
 
        // Validate the topology.
@@ -4358,5 +4404,5 @@ func (vm *qemu) cpuTopology(limit string) (int, int, int, map[uint64]uint64, err
                nrThreads = 1
        }
 
-       return nrSockets, nrCores, nrThreads, vcpus, nil
+       return nrSockets, nrCores, nrThreads, vcpus, numaNodes, nil
 }
diff --git a/lxd/instance/drivers/driver_qemu_templates.go b/lxd/instance/drivers/driver_qemu_templates.go
index 9eb9079387..b2f8f9def5 100644
--- a/lxd/instance/drivers/driver_qemu_templates.go
+++ b/lxd/instance/drivers/driver_qemu_templates.go
@@ -154,6 +154,21 @@ cpus = "{{.cpuCount}}"
 sockets = "{{.cpuSockets}}"
 cores = "{{.cpuCores}}"
 threads = "{{.cpuThreads}}"
+
+{{range $index, $element := .cpuNumaNodes}}
+[numa]
+type = "node"
+nodeid = "{{$element}}"
+{{end}}
+
+{{range .cpuNumaMapping}}
+[numa]
+type = "cpu"
+node-id = "{{.node}}"
+socket-id = "{{.socket}}"
+core-id = "{{.core}}"
+thread-id = "{{.thread}}"
+{{end}}
 `))
 
 var qemuControlSocket = template.Must(template.New("qemuControlSocket").Parse(`
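
To make the template addition concrete: for a hypothetical guest pinned to four
host CPUs spread over two NUMA nodes (ending up as 2 sockets x 2 cores x 1
thread), the new sections would render to roughly the following; the actual
values depend entirely on the host topology and the pinned set:

[numa]
type = "node"
nodeid = "0"

[numa]
type = "node"
nodeid = "1"

[numa]
type = "cpu"
node-id = "0"
socket-id = "0"
core-id = "0"
thread-id = "0"

[numa]
type = "cpu"
node-id = "0"
socket-id = "0"
core-id = "1"
thread-id = "0"

[numa]
type = "cpu"
node-id = "1"
socket-id = "1"
core-id = "0"
thread-id = "0"

[numa]
type = "cpu"
node-id = "1"
socket-id = "1"
core-id = "1"
thread-id = "0"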