From: Amos Kong <ak...@redhat.com>

The NumaNode class checks the host NUMA hardware dynamically, uses a dict
to record the pin status of each cpu, and pins a process to a single cpu
with the 'taskset' command.

Guest memory pinning is already implemented in the framework (kvm_vm.py);
process pinning needs to be done in the test cases.

Example:
|  numa_node = -1 # last node
|  p = virt_utils.NumaNode(numa_node)
|  vhost_threads = commands.getoutput("ps aux | grep '\[vhost-.*\]' "
|                                     "| grep -v grep | awk '{print $2}'")
|  for i in vhost_threads.split():
|      logging.debug("pin vhost_net thread(%s) to host cpu node" % i)
|      p.pin_cpu(i)
|  o = vm.monitor.info("cpus")
|  for i in re.findall("thread_id=(\d+)", o):
|      logging.debug("pin vcpu thread(%s) to host cpu node" % i)
|      p.pin_cpu(i)
|  p.show()
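
(The snippet above still uses the commands module; with the v2 switch to
utils.run() described below, a test case would presumably look like the
untested sketch here, where the ps/grep/awk pipeline is only illustrative.)

|  from autotest_lib.client.bin import utils
|
|  p = virt_utils.NumaNode(-1)   # -1 means the last host node
|  cmd = "ps aux | grep '\[vhost-.*\]' | grep -v grep | awk '{print $2}'"
|  for pid in utils.run(cmd).stdout.split():
|      p.pin_cpu(pid)            # pin each vhost-net thread to a free cpu
|  for pid in re.findall("thread_id=(\d+)", vm.monitor.info("cpus")):
|      p.pin_cpu(pid)            # pin each vcpu thread to a free cpu
|  p.show()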

Changes from v1:
 * Removed usage of the commands API, using utils.run() instead
 * Added unittests for NumaNode
 * Improved docstrings
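
Since the new unit tests stub utils.run() with a mock (see
virt_utils_unittest.py below), they should run standalone without real
numactl/taskset on the box, presumably via:

|  $ cd client/virt
|  $ ./virt_utils_unittest.py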

Signed-off-by: Amos Kong <ak...@redhat.com>
Signed-off-by: Lucas Meneghel Rodrigues <l...@redhat.com>
---
 client/tests/kvm/base.cfg.sample   |    6 ++-
 client/virt/kvm_vm.py              |    8 +++
 client/virt/virt_utils.py          |   80 +++++++++++++++++++++++++
 client/virt/virt_utils_unittest.py |  112 +++++++++++++++++++++++++++++++++++-
 4 files changed, 204 insertions(+), 2 deletions(-)

diff --git a/client/tests/kvm/base.cfg.sample b/client/tests/kvm/base.cfg.sample
index 21fa513..411decf 100644
--- a/client/tests/kvm/base.cfg.sample
+++ b/client/tests/kvm/base.cfg.sample
@@ -147,6 +147,11 @@ shell_port = 22
 used_cpus = 1
 used_mem = 512
 
+# NUMA pinning params
+# pin guest memory to the 1st numa node
+# pin processes to the host cpus of the 1st node
+# numa_node = 1
+
 # Port redirections
 redirs = remote_shell
 guest_port_remote_shell = 22
@@ -159,4 +164,3 @@ login_timeout = 360
 
 # NFS directory of guest images
 images_good = fileserver.foo.com:/autotest/images_good
-
diff --git a/client/virt/kvm_vm.py b/client/virt/kvm_vm.py
index 6747c2b..fa258c3 100644
--- a/client/virt/kvm_vm.py
+++ b/client/virt/kvm_vm.py
@@ -473,6 +473,14 @@ class VM(virt_vm.BaseVM):
             qemu_cmd += "LD_LIBRARY_PATH=%s " % library_path
         if params.get("qemu_audio_drv"):
             qemu_cmd += "QEMU_AUDIO_DRV=%s " % params.get("qemu_audio_drv")
+        # Add numa memory cmd to pin guest memory to numa node
+        if params.get("numa_node"):
+            numa_node = int(params.get("numa_node"))
+            if numa_node < 0:
+                p = virt_utils.NumaNode(numa_node)
+                qemu_cmd += "numactl -m %s " % (int(p.get_node_num()) + numa_node)
+            else:
+                qemu_cmd += "numactl -m %s " % (numa_node - 1)
         # Add the qemu binary
         qemu_cmd += qemu_binary
         # Add the VM's name
diff --git a/client/virt/virt_utils.py b/client/virt/virt_utils.py
index 95b2883..b2694ec 100644
--- a/client/virt/virt_utils.py
+++ b/client/virt/virt_utils.py
@@ -3452,3 +3452,83 @@ def virt_test_assistant(test_name, test_dir, base_dir, default_userspace_paths,
     logging.info("Autotest prints the results dir, so you can look at DEBUG "
                  "logs if something went wrong")
     logging.info("You can also edit the test config files")
+
+
+class NumaNode(object):
+    """
+    Numa node to control processes and shared memory.
+    """
+    def __init__(self, i=-1):
+        self.num = self.get_node_num()
+        if i < 0:
+            self.cpus = self.get_node_cpus(int(self.num) + i).split()
+        else:
+            self.cpus = self.get_node_cpus(i - 1).split()
+        self.dict = {}
+        for i in self.cpus:
+            self.dict[i] = "free"
+
+
+    def get_node_num(self):
+        """
+        Get the number of numa nodes on the current host.
+        """
+        cmd = utils.run("numactl --hardware")
+        return re.findall("available: (\d+) nodes", cmd.stdout)[0]
+
+
+    def get_node_cpus(self, i):
+        """
+        Get the cpus of a specific node.
+
+        @param i: Index of the node.
+        """
+        cmd = utils.run("numactl --hardware")
+        return re.findall("node %s cpus: (.*)" % i, cmd.stdout)[0]
+
+
+    def free_cpu(self, i):
+        """
+        Release the pin on one cpu.
+
+        @param i: Index of the cpu.
+        """
+        self.dict[i] = "free"
+
+
+    def _flush_pin(self):
+        """
+        Flush the pin dict, removing records of processes that have exited.
+        """
+        cmd = utils.run("ps -eLf | awk '{print $4}'")
+        all_pids = cmd.stdout
+        for i in self.cpus:
+            if self.dict[i] != "free" and self.dict[i] not in all_pids:
+                self.free_cpu(i)
+
+
+    @error.context_aware
+    def pin_cpu(self, process):
+        """
+        Pin one process to a single cpu.
+
+        @param process: Process ID.
+        """
+        self._flush_pin()
+        error.context("Pinning process %s to the CPU" % process)
+        for i in self.cpus:
+            if self.dict[i] == "free":
+                self.dict[i] = str(process)
+                cmd = "taskset -p %s %s" % (hex(2 ** int(i)), process)
+                logging.debug("NumaNode (%s): " % i + cmd)
+                utils.run(cmd)
+                return i
+
+
+    def show(self):
+        """
+        Display the record dict in a convenient way.
+        """
+        logging.info("Numa Node record dict:")
+        for i in self.cpus:
+            logging.info("    %s: %s" % (i, self.dict[i]))
diff --git a/client/virt/virt_utils_unittest.py b/client/virt/virt_utils_unittest.py
index 5509ea6..a895ffd 100755
--- a/client/virt/virt_utils_unittest.py
+++ b/client/virt/virt_utils_unittest.py
@@ -1,8 +1,10 @@
 #!/usr/bin/python
 
-import unittest
+import unittest, logging
 import common
 from autotest_lib.client.virt import virt_utils
+from autotest_lib.client.bin import utils
+from autotest_lib.client.common_lib.test_utils import mock
 from autotest_lib.client.common_lib import cartesian_config
 
 class virt_utils_test(unittest.TestCase):
@@ -82,5 +84,113 @@ git_repo_foo_commit = bc732ad8b2ed8be52160b893735417b43a1e91a8
         self.assertEqual(h.commit, 'bc732ad8b2ed8be52160b893735417b43a1e91a8')
 
 
+class FakeCmd(object):
+    def __init__(self, cmd):
+        self.fake_cmds = [
+{"cmd": "numactl --hardware",
+"stdout": """
+available: 1 nodes (0)
+node 0 cpus: 0 1 2 3 4 5 6 7
+node 0 size: 18431 MB
+node 0 free: 17186 MB
+node distances:
+node   0
+  0:  10
+"""},
+{"cmd": "ps -eLf | awk '{print $4}'",
+"stdout": """
+1230
+1231
+1232
+1233
+1234
+1235
+1236
+1237
+"""},
+{"cmd": "taskset -p 0x1 1230", "stdout": ""},
+{"cmd": "taskset -p 0x2 1231", "stdout": ""},
+{"cmd": "taskset -p 0x4 1232", "stdout": ""},
+{"cmd": "taskset -p 0x8 1233", "stdout": ""},
+{"cmd": "taskset -p 0x10 1234", "stdout": ""},
+{"cmd": "taskset -p 0x20 1235", "stdout": ""},
+{"cmd": "taskset -p 0x40 1236", "stdout": ""},
+{"cmd": "taskset -p 0x80 1237", "stdout": ""},
+
+]
+
+        self.stdout = self.get_stdout(cmd)
+
+
+    def get_stdout(self, cmd):
+        for fake_cmd in self.fake_cmds:
+            if fake_cmd['cmd'] == cmd:
+                return fake_cmd['stdout']
+        raise ValueError("Could not locate locate '%s' on fake cmd db" % cmd)
+
+
+def utils_run(cmd):
+    return FakeCmd(cmd)
+
+
+class TestNumaNode(unittest.TestCase):
+    def setUp(self):
+        self.god = mock.mock_god(ut=self)
+        self.god.stub_with(utils, 'run', utils_run)
+        self.numa_node = virt_utils.NumaNode(-1)
+
+
+    def test_get_node_num(self):
+        self.assertEqual(self.numa_node.get_node_num(), '1')
+
+
+    def test_get_node_cpus(self):
+        self.assertEqual(self.numa_node.get_node_cpus(0), '0 1 2 3 4 5 6 7')
+
+
+    def test_pin_cpu(self):
+        self.assertEqual(self.numa_node.pin_cpu("1230"), "0")
+        self.assertEqual(self.numa_node.dict["0"], "1230")
+
+        self.assertEqual(self.numa_node.pin_cpu("1231"), "1")
+        self.assertEqual(self.numa_node.dict["1"], "1231")
+
+        self.assertEqual(self.numa_node.pin_cpu("1232"), "2")
+        self.assertEqual(self.numa_node.dict["2"], "1232")
+
+        self.assertEqual(self.numa_node.pin_cpu("1233"), "3")
+        self.assertEqual(self.numa_node.dict["3"], "1233")
+
+        self.assertEqual(self.numa_node.pin_cpu("1234"), "4")
+        self.assertEqual(self.numa_node.dict["4"], "1234")
+
+        self.assertEqual(self.numa_node.pin_cpu("1235"), "5")
+        self.assertEqual(self.numa_node.dict["5"], "1235")
+
+        self.assertEqual(self.numa_node.pin_cpu("1236"), "6")
+        self.assertEqual(self.numa_node.dict["6"], "1236")
+
+        self.assertEqual(self.numa_node.pin_cpu("1237"), "7")
+        self.assertEqual(self.numa_node.dict["7"], "1237")
+
+        self.assertNotIn("free", self.numa_node.dict.values())
+
+
+    def test_free_cpu(self):
+        self.assertEqual(self.numa_node.pin_cpu("1230"), "0")
+        self.assertEqual(self.numa_node.dict["0"], "1230")
+
+        self.assertEqual(self.numa_node.pin_cpu("1231"), "1")
+        self.assertEqual(self.numa_node.dict["1"], "1231")
+
+        self.numa_node.free_cpu("0")
+        self.assertEqual(self.numa_node.dict["0"], "free")
+        self.assertEqual(self.numa_node.dict["1"], "1231")
+
+
+    def tearDown(self):
+        self.god.unstub_all()
+
+
 if __name__ == '__main__':
     unittest.main()
-- 
1.7.7.3
