---
src/qemu/qemu_driver.c | 259 +++-
1 files changed, 234 insertions(+), 25 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 8d54e58..c5d0e05 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5111,6 +5111,7 @@ static char *qemuGetSchedulerType(virDomainPtr dom,
{
struct qemud_driver *driver = dom->conn->privateData;
char *ret = NULL;
+char *cfs_period_path = NULL;
qemuDriverLock(driver);
if (!qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPU)) {
@@ -5119,14 +5120,29 @@ static char *qemuGetSchedulerType(virDomainPtr dom,
goto cleanup;
}
-if (nparams)
-*nparams = 1;
+/* check whether the host supports CFS bandwidth */
+if (virCgroupPathOfController(driver->cgroup, VIR_CGROUP_CONTROLLER_CPU,
+ "cpu.cfs_period_us", &cfs_period_path) < 0) {
+qemuReportError(VIR_ERR_INTERNAL_ERROR,
+"%s",
+_("cannot get the path of cgroup CPU controller"));
+goto cleanup;
+}
+
+if (nparams) {
+if (access(cfs_period_path, F_OK) < 0) {
+*nparams = 1;
+} else {
+*nparams = 3;
+}
+}
ret = strdup("posix");
if (!ret)
virReportOOMError();
cleanup:
+VIR_FREE(cfs_period_path);
qemuDriverUnlock(driver);
return ret;
}
@@ -5753,6 +5769,48 @@ cleanup:
return ret;
}
+static int
+qemuSetVcpusBWLive(virDomainObjPtr vm, virCgroupPtr cgroup,
+ unsigned long long period, long long quota)
+{
+int i;
+qemuDomainObjPrivatePtr priv = vm->privateData;
+virCgroupPtr cgroup_vcpu = NULL;
+int rc;
+
+if (period == 0 && quota == 0)
+return 0;
+
+if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
+/* If we does not know VCPU<->PID mapping or all vcpu runs in the same
+ * thread, we can not control each vcpu.
+ */
+return qemuSetupCgroupVcpuBW(cgroup, period, quota);
+}
+
+for (i = 0; i < priv->nvcpupids; i++) {
+rc = virCgroupForVcpu(cgroup, i, &cgroup_vcpu, 0);
+if (rc < 0) {
+virReportSystemError(-rc,
+ _("Unable to find vcpu cgroup for %s(vcpu: %d)"),
+ vm->def->name, i);
+goto cleanup;
+}
+
+if (qemuSetupCgroupVcpuBW(cgroup_vcpu, period, quota) < 0)
+goto cleanup;
+
+virCgroupFree(cgroup_vcpu);
+}
+
+return 0;
+
+cleanup:
+virCgroupFree(cgroup_vcpu);
+return -1;
+}
+
static int qemuSetSchedulerParametersFlags(virDomainPtr dom,
virTypedParameterPtr params,
int nparams,
@@ -5762,9 +5820,10 @@ static int qemuSetSchedulerParametersFlags(virDomainPtr dom,
int i;
virCgroupPtr group = NULL;
virDomainObjPtr vm = NULL;
-virDomainDefPtr persistentDef = NULL;
+virDomainDefPtr vmdef = NULL;
int ret = -1;
bool isActive;
+int rc;
virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
VIR_DOMAIN_AFFECT_CONFIG, -1);
@@ -5788,10 +5847,17 @@ static int qemuSetSchedulerParametersFlags(virDomainPtr dom,
flags = VIR_DOMAIN_AFFECT_CONFIG;
}
-if ((flags & VIR_DOMAIN_AFFECT_CONFIG) && !vm->persistent) {
-qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
-_("cannot change persistent config of a transient domain"));
-goto cleanup;
+if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
+if (!vm->persistent) {
+qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
+_("cannot change persistent config of a transient domain"));
+goto cleanup;
+}
+
+/* Make a copy for updated domain. */
+vmdef = virDomainObjCopyPersistentDef(driver->caps, vm);
+if (!vmdef)
+goto cleanup;
}
if (flags & VIR_DOMAIN_AFFECT_LIVE) {
@@ -5818,7 +5884,6 @@ static int qemuSetSchedulerParametersFlags(virDomainPtr dom,
virTypedParameterPtr param = params[i];
if (STREQ(param->field, "cpu_shares")) {
-int rc;
if (param->type != VIR_TYPED_PARAM_ULLONG) {
qemuReportError(VIR_ERR_INVALID_ARG, "%s",
_("invalid type for cpu_shares tunable, expected a 'ullong'"));
@@ -5837,19 +5902,47 @@ static int qemuSetSchedulerParametersFlags(virDomainPtr dom,
}
if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
-persistentDef = virDomainObjGetPersistentDef(driver->caps, vm);
-if (!persistentDef) {
-qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
-_("can't get persistentDef"));
+vmdef->cputune.shares = params[i].value.ul;
+