Re: [libvirt] [PATCH 1/1] vz: fix raise in vzDomainBlock

2017-05-03 Thread Konstantin Neumoin

On 05/03/2017 05:10 PM, Nikolay Shirokovskiy wrote:
> On 03.05.2017 13:44, Konstantin Neumoin wrote:
>> We need to begin the job before looking up the disk in the config,
>> because the config can be edited concurrently.
>
> I would slightly change the commit message to something like:

OK



[libvirt] [PATCH 1/1] vz: unlock dom until resize operation

2017-05-03 Thread Konstantin Neumoin
We have to use waitDomainJob instead of waitJob, because it unlocks
the domain while the job is running, so the domain stays available
to other clients.

Signed-off-by: Konstantin Neumoin <kneum...@virtuozzo.com>
---
 src/vz/vz_sdk.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/vz/vz_sdk.c b/src/vz/vz_sdk.c
index bc1a9eb..79b356d 100644
--- a/src/vz/vz_sdk.c
+++ b/src/vz/vz_sdk.c
@@ -4993,7 +4993,7 @@ int prlsdkResizeImage(virDomainObjPtr dom, 
virDomainDiskDefPtr disk,
 
 job = PrlVmDev_ResizeImage(prldisk, newsize,
PRIF_RESIZE_LAST_PARTITION);
-if (PRL_FAILED(waitJob(job)))
+if (PRL_FAILED(waitDomainJob(job, dom)))
 goto cleanup;
 
 ret = 0;
-- 
2.7.4
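For context, the locking difference between the two wait helpers can be sketched as below. This is an illustration of the behaviour described in the commit message, not the actual vz_sdk.c implementation (the real helpers take extra arguments and handle timeouts):

/* Illustrative sketch only -- not the real vz_sdk.c code. */
static PRL_RESULT
waitDomainJobSketch(PRL_HANDLE job, virDomainObjPtr dom)
{
    PRL_RESULT ret;

    /* waitJob() would block here with the domain object lock held,
     * stalling every other API call against the same domain. */
    virObjectUnlock(dom);      /* let other clients use the domain ... */
    ret = waitJob(job);        /* ... while the SDK job is running */
    virObjectLock(dom);        /* re-take the lock before returning */
    return ret;
}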



[libvirt] [PATCH 1/1] vz: fix raise in vzDomainBlock

2017-05-03 Thread Konstantin Neumoin
We need to begin the job before looking up the disk in the config,
because the config can be edited concurrently.

Signed-off-by: Konstantin Neumoin <kneum...@virtuozzo.com>
---
 src/vz/vz_driver.c | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/vz/vz_driver.c b/src/vz/vz_driver.c
index 8f94326..954ca6a 100644
--- a/src/vz/vz_driver.c
+++ b/src/vz/vz_driver.c
@@ -4000,12 +4000,6 @@ vzDomainBlockResize(virDomainPtr domain,
 size /= 1024;
 size /= 1024;
 
-if (!(disk = virDomainDiskByName(dom->def, path, false))) {
-virReportError(VIR_ERR_INVALID_ARG,
-   _("invalid path: %s"), path);
-goto cleanup;
-}
-
 if (vzDomainObjBeginJob(dom) < 0)
 goto cleanup;
 job = true;
@@ -4019,6 +4013,12 @@ vzDomainBlockResize(virDomainPtr domain,
 goto cleanup;
 }
 
+if (!(disk = virDomainDiskByName(dom->def, path, false))) {
+virReportError(VIR_ERR_INVALID_ARG,
+   _("invalid path: %s"), path);
+goto cleanup;
+}
+
 ret = prlsdkResizeImage(dom, disk, size);
 
  cleanup:
-- 
2.7.4
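The reordering matters because virDomainDiskByName() returns a pointer into dom->def, which a concurrent config update can replace while no job is held. A condensed sketch of the ordering the patch enforces, mirroring the hunk above (illustration only):

if (vzDomainObjBeginJob(dom) < 0)      /* serialize against config edits */
    goto cleanup;
job = true;

if (vzEnsureDomainExists(dom) < 0)     /* the domain may be gone by now */
    goto cleanup;

/* Only now is it safe to keep a pointer into dom->def. */
if (!(disk = virDomainDiskByName(dom->def, path, false))) {
    virReportError(VIR_ERR_INVALID_ARG, _("invalid path: %s"), path);
    goto cleanup;
}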



[libvirt] [PATCH 1/1] vz: minor cleanup in prlsdkDomainSetUserPassword

2017-05-03 Thread Konstantin Neumoin
There is no need to begin a job for an asynchronous operation.

Signed-off-by: Konstantin Neumoin <kneum...@virtuozzo.com>
---
 src/vz/vz_sdk.c | 16 ++--
 1 file changed, 2 insertions(+), 14 deletions(-)

diff --git a/src/vz/vz_sdk.c b/src/vz/vz_sdk.c
index 138aea3..bc1a9eb 100644
--- a/src/vz/vz_sdk.c
+++ b/src/vz/vz_sdk.c
@@ -3926,30 +3926,18 @@ prlsdkDomainSetUserPassword(virDomainObjPtr dom,
 const char *user,
 const char *password)
 {
-int ret = -1;
 vzDomObjPtr privdom = dom->privateData;
 PRL_HANDLE job = PRL_INVALID_HANDLE;
 
-job = PrlVm_BeginEdit(privdom->sdkdom);
-if (PRL_FAILED(waitDomainJob(job, dom)))
-goto cleanup;
-
 job = PrlVm_SetUserPasswd(privdom->sdkdom,
   user,
   password,
   0);
 
 if (PRL_FAILED(waitDomainJob(job, dom)))
-goto cleanup;
-
-job = PrlVm_CommitEx(privdom->sdkdom, 0);
-if (PRL_FAILED(waitDomainJob(job, dom)))
-goto cleanup;
-
-ret = 0;
+return -1;
 
- cleanup:
-return ret;
+return 0;
 }
 
 static int
-- 
2.7.4



[libvirt] [PATCH 1/2] vz: support virDomainSetVcpus

2017-04-14 Thread Konstantin Neumoin
Acked-by: Nikolay Shirokovskiy <nshirokovs...@virtuozzo.com>
Signed-off-by: Konstantin Neumoin <kneum...@virtuozzo.com>
---
 src/vz/vz_driver.c | 43 +++
 src/vz/vz_sdk.c| 23 +++
 src/vz/vz_sdk.h|  1 +
 3 files changed, 67 insertions(+)

diff --git a/src/vz/vz_driver.c b/src/vz/vz_driver.c
index da83a8f..ed7132f 100644
--- a/src/vz/vz_driver.c
+++ b/src/vz/vz_driver.c
@@ -3905,6 +3905,47 @@ vzDomainReset(virDomainPtr domain, unsigned int flags)
 return ret;
 }
 
+static int vzDomainSetVcpusFlags(virDomainPtr domain, unsigned int nvcpus,
+ unsigned int flags)
+{
+virDomainObjPtr dom = NULL;
+int ret = -1;
+bool job = false;
+
+virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
+  VIR_DOMAIN_AFFECT_CONFIG, -1);
+
+if (!(dom = vzDomObjFromDomainRef(domain)))
+goto cleanup;
+
+if (vzCheckConfigUpdateFlags(dom, &flags) < 0)
+goto cleanup;
+
+if (virDomainSetVcpusFlagsEnsureACL(domain->conn, dom->def, flags) < 0)
+goto cleanup;
+
+if (vzDomainObjBeginJob(dom) < 0)
+goto cleanup;
+job = true;
+
+if (vzEnsureDomainExists(dom) < 0)
+goto cleanup;
+
+ret = prlsdkSetCpuCount(dom, nvcpus);
+
+ cleanup:
+if (job)
+vzDomainObjEndJob(dom);
+virDomainObjEndAPI(&dom);
+return ret;
+}
+
+static int vzDomainSetVcpus(virDomainPtr dom, unsigned int nvcpus)
+{
+return vzDomainSetVcpusFlags(dom, nvcpus,
+ VIR_DOMAIN_AFFECT_LIVE | 
VIR_DOMAIN_AFFECT_CONFIG);
+}
+
 static virHypervisorDriver vzHypervisorDriver = {
 .name = "vz",
 .connectOpen = vzConnectOpen,/* 0.10.0 */
@@ -3954,6 +3995,8 @@ static virHypervisorDriver vzHypervisorDriver = {
 .domainDetachDeviceFlags = vzDomainDetachDeviceFlags, /* 1.2.15 */
 .domainIsActive = vzDomainIsActive, /* 1.2.10 */
 .domainIsUpdated = vzDomainIsUpdated, /* 1.2.21 */
+.domainSetVcpus = vzDomainSetVcpus, /* 3.3.0 */
+.domainSetVcpusFlags = vzDomainSetVcpusFlags, /* 3.3.0 */
 .domainGetVcpusFlags = vzDomainGetVcpusFlags, /* 1.2.21 */
 .domainGetMaxVcpus = vzDomainGetMaxVcpus, /* 1.2.21 */
 .domainSetUserPassword = vzDomainSetUserPassword, /* 2.0.0 */
diff --git a/src/vz/vz_sdk.c b/src/vz/vz_sdk.c
index c1a50fd..2daa44a 100644
--- a/src/vz/vz_sdk.c
+++ b/src/vz/vz_sdk.c
@@ -4902,3 +4902,26 @@ int prlsdkMigrate(virDomainObjPtr dom, virURIPtr uri,
  cleanup:
 return ret;
 }
+
+int prlsdkSetCpuCount(virDomainObjPtr dom, unsigned int count)
+{
+vzDomObjPtr privdom = dom->privateData;
+PRL_HANDLE job;
+PRL_RESULT pret;
+
+job = PrlVm_BeginEdit(privdom->sdkdom);
+if (PRL_FAILED(waitDomainJob(job, dom)))
+goto error;
+
+pret = PrlVmCfg_SetCpuCount(privdom->sdkdom, count);
+prlsdkCheckRetGoto(pret, error);
+
+job = PrlVm_CommitEx(privdom->sdkdom, 0);
+if (PRL_FAILED(waitDomainJob(job, dom)))
+goto error;
+
+return 0;
+
+ error:
+return -1;
+}
diff --git a/src/vz/vz_sdk.h b/src/vz/vz_sdk.h
index f8da2ad..100a5e3 100644
--- a/src/vz/vz_sdk.h
+++ b/src/vz/vz_sdk.h
@@ -71,6 +71,7 @@ int
 prlsdkGetMemoryStats(PRL_HANDLE sdkstas, virDomainMemoryStatPtr stats, 
unsigned int nr_stats);
 /* memsize is in MiB */
 int prlsdkSetMemsize(virDomainObjPtr dom, unsigned int memsize);
+int prlsdkSetCpuCount(virDomainObjPtr dom, unsigned int count);
 int
 prlsdkDomainSetUserPassword(virDomainObjPtr dom,
 const char *user,
-- 
2.7.4
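A minimal client-side example of exercising the new entry point through the public libvirt API; the connection URI and domain name are placeholders rather than anything taken from the patch:

#include <stdio.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = virConnectOpen("vz:///system");   /* assumed URI */
    virDomainPtr dom = NULL;
    int ret = 1;

    if (!conn)
        return 1;

    if (!(dom = virDomainLookupByName(conn, "mydom")))     /* placeholder name */
        goto cleanup;

    /* Routed to vzDomainSetVcpusFlags() -> prlsdkSetCpuCount() above. */
    if (virDomainSetVcpusFlags(dom, 4,
                               VIR_DOMAIN_AFFECT_LIVE |
                               VIR_DOMAIN_AFFECT_CONFIG) < 0) {
        fprintf(stderr, "failed to set vcpu count\n");
        goto cleanup;
    }
    ret = 0;

 cleanup:
    if (dom)
        virDomainFree(dom);
    virConnectClose(conn);
    return ret;
}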



[libvirt] [PATCH 2/2] vz: support virDomainBlockResize

2017-04-14 Thread Konstantin Neumoin
Acked-by: Nikolay Shirokovskiy <nshirokovs...@virtuozzo.com>
Signed-off-by: Konstantin Neumoin <kneum...@virtuozzo.com>
---
 src/vz/vz_driver.c | 58 ++
 src/vz/vz_sdk.c| 37 ++
 src/vz/vz_sdk.h|  1 +
 3 files changed, 96 insertions(+)

diff --git a/src/vz/vz_driver.c b/src/vz/vz_driver.c
index ed7132f..059e7c9 100644
--- a/src/vz/vz_driver.c
+++ b/src/vz/vz_driver.c
@@ -3945,6 +3945,63 @@ static int vzDomainSetVcpus(virDomainPtr dom, unsigned 
int nvcpus)
 return vzDomainSetVcpusFlags(dom, nvcpus,
  VIR_DOMAIN_AFFECT_LIVE | 
VIR_DOMAIN_AFFECT_CONFIG);
 }
+static int
+vzDomainBlockResize(virDomainPtr domain,
+const char *path,
+unsigned long long size,
+unsigned int flags)
+{
+virDomainObjPtr dom = NULL;
+virDomainDiskDefPtr disk = NULL;
+int ret = -1;
+bool job = false;
+
+virCheckFlags(VIR_DOMAIN_BLOCK_RESIZE_BYTES, -1);
+
+if (!(dom = vzDomObjFromDomainRef(domain)))
+goto cleanup;
+
+if (virDomainBlockResizeEnsureACL(domain->conn, dom->def) < 0)
+goto cleanup;
+
+if (path[0] == '\0') {
+virReportError(VIR_ERR_INVALID_ARG,
+   "%s", _("empty path"));
+goto cleanup;
+}
+
+/* sdk wants Mb */
+if (flags & VIR_DOMAIN_BLOCK_RESIZE_BYTES)
+size /= 1024;
+size /= 1024;
+
+if (!(disk = virDomainDiskByName(dom->def, path, false))) {
+virReportError(VIR_ERR_INVALID_ARG,
+   _("invalid path: %s"), path);
+goto cleanup;
+}
+
+if (vzDomainObjBeginJob(dom) < 0)
+goto cleanup;
+job = true;
+
+if (vzEnsureDomainExists(dom) < 0)
+goto cleanup;
+
+if (!virDomainObjIsActive(dom)) {
+virReportError(VIR_ERR_OPERATION_INVALID,
+   "%s", _("domain is not running"));
+goto cleanup;
+}
+
+ret = prlsdkResizeImage(dom, disk, size);
+
+ cleanup:
+if (job)
+vzDomainObjEndJob(dom);
+virDomainObjEndAPI(&dom);
+return ret;
+}
 
 static virHypervisorDriver vzHypervisorDriver = {
 .name = "vz",
@@ -4046,6 +4103,7 @@ static virHypervisorDriver vzHypervisorDriver = {
 .connectGetAllDomainStats = vzConnectGetAllDomainStats, /* 3.1.0 */
 .domainAbortJob = vzDomainAbortJob, /* 3.1.0 */
 .domainReset = vzDomainReset, /* 3.1.0 */
+.domainBlockResize = vzDomainBlockResize, /* 3.3.0 */
 };
 
 static virConnectDriver vzConnectDriver = {
diff --git a/src/vz/vz_sdk.c b/src/vz/vz_sdk.c
index 2daa44a..4d2c6b0 100644
--- a/src/vz/vz_sdk.c
+++ b/src/vz/vz_sdk.c
@@ -4925,3 +4925,40 @@ int prlsdkSetCpuCount(virDomainObjPtr dom, unsigned int 
count)
  error:
 return -1;
 }
+
+int prlsdkResizeImage(virDomainObjPtr dom, virDomainDiskDefPtr disk,
+  unsigned long long newsize)
+{
+int ret = -1;
+PRL_RESULT pret;
+vzDomObjPtr privdom = dom->privateData;
+PRL_UINT32 emulatedType;
+PRL_HANDLE job = PRL_INVALID_HANDLE;
+PRL_HANDLE prldisk = PRL_INVALID_HANDLE;
+
+prldisk = prlsdkGetDisk(privdom->sdkdom, disk);
+if (prldisk == PRL_INVALID_HANDLE)
+goto cleanup;
+
+pret = PrlVmDev_GetEmulatedType(prldisk, &emulatedType);
+prlsdkCheckRetGoto(pret, cleanup);
+
+if (emulatedType != PDT_USE_IMAGE_FILE &&
+emulatedType != PDT_USE_FILE_SYSTEM) {
+virReportError(VIR_ERR_INVALID_ARG, "%s",
+   _("Only disk image supported for resize"));
+goto cleanup;
+}
+
+job = PrlVmDev_ResizeImage(prldisk, newsize,
+   PRIF_RESIZE_LAST_PARTITION);
+if (PRL_FAILED(waitJob(job)))
+goto cleanup;
+
+ret = 0;
+
+ cleanup:
+
+PrlHandle_Free(prldisk);
+return ret;
+}
diff --git a/src/vz/vz_sdk.h b/src/vz/vz_sdk.h
index 100a5e3..0a77431 100644
--- a/src/vz/vz_sdk.h
+++ b/src/vz/vz_sdk.h
@@ -90,3 +90,4 @@ prlsdkMigrate(virDomainObjPtr dom,
 PRL_HANDLE
 prlsdkSdkDomainLookupByName(vzDriverPtr driver, const char *name);
 int prlsdkCancelJob(virDomainObjPtr dom);
+int prlsdkResizeImage(virDomainObjPtr dom, virDomainDiskDefPtr disk, unsigned 
long long newsize);
-- 
2.7.4
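A short usage sketch for the new callback; the disk path and size are placeholders. Without VIR_DOMAIN_BLOCK_RESIZE_BYTES the public API takes the size in KiB, and the driver above converts either unit down to MiB before calling PrlVmDev_ResizeImage():

#include <libvirt/libvirt.h>

/* Grow a disk of a vz domain to 20 GiB; dom is assumed to be a valid,
 * running domain obtained elsewhere. */
static int
resize_disk(virDomainPtr dom)
{
    unsigned long long new_size = 20ULL * 1024 * 1024 * 1024;   /* bytes */

    return virDomainBlockResize(dom,
                                "/vz/vmprivate/disk1.hdd",  /* placeholder */
                                new_size,
                                VIR_DOMAIN_BLOCK_RESIZE_BYTES);
}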



[libvirt] [PATCH 1/2] vz: support virDomainSetVcpus

2017-04-14 Thread Konstantin Neumoin
Signed-off-by: Konstantin Neumoin <kneum...@virtuozzo.com>
---
 src/vz/vz_driver.c | 43 +++
 src/vz/vz_sdk.c| 23 +++
 src/vz/vz_sdk.h|  1 +
 3 files changed, 67 insertions(+)

diff --git a/src/vz/vz_driver.c b/src/vz/vz_driver.c
index da83a8f..ed7132f 100644
--- a/src/vz/vz_driver.c
+++ b/src/vz/vz_driver.c
@@ -3905,6 +3905,47 @@ vzDomainReset(virDomainPtr domain, unsigned int flags)
 return ret;
 }
 
+static int vzDomainSetVcpusFlags(virDomainPtr domain, unsigned int nvcpus,
+ unsigned int flags)
+{
+virDomainObjPtr dom = NULL;
+int ret = -1;
+bool job = false;
+
+virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
+  VIR_DOMAIN_AFFECT_CONFIG, -1);
+
+if (!(dom = vzDomObjFromDomainRef(domain)))
+goto cleanup;
+
+if (vzCheckConfigUpdateFlags(dom, &flags) < 0)
+goto cleanup;
+
+if (virDomainSetVcpusFlagsEnsureACL(domain->conn, dom->def, flags) < 0)
+goto cleanup;
+
+if (vzDomainObjBeginJob(dom) < 0)
+goto cleanup;
+job = true;
+
+if (vzEnsureDomainExists(dom) < 0)
+goto cleanup;
+
+ret = prlsdkSetCpuCount(dom, nvcpus);
+
+ cleanup:
+if (job)
+vzDomainObjEndJob(dom);
+virDomainObjEndAPI(&dom);
+return ret;
+}
+
+static int vzDomainSetVcpus(virDomainPtr dom, unsigned int nvcpus)
+{
+return vzDomainSetVcpusFlags(dom, nvcpus,
+ VIR_DOMAIN_AFFECT_LIVE | 
VIR_DOMAIN_AFFECT_CONFIG);
+}
+
 static virHypervisorDriver vzHypervisorDriver = {
 .name = "vz",
 .connectOpen = vzConnectOpen,/* 0.10.0 */
@@ -3954,6 +3995,8 @@ static virHypervisorDriver vzHypervisorDriver = {
 .domainDetachDeviceFlags = vzDomainDetachDeviceFlags, /* 1.2.15 */
 .domainIsActive = vzDomainIsActive, /* 1.2.10 */
 .domainIsUpdated = vzDomainIsUpdated, /* 1.2.21 */
+.domainSetVcpus = vzDomainSetVcpus, /* 3.3.0 */
+.domainSetVcpusFlags = vzDomainSetVcpusFlags, /* 3.3.0 */
 .domainGetVcpusFlags = vzDomainGetVcpusFlags, /* 1.2.21 */
 .domainGetMaxVcpus = vzDomainGetMaxVcpus, /* 1.2.21 */
 .domainSetUserPassword = vzDomainSetUserPassword, /* 2.0.0 */
diff --git a/src/vz/vz_sdk.c b/src/vz/vz_sdk.c
index c1a50fd..2daa44a 100644
--- a/src/vz/vz_sdk.c
+++ b/src/vz/vz_sdk.c
@@ -4902,3 +4902,26 @@ int prlsdkMigrate(virDomainObjPtr dom, virURIPtr uri,
  cleanup:
 return ret;
 }
+
+int prlsdkSetCpuCount(virDomainObjPtr dom, unsigned int count)
+{
+vzDomObjPtr privdom = dom->privateData;
+PRL_HANDLE job;
+PRL_RESULT pret;
+
+job = PrlVm_BeginEdit(privdom->sdkdom);
+if (PRL_FAILED(waitDomainJob(job, dom)))
+goto error;
+
+pret = PrlVmCfg_SetCpuCount(privdom->sdkdom, count);
+prlsdkCheckRetGoto(pret, error);
+
+job = PrlVm_CommitEx(privdom->sdkdom, 0);
+if (PRL_FAILED(waitDomainJob(job, dom)))
+goto error;
+
+return 0;
+
+ error:
+return -1;
+}
diff --git a/src/vz/vz_sdk.h b/src/vz/vz_sdk.h
index f8da2ad..100a5e3 100644
--- a/src/vz/vz_sdk.h
+++ b/src/vz/vz_sdk.h
@@ -71,6 +71,7 @@ int
 prlsdkGetMemoryStats(PRL_HANDLE sdkstas, virDomainMemoryStatPtr stats, 
unsigned int nr_stats);
 /* memsize is in MiB */
 int prlsdkSetMemsize(virDomainObjPtr dom, unsigned int memsize);
+int prlsdkSetCpuCount(virDomainObjPtr dom, unsigned int count);
 int
 prlsdkDomainSetUserPassword(virDomainObjPtr dom,
 const char *user,
-- 
2.7.4



[libvirt] [PATCH 2/2] vz: support virDomainBlockResize

2017-04-14 Thread Konstantin Neumoin
Signed-off-by: Konstantin Neumoin <kneum...@virtuozzo.com>
---
 src/vz/vz_driver.c | 58 ++
 src/vz/vz_sdk.c| 37 ++
 src/vz/vz_sdk.h|  1 +
 3 files changed, 96 insertions(+)

diff --git a/src/vz/vz_driver.c b/src/vz/vz_driver.c
index ed7132f..24156ca 100644
--- a/src/vz/vz_driver.c
+++ b/src/vz/vz_driver.c
@@ -3945,6 +3945,63 @@ static int vzDomainSetVcpus(virDomainPtr dom, unsigned 
int nvcpus)
 return vzDomainSetVcpusFlags(dom, nvcpus,
  VIR_DOMAIN_AFFECT_LIVE | 
VIR_DOMAIN_AFFECT_CONFIG);
 }
+static int
+vzDomainBlockResize(virDomainPtr domain,
+const char *path,
+unsigned long long size,
+unsigned int flags)
+{
+virDomainObjPtr dom = NULL;
+virDomainDiskDefPtr disk = NULL;
+int ret = -1;
+bool job = false;
+
+virCheckFlags(VIR_DOMAIN_BLOCK_RESIZE_BYTES, -1);
+
+if (!(dom = vzDomObjFromDomainRef(domain)))
+goto cleanup;
+
+if (virDomainSetVcpusFlagsEnsureACL(domain->conn, dom->def, flags) < 0)
+goto cleanup;
+
+if (path[0] == '\0') {
+virReportError(VIR_ERR_INVALID_ARG,
+   "%s", _("empty path"));
+goto cleanup;
+}
+
+/* sdk wants Mb */
+if (flags & VIR_DOMAIN_BLOCK_RESIZE_BYTES)
+size /= 1024;
+size /= 1024;
+
+if (!(disk = virDomainDiskByName(dom->def, path, false))) {
+virReportError(VIR_ERR_INVALID_ARG,
+   _("invalid path: %s"), path);
+goto cleanup;
+}
+
+if (vzDomainObjBeginJob(dom) < 0)
+goto cleanup;
+job = true;
+
+if (vzEnsureDomainExists(dom) < 0)
+goto cleanup;
+
+if (!virDomainObjIsActive(dom)) {
+virReportError(VIR_ERR_OPERATION_INVALID,
+   "%s", _("domain is not running"));
+goto cleanup;
+}
+
+ret = prlsdkResizeImage(dom, disk, size);
+
+ cleanup:
+if (job)
+vzDomainObjEndJob(dom);
+virDomainObjEndAPI(&dom);
+return ret;
+}
 
 static virHypervisorDriver vzHypervisorDriver = {
 .name = "vz",
@@ -4046,6 +4103,7 @@ static virHypervisorDriver vzHypervisorDriver = {
 .connectGetAllDomainStats = vzConnectGetAllDomainStats, /* 3.1.0 */
 .domainAbortJob = vzDomainAbortJob, /* 3.1.0 */
 .domainReset = vzDomainReset, /* 3.1.0 */
+.domainBlockResize = vzDomainBlockResize, /* 3.3.0 */
 };
 
 static virConnectDriver vzConnectDriver = {
diff --git a/src/vz/vz_sdk.c b/src/vz/vz_sdk.c
index 2daa44a..4d2c6b0 100644
--- a/src/vz/vz_sdk.c
+++ b/src/vz/vz_sdk.c
@@ -4925,3 +4925,40 @@ int prlsdkSetCpuCount(virDomainObjPtr dom, unsigned int 
count)
  error:
 return -1;
 }
+
+int prlsdkResizeImage(virDomainObjPtr dom, virDomainDiskDefPtr disk,
+  unsigned long long newsize)
+{
+int ret = -1;
+PRL_RESULT pret;
+vzDomObjPtr privdom = dom->privateData;
+PRL_UINT32 emulatedType;
+PRL_HANDLE job = PRL_INVALID_HANDLE;
+PRL_HANDLE prldisk = PRL_INVALID_HANDLE;
+
+prldisk = prlsdkGetDisk(privdom->sdkdom, disk);
+if (prldisk == PRL_INVALID_HANDLE)
+goto cleanup;
+
+pret = PrlVmDev_GetEmulatedType(prldisk, &emulatedType);
+prlsdkCheckRetGoto(pret, cleanup);
+
+if (emulatedType != PDT_USE_IMAGE_FILE &&
+emulatedType != PDT_USE_FILE_SYSTEM) {
+virReportError(VIR_ERR_INVALID_ARG, "%s",
+   _("Only disk image supported for resize"));
+goto cleanup;
+}
+
+job = PrlVmDev_ResizeImage(prldisk, newsize,
+   PRIF_RESIZE_LAST_PARTITION);
+if (PRL_FAILED(waitJob(job)))
+goto cleanup;
+
+ret = 0;
+
+ cleanup:
+
+PrlHandle_Free(prldisk);
+return ret;
+}
diff --git a/src/vz/vz_sdk.h b/src/vz/vz_sdk.h
index 100a5e3..0a77431 100644
--- a/src/vz/vz_sdk.h
+++ b/src/vz/vz_sdk.h
@@ -90,3 +90,4 @@ prlsdkMigrate(virDomainObjPtr dom,
 PRL_HANDLE
 prlsdkSdkDomainLookupByName(vzDriverPtr driver, const char *name);
 int prlsdkCancelJob(virDomainObjPtr dom);
+int prlsdkResizeImage(virDomainObjPtr dom, virDomainDiskDefPtr disk, unsigned 
long long newsize);
-- 
2.7.4



[libvirt] [PATCH v3 python 1/2] move cpumap conversion code to a common helper

2016-11-03 Thread Konstantin Neumoin
All libvirt_virDomainPin* functions do the same thing to convert a
pycpumap into a cpumap, so this patch moves the common logic into a
new helper, virPyCpumapConvert.

Signed-off-by: Konstantin Neumoin <kneum...@virtuozzo.com>
---
 libvirt-override.c | 131 +
 libvirt-utils.c|  57 +++
 libvirt-utils.h|   5 ++
 3 files changed, 73 insertions(+), 120 deletions(-)

diff --git a/libvirt-override.c b/libvirt-override.c
index fa3e2ca..be299d4 100644
--- a/libvirt-override.c
+++ b/libvirt-override.c
@@ -1302,8 +1302,7 @@ libvirt_virDomainPinVcpu(PyObject *self ATTRIBUTE_UNUSED,
 PyObject *pyobj_domain, *pycpumap;
 PyObject *ret = NULL;
 unsigned char *cpumap;
-int cpumaplen, vcpu, tuple_size, cpunum;
-size_t i;
+int cpumaplen, vcpu, cpunum;
 int i_retval;
 
 if (!PyArg_ParseTuple(args, (char *)"OiO:virDomainPinVcpu",
@@ -1314,34 +1313,8 @@ libvirt_virDomainPinVcpu(PyObject *self ATTRIBUTE_UNUSED,
 if ((cpunum = getPyNodeCPUCount(virDomainGetConnect(domain))) < 0)
 return VIR_PY_INT_FAIL;
 
-if (PyTuple_Check(pycpumap)) {
-tuple_size = PyTuple_Size(pycpumap);
-if (tuple_size == -1)
-return ret;
-} else {
-PyErr_SetString(PyExc_TypeError, "Unexpected type, tuple is required");
-return ret;
-}
-
-cpumaplen = VIR_CPU_MAPLEN(cpunum);
-if (VIR_ALLOC_N(cpumap, cpumaplen) < 0)
-return PyErr_NoMemory();
-
-for (i = 0; i < tuple_size; i++) {
-PyObject *flag = PyTuple_GetItem(pycpumap, i);
-bool b;
-
-if (!flag || libvirt_boolUnwrap(flag, &b) < 0)
-goto cleanup;
-
-if (b)
-VIR_USE_CPU(cpumap, i);
-else
-VIR_UNUSE_CPU(cpumap, i);
-}
-
-for (; i < cpunum; i++)
-VIR_UNUSE_CPU(cpumap, i);
+if (virPyCpumapConvert(cpunum, pycpumap, &cpumap, &cpumaplen) < 0)
+return NULL;
 
 LIBVIRT_BEGIN_ALLOW_THREADS;
 i_retval = virDomainPinVcpu(domain, vcpu, cpumap, cpumaplen);
@@ -1366,8 +1339,7 @@ libvirt_virDomainPinVcpuFlags(PyObject *self 
ATTRIBUTE_UNUSED,
 PyObject *pyobj_domain, *pycpumap;
 PyObject *ret = NULL;
 unsigned char *cpumap;
-int cpumaplen, vcpu, tuple_size, cpunum;
-size_t i;
+int cpumaplen, vcpu, cpunum;
 unsigned int flags;
 int i_retval;
 
@@ -1379,34 +1351,8 @@ libvirt_virDomainPinVcpuFlags(PyObject *self 
ATTRIBUTE_UNUSED,
 if ((cpunum = getPyNodeCPUCount(virDomainGetConnect(domain))) < 0)
 return VIR_PY_INT_FAIL;
 
-if (PyTuple_Check(pycpumap)) {
-tuple_size = PyTuple_Size(pycpumap);
-if (tuple_size == -1)
-return ret;
-} else {
-PyErr_SetString(PyExc_TypeError, "Unexpected type, tuple is required");
-return ret;
-}
-
-cpumaplen = VIR_CPU_MAPLEN(cpunum);
-if (VIR_ALLOC_N(cpumap, cpumaplen) < 0)
-return PyErr_NoMemory();
-
-for (i = 0; i < tuple_size; i++) {
-PyObject *flag = PyTuple_GetItem(pycpumap, i);
-bool b;
-
-if (!flag || libvirt_boolUnwrap(flag, &b) < 0)
-goto cleanup;
-
-if (b)
-VIR_USE_CPU(cpumap, i);
-else
-VIR_UNUSE_CPU(cpumap, i);
-}
-
-for (; i < cpunum; i++)
-VIR_UNUSE_CPU(cpumap, i);
+if (virPyCpumapConvert(cpunum, pycpumap, &cpumap, &cpumaplen) < 0)
+return NULL;
 
 LIBVIRT_BEGIN_ALLOW_THREADS;
 i_retval = virDomainPinVcpuFlags(domain, vcpu, cpumap, cpumaplen, flags);
@@ -1505,8 +1451,7 @@ libvirt_virDomainPinEmulator(PyObject *self 
ATTRIBUTE_UNUSED,
 virDomainPtr domain;
 PyObject *pyobj_domain, *pycpumap;
 unsigned char *cpumap = NULL;
-int cpumaplen, tuple_size, cpunum;
-size_t i;
+int cpumaplen, cpunum;
 int i_retval;
 unsigned int flags;
 
@@ -1519,37 +1464,9 @@ libvirt_virDomainPinEmulator(PyObject *self 
ATTRIBUTE_UNUSED,
 if ((cpunum = getPyNodeCPUCount(virDomainGetConnect(domain))) < 0)
 return VIR_PY_INT_FAIL;
 
-cpumaplen = VIR_CPU_MAPLEN(cpunum);
-
-if (!PyTuple_Check(pycpumap)) {
-PyErr_SetString(PyExc_TypeError, "Unexpected type, tuple is required");
-return NULL;
-}
-
-if ((tuple_size = PyTuple_Size(pycpumap)) == -1)
+if (virPyCpumapConvert(cpunum, pycpumap, &cpumap, &cpumaplen) < 0)
 return NULL;
 
-if (VIR_ALLOC_N(cpumap, cpumaplen) < 0)
-return PyErr_NoMemory();
-
-for (i = 0; i < tuple_size; i++) {
-PyObject *flag = PyTuple_GetItem(pycpumap, i);
-bool b;
-
-if (!flag || libvirt_boolUnwrap(flag, &b) < 0) {
-VIR_FREE(cpumap);
-return NULL;
-}
-
-if (b)
-VIR_USE_CPU(cpumap, i);
-else
-VIR_UNUSE_CPU(cpumap, i);
-}
-
-for (; i < cpunum; i++)
-VIR_UNUSE_CPU(cpumap, i);
-
 LIBVIRT_BEGIN_ALLOW_THREADS;
  

[libvirt] [PATCH v3 python 2/2] don't overrun buffer when converting cpumap

2016-11-03 Thread Konstantin Neumoin
If we pass a cpu mask that is larger than cpunum to any libvirt_virDomainPin*
function, it can lead to a crash. So we have to check the tuple size in
virPyCpumapConvert and ignore the extra tuple members.

Signed-off-by: Konstantin Neumoin <kneum...@virtuozzo.com>
---
 libvirt-utils.c | 13 +
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/libvirt-utils.c b/libvirt-utils.c
index 09cc1c3..ac3606b 100644
--- a/libvirt-utils.c
+++ b/libvirt-utils.c
@@ -623,7 +623,15 @@ virPyCpumapConvert(int cpunum,
 return -1;
 }
 
-for (i = 0; i < tuple_size; i++) {
+/* Elements not present in the tuple are filled with zeros.
+ * Only the first "cpunum" elements make sense; the rest of
+ * the bits from the tuple are ignored. */
+for (i = 0; i < cpunum; i++) {
+if (i >= tuple_size) {
+VIR_UNUSE_CPU(*cpumapptr, i);
+continue;
+}
+
 PyObject *flag = PyTuple_GetItem(pycpumap, i);
 bool b;
 
@@ -638,8 +646,5 @@ virPyCpumapConvert(int cpunum,
 VIR_UNUSE_CPU(*cpumapptr, i);
 }
 
-for (; i < cpunum; i++)
-VIR_UNUSE_CPU(*cpumapptr, i);
-
 return 0;
 }
-- 
2.5.5
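For readers unfamiliar with the bitmap macros, this is why the old loop could write out of bounds; the macro bodies are paraphrased from libvirt's public headers:

/* Paraphrased from libvirt's public headers. */
#define VIR_CPU_MAPLEN(cpu)    (((cpu) + 7) / 8)
#define VIR_USE_CPU(map, cpu)  ((map)[(cpu) / 8] |= (1 << ((cpu) % 8)))

/* cpumapptr is allocated with VIR_CPU_MAPLEN(cpunum) bytes, i.e. sized from
 * the host CPU count, but the old loop ran over tuple_size, the length of
 * the Python tuple.  With cpunum == 4 the map is a single byte, yet a
 * 64-element tuple makes VIR_USE_CPU() touch map[0] through map[7] --
 * seven bytes past the allocation.  Bounding the loop by cpunum, as this
 * patch does, keeps every write inside the buffer. */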



[libvirt] [PATCH v3 python 0/2] fix crash in libvirt_virDomainPin*

2016-11-03 Thread Konstantin Neumoin
This small patch set:
* moves the common logic of all libvirt_virDomainPin* functions to a new
  helper in the util module.
* fixes a buffer overrun when converting the cpumap.

Changes since v2:
- fix comments for new helper function
- remove extra variables and labels
- fix commit message/subject
- rename new helper function

Changes since v1:
- add new helper in util module.

Konstantin Neumoin (2):
  move cpumap conversion code to a common helper
  don't overrun buffer when converting cpumap

 libvirt-override.c | 131 +
 libvirt-utils.c|  62 +
 libvirt-utils.h|   5 ++
 3 files changed, 78 insertions(+), 120 deletions(-)

-- 
2.5.5



[libvirt] [PATCH v2 python 2/2] add check for pycpumap length

2016-10-30 Thread Konstantin Neumoin
If we pass a cpu mask that is larger than cpunum to any libvirt_virDomainPin*
function, it can lead to a crash. So we have to check the tuple size in
virPyCpuMapToChar and ignore the extra tuple members.

Signed-off-by: Konstantin Neumoin <kneum...@virtuozzo.com>
---
 libvirt-utils.c | 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/libvirt-utils.c b/libvirt-utils.c
index aaf4bea..3fc0fdd 100644
--- a/libvirt-utils.c
+++ b/libvirt-utils.c
@@ -589,7 +589,8 @@ virPyDictToTypedParams(PyObject *dict,
 
 
 /* virPyCpuMapToChar
- * @cpunum: the number of cpus
+ * @cpunum: the number of cpus; only the first @cpunum elements make sense,
+ * the others are ignored (filled with zeros).
  * @pycpumap: source Py cpu map
  * @cpumapptr: destination cpu map
  * @cpumaplen: destination cpu map length
@@ -604,7 +605,7 @@ virPyCpuMapToChar(int cpunum,
   unsigned char **cpumapptr,
   int *cpumaplen)
 {
-int tuple_size;
+int tuple_size, rel_cpumaplen;
 size_t i;
 int i_retval = -1;
 *cpumapptr = NULL;
@@ -624,7 +625,9 @@ virPyCpuMapToChar(int cpunum,
 goto exit;
 }
 
-for (i = 0; i < tuple_size; i++) {
+rel_cpumaplen = MIN(cpunum, tuple_size);
+
+for (i = 0; i < rel_cpumaplen; i++) {
 PyObject *flag = PyTuple_GetItem(pycpumap, i);
 bool b;
 
-- 
2.5.5



[libvirt] [PATCH v2 python 1/2] minor clean-up for libvirt_virDomainPin*

2016-10-28 Thread Konstantin Neumoin
All libvirt_virDomainPin* functions do the same thing to convert a
pycpumap into a cpumap, so this patch moves the common logic into a
new helper, virPyCpuMapToChar.

Signed-off-by: Konstantin Neumoin <kneum...@virtuozzo.com>
---
 libvirt-override.c | 131 +
 libvirt-utils.c|  60 
 libvirt-utils.h|   5 ++
 3 files changed, 76 insertions(+), 120 deletions(-)

diff --git a/libvirt-override.c b/libvirt-override.c
index fa3e2ca..ba0d87c 100644
--- a/libvirt-override.c
+++ b/libvirt-override.c
@@ -1302,8 +1302,7 @@ libvirt_virDomainPinVcpu(PyObject *self ATTRIBUTE_UNUSED,
 PyObject *pyobj_domain, *pycpumap;
 PyObject *ret = NULL;
 unsigned char *cpumap;
-int cpumaplen, vcpu, tuple_size, cpunum;
-size_t i;
+int cpumaplen, vcpu, cpunum;
 int i_retval;
 
 if (!PyArg_ParseTuple(args, (char *)"OiO:virDomainPinVcpu",
@@ -1314,34 +1313,8 @@ libvirt_virDomainPinVcpu(PyObject *self ATTRIBUTE_UNUSED,
 if ((cpunum = getPyNodeCPUCount(virDomainGetConnect(domain))) < 0)
 return VIR_PY_INT_FAIL;
 
-if (PyTuple_Check(pycpumap)) {
-tuple_size = PyTuple_Size(pycpumap);
-if (tuple_size == -1)
-return ret;
-} else {
-PyErr_SetString(PyExc_TypeError, "Unexpected type, tuple is required");
-return ret;
-}
-
-cpumaplen = VIR_CPU_MAPLEN(cpunum);
-if (VIR_ALLOC_N(cpumap, cpumaplen) < 0)
-return PyErr_NoMemory();
-
-for (i = 0; i < tuple_size; i++) {
-PyObject *flag = PyTuple_GetItem(pycpumap, i);
-bool b;
-
-if (!flag || libvirt_boolUnwrap(flag, &b) < 0)
-goto cleanup;
-
-if (b)
-VIR_USE_CPU(cpumap, i);
-else
-VIR_UNUSE_CPU(cpumap, i);
-}
-
-for (; i < cpunum; i++)
-VIR_UNUSE_CPU(cpumap, i);
+if (virPyCpuMapToChar(cpunum, pycpumap, &cpumap, &cpumaplen) < 0)
+return NULL;
 
 LIBVIRT_BEGIN_ALLOW_THREADS;
 i_retval = virDomainPinVcpu(domain, vcpu, cpumap, cpumaplen);
@@ -1366,8 +1339,7 @@ libvirt_virDomainPinVcpuFlags(PyObject *self 
ATTRIBUTE_UNUSED,
 PyObject *pyobj_domain, *pycpumap;
 PyObject *ret = NULL;
 unsigned char *cpumap;
-int cpumaplen, vcpu, tuple_size, cpunum;
-size_t i;
+int cpumaplen, vcpu, cpunum;
 unsigned int flags;
 int i_retval;
 
@@ -1379,34 +1351,8 @@ libvirt_virDomainPinVcpuFlags(PyObject *self 
ATTRIBUTE_UNUSED,
 if ((cpunum = getPyNodeCPUCount(virDomainGetConnect(domain))) < 0)
 return VIR_PY_INT_FAIL;
 
-if (PyTuple_Check(pycpumap)) {
-tuple_size = PyTuple_Size(pycpumap);
-if (tuple_size == -1)
-return ret;
-} else {
-PyErr_SetString(PyExc_TypeError, "Unexpected type, tuple is required");
-return ret;
-}
-
-cpumaplen = VIR_CPU_MAPLEN(cpunum);
-if (VIR_ALLOC_N(cpumap, cpumaplen) < 0)
-return PyErr_NoMemory();
-
-for (i = 0; i < tuple_size; i++) {
-PyObject *flag = PyTuple_GetItem(pycpumap, i);
-bool b;
-
-if (!flag || libvirt_boolUnwrap(flag, &b) < 0)
-goto cleanup;
-
-if (b)
-VIR_USE_CPU(cpumap, i);
-else
-VIR_UNUSE_CPU(cpumap, i);
-}
-
-for (; i < cpunum; i++)
-VIR_UNUSE_CPU(cpumap, i);
+if (virPyCpuMapToChar(cpunum, pycpumap, &cpumap, &cpumaplen) < 0)
+return NULL;
 
 LIBVIRT_BEGIN_ALLOW_THREADS;
 i_retval = virDomainPinVcpuFlags(domain, vcpu, cpumap, cpumaplen, flags);
@@ -1505,8 +1451,7 @@ libvirt_virDomainPinEmulator(PyObject *self 
ATTRIBUTE_UNUSED,
 virDomainPtr domain;
 PyObject *pyobj_domain, *pycpumap;
 unsigned char *cpumap = NULL;
-int cpumaplen, tuple_size, cpunum;
-size_t i;
+int cpumaplen, cpunum;
 int i_retval;
 unsigned int flags;
 
@@ -1519,37 +1464,9 @@ libvirt_virDomainPinEmulator(PyObject *self 
ATTRIBUTE_UNUSED,
 if ((cpunum = getPyNodeCPUCount(virDomainGetConnect(domain))) < 0)
 return VIR_PY_INT_FAIL;
 
-cpumaplen = VIR_CPU_MAPLEN(cpunum);
-
-if (!PyTuple_Check(pycpumap)) {
-PyErr_SetString(PyExc_TypeError, "Unexpected type, tuple is required");
-return NULL;
-}
-
-if ((tuple_size = PyTuple_Size(pycpumap)) == -1)
+if (virPyCpuMapToChar(cpunum, pycpumap, &cpumap, &cpumaplen) < 0)
 return NULL;
 
-if (VIR_ALLOC_N(cpumap, cpumaplen) < 0)
-return PyErr_NoMemory();
-
-for (i = 0; i < tuple_size; i++) {
-PyObject *flag = PyTuple_GetItem(pycpumap, i);
-bool b;
-
-if (!flag || libvirt_boolUnwrap(flag, &b) < 0) {
-VIR_FREE(cpumap);
-return NULL;
-}
-
-if (b)
-VIR_USE_CPU(cpumap, i);
-else
-VIR_UNUSE_CPU(cpumap, i);
-}
-
-for (; i < cpunum; i++)
-VIR_UNUSE_CPU(cpumap, i);
-
 LIBVIRT_BEGIN_ALLOW_THREADS;
  

[libvirt] [PATCH v2 python 0/2] fix crash in libvirt_virDomainPin*

2016-10-28 Thread Konstantin Neumoin
This small patch set:
* moves the common logic of all libvirt_virDomainPin* functions to a new
  helper in the util module.
* adds a check for the pycpumap length.

Changes since v1:
- add new helper in util module.

Konstantin Neumoin (2):
  minor clean-up for libvirt_virDomainPin*
  add check for pycpumap length

 libvirt-override.c | 131 +
 libvirt-utils.c|  63 ++
 libvirt-utils.h|   5 ++
 3 files changed, 79 insertions(+), 120 deletions(-)

-- 
2.5.5



[libvirt] [PATCH python 1/1] fix crash in libvirt_virDomainPin*

2016-10-25 Thread Konstantin Neumoin
If we pass a cpu mask that is larger than cpunum to any of the
libvirt_virDomainPin* methods, it can lead to a crash. So we have to check
the tuple size and ignore the extra tuple members.

Signed-off-by: Konstantin Neumoin <kneum...@virtuozzo.com>
---
 libvirt-override.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/libvirt-override.c b/libvirt-override.c
index fa3e2ca..83b760b 100644
--- a/libvirt-override.c
+++ b/libvirt-override.c
@@ -1327,7 +1327,7 @@ libvirt_virDomainPinVcpu(PyObject *self ATTRIBUTE_UNUSED,
 if (VIR_ALLOC_N(cpumap, cpumaplen) < 0)
 return PyErr_NoMemory();
 
-for (i = 0; i < tuple_size; i++) {
+for (i = 0; i < MIN(cpunum, tuple_size); i++) {
 PyObject *flag = PyTuple_GetItem(pycpumap, i);
 bool b;
 
@@ -1392,7 +1392,7 @@ libvirt_virDomainPinVcpuFlags(PyObject *self 
ATTRIBUTE_UNUSED,
 if (VIR_ALLOC_N(cpumap, cpumaplen) < 0)
 return PyErr_NoMemory();
 
-for (i = 0; i < tuple_size; i++) {
+for (i = 0; i < MIN(cpunum, tuple_size); i++) {
 PyObject *flag = PyTuple_GetItem(pycpumap, i);
 bool b;
 
@@ -1532,7 +1532,7 @@ libvirt_virDomainPinEmulator(PyObject *self 
ATTRIBUTE_UNUSED,
 if (VIR_ALLOC_N(cpumap, cpumaplen) < 0)
 return PyErr_NoMemory();
 
-for (i = 0; i < tuple_size; i++) {
+for (i = 0; i < MIN(cpunum, tuple_size); i++) {
 PyObject *flag = PyTuple_GetItem(pycpumap, i);
 bool b;
 
@@ -1738,7 +1738,7 @@ libvirt_virDomainPinIOThread(PyObject *self 
ATTRIBUTE_UNUSED,
 if (VIR_ALLOC_N(cpumap, cpumaplen) < 0)
 return PyErr_NoMemory();
 
-for (i = 0; i < tuple_size; i++) {
+for (i = 0; i < MIN(cpunum, tuple_size); i++) {
 PyObject *flag = PyTuple_GetItem(pycpumap, i);
 bool b;
 
-- 
2.5.5



[libvirt] [PATCH 1/1] reset vcpu pin info from config for zero mask

2016-10-19 Thread Konstantin Neumoin
The option to remove vcpu pinning information from the config was added
in:
'7ea9778 vcpupin: add vcpupin resetting feature to qemu driver'
and removed in:
'a02a161 qemu: libxl: vcpupin: Don't reset pinning when pinning to all pcpus'
for some reason.

So, for now there is no way to remove vcpu pinning from the config.
This patch restores the option to remove the vcpu/emulator pinning settings
from both configs when a zero mask (a mask filled with zeros) is specified.

Signed-off-by: Konstantin Neumoin <kneum...@virtuozzo.com>
---
 src/qemu/qemu_driver.c | 74 +++---
 1 file changed, 52 insertions(+), 22 deletions(-)
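Before the diff itself, a minimal client-side sketch of the behaviour this patch proposes: passing an all-zero CPU map to drop an explicit pinning. The domain handle and host CPU count are assumed to come from elsewhere; without this patch, libvirt rejects an all-clear map with "Empty cpu list for pinning" instead of treating it as a reset:

#include <string.h>
#include <libvirt/libvirt.h>

/* Request removal of an explicit vCPU pinning by passing a zero mask,
 * per the behaviour proposed in this patch. */
static int
reset_vcpu_pinning(virDomainPtr dom, unsigned int vcpu, int hostcpus)
{
    unsigned char map[64];                  /* big enough for this sketch */
    int maplen = VIR_CPU_MAPLEN(hostcpus);

    if (maplen > (int)sizeof(map))
        return -1;
    memset(map, 0, maplen);                 /* all-zero mask == "reset" */

    return virDomainPinVcpuFlags(dom, vcpu, map, maplen,
                                 VIR_DOMAIN_AFFECT_LIVE |
                                 VIR_DOMAIN_AFFECT_CONFIG);
}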

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index bec7a38..7aa64a4 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -4969,7 +4969,8 @@ qemuDomainPinVcpuLive(virDomainObjPtr vm,
   virQEMUDriverConfigPtr cfg,
   virBitmapPtr cpumap)
 {
-virBitmapPtr tmpmap = NULL;
+virBitmapPtr effective_cpumap = NULL;
+virBitmapPtr allcpu_map = NULL;
 virDomainVcpuInfoPtr vcpuinfo;
 qemuDomainObjPrivatePtr priv = vm->privateData;
 virCgroupPtr cgroup_vcpu = NULL;
@@ -4980,6 +4981,7 @@ qemuDomainPinVcpuLive(virDomainObjPtr vm,
 int eventNparams = 0;
 int eventMaxparams = 0;
 int ret = -1;
+int hostcpus = 0;
 
 if (!qemuDomainHasVcpuPids(vm)) {
 virReportError(VIR_ERR_OPERATION_INVALID,
@@ -4994,29 +4996,38 @@ qemuDomainPinVcpuLive(virDomainObjPtr vm,
 goto cleanup;
 }
 
-if (!(tmpmap = virBitmapNewCopy(cpumap)))
-goto cleanup;
+if (vcpuinfo->online) {
+if (cpumap) {
+effective_cpumap = cpumap;
+} else if (def->cpumask) {
+effective_cpumap = def->cpumask;
+} else {
+if ((hostcpus = nodeGetCPUCount(NULL)) < 0)
+goto cleanup;
 
-if (!(str = virBitmapFormat(cpumap)))
-goto cleanup;
+if (!(allcpu_map = virBitmapNew(hostcpus)))
+goto cleanup;
+virBitmapSetAll(allcpu_map);
+effective_cpumap = allcpu_map;
+}
 
-if (vcpuinfo->online) {
 /* Configure the corresponding cpuset cgroup before set affinity. */
 if (virCgroupHasController(priv->cgroup, 
VIR_CGROUP_CONTROLLER_CPUSET)) {
 if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_VCPU, vcpu,
false, &cgroup_vcpu) < 0)
 goto cleanup;
-if (qemuSetupCgroupCpusetCpus(cgroup_vcpu, cpumap) < 0)
+if (qemuSetupCgroupCpusetCpus(cgroup_vcpu, effective_cpumap) < 0)
 goto cleanup;
 }
 
-if (virProcessSetAffinity(qemuDomainGetVcpuPid(vm, vcpu), cpumap) < 0)
+if (virProcessSetAffinity(qemuDomainGetVcpuPid(vm, vcpu), 
effective_cpumap) < 0)
 goto cleanup;
 }
 
 virBitmapFree(vcpuinfo->cpumask);
-vcpuinfo->cpumask = tmpmap;
-tmpmap = NULL;
+vcpuinfo->cpumask = NULL;
+if (cpumap && !(vcpuinfo->cpumask = virBitmapNewCopy(cpumap)))
+goto cleanup;
 
 if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 
0)
 goto cleanup;
@@ -5026,6 +5037,9 @@ qemuDomainPinVcpuLive(virDomainObjPtr vm,
 goto cleanup;
 }
 
+if (!(str = virBitmapFormat(effective_cpumap)))
+goto cleanup;
+
if (virTypedParamsAddString(&eventParams, &eventNparams,
&eventMaxparams, paramField, str) < 0)
 goto cleanup;
@@ -5035,7 +5049,7 @@ qemuDomainPinVcpuLive(virDomainObjPtr vm,
 ret = 0;
 
  cleanup:
-virBitmapFree(tmpmap);
+virBitmapFree(allcpu_map);
virCgroupFree(&cgroup_vcpu);
 VIR_FREE(str);
 qemuDomainEventQueue(driver, event);
@@ -5089,9 +5103,8 @@ qemuDomainPinVcpuFlags(virDomainPtr dom,
 goto endjob;
 
 if (virBitmapIsAllClear(pcpumap)) {
-virReportError(VIR_ERR_INVALID_ARG, "%s",
-   _("Empty cpu list for pinning"));
-goto endjob;
+virBitmapFree(pcpumap);
+pcpumap = NULL;
 }
 
 if (def &&
@@ -5177,12 +5190,15 @@ qemuDomainPinEmulator(virDomainPtr dom,
 int ret = -1;
 qemuDomainObjPrivatePtr priv;
 virBitmapPtr pcpumap = NULL;
+virBitmapPtr allcpu_map = NULL;
+virBitmapPtr effective_pcpumap = NULL;
 virQEMUDriverConfigPtr cfg = NULL;
 virObjectEventPtr event = NULL;
 char *str = NULL;
 virTypedParameterPtr eventParams = NULL;
 int eventNparams = 0;
 int eventMaxparams = 0;
+int hostcpus = 0;
 
 virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
   VIR_DOMAIN_AFFECT_CONFIG, -1);
@@ -5207,18 +5223,31 @@ qemuDomainPinEmulator(virDomainPtr dom,
 goto endjob;
 
 if (virBitmapIsAllClear(pcpumap)) {
-virReportError(VIR_ERR_INVALID_ARG, "%s",
-