[Xen-devel] [PATCH v2 1/3] xenstore: setup xenstore stubdom console interface properly

2020-02-11 Thread Juergen Gross
In order to be able to get access to the console of the Xenstore stubdom
we need an appropriate grant table entry. So call xc_dom_gnttab_init()
when constructing the domain and preset some information needed by
that function in the dom structure.

We need to create the event channel for the console, too. Do that and
store all necessary data locally.

Signed-off-by: Juergen Gross 
---
 tools/helpers/init-xenstore-domain.c | 22 ++
 1 file changed, 22 insertions(+)

diff --git a/tools/helpers/init-xenstore-domain.c b/tools/helpers/init-xenstore-domain.c
index adb8408b63..3a8ca64741 100644
--- a/tools/helpers/init-xenstore-domain.c
+++ b/tools/helpers/init-xenstore-domain.c
@@ -24,6 +24,7 @@ static char *param;
 static char *name = "Xenstore";
 static int memory;
 static int maxmem;
+static xc_evtchn_port_or_error_t console_evtchn;
 
 static struct option options[] = {
 { "kernel", 1, NULL, 'k' },
@@ -113,6 +114,12 @@ static int build(xc_interface *xch)
 fprintf(stderr, "xc_domain_setmaxmem failed\n");
 goto err;
 }
+console_evtchn = xc_evtchn_alloc_unbound(xch, domid, 0);
+if ( console_evtchn < 0 )
+{
+fprintf(stderr, "xc_evtchn_alloc_unbound failed\n");
+goto err;
+}
 rv = xc_domain_set_memmap_limit(xch, domid, limit_kb);
 if ( rv )
 {
@@ -133,6 +140,15 @@ static int build(xc_interface *xch)
     snprintf(cmdline, 512, "--event %d --internal-db", rv);
 
     dom = xc_dom_allocate(xch, cmdline, NULL);
+    if ( !dom )
+    {
+        fprintf(stderr, "xc_dom_allocate failed\n");
+        goto err;
+    }
+    dom->container_type = XC_DOM_PV_CONTAINER;
+    dom->xenstore_domid = domid;
+    dom->console_evtchn = console_evtchn;
+
     rv = xc_dom_kernel_file(dom, kernel);
     if ( rv )
     {
@@ -186,6 +202,12 @@ static int build(xc_interface *xch)
 fprintf(stderr, "xc_dom_boot_image failed\n");
 goto err;
 }
+rv = xc_dom_gnttab_init(dom);
+if ( rv )
+{
+fprintf(stderr, "xc_dom_gnttab_init failed\n");
+goto err;
+}
 
 rv = xc_domain_set_virq_handler(xch, domid, VIRQ_DOM_EXC);
 if ( rv )
-- 
2.16.4



[Xen-devel] [PATCH v2 0/3] tools/xenstore

2020-02-11 Thread Juergen Gross
Some patches for Xenstore-stubdom which have been lying around in my
local tree for some time now.

Juergen Gross (3):
  xenstore: setup xenstore stubdom console interface properly
  xenstore: add console xenstore entries for xenstore stubdom
  xenstore: remove not applicable control commands in stubdom

 tools/helpers/init-xenstore-domain.c | 55 +++-
 tools/xenstore/xenstored_control.c   | 18 
 2 files changed, 72 insertions(+), 1 deletion(-)

-- 
2.16.4



[Xen-devel] [PATCH v2 3/3] xenstore: remove not applicable control commands in stubdom

2020-02-11 Thread Juergen Gross
When run in a stubdom environment, Xenstore can't select a logfile or
emit memory statistics to a specific file.

So remove or modify those control commands accordingly.

Signed-off-by: Juergen Gross 
Acked-by: Andrew Cooper 
---
 tools/xenstore/xenstored_control.c | 18 ++
 1 file changed, 18 insertions(+)

diff --git a/tools/xenstore/xenstored_control.c b/tools/xenstore/xenstored_control.c
index e4b8aa95ab..8d48ab4820 100644
--- a/tools/xenstore/xenstored_control.c
+++ b/tools/xenstore/xenstored_control.c
@@ -61,6 +61,19 @@ static int do_control_log(void *ctx, struct connection *conn,
         return 0;
 }
 
+#ifdef __MINIOS__
+static int do_control_memreport(void *ctx, struct connection *conn,
+                                char **vec, int num)
+{
+        if (num)
+                return EINVAL;
+
+        talloc_report_full(NULL, stdout);
+
+        send_ack(conn, XS_CONTROL);
+        return 0;
+}
+#else
 static int do_control_logfile(void *ctx, struct connection *conn,
                               char **vec, int num)
 {
@@ -114,6 +127,7 @@ static int do_control_memreport(void *ctx, struct connection *conn,
send_ack(conn, XS_CONTROL);
return 0;
 }
+#endif
 
 static int do_control_print(void *ctx, struct connection *conn,
char **vec, int num)
@@ -132,8 +146,12 @@ static int do_control_help(void *, struct connection *, char **, int);
 static struct cmd_s cmds[] = {
         { "check", do_control_check, "" },
         { "log", do_control_log, "on|off" },
+#ifdef __MINIOS__
+        { "memreport", do_control_memreport, "" },
+#else
         { "logfile", do_control_logfile, "<file>" },
         { "memreport", do_control_memreport, "[<file>]" },
+#endif
         { "print", do_control_print, "<string>" },
         { "help", do_control_help, "" },
 };
-- 
2.16.4
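
For context, these control commands travel over the XS_CONTROL wire command
and are normally issued from dom0; assuming the xenstore-control helper that
lives in tools/xenstore, the stubdom variant added above would be exercised
roughly as below, with the report going to the daemon's stdout (i.e. the
stubdom console) per the talloc_report_full(NULL, stdout) call:

    xenstore-control memreport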



[Xen-devel] [PATCH v2 2/3] xenstore: add console xenstore entries for xenstore stubdom

2020-02-11 Thread Juergen Gross
In order to be able to connect to the console of the Xenstore stubdom we
need to create the appropriate entries in Xenstore.

For the moment we don't support xenconsoled living in a domain other than
dom0, as this information isn't available other than via Xenstore, which
we are just setting up.

Signed-off-by: Juergen Gross 
Acked-by: Andrew Cooper 
---
 tools/helpers/init-xenstore-domain.c | 33 -
 1 file changed, 32 insertions(+), 1 deletion(-)

diff --git a/tools/helpers/init-xenstore-domain.c b/tools/helpers/init-xenstore-domain.c
index 3a8ca64741..4ce8299c3c 100644
--- a/tools/helpers/init-xenstore-domain.c
+++ b/tools/helpers/init-xenstore-domain.c
@@ -12,6 +12,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "init-dom-json.h"
 #include "_paths.h"
@@ -24,6 +25,7 @@ static char *param;
 static char *name = "Xenstore";
 static int memory;
 static int maxmem;
+static xen_pfn_t console_mfn;
 static xc_evtchn_port_or_error_t console_evtchn;
 
 static struct option options[] = {
@@ -223,6 +225,7 @@ static int build(xc_interface *xch)
     }
 
     rv = 0;
+    console_mfn = xc_dom_p2m(dom, dom->console_pfn);
 
 err:
 if ( dom )
@@ -321,6 +324,15 @@ static void do_xs_write(struct xs_handle *xsh, char *path, char *val)
 fprintf(stderr, "writing %s to xenstore failed.\n", path);
 }
 
+static void do_xs_write_dir_node(struct xs_handle *xsh, char *dir, char *node,
+ char *val)
+{
+char full_path[100];
+
+snprintf(full_path, 100, "%s/%s", dir, node);
+do_xs_write(xsh, full_path, val);
+}
+
 static void do_xs_write_dom(struct xs_handle *xsh, char *path, char *val)
 {
 char full_path[64];
@@ -334,7 +346,7 @@ int main(int argc, char** argv)
     int opt;
     xc_interface *xch;
     struct xs_handle *xsh;
-    char buf[16];
+    char buf[16], be_path[64], fe_path[64];
     int rv, fd;
     char *maxmem_str = NULL;
 
@@ -423,6 +435,25 @@ int main(int argc, char** argv)
     if (maxmem)
         snprintf(buf, 16, "%d", maxmem * 1024);
     do_xs_write_dom(xsh, "memory/static-max", buf);
+    snprintf(be_path, 64, "/local/domain/0/backend/console/%d/0", domid);
+    snprintf(fe_path, 64, "/local/domain/%d/console", domid);
+    snprintf(buf, 16, "%d", domid);
+    do_xs_write_dir_node(xsh, be_path, "frontend-id", buf);
+    do_xs_write_dir_node(xsh, be_path, "frontend", fe_path);
+    do_xs_write_dir_node(xsh, be_path, "online", "1");
+    snprintf(buf, 16, "%d", XenbusStateInitialising);
+    do_xs_write_dir_node(xsh, be_path, "state", buf);
+    do_xs_write_dir_node(xsh, be_path, "protocol", "vt100");
+    do_xs_write_dir_node(xsh, fe_path, "backend", be_path);
+    do_xs_write_dir_node(xsh, fe_path, "backend-id", "0");
+    do_xs_write_dir_node(xsh, fe_path, "limit", "1048576");
+    do_xs_write_dir_node(xsh, fe_path, "type", "xenconsoled");
+    do_xs_write_dir_node(xsh, fe_path, "output", "pty");
+    do_xs_write_dir_node(xsh, fe_path, "tty", "");
+    snprintf(buf, 16, "%d", console_evtchn);
+    do_xs_write_dir_node(xsh, fe_path, "port", buf);
+    snprintf(buf, 16, "%ld", console_mfn);
+    do_xs_write_dir_node(xsh, fe_path, "ring-ref", buf);
     xs_close(xsh);
 
     fd = creat(XEN_RUN_DIR "/xenstored.pid", 0666);
-- 
2.16.4
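
For readers who don't have the console protocol nodes memorised, the writes
added above produce roughly the following layout (paths and keys as in the
patch; the bracketed values stand in for the per-domain numbers):

    /local/domain/0/backend/console/<domid>/0/
        frontend-id = "<domid>"
        frontend    = "/local/domain/<domid>/console"
        online      = "1"
        state       = "1"            (XenbusStateInitialising)
        protocol    = "vt100"
    /local/domain/<domid>/console/
        backend     = "/local/domain/0/backend/console/<domid>/0"
        backend-id  = "0"
        limit       = "1048576"
        type        = "xenconsoled"
        output      = "pty"
        tty         = ""
        port        = "<console event channel>"
        ring-ref    = "<console ring machine frame number>"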



Re: [Xen-devel] [PATCH 2/3] xenstore: add console xenstore entries for xenstore stubdom

2020-02-11 Thread Jürgen Groß

On 11.02.20 21:25, Andrew Cooper wrote:

On 28/01/2020 14:28, Juergen Gross wrote:

In order to be able to connect to the console of Xenstore stubdom we
need to create the appropriate entries in Xenstore.

For the moment we don't support xenconsoled living in another domain
than dom0, as this information isn't available other then via
Xenstore which we are just setting up.


Ah - I see the observation here.



Signed-off-by: Juergen Gross 


Acked-by: Andrew Cooper 


---
  tools/helpers/init-xenstore-domain.c | 31 ++-
  1 file changed, 30 insertions(+), 1 deletion(-)

diff --git a/tools/helpers/init-xenstore-domain.c b/tools/helpers/init-xenstore-domain.c
index a312bc38b8..a81a15a4de 100644
--- a/tools/helpers/init-xenstore-domain.c
+++ b/tools/helpers/init-xenstore-domain.c
@@ -12,6 +12,7 @@
  #include 
  #include 
  #include 
+#include 
  
  #include "init-dom-json.h"

  #include "_paths.h"
@@ -312,6 +313,15 @@ static void do_xs_write(struct xs_handle *xsh, char *path, 
char *val)
  fprintf(stderr, "writing %s to xenstore failed.\n", path);
  }
  
+static void do_xs_write_dir_node(struct xs_handle *xsh, char *dir, char *node,

+ char *val)
+{
+char full_path[100];
+
+snprintf(full_path, 100, "%s/%s", dir, node);
+do_xs_write(xsh, full_path, val);
+}
+
  static void do_xs_write_dom(struct xs_handle *xsh, char *path, char *val)
  {
  char full_path[64];
@@ -325,7 +335,7 @@ int main(int argc, char** argv)
  int opt;
  xc_interface *xch;
  struct xs_handle *xsh;
-char buf[16];
+char buf[16], be_path[64], fe_path[64];
  int rv, fd;
  char *maxmem_str = NULL;
  
@@ -414,6 +424,25 @@ int main(int argc, char** argv)

  if (maxmem)
  snprintf(buf, 16, "%d", maxmem * 1024);
  do_xs_write_dom(xsh, "memory/static-max", buf);
+snprintf(be_path, 64, "/local/domain/0/backend/console/%d/0", domid);
+snprintf(fe_path, 64, "/local/domain/%d/console", domid);
+snprintf(buf, 16, "%d", domid);
+do_xs_write_dir_node(xsh, be_path, "frontend-id", buf);
+do_xs_write_dir_node(xsh, be_path, "frontend", fe_path);
+do_xs_write_dir_node(xsh, be_path, "online", "1");
+snprintf(buf, 16, "%d", XenbusStateInitialising);
+do_xs_write_dir_node(xsh, be_path, "state", buf);
+do_xs_write_dir_node(xsh, be_path, "protocol", "vt100");
+do_xs_write_dir_node(xsh, fe_path, "backend", be_path);
+do_xs_write_dir_node(xsh, fe_path, "backend-id", "0");
+do_xs_write_dir_node(xsh, fe_path, "limit", "1048576");
+do_xs_write_dir_node(xsh, fe_path, "type", "xenconsoled");
+do_xs_write_dir_node(xsh, fe_path, "output", "pty");
+do_xs_write_dir_node(xsh, fe_path, "tty", "");
+snprintf(buf, 16, "%d", console_evtchn);
+do_xs_write_dir_node(xsh, fe_path, "port", buf);
+snprintf(buf, 16, "%ld", console_mfn);
+do_xs_write_dir_node(xsh, fe_path, "ring-ref", buf);


Eww.  Why are pty/tty details in the protocol?  vt100, fine, but the
backend specifics about what it does with the data shouldn't matter to
the frontend.

I presume this is too engrained in legacy to fix?


I think cleaning this up is an orthogonal patch series.


Juergen


[Xen-devel] [xen-unstable-smoke test] 146918: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146918 xen-unstable-smoke real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146918/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 build-amd64-libvirt   6 libvirt-buildfail REGR. vs. 146882
 build-arm64-xsm   6 xen-buildfail REGR. vs. 146882
 build-armhf   6 xen-buildfail REGR. vs. 146882

Tests which did not succeed, but are not blocking:
 test-armhf-armhf-xl   1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt  1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl-xsm   1 build-check(1)   blocked  n/a

version targeted for testing:
 xen  1b3cec69bf300e012a0269f0a4f28cca1ebf22c9
baseline version:
 xen  6c47c37b9b40d6fe40bce8c8fd39135f6d549c8c

Last test of basis   146882  2020-02-11 16:00:54 Z0 days
Testing same since   146893  2020-02-11 20:01:02 Z0 days4 attempts


People who touched revisions under test:
  Andrew Cooper 
  Ian Jackson 

jobs:
 build-arm64-xsm  fail
 build-amd64  pass
 build-armhf  fail
 build-amd64-libvirt  fail
 test-armhf-armhf-xl  blocked 
 test-arm64-arm64-xl-xsm  blocked 
 test-amd64-amd64-xl-qemuu-debianhvm-amd64pass
 test-amd64-amd64-libvirt blocked 



sg-report-flight on osstest.test-lab.xenproject.org
logs: /home/logs/logs
images: /home/logs/images

Logs, config files, etc. are available at
http://logs.test-lab.xenproject.org/osstest/logs

Explanation of these reports, and of osstest in general, is at
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README.email;hb=master
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README;hb=master

Test harness code can be found at
http://xenbits.xen.org/gitweb?p=osstest.git;a=summary


Not pushing.


commit 1b3cec69bf300e012a0269f0a4f28cca1ebf22c9
Author: Andrew Cooper 
Date:   Wed Feb 5 15:25:21 2020 +

tools/libxl: Combine legacy CPUID handling logic

While we are in the process of overhauling boot time CPUID/MSR handling, the
existing logic is going to have to remain in roughly this form for backwards
compatibility.

Fold libxl__cpuid_apply_policy() and libxl__cpuid_set() together into a single
libxl__cpuid_legacy() to reduce the complexity for callers.

No functional change.

Signed-off-by: Andrew Cooper 
Acked-by: Ian Jackson 

commit dacb80f9757c011161cec6609f39837c9ea8caa8
Author: Andrew Cooper 
Date:   Wed Jan 8 12:53:49 2020 +

tools/libxl: Remove libxl_cpuid_{set,apply_policy}() from the API

These functions should never have been exposed.  They don't have external
users, and can't usefully be used for several reasons.

Move libxl_cpuid_{set,apply_policy}() to being internal functions, and leave
an equivalent of the nop stubs in the API for caller compatibility.

Signed-off-by: Andrew Cooper 
Acked-by: Ian Jackson 
(qemu changes not included)


Re: [Xen-devel] [PATCH 1/3] xenstore: setup xenstore stubdom console interface properly

2020-02-11 Thread Jürgen Groß

On 11.02.20 21:18, Andrew Cooper wrote:

On 28/01/2020 14:28, Juergen Gross wrote:

In order to be able to get access to the console of Xenstore stubdom
we need an appropriate granttab entry. So call xc_dom_gnttab_init()
when constructing the domain and preset some information needed
for that function in the dom structure.

We need to create the event channel for the console, too. Do that and
store all necessary data locally.

Signed-off-by: Juergen Gross 
---
  tools/helpers/init-xenstore-domain.c | 13 +
  1 file changed, 13 insertions(+)

diff --git a/tools/helpers/init-xenstore-domain.c b/tools/helpers/init-xenstore-domain.c
index adb8408b63..a312bc38b8 100644
--- a/tools/helpers/init-xenstore-domain.c
+++ b/tools/helpers/init-xenstore-domain.c
@@ -24,6 +24,8 @@ static char *param;
  static char *name = "Xenstore";
  static int memory;
  static int maxmem;
+static xen_pfn_t console_mfn;
+static unsigned int console_evtchn;
  
  static struct option options[] = {

  { "kernel", 1, NULL, 'k' },
@@ -113,6 +115,7 @@ static int build(xc_interface *xch)
  fprintf(stderr, "xc_domain_setmaxmem failed\n");
  goto err;
  }
+console_evtchn = xc_evtchn_alloc_unbound(xch, domid, 0);


Presumably some form of error checking?


Yes.



Also, while it is probably fine for now, I think this does highlight a
future issue.  What happens when xenconsoled is also a stubdomain?

I suspect we have a looming chicken-and-egg problem, where the toolstack is
going to have to do some careful domid handling and plumbing to set up and
build both stubdoms in tandem.


Hmm, I'd rather defer console setup in xenstore-stubdom then and do it
later via an XS_CONTROL message. This will even enable a restart of
console-stubdom.




  rv = xc_domain_set_memmap_limit(xch, domid, limit_kb);
  if ( rv )
  {
@@ -133,6 +136,9 @@ static int build(xc_interface *xch)
  snprintf(cmdline, 512, "--event %d --internal-db", rv);
  
  dom = xc_dom_allocate(xch, cmdline, NULL);


Any chance of some error handling, unlikely as it is to go wrong?


Okay.




+dom->container_type = XC_DOM_PV_CONTAINER;
+dom->xenstore_domid = domid;
+dom->console_evtchn = console_evtchn;


and a newline here?


Okay.




  rv = xc_dom_kernel_file(dom, kernel);
  if ( rv )
  {
@@ -186,6 +192,12 @@ static int build(xc_interface *xch)
  fprintf(stderr, "xc_dom_boot_image failed\n");
  goto err;
  }
+rv = xc_dom_gnttab_init(dom);
+if ( rv )
+{
+fprintf(stderr, "xc_dom_gnttab_init failed\n");
+goto err;
+}
  
  rv = xc_domain_set_virq_handler(xch, domid, VIRQ_DOM_EXC);

  if ( rv )
@@ -201,6 +213,7 @@ static int build(xc_interface *xch)
  }
  
  rv = 0;

+console_mfn = xc_dom_p2m(dom, dom->console_pfn);


This doesn't appear to be used.


Oh, the usage is in patch 2. Probably I should move this addition to
that patch then.


Juergen


[Xen-devel] [linux-5.4 test] 146876: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146876 linux-5.4 real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146876/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-amd64-amd64-xl-qemuu-ovmf-amd64 10 debian-hvm-install fail REGR. vs. 146121
 test-amd64-i386-xl-qemuu-ovmf-amd64 10 debian-hvm-install fail REGR. vs. 146121
 test-amd64-amd64-qemuu-nested-intel 17 debian-hvm-install/l1/l2 fail REGR. vs. 146121

Regressions which are regarded as allowable (not blocking):
 test-amd64-amd64-xl-rtds 18 guest-localmigrate/x10   fail REGR. vs. 146121
 test-armhf-armhf-xl-rtds16 guest-start/debian.repeat fail REGR. vs. 146121

Tests which did not succeed, but are not blocking:
 test-amd64-i386-xl-pvshim12 guest-start  fail   never pass
 test-amd64-amd64-libvirt-xsm 13 migrate-support-checkfail   never pass
 test-amd64-amd64-libvirt 13 migrate-support-checkfail   never pass
 test-amd64-i386-libvirt-xsm  13 migrate-support-checkfail   never pass
 test-amd64-i386-libvirt  13 migrate-support-checkfail   never pass
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 11 migrate-support-check 
fail never pass
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 11 migrate-support-check 
fail never pass
 test-amd64-amd64-qemuu-nested-amd 17 debian-hvm-install/l1/l2  fail never pass
 test-arm64-arm64-xl-thunderx 13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-credit2  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-credit2  14 saverestore-support-checkfail   never pass
 test-arm64-arm64-xl-thunderx 14 saverestore-support-checkfail   never pass
 test-arm64-arm64-xl-credit1  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-credit1  14 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-arndale  13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-arndale  14 saverestore-support-checkfail   never pass
 test-arm64-arm64-xl-xsm  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-xsm  14 saverestore-support-checkfail   never pass
 test-amd64-amd64-libvirt-vhd 12 migrate-support-checkfail   never pass
 test-amd64-amd64-xl-qemuu-win7-amd64 17 guest-stop fail never pass
 test-amd64-i386-xl-qemut-win7-amd64 17 guest-stop  fail never pass
 test-amd64-amd64-xl-qemut-win7-amd64 17 guest-stop fail never pass
 test-amd64-amd64-xl-qemuu-ws16-amd64 17 guest-stop fail never pass
 test-armhf-armhf-xl-multivcpu 13 migrate-support-checkfail  never pass
 test-armhf-armhf-xl-multivcpu 14 saverestore-support-checkfail  never pass
 test-armhf-armhf-xl-credit1  13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-credit1  14 saverestore-support-checkfail   never pass
 test-armhf-armhf-libvirt 13 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt 14 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-credit2  13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-credit2  14 saverestore-support-checkfail   never pass
 test-amd64-i386-xl-qemuu-win7-amd64 17 guest-stop  fail never pass
 test-amd64-i386-xl-qemut-ws16-amd64 17 guest-stop  fail never pass
 test-arm64-arm64-libvirt-xsm 13 migrate-support-checkfail   never pass
 test-arm64-arm64-libvirt-xsm 14 saverestore-support-checkfail   never pass
 test-arm64-arm64-xl-seattle  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-seattle  14 saverestore-support-checkfail   never pass
 test-arm64-arm64-xl  14 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-cubietruck 13 migrate-support-checkfail never pass
 test-armhf-armhf-xl-cubietruck 14 saverestore-support-checkfail never pass
 test-armhf-armhf-xl-rtds 13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-rtds 14 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl  13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl  14 saverestore-support-checkfail   never pass
 test-armhf-armhf-libvirt-raw 12 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt-raw 13 saverestore-support-checkfail   never pass
 test-amd64-i386-xl-qemuu-ws16-amd64 17 guest-stop  fail never pass
 test-armhf-armhf-xl-vhd  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-vhd  13 saverestore-support-checkfail   never pass
 test-amd64-amd64-xl-qemut-ws16-amd64 17 guest-stop fail never pass

version targeted for testing:
 linux    d6591ea2dd1a44b1c72c5a3e3b6555d7585acdae
baseline version:
 linux

[Xen-devel] [qemu-mainline test] 146892: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146892 qemu-mainline real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146892/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 build-arm64   6 xen-buildfail REGR. vs. 144861
 build-arm64-xsm   6 xen-buildfail REGR. vs. 144861
 build-armhf   6 xen-buildfail REGR. vs. 144861
 build-amd64-xsm   6 xen-buildfail REGR. vs. 144861
 build-i386-xsm6 xen-buildfail REGR. vs. 144861
 build-amd64   6 xen-buildfail REGR. vs. 144861
 build-i3866 xen-buildfail REGR. vs. 144861

Tests which did not succeed, but are not blocking:
 build-amd64-libvirt   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-pvhv2-amd  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-win7-amd64  1 build-check(1) blocked n/a
 test-amd64-i386-freebsd10-amd64  1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-pair  1 build-check(1)   blocked  n/a
 test-armhf-armhf-xl-multivcpu  1 build-check(1)   blocked  n/a
 test-armhf-armhf-xl   1 build-check(1)   blocked  n/a
 test-amd64-i386-qemuu-rhel6hvm-amd  1 build-check(1)   blocked n/a
 test-amd64-i386-xl-qemuu-debianhvm-i386-xsm  1 build-check(1)  blocked n/a
 test-amd64-amd64-xl-qemuu-debianhvm-amd64  1 build-check(1)blocked n/a
 test-amd64-amd64-xl-qemuu-ws16-amd64  1 build-check(1) blocked n/a
 test-amd64-i386-pair  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-debianhvm-amd64-shadow  1 build-check(1) blocked n/a
 build-armhf-libvirt   1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-vhd  1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 test-amd64-amd64-xl-credit2   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-shadow1 build-check(1)   blocked  n/a
 build-i386-libvirt1 build-check(1)   blocked  n/a
 test-amd64-amd64-pair 1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-xsm1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-debianhvm-amd64-shadow  1 build-check(1)  blocked n/a
 test-arm64-arm64-xl-seattle   1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-win7-amd64  1 build-check(1)  blocked n/a
 test-amd64-amd64-xl-xsm   1 build-check(1)   blocked  n/a
 test-amd64-amd64-pygrub   1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl-credit1   1 build-check(1)   blocked  n/a
 test-armhf-armhf-xl-credit1   1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-xsm   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-rtds  1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt  1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl-xsm   1 build-check(1)   blocked  n/a
 build-arm64-libvirt   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-pvshim1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt   1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-debianhvm-amd64  1 build-check(1) blocked n/a
 test-armhf-armhf-libvirt  1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl   1 build-check(1)   blocked  n/a
 test-armhf-armhf-libvirt-raw  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-debianhvm-i386-xsm  1 build-check(1) blocked n/a
 test-amd64-i386-freebsd10-i386  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-ovmf-amd64  1 build-check(1) blocked n/a
 test-amd64-amd64-amd64-pvgrub  1 build-check(1)   blocked  n/a
 test-armhf-armhf-xl-vhd   1 build-check(1)   blocked  n/a
 test-amd64-amd64-qemuu-nested-intel  1 build-check(1)  blocked n/a
 test-armhf-armhf-xl-rtds  1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 test-arm64-arm64-xl-thunderx  1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-xsm  1 build-check(1)   blocked  n/a
 test-amd64-amd64-i386-pvgrub  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-credit1   1 build-check(1)   blocked  n/a
 test-armhf-armhf-xl-arndale   1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-pvshim 1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-dmrestrict-amd64-dmrestrict 1 

Re: [Xen-devel] Xen fails to resume on AMD Fam15h (and Fam17h?) because of CPUID mismatch

2020-02-11 Thread Claudia
February 11, 2020 9:09 PM, "Marek Marczykowski-Górecki" 
 wrote:

> On Tue, Feb 11, 2020 at 12:59:22PM +, Claudia wrote:
> 
>> February 10, 2020 12:14 PM, "Marek Marczykowski-Górecki" 
>>  wrote:
>> 
>> On Mon, Feb 10, 2020 at 11:17:34AM +, Andrew Cooper wrote:
>> 
>> On 10/02/2020 08:55, Jan Beulich wrote:
>> On 10.02.2020 00:06, Marek Marczykowski-Górecki wrote:
>> Hi,
>> 
>> Multiple Qubes users have reported issues with resuming from S3 on AMD
>> systems (Ryzen 2500U, Ryzen Pro 3700U, maybe more). The error message
>> is:
>> 
>> (XEN) CPU0: cap[ 1] is 7ed8320b (expected f6d8320b)
>> 
>> If I read it right, this is:
>> - OSXSAVE: 0 -> 1
>> - HYPERVISOR: 1 -> 0
>> 
>> Commenting out the panic on a failed recheck_cpu_features() in power.c
>> makes the system work after resume, reportedly stable. But that doesn't
>> sound like a good idea generally.
>> 
>> Is this difference a Xen fault (some missing MSR / other register
>> restore on resume)? Or BIOS vendor / AMD, that could be worked around in
>> Xen?
>> The transition of the HYPERVISOR bit is definitely a Xen issue,
>> with Andrew having sent a patch already (iirc).
>> 
>> https://lore.kernel.org/xen-devel/20200127202121.2961-1-andrew.coop...@citrix.com
>> 
>> Code is correct. Commit message needs rework, including in light of
>> this discovery. (I may eventually split it into two patches.)
>> 
>> Claudia, do you want to test with this patch?
>> 
>> I'm getting hunk failed in domctl.c applying to R4.1 default repo (fc31,
>> Xen 4.13). I'll see if I can fix it but bear with me, I'm new at this.
>> 
>> Marek: Would you by any chance be willing to merge this into a test branch
>> on your repo, so the rest of us can pull it directly into qubes-builder?
>> It'll take you a fraction of the time it'll take me, plus then zachm and
>> awokd and anyone else can pull it also.
> 
> Here is one for Xen 4.13:
> https://github.com/QubesOS/qubes-vmm-xen/pull/71
> builder.conf snippet for qubes-builder:
> 
> BRANCH_vmm_xen=xen-4.13-amd-suspend
> GIT_URL_vmm_xen=https://github.com/marmarek/qubes-vmm-xen
> 
> This is already v2 patch from the other thread.

Thanks! For anyone else trying this, I also had to add
NO_CHECK=vmm-xen vmm-xen-stubdom-legacy, I guess because there are no tags on
that branch. The RPMs built successfully, and I'll be able to test them as
soon as I get the latest R4.1 build downloaded and installed (I'm currently
running 4.0).
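
As a quick cross-check of the cap[1] decoding quoted earlier in this thread,
here is a minimal, self-contained sketch; the two hex constants are copied
from the quoted log line, and the bit positions follow the CPUID leaf 1 ECX
layout (bit 27 = OSXSAVE, bit 31 = hypervisor present):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint32_t expected = 0xf6d8320b;   /* value Xen recorded at boot  */
        const uint32_t observed = 0x7ed8320b;   /* value reported after resume */

        /* Which feature bits changed across suspend/resume? */
        printf("changed bits: %#x\n", expected ^ observed);   /* 0x88000000 */
        printf("OSXSAVE    (bit 27): %u -> %u\n",
               (expected >> 27) & 1, (observed >> 27) & 1);   /* 0 -> 1 */
        printf("HYPERVISOR (bit 31): %u -> %u\n",
               (expected >> 31) & 1, (observed >> 31) & 1);   /* 1 -> 0 */
        return 0;
    }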


[Xen-devel] [ovmf test] 146886: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146886 ovmf real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146886/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-amd64-amd64-xl-qemuu-ovmf-amd64 10 debian-hvm-install fail REGR. vs. 145767
 test-amd64-i386-xl-qemuu-ovmf-amd64 10 debian-hvm-install fail REGR. vs. 145767

version targeted for testing:
 ovmf 69c135462d9ef28b59a065ad45dc0089d8dacdd0
baseline version:
 ovmf 70911f1f4aee0366b6122f2b90d367ec0f066beb

Last test of basis   145767  2020-01-08 00:39:09 Z   35 days
Failing since145774  2020-01-08 02:50:20 Z   35 days  121 attempts
Testing same since   146886  2020-02-11 17:12:02 Z0 days1 attempts


People who touched revisions under test:
  Aaron Li 
  Albecki, Mateusz 
  Amol N Sukerkar 
  Anthony PERARD 
  Antoine Coeur 
  Ard Biesheuvel 
  Ashish Singhal 
  Bob Feng 
  Bret Barkelew 
  Brian R Haug 
  Eric Dong 
  Fan, ZhijuX 
  Guo Dong 
  Hao A Wu 
  Heng Luo 
  Jason Voelz 
  Jeff Brasen 
  Jian J Wang 
  Kinney, Michael D 
  Krzysztof Koch 
  Laszlo Ersek 
  Leif Lindholm 
  Li, Aaron 
  Liming Gao 
  Liu, Zhiguang 
  Mateusz Albecki 
  Matthew Carlson 
  Michael D Kinney 
  Michael Kubacki 
  Pavana.K 
  Philippe Mathieu-Daudé 
  Philippe Mathieu-Daude 
  Philippe Mathieu-Daudé 
  Philippe Mathieu-Daudé 
  Pierre Gondois 
  Sean Brogan 
  Siyuan Fu 
  Siyuan, Fu 
  Steven 
  Steven Shi 
  Sudipto Paul 
  Vitaly Cheptsov 
  Vitaly Cheptsov via Groups.Io 
  Wei6 Xu 
  Xu, Wei6 
  Zhichao Gao 
  Zhiguang Liu 
  Zhiju.Fan 

jobs:
 build-amd64-xsm  pass
 build-i386-xsm   pass
 build-amd64  pass
 build-i386   pass
 build-amd64-libvirt  pass
 build-i386-libvirt   pass
 build-amd64-pvopspass
 build-i386-pvops pass
 test-amd64-amd64-xl-qemuu-ovmf-amd64 fail
 test-amd64-i386-xl-qemuu-ovmf-amd64  fail



sg-report-flight on osstest.test-lab.xenproject.org
logs: /home/logs/logs
images: /home/logs/images

Logs, config files, etc. are available at
http://logs.test-lab.xenproject.org/osstest/logs

Explanation of these reports, and of osstest in general, is at
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README.email;hb=master
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README;hb=master

Test harness code can be found at
http://xenbits.xen.org/gitweb?p=osstest.git;a=summary


Not pushing.

(No revision log; it would be 4795 lines long.)


[Xen-devel] [xen-unstable-smoke test] 146909: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146909 xen-unstable-smoke real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146909/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 build-amd64-libvirt   6 libvirt-buildfail REGR. vs. 146882
 build-arm64-xsm   6 xen-buildfail REGR. vs. 146882
 build-armhf   6 xen-buildfail REGR. vs. 146882

Tests which did not succeed, but are not blocking:
 test-armhf-armhf-xl   1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl-xsm   1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt  1 build-check(1)   blocked  n/a

version targeted for testing:
 xen  1b3cec69bf300e012a0269f0a4f28cca1ebf22c9
baseline version:
 xen  6c47c37b9b40d6fe40bce8c8fd39135f6d549c8c

Last test of basis   146882  2020-02-11 16:00:54 Z0 days
Testing same since   146893  2020-02-11 20:01:02 Z0 days3 attempts


People who touched revisions under test:
  Andrew Cooper 
  Ian Jackson 

jobs:
 build-arm64-xsm  fail
 build-amd64  pass
 build-armhf  fail
 build-amd64-libvirt  fail
 test-armhf-armhf-xl  blocked 
 test-arm64-arm64-xl-xsm  blocked 
 test-amd64-amd64-xl-qemuu-debianhvm-amd64pass
 test-amd64-amd64-libvirt blocked 



sg-report-flight on osstest.test-lab.xenproject.org
logs: /home/logs/logs
images: /home/logs/images

Logs, config files, etc. are available at
http://logs.test-lab.xenproject.org/osstest/logs

Explanation of these reports, and of osstest in general, is at
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README.email;hb=master
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README;hb=master

Test harness code can be found at
http://xenbits.xen.org/gitweb?p=osstest.git;a=summary


Not pushing.


commit 1b3cec69bf300e012a0269f0a4f28cca1ebf22c9
Author: Andrew Cooper 
Date:   Wed Feb 5 15:25:21 2020 +

tools/libxl: Combine legacy CPUID handling logic

While we are in the process of overhauling boot time CPUID/MSR handling, the
existing logic is going to have to remain in roughly this form for backwards
compatibility.

Fold libxl__cpuid_apply_policy() and libxl__cpuid_set() together into a single
libxl__cpuid_legacy() to reduce the complexity for callers.

No functional change.

Signed-off-by: Andrew Cooper 
Acked-by: Ian Jackson 

commit dacb80f9757c011161cec6609f39837c9ea8caa8
Author: Andrew Cooper 
Date:   Wed Jan 8 12:53:49 2020 +

tools/libxl: Remove libxl_cpuid_{set,apply_policy}() from the API

These functions should never have been exposed.  They don't have external
users, and can't usefully be used for several reasons.

Move libxl_cpuid_{set,apply_policy}() to being internal functions, and leave
an equivalent of the nop stubs in the API for caller compatibility.

Signed-off-by: Andrew Cooper 
Acked-by: Ian Jackson 
(qemu changes not included)


[Xen-devel] [linux-4.4 test] 146860: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146860 linux-4.4 real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146860/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 build-i386-xsm6 xen-buildfail REGR. vs. 139698
 build-i3866 xen-buildfail REGR. vs. 139698
 build-amd64-xsm   6 xen-buildfail REGR. vs. 139698
 build-amd64   6 xen-buildfail REGR. vs. 139698

Regressions which are regarded as allowable (not blocking):
 test-armhf-armhf-xl-rtds16 guest-start/debian.repeat fail REGR. vs. 139698

Tests which did not succeed, but are not blocking:
 test-amd64-amd64-xl-qemut-debianhvm-amd64  1 build-check(1)blocked n/a
 test-amd64-amd64-xl-xsm   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qcow2 1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl   1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-ovmf-amd64  1 build-check(1)  blocked n/a
 test-amd64-amd64-xl-pvshim1 build-check(1)   blocked  n/a
 test-amd64-amd64-pygrub   1 build-check(1)   blocked  n/a
 test-amd64-i386-examine   1 build-check(1)   blocked  n/a
 test-amd64-i386-xl1 build-check(1)   blocked  n/a
 test-amd64-amd64-amd64-pvgrub  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemut-ws16-amd64  1 build-check(1)  blocked n/a
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 test-amd64-amd64-xl-qemuu-dmrestrict-amd64-dmrestrict 1 build-check(1) blocked 
n/a
 test-amd64-i386-qemut-rhel6hvm-amd  1 build-check(1)   blocked n/a
 test-amd64-i386-libvirt-pair  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemut-ws16-amd64  1 build-check(1) blocked n/a
 test-amd64-amd64-xl-qemut-stubdom-debianhvm-amd64-xsm 1 build-check(1) blocked 
n/a
 test-amd64-i386-pair  1 build-check(1)   blocked  n/a
 build-amd64-libvirt   1 build-check(1)   blocked  n/a
 build-i386-libvirt1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-pair  1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-xsm  1 build-check(1)   blocked  n/a
 test-amd64-amd64-pair 1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-shadow 1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-ovmf-amd64  1 build-check(1) blocked n/a
 test-amd64-i386-xl-raw1 build-check(1)   blocked  n/a
 test-amd64-amd64-examine  1 build-check(1)   blocked  n/a
 test-amd64-i386-qemuu-rhel6hvm-amd  1 build-check(1)   blocked n/a
 test-amd64-i386-xl-qemut-debianhvm-amd64  1 build-check(1) blocked n/a
 test-amd64-amd64-xl-pvhv2-intel  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-dmrestrict-amd64-dmrestrict 1 build-check(1) blocked 
n/a
 test-amd64-amd64-xl-credit1   1 build-check(1)   blocked  n/a
 test-amd64-i386-qemut-rhel6hvm-intel  1 build-check(1) blocked n/a
 test-amd64-i386-xl-qemuu-debianhvm-amd64-shadow  1 build-check(1)  blocked n/a
 test-amd64-amd64-xl-qemut-debianhvm-i386-xsm  1 build-check(1) blocked n/a
 test-amd64-amd64-xl-multivcpu  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-debianhvm-i386-xsm  1 build-check(1) blocked n/a
 test-amd64-amd64-xl-qemut-win7-amd64  1 build-check(1) blocked n/a
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 test-amd64-amd64-xl-qemuu-debianhvm-amd64  1 build-check(1)blocked n/a
 test-amd64-amd64-xl-qemuu-debianhvm-amd64-shadow  1 build-check(1) blocked n/a
 test-amd64-i386-xl-qemuu-ws16-amd64  1 build-check(1)  blocked n/a
 test-amd64-amd64-xl-qemuu-ws16-amd64  1 build-check(1) blocked n/a
 test-amd64-i386-xl-qemuu-debianhvm-i386-xsm  1 build-check(1)  blocked n/a
 test-amd64-i386-xl-qemut-stubdom-debianhvm-amd64-xsm 1 build-check(1) blocked 
n/a
 test-amd64-amd64-xl-pvhv2-amd  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemut-debianhvm-i386-xsm  1 build-check(1)  blocked n/a
 test-amd64-amd64-qemuu-nested-amd  1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-debianhvm-amd64  1 build-check(1) blocked n/a
 test-amd64-amd64-qemuu-nested-intel  1 build-check(1)  blocked n/a
 test-amd64-i386-xl-xsm1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-xsm   1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-vhd  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-win7-amd64  1 build-check(1) 

[Xen-devel] [linux-arm-xen test] 146859: tolerable all pass - PUSHED

2020-02-11 Thread osstest service owner
flight 146859 linux-arm-xen real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146859/

Failures :-/ but no regressions.

Tests which did not succeed, but are not blocking:
 test-armhf-armhf-libvirt 14 saverestore-support-checkfail  like 134708
 test-arm64-arm64-xl-xsm  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-xsm  14 saverestore-support-checkfail   never pass
 test-arm64-arm64-xl-seattle  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-rtds 13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-seattle  14 saverestore-support-checkfail   never pass
 test-arm64-arm64-xl-rtds 14 saverestore-support-checkfail   never pass
 test-arm64-arm64-libvirt-xsm 13 migrate-support-checkfail   never pass
 test-arm64-arm64-libvirt-xsm 14 saverestore-support-checkfail   never pass
 test-arm64-arm64-libvirt 13 migrate-support-checkfail   never pass
 test-arm64-arm64-libvirt 14 saverestore-support-checkfail   never pass
 test-arm64-arm64-xl-credit1  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-credit1  14 saverestore-support-checkfail   never pass
 test-arm64-arm64-libvirt-qcow2 12 migrate-support-checkfail never pass
 test-arm64-arm64-libvirt-qcow2 13 saverestore-support-checkfail never pass
 test-arm64-arm64-xl-credit2  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-credit2  14 saverestore-support-checkfail   never pass
 test-arm64-arm64-xl-thunderx 13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-thunderx 14 saverestore-support-checkfail   never pass
 test-arm64-arm64-xl  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl  14 saverestore-support-checkfail   never pass
 test-arm64-arm64-xl-multivcpu 13 migrate-support-checkfail  never pass
 test-arm64-arm64-xl-multivcpu 14 saverestore-support-checkfail  never pass
 test-armhf-armhf-xl-arndale  13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-arndale  14 saverestore-support-checkfail   never pass
 test-armhf-armhf-libvirt 13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-credit1  13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-credit1  14 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-multivcpu 13 migrate-support-checkfail  never pass
 test-armhf-armhf-xl-multivcpu 14 saverestore-support-checkfail  never pass
 test-armhf-armhf-xl-rtds 13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-rtds 14 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-credit2  13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-credit2  14 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl  13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl  14 saverestore-support-checkfail   never pass
 test-armhf-armhf-libvirt-raw 12 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt-raw 13 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-vhd  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-vhd  13 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-cubietruck 13 migrate-support-checkfail never pass
 test-armhf-armhf-xl-cubietruck 14 saverestore-support-checkfail never pass

version targeted for testing:
 linux    a6c5dd1dbaffe4cc398d8454546ba9246b9a95c9
baseline version:
 linux    e64ac26749dc2c0f390caccd04274608ab31c8cf

Last test of basis   134832  2019-04-15 18:29:23 Z  302 days
Testing same since   146859  2020-02-11 11:11:57 Z0 days1 attempts


9850 people touched revisions under test,
not listing them all

jobs:
 build-arm64-xsm  pass
 build-arm64  pass
 build-armhf  pass
 build-arm64-libvirt  pass
 build-armhf-libvirt  pass
 build-arm64-pvopspass
 build-armhf-pvopspass
 test-arm64-arm64-xl  pass
 test-armhf-armhf-xl  pass
 test-arm64-arm64-libvirt-xsm pass
 test-arm64-arm64-xl-xsm  pass
 test-armhf-armhf-xl-arndale  pass
 test-arm64-arm64-xl-credit1  pass
 test-armhf-armhf-xl-credit1  pass
 

[Xen-devel] [linux-4.9 test] 146858: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146858 linux-4.9 real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146858/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-amd64-i386-xl-xsm  20 guest-start/debian.repeat fail REGR. vs. 142947
 build-amd64   6 xen-buildfail REGR. vs. 142947

Tests which did not succeed, but are not blocking:
 test-amd64-i386-qemuu-rhel6hvm-amd  1 build-check(1)   blocked n/a
 test-amd64-amd64-xl-qcow2 1 build-check(1)   blocked  n/a
 test-amd64-i386-qemuu-rhel6hvm-intel  1 build-check(1) blocked n/a
 test-amd64-i386-xl-qemuu-ovmf-amd64  1 build-check(1)  blocked n/a
 test-amd64-amd64-libvirt-pair  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-dmrestrict-amd64-dmrestrict 1 build-check(1) blocked 
n/a
 test-amd64-i386-xl-pvshim 1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-raw1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-credit1   1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-vhd  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemut-win7-amd64  1 build-check(1)  blocked n/a
 test-amd64-amd64-xl-qemuu-ws16-amd64  1 build-check(1) blocked n/a
 test-amd64-amd64-i386-pvgrub  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-dmrestrict-amd64-dmrestrict 1 build-check(1) blocked 
n/a
 test-amd64-amd64-libvirt  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-credit2   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-ovmf-amd64  1 build-check(1) blocked n/a
 test-amd64-i386-xl-qemuu-debianhvm-amd64  1 build-check(1) blocked n/a
 test-amd64-amd64-xl-pvhv2-intel  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-win7-amd64  1 build-check(1) blocked n/a
 test-amd64-i386-pair  1 build-check(1)   blocked  n/a
 test-amd64-amd64-pygrub   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-pvshim1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemut-debianhvm-amd64  1 build-check(1) blocked n/a
 test-amd64-i386-xl-shadow 1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-debianhvm-amd64-shadow  1 build-check(1) blocked n/a
 test-amd64-amd64-libvirt-xsm  1 build-check(1)   blocked  n/a
 test-amd64-i386-examine   1 build-check(1)   blocked  n/a
 test-amd64-amd64-examine  1 build-check(1)   blocked  n/a
 test-amd64-amd64-qemuu-nested-amd  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-pvhv2-amd  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemut-ws16-amd64  1 build-check(1)  blocked n/a
 test-amd64-i386-xl-qemuu-debianhvm-amd64-shadow  1 build-check(1)  blocked n/a
 test-amd64-amd64-pair 1 build-check(1)   blocked  n/a
 test-amd64-amd64-qemuu-nested-intel  1 build-check(1)  blocked n/a
 test-amd64-amd64-xl-shadow1 build-check(1)   blocked  n/a
 test-amd64-i386-qemut-rhel6hvm-amd  1 build-check(1)   blocked n/a
 test-amd64-i386-freebsd10-i386  1 build-check(1)   blocked  n/a
 test-amd64-i386-qemut-rhel6hvm-intel  1 build-check(1) blocked n/a
 build-amd64-libvirt   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-multivcpu  1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 test-amd64-i386-xl-qemuu-win7-amd64  1 build-check(1)  blocked n/a
 test-amd64-amd64-xl-rtds  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-debianhvm-amd64  1 build-check(1)blocked n/a
 test-amd64-amd64-amd64-pvgrub  1 build-check(1)   blocked  n/a
 test-amd64-i386-freebsd10-amd64  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemut-ws16-amd64  1 build-check(1) blocked n/a
 test-amd64-i386-xl-qemuu-ws16-amd64  1 build-check(1)  blocked n/a
 test-amd64-i386-libvirt-pair  1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemut-debianhvm-amd64  1 build-check(1)blocked n/a
 test-amd64-amd64-xl-qemut-win7-amd64  1 build-check(1) blocked n/a
 test-armhf-armhf-xl-rtds 16 guest-start/debian.repeatfail  like 142893
 test-amd64-i386-libvirt-xsm  13 migrate-support-checkfail   never pass
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 11 migrate-support-check 
fail never pass
 test-armhf-armhf-xl-credit1  13 migrate-support-check

[Xen-devel] [xen-unstable-smoke test] 146899: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146899 xen-unstable-smoke real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146899/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 build-amd64-libvirt   6 libvirt-buildfail REGR. vs. 146882
 build-arm64-xsm   6 xen-buildfail REGR. vs. 146882
 build-armhf   6 xen-buildfail REGR. vs. 146882

Tests which did not succeed, but are not blocking:
 test-armhf-armhf-xl   1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt  1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl-xsm   1 build-check(1)   blocked  n/a

version targeted for testing:
 xen  1b3cec69bf300e012a0269f0a4f28cca1ebf22c9
baseline version:
 xen  6c47c37b9b40d6fe40bce8c8fd39135f6d549c8c

Last test of basis   146882  2020-02-11 16:00:54 Z0 days
Testing same since   146893  2020-02-11 20:01:02 Z0 days2 attempts


People who touched revisions under test:
  Andrew Cooper 
  Ian Jackson 

jobs:
 build-arm64-xsm  fail
 build-amd64  pass
 build-armhf  fail
 build-amd64-libvirt  fail
 test-armhf-armhf-xl  blocked 
 test-arm64-arm64-xl-xsm  blocked 
 test-amd64-amd64-xl-qemuu-debianhvm-amd64pass
 test-amd64-amd64-libvirt blocked 



sg-report-flight on osstest.test-lab.xenproject.org
logs: /home/logs/logs
images: /home/logs/images

Logs, config files, etc. are available at
http://logs.test-lab.xenproject.org/osstest/logs

Explanation of these reports, and of osstest in general, is at
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README.email;hb=master
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README;hb=master

Test harness code can be found at
http://xenbits.xen.org/gitweb?p=osstest.git;a=summary


Not pushing.


commit 1b3cec69bf300e012a0269f0a4f28cca1ebf22c9
Author: Andrew Cooper 
Date:   Wed Feb 5 15:25:21 2020 +

tools/libxl: Combine legacy CPUID handling logic

While we are in the process of overhauling boot time CPUID/MSR handling, the
existing logic is going to have to remain in roughly this form for backwards
compatibility.

Fold libxl__cpuid_apply_policy() and libxl__cpuid_set() together into a single
libxl__cpuid_legacy() to reduce the complexity for callers.

No functional change.

Signed-off-by: Andrew Cooper 
Acked-by: Ian Jackson 

commit dacb80f9757c011161cec6609f39837c9ea8caa8
Author: Andrew Cooper 
Date:   Wed Jan 8 12:53:49 2020 +

tools/libxl: Remove libxl_cpuid_{set,apply_policy}() from the API

These functions should never have been exposed.  They don't have external
users, and can't usefully be used for several reasons.

Move libxl_cpuid_{set,apply_policy}() to being internal functions, and leave
an equivalent of the nop stubs in the API for caller compatibility.

Signed-off-by: Andrew Cooper 
Acked-by: Ian Jackson 
(qemu changes not included)


[Xen-devel] [linux-4.14 test] 146857: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146857 linux-4.14 real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146857/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-armhf-armhf-examine  8 reboot   fail REGR. vs. 142849
 test-armhf-armhf-xl   7 xen-boot fail REGR. vs. 142849
 test-armhf-armhf-xl-multivcpu  7 xen-bootfail REGR. vs. 142849
 test-armhf-armhf-xl-arndale   7 xen-boot fail REGR. vs. 142849
 test-armhf-armhf-xl-credit1   7 xen-boot fail REGR. vs. 142849
 test-armhf-armhf-xl-credit2   7 xen-boot fail REGR. vs. 142849
 test-armhf-armhf-libvirt  7 xen-boot fail REGR. vs. 142849
 test-armhf-armhf-xl-vhd   7 xen-boot fail REGR. vs. 142849
 build-i3866 xen-buildfail REGR. vs. 142849
 build-amd64   6 xen-buildfail REGR. vs. 142849
 build-amd64-xsm   6 xen-buildfail REGR. vs. 142849

Tests which did not succeed, but are not blocking:
 test-amd64-amd64-xl-credit1   1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-raw1 build-check(1)   blocked  n/a
 test-amd64-amd64-amd64-pvgrub  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemut-win7-amd64  1 build-check(1) blocked n/a
 test-amd64-amd64-libvirt-xsm  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-pvshim1 build-check(1)   blocked  n/a
 test-amd64-amd64-pair 1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemut-debianhvm-i386-xsm  1 build-check(1) blocked n/a
 test-amd64-i386-xl-qemut-debianhvm-i386-xsm  1 build-check(1)  blocked n/a
 test-amd64-i386-xl-qemuu-win7-amd64  1 build-check(1)  blocked n/a
 test-amd64-i386-xl-pvshim 1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-pvhv2-intel  1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-pair  1 build-check(1)   blocked  n/a
 test-amd64-i386-examine   1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-dmrestrict-amd64-dmrestrict 1 build-check(1) blocked 
n/a
 test-amd64-i386-xl-qemuu-debianhvm-i386-xsm  1 build-check(1)  blocked n/a
 test-amd64-amd64-xl-qemuu-win7-amd64  1 build-check(1) blocked n/a
 test-amd64-i386-qemut-rhel6hvm-intel  1 build-check(1) blocked n/a
 test-amd64-amd64-xl-qemuu-ovmf-amd64  1 build-check(1) blocked n/a
 test-amd64-amd64-xl-qemut-debianhvm-amd64  1 build-check(1)blocked n/a
 test-amd64-amd64-xl-shadow1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-ws16-amd64  1 build-check(1) blocked n/a
 test-amd64-i386-qemut-rhel6hvm-amd  1 build-check(1)   blocked n/a
 test-amd64-i386-qemuu-rhel6hvm-intel  1 build-check(1) blocked n/a
 test-amd64-i386-xl-qemut-win7-amd64  1 build-check(1)  blocked n/a
 test-amd64-amd64-libvirt  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-rtds  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-credit2   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemut-ws16-amd64  1 build-check(1) blocked n/a
 test-amd64-i386-pair  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-xsm1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-vhd  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-dmrestrict-amd64-dmrestrict 1 build-check(1) blocked 
n/a
 test-amd64-i386-xl-qemuu-debianhvm-amd64-shadow  1 build-check(1)  blocked n/a
 test-amd64-amd64-qemuu-nested-intel  1 build-check(1)  blocked n/a
 test-amd64-amd64-xl-qemuu-debianhvm-amd64-shadow  1 build-check(1) blocked n/a
 test-amd64-i386-freebsd10-amd64  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-debianhvm-amd64  1 build-check(1)blocked n/a
 test-amd64-i386-libvirt   1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-xsm   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qcow2 1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-ws16-amd64  1 build-check(1)  blocked n/a
 test-amd64-amd64-i386-pvgrub  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-xsm   1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-debianhvm-amd64  1 build-check(1) blocked n/a
 build-i386-libvirt1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-pvhv2-amd  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemut-stubdom-debianhvm-amd64-xsm 1 build-check(1) blocked 
n/a
 test-amd64-amd64-examine  1 build-check(1)   

[Xen-devel] [linux-linus test] 146850: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146850 linux-linus real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146850/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-arm64-arm64-xl-credit1  12 guest-start  fail REGR. vs. 133580
 build-i386-pvops  6 kernel-build fail REGR. vs. 133580
 test-arm64-arm64-xl 16 guest-start/debian.repeat fail REGR. vs. 133580
 test-arm64-arm64-xl-credit2  12 guest-start  fail REGR. vs. 133580
 test-armhf-armhf-xl-multivcpu 12 guest-start fail REGR. vs. 133580
 test-armhf-armhf-xl-credit1  12 guest-start  fail REGR. vs. 133580
 test-armhf-armhf-xl-credit2  12 guest-start  fail REGR. vs. 133580
 test-arm64-arm64-xl-xsm  15 guest-stop   fail REGR. vs. 133580
 build-i386-xsm6 xen-buildfail REGR. vs. 133580
 build-amd64-xsm   6 xen-buildfail REGR. vs. 133580
 build-amd64   6 xen-buildfail REGR. vs. 133580

Regressions which are regarded as allowable (not blocking):
 test-armhf-armhf-xl-rtds 12 guest-start  fail REGR. vs. 133580

Tests which did not succeed, but are not blocking:
 test-amd64-amd64-xl-qemut-stubdom-debianhvm-amd64-xsm 1 build-check(1) blocked 
n/a
 test-amd64-i386-qemut-rhel6hvm-amd  1 build-check(1)   blocked n/a
 test-amd64-amd64-xl-multivcpu  1 build-check(1)   blocked  n/a
 test-amd64-amd64-examine  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-raw1 build-check(1)   blocked  n/a
 test-amd64-amd64-qemuu-nested-intel  1 build-check(1)  blocked n/a
 test-amd64-i386-xl1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-pvhv2-amd  1 build-check(1)   blocked  n/a
 test-amd64-i386-qemuu-rhel6hvm-intel  1 build-check(1) blocked n/a
 test-amd64-amd64-i386-pvgrub  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-debianhvm-i386-xsm  1 build-check(1)  blocked n/a
 test-amd64-amd64-libvirt-xsm  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-xsm1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-pair  1 build-check(1)   blocked  n/a
 test-amd64-i386-examine   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-pvshim1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-ws16-amd64  1 build-check(1) blocked n/a
 test-amd64-amd64-xl-qemut-debianhvm-i386-xsm  1 build-check(1) blocked n/a
 test-amd64-amd64-xl-credit2   1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 test-amd64-amd64-xl-qemuu-debianhvm-amd64  1 build-check(1)blocked n/a
 test-amd64-amd64-amd64-pvgrub  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-dmrestrict-amd64-dmrestrict 1 build-check(1) blocked n/a
 test-amd64-i386-xl-shadow 1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemut-win7-amd64  1 build-check(1) blocked n/a
 test-amd64-amd64-qemuu-nested-amd  1 build-check(1)   blocked  n/a
 test-amd64-i386-freebsd10-i386  1 build-check(1)   blocked  n/a
 test-amd64-amd64-pygrub   1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemut-debianhvm-amd64  1 build-check(1) blocked n/a
 test-amd64-i386-xl-qemut-stubdom-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 test-amd64-i386-libvirt   1 build-check(1)   blocked  n/a
 test-amd64-i386-qemuu-rhel6hvm-amd  1 build-check(1)   blocked n/a
 test-amd64-i386-xl-qemuu-dmrestrict-amd64-dmrestrict 1 build-check(1) blocked n/a
 test-amd64-amd64-pair 1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-vhd  1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-xsm   1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemut-debianhvm-i386-xsm  1 build-check(1)  blocked n/a
 test-amd64-amd64-xl-qemuu-win7-amd64  1 build-check(1) blocked n/a
 test-amd64-amd64-xl-qcow2 1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-pair  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-debianhvm-amd64-shadow  1 build-check(1)  blocked n/a
 test-amd64-amd64-xl-pvhv2-intel  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-shadow1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemut-ws16-amd64  1 build-check(1) blocked n/a
 test-amd64-i386-freebsd10-amd64  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-debianhvm-i386-xsm  1 build-check(1) blocked n/a
 test-amd64-i386-xl-qemut-win7-amd64  1 build-check(1)

[Xen-devel] [linux-4.19 test] 146851: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146851 linux-4.19 real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146851/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 build-amd64-xsm   6 xen-buildfail REGR. vs. 142932

Tests which did not succeed, but are not blocking:
 test-amd64-i386-xl-qemut-debianhvm-i386-xsm  1 build-check(1)  blocked n/a
 test-amd64-amd64-xl-xsm   1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-xsm1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-xsm   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemut-debianhvm-i386-xsm  1 build-check(1) blocked n/a
 test-amd64-i386-xl-qemut-stubdom-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 test-amd64-amd64-xl-qemuu-debianhvm-i386-xsm  1 build-check(1) blocked n/a
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 test-amd64-amd64-libvirt-xsm  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-debianhvm-i386-xsm  1 build-check(1)  blocked n/a
 test-amd64-amd64-xl-qemut-stubdom-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 test-amd64-i386-xl-qemut-ws16-amd64 17 guest-stop fail like 142932
 test-amd64-amd64-libvirt 13 migrate-support-checkfail   never pass
 test-amd64-i386-xl-pvshim12 guest-start  fail   never pass
 test-amd64-i386-libvirt  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-credit1  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-credit1  14 saverestore-support-checkfail   never pass
 test-arm64-arm64-xl-seattle  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-seattle  14 saverestore-support-checkfail   never pass
 test-arm64-arm64-xl  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl  14 saverestore-support-checkfail   never pass
 test-arm64-arm64-xl-xsm  13 migrate-support-checkfail   never pass
 test-arm64-arm64-libvirt-xsm 13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-xsm  14 saverestore-support-checkfail   never pass
 test-arm64-arm64-libvirt-xsm 14 saverestore-support-checkfail   never pass
 test-amd64-amd64-libvirt-vhd 12 migrate-support-checkfail   never pass
 test-amd64-amd64-qemuu-nested-amd 17 debian-hvm-install/l1/l2  fail never pass
 test-arm64-arm64-xl-credit2  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-credit2  14 saverestore-support-checkfail   never pass
 test-arm64-arm64-xl-thunderx 13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-thunderx 14 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl  13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl  14 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-credit1  13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-credit1  14 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-arndale  13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-arndale  14 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-credit2  13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-multivcpu 13 migrate-support-checkfail  never pass
 test-armhf-armhf-xl-credit2  14 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-multivcpu 14 saverestore-support-checkfail  never pass
 test-armhf-armhf-xl-rtds 13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-rtds 14 saverestore-support-checkfail   never pass
 test-armhf-armhf-libvirt 13 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt 14 saverestore-support-checkfail   never pass
 test-amd64-amd64-xl-qemut-win7-amd64 17 guest-stop fail never pass
 test-amd64-i386-xl-qemut-win7-amd64 17 guest-stop  fail never pass
 test-armhf-armhf-xl-cubietruck 13 migrate-support-checkfail never pass
 test-armhf-armhf-xl-cubietruck 14 saverestore-support-checkfail never pass
 test-amd64-amd64-xl-qemuu-win7-amd64 17 guest-stop fail never pass
 test-amd64-i386-xl-qemuu-win7-amd64 17 guest-stop  fail never pass
 test-armhf-armhf-xl-vhd  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-vhd  13 saverestore-support-checkfail   never pass
 test-armhf-armhf-libvirt-raw 12 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt-raw 13 saverestore-support-checkfail   never pass
 test-amd64-amd64-xl-qemuu-ws16-amd64 17 guest-stop fail never pass
 test-amd64-i386-xl-qemuu-ws16-amd64 17 guest-stop  fail never pass
 test-amd64-amd64-xl-qemut-ws16-amd64 

[Xen-devel] [xen-unstable-smoke test] 146893: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146893 xen-unstable-smoke real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146893/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 build-amd64-libvirt   6 libvirt-buildfail REGR. vs. 146882
 build-arm64-xsm   6 xen-buildfail REGR. vs. 146882
 build-armhf   6 xen-buildfail REGR. vs. 146882

Tests which did not succeed, but are not blocking:
 test-arm64-arm64-xl-xsm   1 build-check(1)   blocked  n/a
 test-armhf-armhf-xl   1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt  1 build-check(1)   blocked  n/a

version targeted for testing:
 xen  1b3cec69bf300e012a0269f0a4f28cca1ebf22c9
baseline version:
 xen  6c47c37b9b40d6fe40bce8c8fd39135f6d549c8c

Last test of basis   146882  2020-02-11 16:00:54 Z    0 days
Testing same since   146893  2020-02-11 20:01:02 Z    0 days    1 attempts


People who touched revisions under test:
  Andrew Cooper 
  Ian Jackson 

jobs:
 build-arm64-xsm  fail
 build-amd64  pass
 build-armhf  fail
 build-amd64-libvirt  fail
 test-armhf-armhf-xl  blocked 
 test-arm64-arm64-xl-xsm  blocked 
 test-amd64-amd64-xl-qemuu-debianhvm-amd64pass
 test-amd64-amd64-libvirt blocked 



sg-report-flight on osstest.test-lab.xenproject.org
logs: /home/logs/logs
images: /home/logs/images

Logs, config files, etc. are available at
http://logs.test-lab.xenproject.org/osstest/logs

Explanation of these reports, and of osstest in general, is at
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README.email;hb=master
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README;hb=master

Test harness code can be found at
http://xenbits.xen.org/gitweb?p=osstest.git;a=summary


Not pushing.


commit 1b3cec69bf300e012a0269f0a4f28cca1ebf22c9
Author: Andrew Cooper 
Date:   Wed Feb 5 15:25:21 2020 +

tools/libxl: Combine legacy CPUID handling logic

While we are in the process of overhauling boot time CPUID/MSR handling, the
existing logic is going to have to remain in roughly this form for backwards
compatibility.

Fold libxl__cpuid_apply_policy() and libxl__cpuid_set() together into a single
libxl__cpuid_legacy() to reduce the complexity for callers.

No functional change.

Signed-off-by: Andrew Cooper 
Acked-by: Ian Jackson 

commit dacb80f9757c011161cec6609f39837c9ea8caa8
Author: Andrew Cooper 
Date:   Wed Jan 8 12:53:49 2020 +

tools/libxl: Remove libxl_cpuid_{set,apply_policy}() from the API

These functions should never have been exposed.  They don't have external
users, and can't usefully be used for several reasons.

Move libxl_cpuid_{set,apply_policy}() to being internal functions, and leave
an equivalent of the nop stubs in the API for caller compatibility.

Signed-off-by: Andrew Cooper 
Acked-by: Ian Jackson 
(qemu changes not included)

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

[Xen-devel] [xen-unstable test] 146848: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146848 xen-unstable real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146848/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 build-i3866 xen-buildfail REGR. vs. 146796
 build-amd64   6 xen-build  fail in 146839 REGR. vs. 146787

Tests which are failing intermittently (not blocking):
 test-armhf-armhf-libvirt-raw  7 xen-boot   fail pass in 146839

Tests which did not succeed, but are not blocking:
 test-amd64-amd64-xl-qcow2 1 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-xl-multivcpu  1 build-check(1)  blocked in 146839 n/a
 test-xtf-amd64-amd64-21 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-pygrub   1 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-xl-qemuu-debianhvm-amd64 1 build-check(1) blocked in 146839 n/a
 test-amd64-amd64-libvirt-pair  1 build-check(1)  blocked in 146839 n/a
 test-amd64-amd64-i386-pvgrub  1 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-xl-qemuu-ovmf-amd64  1 build-check(1)   blocked in 146839 n/a
 test-xtf-amd64-amd64-41 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-qemuu-nested-intel  1 build-check(1)blocked in 146839 n/a
 test-amd64-amd64-libvirt  1 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-xl-qemuu-dmrestrict-amd64-dmrestrict 1 build-check(1) blocked in 146839 n/a
 test-amd64-amd64-xl   1 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-xl-pvshim1 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-libvirt-xsm  1 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-xl-qemut-win7-amd64  1 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-amd64-pvgrub  1 build-check(1)  blocked in 146839 n/a
 test-xtf-amd64-amd64-11 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-migrupgrade  1 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-libvirt-vhd  1 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-xl-credit1   1 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-xl-pvhv2-intel  1 build-check(1)blocked in 146839 n/a
 test-amd64-amd64-xl-rtds  1 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-xl-credit2   1 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-xl-qemuu-ws16-amd64  1 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-xl-qemut-ws16-amd64  1 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-xl-qemuu-win7-amd64  1 build-check(1)   blocked in 146839 n/a
 test-xtf-amd64-amd64-51 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-qemuu-nested-amd  1 build-check(1)  blocked in 146839 n/a
 test-amd64-amd64-xl-qemuu-debianhvm-amd64-shadow 1 build-check(1) blocked in 146839 n/a
 test-amd64-amd64-xl-shadow1 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked in 146839 n/a
 test-amd64-amd64-pair 1 build-check(1)   blocked in 146839 n/a
 test-xtf-amd64-amd64-31 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-livepatch1 build-check(1)   blocked in 146839 n/a
 build-amd64-libvirt   1 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-xl-pvhv2-amd  1 build-check(1)  blocked in 146839 n/a
 test-amd64-amd64-examine  1 build-check(1)   blocked in 146839 n/a
 test-amd64-amd64-xl-qemut-debianhvm-amd64 1 build-check(1) blocked in 146839 n/a
 test-amd64-i386-xl-qemuu-debianhvm-amd64-shadow  1 build-check(1)  blocked n/a
 test-amd64-i386-livepatch 1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-win7-amd64  1 build-check(1)  blocked n/a
 test-amd64-i386-libvirt-xsm   1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-ws16-amd64  1 build-check(1)  blocked n/a
 test-amd64-i386-xl-qemuu-debianhvm-amd64  1 build-check(1) blocked n/a
 test-amd64-i386-qemut-rhel6hvm-amd  1 build-check(1)   blocked n/a
 test-amd64-i386-migrupgrade   1 build-check(1)   blocked  n/a
 test-amd64-i386-qemuu-rhel6hvm-intel  1 build-check(1) blocked n/a
 test-amd64-i386-xl-qemut-ws16-amd64  1 build-check(1)  blocked n/a
 test-amd64-i386-qemut-rhel6hvm-intel  1 build-check(1) blocked n/a
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 build-i386-libvirt1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-pvshim 1 build-check(1)   blocked  n/a
 test-amd64-i386-pair  1 build-check(1)   blocked  n/a
 test-amd64-i386-freebsd10-amd64  1 build-check(1)   blocked  n/a
 test-amd64-i386-qemuu-rhel6hvm-amd  1 

Re: [Xen-devel] Xen fails to resume on AMD Fam15h (and Fam17h?) because of CPUID mismatch

2020-02-11 Thread Marek Marczykowski-Górecki
On Tue, Feb 11, 2020 at 12:59:22PM +, Claudia wrote:
> February 10, 2020 12:14 PM, "Marek Marczykowski-Górecki" 
>  wrote:
> 
> > On Mon, Feb 10, 2020 at 11:17:34AM +, Andrew Cooper wrote:
> > 
> >> On 10/02/2020 08:55, Jan Beulich wrote:
> >> On 10.02.2020 00:06, Marek Marczykowski-Górecki wrote:
> >> Hi,
> >> 
> >> Multiple Qubes users have reported issues with resuming from S3 on AMD
> >> systems (Ryzen 2500U, Ryzen Pro 3700U, maybe more). The error message
> >> is:
> >> 
> >> (XEN) CPU0: cap[ 1] is 7ed8320b (expected f6d8320b)
> >> 
> >> If I read it right, this is:
> >> - OSXSAVE: 0 -> 1
> >> - HYPERVISOR: 1 -> 0
> >> 
> >> Commenting out the panic on a failed recheck_cpu_features() in power.c
> >> makes the system work after resume, reportedly stable. But that doesn't
> >> sounds like a good idea generally.
> >> 
> >> Is this difference a Xen fault (some missing MSR / other register
> >> restore on resume)? Or BIOS vendor / AMD, that could be worked around in
> >> Xen?
> >> The transition of the HYPERVISOR bit is definitely a Xen issue,
> >> with Andrew having sent a patch already (iirc).
> >> 
> >> https://lore.kernel.org/xen-devel/20200127202121.2961-1-andrew.coop...@citrix.com
> >> 
> >> Code is correct.  Commit message needs rework, including in light of
> >> this discovery.  (I may eventually split it into two patches.)
> > 
> > Claudia, do you want to test with this patch?
> 
> I'm getting hunk failed in domctl.c applying to R4.1 default repo (fc31, Xen 
> 4.13). I'll see if I can fix it but bear with me, I'm new at this.
> 
> Marek: Would you by any chance be willing to merge this into a test branch on 
> your repo, so the rest of us can pull it directly into qubes-builder? It'll 
> take you a fraction of the time it'll take me, plus then zachm and awokd and 
> anyone else can pull it also.

Here is one for Xen 4.13:
https://github.com/QubesOS/qubes-vmm-xen/pull/71
builder.conf snippet for qubes-builder:

BRANCH_vmm_xen=xen-4.13-amd-suspend
GIT_URL_vmm_xen=https://github.com/marmarek/qubes-vmm-xen

This is already v2 patch from the other thread.

-- 
Best Regards,
Marek Marczykowski-Górecki
Invisible Things Lab
A: Because it messes up the order in which people normally read text.
Q: Why is top-posting such a bad thing?


___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH 3/3] xenstore: remove not applicable control commands in stubdom

2020-02-11 Thread Andrew Cooper
On 28/01/2020 14:28, Juergen Gross wrote:
> When run in a stubdom environment Xenstore can't select a logfile or
> emit memory statistics to a specific file.
>
> So remove or modify those control commands accordingly.
>
> Signed-off-by: Juergen Gross 

Acked-by: Andrew Cooper 

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH 2/3] xenstore: add console xenstore entries for xenstore stubdom

2020-02-11 Thread Andrew Cooper
On 28/01/2020 14:28, Juergen Gross wrote:
> In order to be able to connect to the console of Xenstore stubdom we
> need to create the appropriate entries in Xenstore.
>
> For the moment we don't support xenconsoled living in another domain
> than dom0, as this information isn't available other then via
> Xenstore which we are just setting up.

Ah - I see the observation here.

>
> Signed-off-by: Juergen Gross 

Acked-by: Andrew Cooper 

> ---
>  tools/helpers/init-xenstore-domain.c | 31 ++-
>  1 file changed, 30 insertions(+), 1 deletion(-)
>
> diff --git a/tools/helpers/init-xenstore-domain.c 
> b/tools/helpers/init-xenstore-domain.c
> index a312bc38b8..a81a15a4de 100644
> --- a/tools/helpers/init-xenstore-domain.c
> +++ b/tools/helpers/init-xenstore-domain.c
> @@ -12,6 +12,7 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  
>  #include "init-dom-json.h"
>  #include "_paths.h"
> @@ -312,6 +313,15 @@ static void do_xs_write(struct xs_handle *xsh, char 
> *path, char *val)
>  fprintf(stderr, "writing %s to xenstore failed.\n", path);
>  }
>  
> +static void do_xs_write_dir_node(struct xs_handle *xsh, char *dir, char 
> *node,
> + char *val)
> +{
> +char full_path[100];
> +
> +snprintf(full_path, 100, "%s/%s", dir, node);
> +do_xs_write(xsh, full_path, val);
> +}
> +
>  static void do_xs_write_dom(struct xs_handle *xsh, char *path, char *val)
>  {
>  char full_path[64];
> @@ -325,7 +335,7 @@ int main(int argc, char** argv)
>  int opt;
>  xc_interface *xch;
>  struct xs_handle *xsh;
> -char buf[16];
> +char buf[16], be_path[64], fe_path[64];
>  int rv, fd;
>  char *maxmem_str = NULL;
>  
> @@ -414,6 +424,25 @@ int main(int argc, char** argv)
>  if (maxmem)
>  snprintf(buf, 16, "%d", maxmem * 1024);
>  do_xs_write_dom(xsh, "memory/static-max", buf);
> +snprintf(be_path, 64, "/local/domain/0/backend/console/%d/0", domid);
> +snprintf(fe_path, 64, "/local/domain/%d/console", domid);
> +snprintf(buf, 16, "%d", domid);
> +do_xs_write_dir_node(xsh, be_path, "frontend-id", buf);
> +do_xs_write_dir_node(xsh, be_path, "frontend", fe_path);
> +do_xs_write_dir_node(xsh, be_path, "online", "1");
> +snprintf(buf, 16, "%d", XenbusStateInitialising);
> +do_xs_write_dir_node(xsh, be_path, "state", buf);
> +do_xs_write_dir_node(xsh, be_path, "protocol", "vt100");
> +do_xs_write_dir_node(xsh, fe_path, "backend", be_path);
> +do_xs_write_dir_node(xsh, fe_path, "backend-id", "0");
> +do_xs_write_dir_node(xsh, fe_path, "limit", "1048576");
> +do_xs_write_dir_node(xsh, fe_path, "type", "xenconsoled");
> +do_xs_write_dir_node(xsh, fe_path, "output", "pty");
> +do_xs_write_dir_node(xsh, fe_path, "tty", "");
> +snprintf(buf, 16, "%d", console_evtchn);
> +do_xs_write_dir_node(xsh, fe_path, "port", buf);
> +snprintf(buf, 16, "%ld", console_mfn);
> +do_xs_write_dir_node(xsh, fe_path, "ring-ref", buf);

Eww.  Why are pty/tty details in the protocol?  vt100, fine, but the
backend specifics about what it does with the data shouldn't matter to
the frontend.

I presume this is too engrained in legacy to fix?

~Andrew
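
For reference, the writes quoted above would produce roughly the following
console nodes (illustrative sketch only; <domid>, <evtchn> and <mfn> stand
for the runtime values written by the tool):

/local/domain/0/backend/console/<domid>/0/frontend-id = "<domid>"
/local/domain/0/backend/console/<domid>/0/frontend    = "/local/domain/<domid>/console"
/local/domain/0/backend/console/<domid>/0/online      = "1"
/local/domain/0/backend/console/<domid>/0/state       = "1"   (XenbusStateInitialising)
/local/domain/0/backend/console/<domid>/0/protocol    = "vt100"
/local/domain/<domid>/console/backend    = "/local/domain/0/backend/console/<domid>/0"
/local/domain/<domid>/console/backend-id = "0"
/local/domain/<domid>/console/limit      = "1048576"
/local/domain/<domid>/console/type       = "xenconsoled"
/local/domain/<domid>/console/output     = "pty"
/local/domain/<domid>/console/tty        = ""
/local/domain/<domid>/console/port       = "<evtchn>"
/local/domain/<domid>/console/ring-ref   = "<mfn>"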

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH 1/3] xenstore: setup xenstore stubdom console interface properly

2020-02-11 Thread Andrew Cooper
On 28/01/2020 14:28, Juergen Gross wrote:
> In order to be able to get access to the console of Xenstore stubdom
> we need an appropriate granttab entry. So call xc_dom_gnttab_init()
> when constructing the domain and preset some information needed
> for that function in the dom structure.
>
> We need to create the event channel for the console, too. Do that and
> store all necessary data locally.
>
> Signed-off-by: Juergen Gross 
> ---
>  tools/helpers/init-xenstore-domain.c | 13 +
>  1 file changed, 13 insertions(+)
>
> diff --git a/tools/helpers/init-xenstore-domain.c 
> b/tools/helpers/init-xenstore-domain.c
> index adb8408b63..a312bc38b8 100644
> --- a/tools/helpers/init-xenstore-domain.c
> +++ b/tools/helpers/init-xenstore-domain.c
> @@ -24,6 +24,8 @@ static char *param;
>  static char *name = "Xenstore";
>  static int memory;
>  static int maxmem;
> +static xen_pfn_t console_mfn;
> +static unsigned int console_evtchn;
>  
>  static struct option options[] = {
>  { "kernel", 1, NULL, 'k' },
> @@ -113,6 +115,7 @@ static int build(xc_interface *xch)
>  fprintf(stderr, "xc_domain_setmaxmem failed\n");
>  goto err;
>  }
> +console_evtchn = xc_evtchn_alloc_unbound(xch, domid, 0);

Presumably some form of error checking?

Also, while it is probably fine for now, I think this does highlight a
future issue.  What happens when xenconsoled is also a stubdomain?

I suspect we have a looming chicken-and-egg problem, where the toolstack is
going to have to do some careful domid handling and plumbing to set up and
build both stubdoms in tandem.

>  rv = xc_domain_set_memmap_limit(xch, domid, limit_kb);
>  if ( rv )
>  {
> @@ -133,6 +136,9 @@ static int build(xc_interface *xch)
>  snprintf(cmdline, 512, "--event %d --internal-db", rv);
>  
>  dom = xc_dom_allocate(xch, cmdline, NULL);

Any chance of some error handling, unlikely as it is to go wrong?

> +dom->container_type = XC_DOM_PV_CONTAINER;
> +dom->xenstore_domid = domid;
> +dom->console_evtchn = console_evtchn;

and a newline here?

>  rv = xc_dom_kernel_file(dom, kernel);
>  if ( rv )
>  {
> @@ -186,6 +192,12 @@ static int build(xc_interface *xch)
>  fprintf(stderr, "xc_dom_boot_image failed\n");
>  goto err;
>  }
> +rv = xc_dom_gnttab_init(dom);
> +if ( rv )
> +{
> +fprintf(stderr, "xc_dom_gnttab_init failed\n");
> +goto err;
> +}
>  
>  rv = xc_domain_set_virq_handler(xch, domid, VIRQ_DOM_EXC);
>  if ( rv )
> @@ -201,6 +213,7 @@ static int build(xc_interface *xch)
>  }
>  
>  rv = 0;
> +console_mfn = xc_dom_p2m(dom, dom->console_pfn);

This doesn't appear to be used.

~Andrew

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

[Xen-devel] [xen-unstable-smoke test] 146882: tolerable all pass - PUSHED

2020-02-11 Thread osstest service owner
flight 146882 xen-unstable-smoke real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146882/

Failures :-/ but no regressions.

Tests which did not succeed, but are not blocking:
 test-amd64-amd64-libvirt 13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-xsm  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-xsm  14 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl  13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl  14 saverestore-support-checkfail   never pass

version targeted for testing:
 xen  6c47c37b9b40d6fe40bce8c8fd39135f6d549c8c
baseline version:
 xen  3dd724dff085e13ad520f8e35aea717db2ff07d0

Last test of basis   146838  2020-02-10 22:00:35 Z    0 days
Failing since        146871  2020-02-11 12:01:04 Z    0 days    2 attempts
Testing same since   146882  2020-02-11 16:00:54 Z    0 days    1 attempts


People who touched revisions under test:
  Andrew Cooper 
  Anthony PERARD 
  George Dunlap 
  Jan Beulich 
  Juergen Gross 
  Wei Liu 

jobs:
 build-arm64-xsm  pass
 build-amd64  pass
 build-armhf  pass
 build-amd64-libvirt  pass
 test-armhf-armhf-xl  pass
 test-arm64-arm64-xl-xsm  pass
 test-amd64-amd64-xl-qemuu-debianhvm-amd64pass
 test-amd64-amd64-libvirt pass



sg-report-flight on osstest.test-lab.xenproject.org
logs: /home/logs/logs
images: /home/logs/images

Logs, config files, etc. are available at
http://logs.test-lab.xenproject.org/osstest/logs

Explanation of these reports, and of osstest in general, is at
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README.email;hb=master
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README;hb=master

Test harness code can be found at
http://xenbits.xen.org/gitweb?p=osstest.git;a=summary


Pushing revision :

To xenbits.xen.org:/home/xen/git/xen.git
   3dd724dff0..6c47c37b9b  6c47c37b9b40d6fe40bce8c8fd39135f6d549c8c -> smoke

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] PV Shim ballooning

2020-02-11 Thread Igor Druzhinin
On 11/02/2020 18:11, Roger Pau Monné wrote:
> On Tue, Feb 11, 2020 at 05:29:09PM +, Igor Druzhinin wrote:
>> On 11/02/2020 16:42, Roger Pau Monné wrote:
>>> On Tue, Feb 11, 2020 at 04:29:36PM +, Igor Druzhinin wrote:
 Agree. But as I said I'm not aware of any guest that violates the
 invariant of decrease_reservation() being the last call.
>>>
>>> Maybe we could piggyback on whether a page is removed from the domain
>>> domheap and use that as a signal that the page should be ballooned
>>> out?
>>>
>>> There's already an arch_free_heap_page that's called when a page is
>>> removed from a domain, which might be suitable for this. It would
>>> however imply making an hypercall for every page to be ballooned out.
>>
>> I tested that - doesn't work - too many hypercalls make ballooning take
>> ages. This simply cannot be done on page-by-page basis.
> 
> Why not place them on a list (in arch_free_heap_page) and do the flush
> either after a timeout or when it gets to a certain number of
> elements?

How do you know that "certain number of elements"? How do you know what
timeout is just right? This all seems like it will introduce more problems
than it solves.

What if instead we place them on a temp list in L1 decrease_reservation()
and then will just move to another list as soon as they are freed,
then pass the whole list to L0 as soon as the first list is empty?

Igor

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

[Xen-devel] [qemu-mainline test] 146875: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146875 qemu-mainline real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146875/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 build-arm64   6 xen-buildfail REGR. vs. 144861
 build-arm64-xsm   6 xen-buildfail REGR. vs. 144861
 build-armhf   6 xen-buildfail REGR. vs. 144861
 build-amd64-xsm   6 xen-buildfail REGR. vs. 144861
 build-i386-xsm6 xen-buildfail REGR. vs. 144861
 build-amd64   6 xen-buildfail REGR. vs. 144861
 build-i3866 xen-buildfail REGR. vs. 144861

Tests which did not succeed, but are not blocking:
 build-arm64-libvirt   1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl   1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl-credit2   1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-xsm  1 build-check(1)   blocked  n/a
 test-amd64-i386-qemuu-rhel6hvm-amd  1 build-check(1)   blocked n/a
 test-amd64-i386-xl-qemuu-debianhvm-i386-xsm  1 build-check(1)  blocked n/a
 test-amd64-amd64-xl-qemuu-win7-amd64  1 build-check(1) blocked n/a
 test-amd64-amd64-xl-xsm   1 build-check(1)   blocked  n/a
 test-amd64-amd64-qemuu-nested-amd  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-pvshim1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-credit1   1 build-check(1)   blocked  n/a
 test-amd64-i386-pair  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-pvhv2-amd  1 build-check(1)   blocked  n/a
 test-armhf-armhf-xl-rtds  1 build-check(1)   blocked  n/a
 test-amd64-amd64-i386-pvgrub  1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-pair  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-raw1 build-check(1)   blocked  n/a
 test-armhf-armhf-xl-credit1   1 build-check(1)   blocked  n/a
 test-amd64-amd64-pair 1 build-check(1)   blocked  n/a
 test-armhf-armhf-xl   1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-ws16-amd64  1 build-check(1)  blocked n/a
 test-amd64-amd64-xl-credit2   1 build-check(1)   blocked  n/a
 build-armhf-libvirt   1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-pair  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-debianhvm-amd64  1 build-check(1) blocked n/a
 test-amd64-amd64-xl-qemuu-ws16-amd64  1 build-check(1) blocked n/a
 test-armhf-armhf-xl-credit2   1 build-check(1)   blocked  n/a
 test-armhf-armhf-libvirt  1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt   1 build-check(1)   blocked  n/a
 test-arm64-arm64-libvirt-xsm  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-shadow 1 build-check(1)   blocked  n/a
 test-amd64-amd64-qemuu-nested-intel  1 build-check(1)  blocked n/a
 test-amd64-amd64-xl-qemuu-ovmf-amd64  1 build-check(1) blocked n/a
 test-armhf-armhf-libvirt-raw  1 build-check(1)   blocked  n/a
 test-amd64-i386-freebsd10-i386  1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-vhd  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-debianhvm-amd64  1 build-check(1)blocked n/a
 test-amd64-amd64-libvirt  1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl-credit1   1 build-check(1)   blocked  n/a
 test-armhf-armhf-xl-arndale   1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-xsm   1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-ovmf-amd64  1 build-check(1)  blocked n/a
 test-amd64-amd64-xl-multivcpu  1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl-seattle   1 build-check(1)   blocked  n/a
 test-amd64-amd64-amd64-pvgrub  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-debianhvm-i386-xsm  1 build-check(1) blocked n/a
 test-amd64-amd64-xl-shadow1 build-check(1)   blocked  n/a
 test-amd64-i386-freebsd10-amd64  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-xsm1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 test-amd64-amd64-xl-qcow2 1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-pvshim 1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-dmrestrict-amd64-dmrestrict 1 build-check(1) blocked n/a
 test-arm64-arm64-xl-xsm   1 build-check(1) 

Re: [Xen-devel] PV Shim ballooning

2020-02-11 Thread Roger Pau Monné
On Tue, Feb 11, 2020 at 05:29:09PM +, Igor Druzhinin wrote:
> On 11/02/2020 16:42, Roger Pau Monné wrote:
> > On Tue, Feb 11, 2020 at 04:29:36PM +, Igor Druzhinin wrote:
> >> Agree. But as I said I'm not aware of any guest that violates the
> >> invariant of decrease_reservation() being the last call.
> > 
> > Maybe we could piggyback on whether a page is removed from the domain
> > domheap and use that as a signal that the page should be ballooned
> > out?
> > 
> > There's already an arch_free_heap_page that's called when a page is
> > removed from a domain, which might be suitable for this. It would
> > however imply making an hypercall for every page to be ballooned out.
> 
> I tested that - doesn't work - too many hypercalls make ballooning take
> ages. This simply cannot be done on page-by-page basis.

Why not place them on a list (in arch_free_heap_page) and do the flush
either after a timeout or when it gets to a certain number of
elements?
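
For illustration, such batching could look roughly like the sketch below
(the queue, threshold and flush function are hypothetical, not existing
shim code, and serialisation of the queue is ignored for brevity):

#include <xen/mm.h>

#define SHIM_BALLOON_BATCH 128

/* Hypothetical: one L0 XENMEM_decrease_reservation covering the batch. */
void shim_balloon_out_batch(const mfn_t *mfns, unsigned int count);

static mfn_t balloon_queue[SHIM_BALLOON_BATCH];
static unsigned int balloon_queued;

/* Hypothetical hook, e.g. called when a page leaves the domain's heap. */
static void shim_queue_balloon_out(const struct page_info *pg)
{
    balloon_queue[balloon_queued++] = page_to_mfn(pg);

    /* Flush on threshold; a timer could flush partial batches as well. */
    if ( balloon_queued == SHIM_BALLOON_BATCH )
    {
        shim_balloon_out_batch(balloon_queue, balloon_queued);
        balloon_queued = 0;
    }
}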

Roger.

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH 6/6] xen/public: Obsolete HVM_PARAM_PAE_ENABLED

2020-02-11 Thread Andrew Cooper
On 11/02/2020 17:49, Ian Jackson wrote:
> Andrew Cooper writes ("[PATCH 6/6] xen/public: Obsolete 
> HVM_PARAM_PAE_ENABLED"):
>> HVM_PARAM_PAE_ENABLED is undocumented and Xen has never acted upon its value,
>> contrary perhaps to expectations based on how other boolean fields work.
>>
>> It was only ever used as a non-standard calling convention for
>> xc_cpuid_apply_policy() but that has been fixed now.
>>
>> Purge its use, and any possible confusion over its behaviour, by having Xen
>> reject any attempts to use it.  Forgo setting it up in libxl's
>> hvm_set_conf_params().  The only backwards compatibility necessary is to have
>> the HVM restore stream discard it if found.
> This looks plausible too.  But maybe I should be reading this patch
> and the previous one together ?  Or maybe they would be better
> squashed ?
>
> If you think that is likely to make me less confused I'm happy to try
> squashing them locally and reading the result...

I don't think that is going to help.  They are two logically different
changes.

Patch 5 fixes a libxl=>libxc api which has a (deliberate) side effect of
removing the sole use of HVM_PARAM_PAE_ENABLED.

This patch takes the final steps to remove HVM_PARAM_PAE_ENABLED from
use, everywhere.  This is partly to prevent ever regaining this knobble
on the CPUID handling side of things, and eventually to reduce memory
usage in Xen (by not allocating memory for obsolete params).

~Andrew

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH 5/6] tools/libx[cl]: Don't use HVM_PARAM_PAE_ENABLED as a function parameter

2020-02-11 Thread Andrew Cooper
On 11/02/2020 17:47, Ian Jackson wrote:
> Andrew Cooper writes ("[PATCH 5/6] tools/libx[cl]: Don't use 
> HVM_PARAM_PAE_ENABLED as a function parameter"):
>> The sole use of HVM_PARAM_PAE_ENABLED is as a non-standard calling convention
>> for xc_cpuid_apply_policy().  Pass PAE as a regular parameter instead.
>>
>> Leave a rather better explanation of why only HVM guests have a choice in
>> PAE
>> setting.
> I am inclined to believe you that this is right (since you are evidently
> familiar with this whole area and I'm not), but the explanations leave
> me confused.
>
>>  int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid,
>> -  const uint32_t *featureset, unsigned int 
>> nr_features)
>> +  const uint32_t *featureset, unsigned int 
>> nr_features,
>> +  bool pae)
>>  {
>>  int rc;
>>  xc_dominfo_t di;
>> @@ -579,8 +580,6 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t 
>> domid,
>>  }
>>  else
>>  {
>> -uint64_t val;
>> -
>>  /*
>>   * Topology for HVM guests is entirely controlled by Xen.  For now, 
>> we
>>   * hardcode APIC_ID = vcpu_id * 2 to give the illusion of no SMT.
>> @@ -635,14 +634,10 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t 
>> domid,
>>  }
>>  
>>  /*
>> - * HVM_PARAM_PAE_ENABLED is a parameter to this function, stashed in
>> - * Xen.  Nothing else has ever taken notice of the value.
>> + * PAE used to be a parameter passed to this function by
>> + * HVM_PARAM_PAE_ENABLED.  It is now passed normally.
> In particular, I don't understand what these comments mean by
> "HVM_PARAM_PAE_ENABLED is a parameter to this function" and "PAE used
> to be a parameter passed to this function by HVM_PARAM_PAE_ENABLED".
>
> Maybe this is some loose use of the term "parameter" ?
>
> If you could explain more clearly (ideally, explain the meaning of the
> old comment in the commit message, and make the new comment
> unambiguous) then that would be great.

HVM_PARAM_PAE_ENABLED encapsulates a boolean meaning "should I advertise
the PAE feature to the guest?".

It has only ever been used in a way which should have been "bool pae"
passed into xc_cpuid_apply_policy().  This patch tries to do just that.
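
As a sketch of the new calling convention only (not the actual libxl
change; names are illustrative):

#include <stdbool.h>
#include <xenctrl.h>

/*
 * Apply the legacy CPUID policy for an HVM guest, passing PAE as a
 * plain argument instead of stashing it in HVM_PARAM_PAE_ENABLED first.
 * NULL/0 means "no explicit featureset", as before.
 */
static int apply_legacy_cpuid(xc_interface *xch, uint32_t domid, bool pae)
{
    return xc_cpuid_apply_policy(xch, domid, NULL, 0, pae);
}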


I think there might be confusion as to which comment the commit message
referred to.

In xc_cpuid_apply_policy(), I want a comment explaining why we have this
weird pae parameter.  It will disappear from the new way of doing CPUID
at boot, but will have to remain for the pre-4.14 compatibility.

The comment I was referring to in the commit message was actually the
libxl comment, explaining why PV and PVH guests don't get a choice to
hide the PAE feature.

~Andrew

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH 6/6] xen/public: Obsolete HVM_PARAM_PAE_ENABLED

2020-02-11 Thread Ian Jackson
Andrew Cooper writes ("[PATCH 6/6] xen/public: Obsolete HVM_PARAM_PAE_ENABLED"):
> HVM_PARAM_PAE_ENABLED is undocumented and Xen has never acted upon its value,
> contrary perhaps to expectations based on how other boolean fields work.
> 
> It was only ever used as a non-standard calling convention for
> xc_cpuid_apply_policy() but that has been fixed now.
> 
> Purge its use, and any possible confusion over its behaviour, by having Xen
> reject any attempts to use it.  Forgo setting it up in libxl's
> hvm_set_conf_params().  The only backwards compatibility necessary is to have
> the HVM restore stream discard it if found.

This looks plausible too.  But maybe I should be reading this patch
and the previous one together ?  Or maybe they would be better
squashed ?

If you think that is likely to make me less confused I'm happy to try
squashing them locally and reading the result...

Ian.

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH 5/6] tools/libx[cl]: Don't use HVM_PARAM_PAE_ENABLED as a function parameter

2020-02-11 Thread Ian Jackson
Andrew Cooper writes ("[PATCH 5/6] tools/libx[cl]: Don't use 
HVM_PARAM_PAE_ENABLED as a function parameter"):
> The sole use of HVM_PARAM_PAE_ENABLED is as a non-standard calling convention
> for xc_cpuid_apply_policy().  Pass PAE as a regular parameter instead.
> 
> Leave a rather better explanation of why only HVM guests have a choice in PAE
> setting.

I am inclined to believe you that this is right (since you are evidently
familiar with this whole area and I'm not), but the explanations leave
me confused.

>  int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid,
> -  const uint32_t *featureset, unsigned int 
> nr_features)
> +  const uint32_t *featureset, unsigned int 
> nr_features,
> +  bool pae)
>  {
>  int rc;
>  xc_dominfo_t di;
> @@ -579,8 +580,6 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t 
> domid,
>  }
>  else
>  {
> -uint64_t val;
> -
>  /*
>   * Topology for HVM guests is entirely controlled by Xen.  For now, 
> we
>   * hardcode APIC_ID = vcpu_id * 2 to give the illusion of no SMT.
> @@ -635,14 +634,10 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t 
> domid,
>  }
>  
>  /*
> - * HVM_PARAM_PAE_ENABLED is a parameter to this function, stashed in
> - * Xen.  Nothing else has ever taken notice of the value.
> + * PAE used to be a parameter passed to this function by
> + * HVM_PARAM_PAE_ENABLED.  It is now passed normally.

In particular, I don't understand what these comments mean by
"HVM_PARAM_PAE_ENABLED is a parameter to this function" and "PAE used
to be a parameter passed to this function by HVM_PARAM_PAE_ENABLED".

Maybe this is some loose use of the term "parameter" ?

If you could explain more clearly (ideally, explain the meaning of the
old comment in the commit message, and make the new comment
unambiguous) then that would be great.

Thanks,
Ian.

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH 4/6] tools/libxl: Combine legacy CPUID handling logic

2020-02-11 Thread Ian Jackson
Andrew Cooper writes ("[PATCH 4/6] tools/libxl: Combine legacy CPUID handling 
logic"):
> While we are in the process of overhauling boot time CPUID/MSR handling, the
> existing logic is going to have to remain in roughly this form for backwards
> compatibility.
> 
> Fold libxl__cpuid_apply_policy() and libxl__cpuid_set() together into a single
> libxl__cpuid_legacy() to reduce the complexity for callers.

Acked-by: Ian Jackson 

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH 3/6] tools/python: Drop cpuid helpers

2020-02-11 Thread Ian Jackson
Andrew Cooper writes ("[PATCH 3/6] tools/python: Drop cpuid helpers"):
> These are believed-unused, and the underlying infrastructure is about to be
> rewritten completely.

Acked-by: Ian Jackson 

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH 1/6] tools/libxl: Remove libxl_cpuid_{set, apply_policy}() from the API

2020-02-11 Thread Ian Jackson
Andrew Cooper writes ("[PATCH 1/6] tools/libxl: Remove 
libxl_cpuid_{set,apply_policy}() from the API"):
> These functions should never have been exposed.  They don't have external
> users, and can't usefully be used for several reasons.

Acked-by: Ian Jackson 

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] PV Shim ballooning

2020-02-11 Thread Igor Druzhinin
On 11/02/2020 16:42, Roger Pau Monné wrote:
> On Tue, Feb 11, 2020 at 04:29:36PM +, Igor Druzhinin wrote:
>> Agree. But as I said I'm not aware of any guest that violates the
>> invariant of decrease_reservation() being the last call.
> 
> Maybe we could piggyback on whether a page is removed from the domain
> domheap and use that as a signal that the page should be ballooned
> out?
> 
> There's already an arch_free_heap_page that's called when a page is
> removed from a domain, which might be suitable for this. It would
> however imply making an hypercall for every page to be ballooned out.

I tested that - doesn't work - too many hypercalls make ballooning take
ages. This simply cannot be done on page-by-page basis.

Igor

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH v2] x86/amd: Avoid cpu_has_hypervisor evaluating true on native hardware

2020-02-11 Thread Andrew Cooper
On 11/02/2020 16:59, Jan Beulich wrote:
> On 11.02.2020 17:31, Roger Pau Monné wrote:
>> On Tue, Feb 11, 2020 at 03:51:54PM +, Andrew Cooper wrote:
>>> Currently when booting native on AMD hardware, cpuidmask_defaults._1cd gets
>>> configured with the HYPERVISOR bit before native CPUID is scanned for 
>>> feature
>>> bits.
>>>
>>> This results in cpu_has_hypervisor becoming set as part of identify_cpu(), 
>>> and
>>> ends up appearing in the raw and host CPU policies.
>>>
>>> A combination of this bug, and c/s bb502a8ca59 "x86: check feature flags 
>>> after
>>> resume" which checks that feature bits don't go missing, results in broken 
>>> S3
>>> on AMD hardware.
>>>
>>> Alter amd_init_levelling() to exclude the HYPERVISOR bit from
>>> cpumask_defaults, and update domain_cpu_policy_changed() to allow it to be
>>> explicitly forwarded.
>>>
>>> This also fixes a bug on kexec, where the hypervisor bit is left enabled for
>>> the new kernel to find.
>>>
>>> These changes highlight a further bug - dom0 construction is asymmetric with
>>> domU construction, by not having any calls to domain_cpu_policy_changed().
>>> Extend arch_domain_create() to always call domain_cpu_policy_changed().
>>>
>>> Reported-by: Igor Druzhinin 
>>> Signed-off-by: Andrew Cooper 
>>> ---
>>> CC: Jan Beulich 
>>> CC: Wei Liu 
>>> CC: Roger Pau Monné 
>>> CC: Igor Druzhinin 
>>> CC: Marek Marczykowski-Górecki 
>>> CC: Claudia 
>>>
>>> v2:
>>>  * Rewrite the commit message.  No change to the patch content.
>>>
>>> Marek/Claudia: Do either of you want a Reported-by tag seeing as you found a
>>> brand new way that this was broken?
> I understand this is addressing only one half of their issue. Since
> you said you don't find it surprising, do you have any idea why the
> OSXSAVE bit is behaving differently on AMD and on Intel?

It isn't behaving differently between Intel and AMD, I don't think.

The diagnostics are asymmetric - they are only ever printed when a feature
disappears, not when a feature appears.

OSXSAVE is clear until fairly late on boot (therefore misses being
cached), but is restored as part of mmu_cr4_features (before the feature
check).

The only reason anything gets printed in the first place is because the
HYPERVISOR bit disappeared.

Overall, OSXSAVE (and in principle OSPKE if we start supporting it in PV
guests) are benign.  We could filter them out of the diagnostics but
don't currently have a suitable featuremask, and I'm not sure adding one
is worth it.

>
>>> @@ -106,6 +106,13 @@ static void domain_cpu_policy_changed(struct domain *d)
>>>  ecx = 0;
>>>  edx = cpufeat_mask(X86_FEATURE_APIC);
>>>  
>>> +/*
>>> + * If the Hypervisor bit is set in the policy, we can also
>>> + * forward it into real CPUID.
>>> + */
>>> +if ( p->basic.hypervisor )
>>> +ecx |= cpufeat_mask(X86_FEATURE_HYPERVISOR);
>> AFAICT dom0 will also get the hypervisor bit set by default, as that's
>> part of both the HVM and the PV max policy?
>>
>> If so:
>>
>> Reviewed-by: Roger Pau Monné 
> Acked-by: Jan Beulich 

Thanks.

~Andrew

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH v2] xen/sched: add some diagnostic info in the run queue keyhandler

2020-02-11 Thread Jan Beulich
On 11.02.2020 17:54, Jürgen Groß wrote:
> On 11.02.20 17:46, Jan Beulich wrote:
>> On 11.02.2020 14:10, Jürgen Groß wrote:
>>> On 11.02.20 14:01, Jan Beulich wrote:
>>>> On 11.02.2020 13:27, Juergen Gross wrote:
>>>>> When dumping the run queue information add some more data regarding
>>>>> current and (if known) previous vcpu for each physical cpu.
>>>>>
>>>>> With core scheduling activated the printed data will be e.g.:
>>>>>
>>>>> (XEN) CPUs info:
>>>>> (XEN) CPU[00] current=d[IDLE]v0, curr=d[IDLE]v0, prev=NULL
>>>>> (XEN) CPU[01] current=d[IDLE]v1
>>>>> (XEN) CPU[02] current=d[IDLE]v2, curr=d[IDLE]v2, prev=NULL
>>>>> (XEN) CPU[03] current=d[IDLE]v3
>>>>>
>>>>> Signed-off-by: Juergen Gross 
>>>>> ---
>>>>> V2: add proper locking
>>>>
>>>> "Proper" is ambiguous in the context of dumping functions. In a
>>>> number of places we use try-lock, to avoid the dumping hanging
>>>> on something else monopolizing the lock. I'd like to suggest to
>>>> do so here, too.
>>>
>>> All the scheduler related dumping functions are using the "real" locks.
>>> So using trylock in this single case wouldn't help at all. Additionally
>>> using trylock only would make a crash during dumping the data more
>>> probable, so I'm not sure we want that.
>>
>> Why would it make a crash more likely? If you can't get the lock,
>> you'd simply skip dumping.
> 
> Ah, okay, then I misunderstood your intention.
> 
> I still think that this should be done not only in one place, but in a
> more general fashion. I'd rather give up only after some time trying
> (1 millisecond per default?) and apply the same scheme to all dumping
> functions.
> 
> I can have a try for such a series if you agree on taking a more general
> approach.

Getting behavior consistent across key handlers would of course
be very nice.

Jan

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH v2] x86/amd: Avoid cpu_has_hypervisor evaluating true on native hardware

2020-02-11 Thread Jan Beulich
On 11.02.2020 17:31, Roger Pau Monné wrote:
> On Tue, Feb 11, 2020 at 03:51:54PM +, Andrew Cooper wrote:
>> Currently when booting native on AMD hardware, cpuidmask_defaults._1cd gets
>> configured with the HYPERVISOR bit before native CPUID is scanned for feature
>> bits.
>>
>> This results in cpu_has_hypervisor becoming set as part of identify_cpu(), 
>> and
>> ends up appearing in the raw and host CPU policies.
>>
>> A combination of this bug, and c/s bb502a8ca59 "x86: check feature flags 
>> after
>> resume" which checks that feature bits don't go missing, results in broken S3
>> on AMD hardware.
>>
>> Alter amd_init_levelling() to exclude the HYPERVISOR bit from
>> cpumask_defaults, and update domain_cpu_policy_changed() to allow it to be
>> explicitly forwarded.
>>
>> This also fixes a bug on kexec, where the hypervisor bit is left enabled for
>> the new kernel to find.
>>
>> These changes highlight a further bug - dom0 construction is asymmetric with
>> domU construction, by not having any calls to domain_cpu_policy_changed().
>> Extend arch_domain_create() to always call domain_cpu_policy_changed().
>>
>> Reported-by: Igor Druzhinin 
>> Signed-off-by: Andrew Cooper 
>> ---
>> CC: Jan Beulich 
>> CC: Wei Liu 
>> CC: Roger Pau Monné 
>> CC: Igor Druzhinin 
>> CC: Marek Marczykowski-Górecki 
>> CC: Claudia 
>>
>> v2:
>>  * Rewrite the commit message.  No change to the patch content.
>>
>> Marek/Claudia: Do either of you want a Reported-by tag seeing as you found a
>> brand new way that this was broken?

I understand this is addressing only one half of their issue. Since
you said you don't find it surprising, do you have any idea why the
OSXSAVE bit is behaving differently on AMD and on Intel?

>> @@ -106,6 +106,13 @@ static void domain_cpu_policy_changed(struct domain *d)
>>  ecx = 0;
>>  edx = cpufeat_mask(X86_FEATURE_APIC);
>>  
>> +/*
>> + * If the Hypervisor bit is set in the policy, we can also
>> + * forward it into real CPUID.
>> + */
>> +if ( p->basic.hypervisor )
>> +ecx |= cpufeat_mask(X86_FEATURE_HYPERVISOR);
> 
> AFAICT dom0 will also get the hypervisor bit set by default, as that's
> part of both the HVM and the PV max policy?
> 
> If so:
> 
> Reviewed-by: Roger Pau Monné 

Acked-by: Jan Beulich 



___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH v2] xen/sched: add some diagnostic info in the run queue keyhandler

2020-02-11 Thread Jürgen Groß

On 11.02.20 17:46, Jan Beulich wrote:
> On 11.02.2020 14:10, Jürgen Groß wrote:
>> On 11.02.20 14:01, Jan Beulich wrote:
>>> On 11.02.2020 13:27, Juergen Gross wrote:
>>>> When dumping the run queue information add some more data regarding
>>>> current and (if known) previous vcpu for each physical cpu.
>>>>
>>>> With core scheduling activated the printed data will be e.g.:
>>>>
>>>> (XEN) CPUs info:
>>>> (XEN) CPU[00] current=d[IDLE]v0, curr=d[IDLE]v0, prev=NULL
>>>> (XEN) CPU[01] current=d[IDLE]v1
>>>> (XEN) CPU[02] current=d[IDLE]v2, curr=d[IDLE]v2, prev=NULL
>>>> (XEN) CPU[03] current=d[IDLE]v3
>>>>
>>>> Signed-off-by: Juergen Gross 
>>>> ---
>>>> V2: add proper locking
>>>
>>> "Proper" is ambiguous in the context of dumping functions. In a
>>> number of places we use try-lock, to avoid the dumping hanging
>>> on something else monopolizing the lock. I'd like to suggest to
>>> do so here, too.
>>
>> All the scheduler related dumping functions are using the "real" locks.
>> So using trylock in this single case wouldn't help at all. Additionally
>> using trylock only would make a crash during dumping the data more
>> probable, so I'm not sure we want that.
>
> Why would it make a crash more likely? If you can't get the lock,
> you'd simply skip dumping.

Ah, okay, then I misunderstood your intention.

I still think that this should be done not only in one place, but in a
more general fashion. I'd rather give up only after some time trying
(1 millisecond per default?) and apply the same scheme to all dumping
functions.

I can have a try for such a series if you agree on taking a more general
approach.
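
As a rough illustration only (helper name and plumbing are hypothetical,
not existing Xen code), such a generic timed trylock for the dumping
keyhandlers could look like:

#include <xen/spinlock.h>
#include <xen/time.h>
#include <asm/processor.h>

/*
 * Try to take a lock for a dumping keyhandler, giving up after a short
 * timeout so a wedged lock holder cannot hang the handler.  Callers
 * skip their dump when this returns false.
 */
static bool dump_trylock_timeout(spinlock_t *lock, unsigned int timeout_us)
{
    s_time_t deadline = NOW() + MICROSECS(timeout_us);

    do {
        if ( spin_trylock(lock) )
            return true;
        cpu_relax();
    } while ( NOW() < deadline );

    return false;
}

A dump handler would then try its lock for, say, 1000us and simply skip
the dump if that fails.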


Juergen

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH v2] xen/sched: add some diagnostic info in the run queue keyhandler

2020-02-11 Thread Jan Beulich
On 11.02.2020 14:10, Jürgen Groß wrote:
> On 11.02.20 14:01, Jan Beulich wrote:
>> On 11.02.2020 13:27, Juergen Gross wrote:
>>> When dumping the run queue information add some more data regarding
>>> current and (if known) previous vcpu for each physical cpu.
>>>
>>> With core scheduling activated the printed data will be e.g.:
>>>
>>> (XEN) CPUs info:
>>> (XEN) CPU[00] current=d[IDLE]v0, curr=d[IDLE]v0, prev=NULL
>>> (XEN) CPU[01] current=d[IDLE]v1
>>> (XEN) CPU[02] current=d[IDLE]v2, curr=d[IDLE]v2, prev=NULL
>>> (XEN) CPU[03] current=d[IDLE]v3
>>>
>>> Signed-off-by: Juergen Gross 
>>> ---
>>> V2: add proper locking
>>
>> "Proper" is ambiguous in the context of dumping functions. In a
>> number of places we use try-lock, to avoid the dumping hanging
>> on something else monopolizing the lock. I'd like to suggest to
>> do so here, too.
> 
> All the scheduler related dumping functions are using the "real" locks.
> So using trylock in this single case wouldn't help at all. Additionally
> using trylock only would make a crash during dumping the data more
> probable, so I'm not sure we want that.

Why would it make a crash more likely? If you can't get the lock,
you'd simply skip dumping.

Jan

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

[Xen-devel] [ovmf test] 146846: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146846 ovmf real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146846/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-amd64-amd64-xl-qemuu-ovmf-amd64 10 debian-hvm-install fail REGR. vs. 145767
 build-i3866 xen-buildfail REGR. vs. 145767

Tests which did not succeed, but are not blocking:
 test-amd64-i386-xl-qemuu-ovmf-amd64  1 build-check(1)  blocked n/a
 build-i386-libvirt1 build-check(1)   blocked  n/a

version targeted for testing:
 ovmf 67ead55b35e16a5de5f4695eb61cb484465e0009
baseline version:
 ovmf 70911f1f4aee0366b6122f2b90d367ec0f066beb

Last test of basis   145767  2020-01-08 00:39:09 Z   34 days
Failing since        145774  2020-01-08 02:50:20 Z   34 days  120 attempts
Testing same since   146846  2020-02-11 09:09:56 Z    0 days    1 attempts


People who touched revisions under test:
  Aaron Li 
  Albecki, Mateusz 
  Amol N Sukerkar 
  Anthony PERARD 
  Antoine Coeur 
  Ard Biesheuvel 
  Ashish Singhal 
  Bob Feng 
  Bret Barkelew 
  Brian R Haug 
  Eric Dong 
  Fan, ZhijuX 
  Guo Dong 
  Hao A Wu 
  Heng Luo 
  Jason Voelz 
  Jeff Brasen 
  Jian J Wang 
  Kinney, Michael D 
  Krzysztof Koch 
  Laszlo Ersek 
  Leif Lindholm 
  Li, Aaron 
  Liming Gao 
  Liu, Zhiguang 
  Mateusz Albecki 
  Matthew Carlson 
  Michael D Kinney 
  Michael Kubacki 
  Pavana.K 
  Philippe Mathieu-Daudé 
  Philippe Mathieu-Daude 
  Philippe Mathieu-Daudé 
  Philippe Mathieu-Daudé 
  Pierre Gondois 
  Sean Brogan 
  Siyuan Fu 
  Siyuan, Fu 
  Steven 
  Steven Shi 
  Sudipto Paul 
  Vitaly Cheptsov 
  Vitaly Cheptsov via Groups.Io 
  Wei6 Xu 
  Xu, Wei6 
  Zhichao Gao 
  Zhiguang Liu 
  Zhiju.Fan 

jobs:
 build-amd64-xsm  pass
 build-i386-xsm   pass
 build-amd64  pass
 build-i386   fail
 build-amd64-libvirt  pass
 build-i386-libvirt   blocked 
 build-amd64-pvopspass
 build-i386-pvops pass
 test-amd64-amd64-xl-qemuu-ovmf-amd64 fail
 test-amd64-i386-xl-qemuu-ovmf-amd64  blocked 



sg-report-flight on osstest.test-lab.xenproject.org
logs: /home/logs/logs
images: /home/logs/images

Logs, config files, etc. are available at
http://logs.test-lab.xenproject.org/osstest/logs

Explanation of these reports, and of osstest in general, is at
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README.email;hb=master
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README;hb=master

Test harness code can be found at
http://xenbits.xen.org/gitweb?p=osstest.git;a=summary


Not pushing.

(No revision log; it would be 4784 lines long.)

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] PV Shim ballooning

2020-02-11 Thread Roger Pau Monné
On Tue, Feb 11, 2020 at 04:29:36PM +, Igor Druzhinin wrote:
> On 11/02/2020 16:01, Roger Pau Monné wrote:
> > On Tue, Feb 11, 2020 at 01:39:42PM +, Andrew Cooper wrote:
> >> Shim can't decrease reservation (HVM with L0 Xen) on any frame who's
> >> reference count didn't drop to 0 from the PV guests' call, and there is
> >> nothing presently to check this condition.
> > 
> > But shim will only balloon out free domheap pages (as it gets them
> > from alloc_domheap_pages), and those shouldn't have any reference by
> > the guest?
> 
> Correct, however all the guests that we test in XenRT behave properly.
> I'm not aware of any guest that keeps references after calling
> decrease_reservation().
> 
> >> Short of a PGC bit and extra shim logic in free_domheap_page(), I can't
> >> see any way to reconcile the behaviour, except to change the semantics
> >> of decrease reservation for PV guests.  In practice, this would be far
> >> more sensible behaviour, but we have no idea if existing PV guests would
> >> manage.
> > 
> > Hm, I guess we could add some hook to free_domheap_page in order to
> > remove them from the physmap once the guest frees them?
> >
> > How does Xen know which pages freed by a PV guest should be ballooned
> > out?
> 
> It doesn't currently.

Well, not when running on the shim, but I guess when running as a
classic PV guest the reservation for the guest will be lowered (so
that after the call to decrease_reservation the guest will have an
overcommit of memory) and pages would be removed from the domheap as
references are dropped.

> 
> > Is that done solely based on the fact that those pages don't have any
> > reference?
> 
> Yes.
> 
> > That doesn't seem like a viable option unless we add a new bit to the
> > page struct in order to signal that those pages should be ballooned
> > out once freed, as you suggest.
> 
> Agree. But as I said I'm not aware of any guest that violates the
> invariant of decrease_reservation() being the last call.

Maybe we could piggyback on whether a page is removed from the domain
domheap and use that as a signal that the page should be ballooned
out?

There's already an arch_free_heap_page that's called when a page is
removed from a domain, which might be suitable for this. It would
however imply making an hypercall for every page to be ballooned out.
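To make the idea concrete, a rough sketch of such a hook could look like the
following (illustrative only: PGC_shim_balloon and pv_shim_balloon_out() are
made-up names, not existing Xen symbols, and the spare page-flag bit is an
assumption):

/* Sketch: flag PV frames whose release should be propagated to L0 Xen. */
#define PGC_shim_balloon  PGC_extra      /* assumes a spare PGC_* bit */

static void shim_free_heap_page_hook(struct domain *d, struct page_info *pg)
{
    if ( !pv_shim || !(pg->count_info & PGC_shim_balloon) )
        return;

    pg->count_info &= ~PGC_shim_balloon;

    /*
     * The frame's reference count has now really dropped to zero, so it is
     * safe to hand it back to L0.  As noted above, doing it here means one
     * decrease-reservation hypercall per page unless batching is added.
     */
    pv_shim_balloon_out(page_to_mfn(pg));
}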

Thanks, Roger.


Re: [Xen-devel] [PATCH v2] x86/amd: Avoid cpu_has_hypervisor evaluating true on native hardware

2020-02-11 Thread Andrew Cooper
On 11/02/2020 16:31, Roger Pau Monné wrote:
> On Tue, Feb 11, 2020 at 03:51:54PM +, Andrew Cooper wrote:
>> diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
>> index 4fa9c91140..ce76d6d776 100644
>> --- a/xen/arch/x86/domctl.c
>> +++ b/xen/arch/x86/domctl.c
>> @@ -48,7 +48,7 @@ static int gdbsx_guest_mem_io(domid_t domid, struct xen_domctl_gdbsx_memio *iop)
>>  }
>>  #endif
>>  
>> -static void domain_cpu_policy_changed(struct domain *d)
>> +void domain_cpu_policy_changed(struct domain *d)
>>  {
>>  const struct cpuid_policy *p = d->arch.cpuid;
>>  struct vcpu *v;
>> @@ -106,6 +106,13 @@ static void domain_cpu_policy_changed(struct domain *d)
>>  ecx = 0;
>>  edx = cpufeat_mask(X86_FEATURE_APIC);
>>  
>> +/*
>> + * If the Hypervisor bit is set in the policy, we can also
>> + * forward it into real CPUID.
>> + */
>> +if ( p->basic.hypervisor )
>> +ecx |= cpufeat_mask(X86_FEATURE_HYPERVISOR);
> AFAICT dom0 will also get the hypervisor bit set by default, as that's
> part of both the HVM and the PV max policy?

Correct.

>
> If so:
>
> Reviewed-by: Roger Pau Monné 

Thanks.

~Andrew


Re: [Xen-devel] [PATCH v2] x86/amd: Avoid cpu_has_hypervisor evaluating true on native hardware

2020-02-11 Thread Roger Pau Monné
On Tue, Feb 11, 2020 at 03:51:54PM +, Andrew Cooper wrote:
> Currently when booting native on AMD hardware, cpuidmask_defaults._1cd gets
> configured with the HYPERVISOR bit before native CPUID is scanned for feature
> bits.
> 
> This results in cpu_has_hypervisor becoming set as part of identify_cpu(), and
> ends up appearing in the raw and host CPU policies.
> 
> A combination of this bug, and c/s bb502a8ca59 "x86: check feature flags after
> resume" which checks that feature bits don't go missing, results in broken S3
> on AMD hardware.
> 
> Alter amd_init_levelling() to exclude the HYPERVISOR bit from
> cpuidmask_defaults, and update domain_cpu_policy_changed() to allow it to be
> explicitly forwarded.
> 
> This also fixes a bug on kexec, where the hypervisor bit is left enabled for
> the new kernel to find.
> 
> These changes highlight a further bug - dom0 construction is asymmetric with
> domU construction, by not having any calls to domain_cpu_policy_changed().
> Extend arch_domain_create() to always call domain_cpu_policy_changed().
> 
> Reported-by: Igor Druzhinin 
> Signed-off-by: Andrew Cooper 
> ---
> CC: Jan Beulich 
> CC: Wei Liu 
> CC: Roger Pau Monné 
> CC: Igor Druzhinin 
> CC: Marek Marczykowski-Górecki 
> CC: Claudia 
> 
> v2:
>  * Rewrite the commit message.  No change to the patch content.
> 
> Marek/Claudia: Do either of you want a Reported-by tag seeing as you found a
> brand new way that this was broken?
> ---
>  xen/arch/x86/cpu/amd.c   | 3 ---
>  xen/arch/x86/domain.c| 2 ++
>  xen/arch/x86/domctl.c| 9 -
>  xen/include/asm-x86/domain.h | 2 ++
>  4 files changed, 12 insertions(+), 4 deletions(-)
> 
> diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
> index e351dd227f..f95a8e0fd3 100644
> --- a/xen/arch/x86/cpu/amd.c
> +++ b/xen/arch/x86/cpu/amd.c
> @@ -298,9 +298,6 @@ static void __init noinline amd_init_levelling(void)
>   ecx |= cpufeat_mask(X86_FEATURE_OSXSAVE);
>   edx |= cpufeat_mask(X86_FEATURE_APIC);
>  
> - /* Allow the HYPERVISOR bit to be set via guest policy. */
> - ecx |= cpufeat_mask(X86_FEATURE_HYPERVISOR);
> -
>   cpuidmask_defaults._1cd = ((uint64_t)ecx << 32) | edx;
>   }
>  
> diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
> index f53ae5ff86..12bd554391 100644
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -656,6 +656,8 @@ int arch_domain_create(struct domain *d,
>   */
>  d->arch.x87_fip_width = cpu_has_fpu_sel ? 0 : 8;
>  
> +domain_cpu_policy_changed(d);
> +
>  return 0;
>  
>   fail:
> diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
> index 4fa9c91140..ce76d6d776 100644
> --- a/xen/arch/x86/domctl.c
> +++ b/xen/arch/x86/domctl.c
> @@ -48,7 +48,7 @@ static int gdbsx_guest_mem_io(domid_t domid, struct xen_domctl_gdbsx_memio *iop)
>  }
>  #endif
>  
> -static void domain_cpu_policy_changed(struct domain *d)
> +void domain_cpu_policy_changed(struct domain *d)
>  {
>  const struct cpuid_policy *p = d->arch.cpuid;
>  struct vcpu *v;
> @@ -106,6 +106,13 @@ static void domain_cpu_policy_changed(struct domain *d)
>  ecx = 0;
>  edx = cpufeat_mask(X86_FEATURE_APIC);
>  
> +/*
> + * If the Hypervisor bit is set in the policy, we can also
> + * forward it into real CPUID.
> + */
> +if ( p->basic.hypervisor )
> +ecx |= cpufeat_mask(X86_FEATURE_HYPERVISOR);

AFAICT dom0 will also get the hypervisor bit set by default, as that's
part of both the HVM and the PV max policy?

If so:

Reviewed-by: Roger Pau Monné 

Thanks, Roger.


Re: [Xen-devel] PV Shim ballooning

2020-02-11 Thread Igor Druzhinin
On 11/02/2020 16:01, Roger Pau Monné wrote:
> On Tue, Feb 11, 2020 at 01:39:42PM +, Andrew Cooper wrote:
> >> Shim can't decrease reservation (HVM with L0 Xen) on any frame whose
>> reference count didn't drop to 0 from the PV guests' call, and there is
>> nothing presently to check this condition.
> 
> But shim will only balloon out free domheap pages (as it gets them
> from alloc_domheap_pages), and those shouldn't have any reference by
> the guest?

Correct, however all the guests that we test in XenRT behave properly.
I'm not aware of any guest that keeps references after calling
decrease_reservation().

>> Short of a PGC bit and extra shim logic in free_domheap_page(), I can't
>> see any way to reconcile the behaviour, except to change the semantics
>> of decrease reservation for PV guests.  In practice, this would be far
>> more sensible behaviour, but we have no idea if existing PV guests would
>> manage.
> 
> Hm, I guess we could add some hook to free_domheap_page in order to
> remove them from the physmap once the guest frees them?
>
> How does Xen know which pages freed by a PV guest should be ballooned
> out?

It doesn't currently.

> Is that done solely based on the fact that those pages don't have any
> reference?

Yes.

> That doesn't seem like a viable option unless we add a new bit to the
> page struct in order to signal that those pages should be ballooned
> out once freed, as you suggest.

Agree. But as I said I'm not aware of any guest that violates the
invariant of decrease_reservation() being the last call.

Igor


Re: [Xen-devel] PV Shim ballooning

2020-02-11 Thread Roger Pau Monné
On Tue, Feb 11, 2020 at 01:39:42PM +, Andrew Cooper wrote:
> Ballooning inside PV shim is currently very broken.
> 
> From an instrumented Xen and 32bit PV XTF test:
> 
> (d3) (d3) --- Xen Test Framework ---
> (d3) (d3) Ballooning: PV 32bit (PAE 3 levels)
> (d3) (d3) mr { 0010a940, 1024, 0x7ff0 }
> (d3) (d3) About to decrease
> (d3) (XEN) *** D { 82008020, nr 1020, done 0 }
> (d3) (XEN) d3v0 failed to reserve 267 extents of order 0 for offlining
> (d3) (XEN) *** D { 82007fffe040, nr 1024, done 1020 }
> (d3) (XEN) d3v0 failed to reserve 1024 extents of order 0 for offlining
> (d3) (d3) => got 1024
> 
> This test takes 1024 frames and calls decrease reservation on them,
> before unmapping.  i.e. the decrease reservation should fail.  Shim
> successfully offlines 753 pages (nothing to do with the frames the guest
> selected), and fails to offline 1291, and despite this, returns success.
> 
> First of all, the "failed to reserve" is in pv_shim_offline_memory()
> which is a void function that has a semantically relevant failure case. 
> This obviously isn't ok.

So on failure to reserve the pages for offlining we should likely add
them again to the domU and return the number of pages that have been
fully offlined?

> Not sure if that's doable, but I think by poking at the extents list
Xen should be able to repopulate the entries.

> 
> Second, the way the compat code loops over the translated data is
> incompatible with how args.nr_done is used for the call into
> pv_shim_offline_memory().

Oh, I would have to check that, I tend to get lost in compat code. The
code in pv_shim_offline_memory assumes that args.nr_done will contain
the total amount of successfully ballooned out pages.

> Why is pv_shim_offline_memory() not in decrease_reservation() to begin with?

I guess to try to batch the decrease into a single call to
batch_memory_op, and to keep the symmetry with the call to
pv_shim_online_memory.

But most of this was done in a hurry, so it's likely it's just there
because that's the first place that seemed sensible enough.

> Furthermore, there is a fundamental difference in ballooning behaviour
> between PV and HVM guests, which I don't think we can compensate for. 
> PV guests need to call decrease reservation once to release the frames,
> and unmap the frames (in any order).  HVM guests calling decrease
> reservation automatically make the frame unusable no matter how many
> outstanding references exist.

Ouch, so you can call XENMEM_decrease_reservation and then unmap the
pages from the guest page-tables and they will be ballooned out?

TBH I had no idea this was possible, I've mostly assumed a model
similar with HVM, where you call decrease_reservation and the pages
are just removed from the physmap.

> Shim can't decrease reservation (HVM with L0 Xen) on any frame whose
> reference count didn't drop to 0 from the PV guests' call, and there is
> nothing presently to check this condition.

But shim will only balloon out free domheap pages (as it gets them
from alloc_domheap_pages), and those shouldn't have any reference by
the guest?

> Short of a PGC bit and extra shim logic in free_domheap_page(), I can't
> see any way to reconcile the behaviour, except to change the semantics
> of decrease reservation for PV guests.  In practice, this would be far
> more sensible behaviour, but we have no idea if existing PV guests would
> manage.

Hm, I guess we could add some hook to free_domheap_page in order to
remove them from the physmap once the guest frees them?

How does Xen know which pages freed by a PV guest should be ballooned
out?

Is that done solely based on the fact that those pages don't have any
reference?

That doesn't seem like a viable option unless we add a new bit to the
page struct in order to signal that those pages should be ballooned
out once freed, as you suggest.

Roger.


[Xen-devel] [PATCH v2] x86/amd: Avoid cpu_has_hypervisor evaluating true on native hardware

2020-02-11 Thread Andrew Cooper
Currently when booting native on AMD hardware, cpuidmask_defaults._1cd gets
configured with the HYPERVISOR bit before native CPUID is scanned for feature
bits.

This results in cpu_has_hypervisor becoming set as part of identify_cpu(), and
ends up appearing in the raw and host CPU policies.

A combination of this bug, and c/s bb502a8ca59 "x86: check feature flags after
resume" which checks that feature bits don't go missing, results in broken S3
on AMD hardware.

Alter amd_init_levelling() to exclude the HYPERVISOR bit from
cpuidmask_defaults, and update domain_cpu_policy_changed() to allow it to be
explicitly forwarded.

This also fixes a bug on kexec, where the hypervisor bit is left enabled for
the new kernel to find.

These changes highlight a further bug - dom0 construction is asymmetric with
domU construction, by not having any calls to domain_cpu_policy_changed().
Extend arch_domain_create() to always call domain_cpu_policy_changed().

Reported-by: Igor Druzhinin 
Signed-off-by: Andrew Cooper 
---
CC: Jan Beulich 
CC: Wei Liu 
CC: Roger Pau Monné 
CC: Igor Druzhinin 
CC: Marek Marczykowski-Górecki 
CC: Claudia 

v2:
 * Rewrite the commit message.  No change to the patch content.

Marek/Claudia: Do either of you want a Reported-by tag seeing as you found a
brand new way that this was broken?
---
 xen/arch/x86/cpu/amd.c   | 3 ---
 xen/arch/x86/domain.c| 2 ++
 xen/arch/x86/domctl.c| 9 -
 xen/include/asm-x86/domain.h | 2 ++
 4 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index e351dd227f..f95a8e0fd3 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -298,9 +298,6 @@ static void __init noinline amd_init_levelling(void)
ecx |= cpufeat_mask(X86_FEATURE_OSXSAVE);
edx |= cpufeat_mask(X86_FEATURE_APIC);
 
-   /* Allow the HYPERVISOR bit to be set via guest policy. */
-   ecx |= cpufeat_mask(X86_FEATURE_HYPERVISOR);
-
cpuidmask_defaults._1cd = ((uint64_t)ecx << 32) | edx;
}
 
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index f53ae5ff86..12bd554391 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -656,6 +656,8 @@ int arch_domain_create(struct domain *d,
  */
 d->arch.x87_fip_width = cpu_has_fpu_sel ? 0 : 8;
 
+domain_cpu_policy_changed(d);
+
 return 0;
 
  fail:
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 4fa9c91140..ce76d6d776 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -48,7 +48,7 @@ static int gdbsx_guest_mem_io(domid_t domid, struct xen_domctl_gdbsx_memio *iop)
 }
 #endif
 
-static void domain_cpu_policy_changed(struct domain *d)
+void domain_cpu_policy_changed(struct domain *d)
 {
 const struct cpuid_policy *p = d->arch.cpuid;
 struct vcpu *v;
@@ -106,6 +106,13 @@ static void domain_cpu_policy_changed(struct domain *d)
 ecx = 0;
 edx = cpufeat_mask(X86_FEATURE_APIC);
 
+/*
+ * If the Hypervisor bit is set in the policy, we can also
+ * forward it into real CPUID.
+ */
+if ( p->basic.hypervisor )
+ecx |= cpufeat_mask(X86_FEATURE_HYPERVISOR);
+
 mask |= ((uint64_t)ecx << 32) | edx;
 break;
 }
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index f0c25ffec0..1843c76d1a 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -624,6 +624,8 @@ struct guest_memory_policy
 void update_guest_memory_policy(struct vcpu *v,
 struct guest_memory_policy *policy);
 
+void domain_cpu_policy_changed(struct domain *d);
+
 bool update_runstate_area(struct vcpu *);
 bool update_secondary_system_time(struct vcpu *,
   struct vcpu_time_info *);
-- 
2.11.0



Re: [Xen-devel] [PATCH 0/3] tools/xenstore

2020-02-11 Thread Jürgen Groß

On 28.01.20 15:28, Juergen Gross wrote:

Some patches for Xenstore-stubdom which have been lying around in my
local tree for some time now.

Juergen Gross (3):
   xenstore: setup xenstore stubdom console interface properly
   xenstore: add console xenstore entries for xenstore stubdom
   xenstore: remove not applicable control commands in stubdom

  tools/helpers/init-xenstore-domain.c | 44 +++-
  tools/xenstore/xenstored_control.c   | 18 +++
  2 files changed, 61 insertions(+), 1 deletion(-)



Ping?


Juergen


[Xen-devel] [xen-unstable-smoke test] 146871: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146871 xen-unstable-smoke real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146871/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 build-amd64                   6 xen-build                fail REGR. vs. 146838

Tests which did not succeed, but are not blocking:
 test-amd64-amd64-xl-qemuu-debianhvm-amd64  1 build-check(1)blocked n/a
 build-amd64-libvirt   1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt  1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl-xsm  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-xsm  14 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl  13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl  14 saverestore-support-checkfail   never pass

version targeted for testing:
 xen  4e9929f5bde62e19653a4c7f5792648f56ef35ab
baseline version:
 xen  3dd724dff085e13ad520f8e35aea717db2ff07d0

Last test of basis   146838  2020-02-10 22:00:35 Z0 days
Testing same since   146871  2020-02-11 12:01:04 Z0 days1 attempts


People who touched revisions under test:
  Andrew Cooper 
  Anthony PERARD 
  Jan Beulich 
  Juergen Gross 
  Wei Liu 

jobs:
 build-arm64-xsm  pass
 build-amd64  fail
 build-armhf  pass
 build-amd64-libvirt  blocked 
 test-armhf-armhf-xl  pass
 test-arm64-arm64-xl-xsm  pass
 test-amd64-amd64-xl-qemuu-debianhvm-amd64blocked 
 test-amd64-amd64-libvirt blocked 



sg-report-flight on osstest.test-lab.xenproject.org
logs: /home/logs/logs
images: /home/logs/images

Logs, config files, etc. are available at
http://logs.test-lab.xenproject.org/osstest/logs

Explanation of these reports, and of osstest in general, is at
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README.email;hb=master
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README;hb=master

Test harness code can be found at
http://xenbits.xen.org/gitweb?p=osstest.git;a=summary


Not pushing.


commit 4e9929f5bde62e19653a4c7f5792648f56ef35ab
Author: Andrew Cooper 
Date:   Wed Dec 4 18:21:04 2019 +

x86/vmx: Shrink TASK_SWITCH's hvm_task_switch_reason reasons[]

No need to use 4-byte integers to store two bits of information.

Signed-off-by: Andrew Cooper 
Reviewed-by: Kevin Tian 
Reviewed-by: Jan Beulich 

commit 3d3cf4bea8e5bd8edb3ae57a9b2f15449f1fbcb4
Author: Anthony PERARD 
Date:   Mon Jan 20 11:50:53 2020 +

tools: Default to python3

Main reason, newer version of QEMU doesn't support python 2.x anymore.
Second main reason, python2 is EOL.

Signed-off-by: Anthony PERARD 
Acked-by: Wei Liu 

commit 6ee10313623c1f41fc72fe12372e176e744463c1
Author: Andrew Cooper 
Date:   Mon Feb 10 18:33:26 2020 +

x86/pvh: Adjust dom0's starting state

Fixes: b25fb1a04e "xen/pvh: Fix segment selector ABI"
Signed-off-by: Andrew Cooper 
Reviewed-by: Wei Liu 
Acked-by: Jan Beulich 

commit 32db853d95fc8a3bf107c896bad7e4298a547ac9
Author: Andrew Cooper 
Date:   Sun Feb 2 18:23:47 2020 +

AMD/IOMMU: Treat head/tail pointers as byte offsets

The MMIO registers are already byte offsets.  Using them in this form removes
the need to shift their values for use.

It is also inefficient to store both entries and alloc_size (which only differ
by entry_size).  Rename alloc_size to size, and drop entries entirely, which
simplifies the allocation/deallocation helpers slightly.

Mark send_iommu_command() and invalidate_iommu_all() as static, as they have
no external declaration or callers.

Signed-off-by: Andrew Cooper 
Reviewed-by: Jan Beulich 

commit 6bb06bb0eb1528319998de602f969c474396c306
Author: Juergen Gross 
Date:   Tue Feb 11 10:44:18 2020 +0100

xen/sched: remove pointless BUG_ON() in credit2

The BUG_ON() at the top of csched2_context_saved() is completely
pointless, as the ASSERT() just following it catches the same problem
already.

Signed-off-by: Juergen Gross 
Reviewed-by: Dario Faggioli 
(qemu changes not included)


Re: [Xen-devel] [Vote] Approve hypervisor project check-in policy

2020-02-11 Thread George Dunlap

> On Jan 27, 2020, at 2:12 PM, George Dunlap  wrote:
> 
> I have drafted an explicit policy on what is (generally) required to
> check a patch in.  It's been through several rounds, and v4 has been
> acked [1].
> 
> I've had informal assent from all committers, but just to dot all our
> i's and cross all our t's, it's probably worth having a vote of the
> committers, in line with the XenProject governance policy [1].
> 
> Please respond by 10 February with your vote:
> +1: for proposal
> -1: against proposal
> in public or private.

By my count we have 4 +1’s and no objections, so the policy is approved.  I’ll 
check in the patch modifying MAINTAINERS.

 -George


[Xen-devel] [PATCH] AMD/IOMMU: Remove unused iommu_get_addr_{lo, hi}_from_cmd() helpers

2020-02-11 Thread Andrew Cooper
These were introduced in 262bb227a4 in 2012, and have never had any users.

Signed-off-by: Andrew Cooper 
---
CC: Jan Beulich 
CC: Wei Liu 
CC: Roger Pau Monné 
---
 xen/drivers/passthrough/amd/iommu-defs.h |  5 -
 xen/drivers/passthrough/amd/iommu.h  | 13 -
 2 files changed, 18 deletions(-)

diff --git a/xen/drivers/passthrough/amd/iommu-defs.h 
b/xen/drivers/passthrough/amd/iommu-defs.h
index 50613ca150..5eb7fc7ffb 100644
--- a/xen/drivers/passthrough/amd/iommu-defs.h
+++ b/xen/drivers/passthrough/amd/iommu-defs.h
@@ -485,11 +485,6 @@ struct amd_iommu_pte {
 #define IOMMU_CMD_DEVICE_ID_MASK    0x0000FFFF
 #define IOMMU_CMD_DEVICE_ID_SHIFT   0
 
-#define IOMMU_CMD_ADDR_LOW_MASK     0xFFFFF000
-#define IOMMU_CMD_ADDR_LOW_SHIFT    12
-#define IOMMU_CMD_ADDR_HIGH_MASK    0xFFFFFFFF
-#define IOMMU_CMD_ADDR_HIGH_SHIFT   0
-
 #define IOMMU_REG_BASE_ADDR_LOW_MASK    0xFFFFF000
 #define IOMMU_REG_BASE_ADDR_LOW_SHIFT   12
 #define IOMMU_REG_BASE_ADDR_HIGH_MASK   0x000FFFFF
diff --git a/xen/drivers/passthrough/amd/iommu.h 
b/xen/drivers/passthrough/amd/iommu.h
index 1abfdc685a..2297ac8e73 100644
--- a/xen/drivers/passthrough/amd/iommu.h
+++ b/xen/drivers/passthrough/amd/iommu.h
@@ -391,19 +391,6 @@ static inline void iommu_set_devid_to_cmd(uint32_t *cmd, uint16_t id)
  IOMMU_CMD_DEVICE_ID_SHIFT, cmd);
 }
 
-/* access address field from iommu cmd */
-static inline uint32_t iommu_get_addr_lo_from_cmd(uint32_t cmd)
-{
-return get_field_from_reg_u32(cmd, IOMMU_CMD_ADDR_LOW_MASK,
-  IOMMU_CMD_ADDR_LOW_SHIFT);
-}
-
-static inline uint32_t iommu_get_addr_hi_from_cmd(uint32_t cmd)
-{
-return get_field_from_reg_u32(cmd, IOMMU_CMD_ADDR_LOW_MASK,
-  IOMMU_CMD_ADDR_HIGH_SHIFT);
-}
-
 /* access iommu base addresses field from mmio regs */
 static inline void iommu_set_addr_lo_to_reg(uint32_t *reg, uint32_t addr)
 {
-- 
2.11.0



Re: [Xen-devel] [PATCH v4 7/7] x86/tlb: use Xen L0 assisted TLB flush when available

2020-02-11 Thread Wei Liu
On Tue, Feb 11, 2020 at 03:06:21PM +0100, Roger Pau Monné wrote:
> On Tue, Feb 11, 2020 at 10:34:24AM +, Wei Liu wrote:
> > On Mon, Feb 10, 2020 at 06:28:29PM +0100, Roger Pau Monne wrote:
> > [...]
> > >  
> > >  struct hypervisor_ops {
> > > @@ -32,6 +34,8 @@ struct hypervisor_ops {
> > >  void (*resume)(void);
> > >  /* Fix up e820 map */
> > >  void (*e820_fixup)(struct e820map *e820);
> > > +/* L0 assisted TLB flush */
> > > +int (*flush_tlb)(const cpumask_t *mask, const void *va, unsigned int 
> > > order);
> > >  };
> > >  
> > >  #ifdef CONFIG_GUEST
> > > @@ -41,6 +45,14 @@ void hypervisor_setup(void);
> > >  int hypervisor_ap_setup(void);
> > >  void hypervisor_resume(void);
> > >  void hypervisor_e820_fixup(struct e820map *e820);
> > > +/*
> > > + * L0 assisted TLB flush.
> > > + * mask: cpumask of the dirty vCPUs that should be flushed.
> > > + * va: linear address to flush, or NULL for global flushes.
> > 
> > I was in the middle of writing my patch and noticed this.
> > 
> > I think NULL means "flushing the entire address space" here?
> 
> Yes, that's right. I didn't add a way to differentiate between global
> (ie: PGE mappings included) flushes and non-global flushes, so all
> calls are assumed to imply flushes of global mappings.
> 
> It might be better if you adapt it yourself to whatever is more suited
> for HyperV which has more selective flushes available. Xen only has an
> hypercall to request a global flush on all vCPUs.

OK. Thanks for confirming.

I will change this comment in my patch.

Wei.

> 
> Thanks, Roger.


[Xen-devel] [PATCH] hvmloader: Drop use of XENVER_extraversion

2020-02-11 Thread Andrew Cooper
The printf() in init_hypercalls() only ends up in the hypervisor console log,
so extraversion really isn't interesting.

The SMBios table doesn't need extraversion, and removing it reduces the
ability for a guest to fingerprint the exact hypervisor it is running under.

Signed-off-by: Andrew Cooper 
---
CC: Jan Beulich 
CC: Wei Liu 
CC: Roger Pau Monné 
---
 tools/firmware/hvmloader/hvmloader.c |  4 +---
 tools/firmware/hvmloader/smbios.c| 10 --
 2 files changed, 1 insertion(+), 13 deletions(-)

diff --git a/tools/firmware/hvmloader/hvmloader.c 
b/tools/firmware/hvmloader/hvmloader.c
index 598a226278..99c8841790 100644
--- a/tools/firmware/hvmloader/hvmloader.c
+++ b/tools/firmware/hvmloader/hvmloader.c
@@ -121,7 +121,6 @@ static void init_hypercalls(void)
 uint32_t eax, ebx, ecx, edx;
 unsigned long i;
 char signature[13];
-xen_extraversion_t extraversion;
 uint32_t base;
 
 for ( base = 0x4000; base < 0x4001; base += 0x100 )
@@ -146,8 +145,7 @@ static void init_hypercalls(void)
 
 /* Print version information. */
 cpuid(base + 1, &eax, &ebx, &ecx, &edx);
-hypercall_xen_version(XENVER_extraversion, extraversion);
-printf("Detected Xen v%u.%u%s\n", eax >> 16, eax & 0xffff, extraversion);
+printf("Detected Xen v%u.%u\n", eax >> 16, eax & 0xffff);
 }
 
 /* Replace possibly erroneous memory-size CMOS fields with correct values. */
diff --git a/tools/firmware/hvmloader/smbios.c 
b/tools/firmware/hvmloader/smbios.c
index 97a054e9e3..46ba1cb7b3 100644
--- a/tools/firmware/hvmloader/smbios.c
+++ b/tools/firmware/hvmloader/smbios.c
@@ -256,7 +256,6 @@ hvm_write_smbios_tables(
 xen_domain_handle_t uuid;
 uint16_t xen_major_version, xen_minor_version;
 uint32_t xen_version;
-char xen_extra_version[XEN_EXTRAVERSION_LEN];
 /* guess conservatively on buffer length for Xen version string */
 char xen_version_str[80];
 /* temporary variables used to build up Xen version string */
@@ -274,8 +273,6 @@ hvm_write_smbios_tables(
 xen_major_version = (uint16_t) (xen_version >> 16);
 xen_minor_version = (uint16_t) xen_version;
 
-hypercall_xen_version(XENVER_extraversion, xen_extra_version);
-
 /* build up human-readable Xen version string */
 p = xen_version_str;
 len = 0;
@@ -302,13 +299,6 @@ hvm_write_smbios_tables(
 strcpy(p, tmp);
 p += tmp_len;
 
-tmp_len = strlen(xen_extra_version);
-len += tmp_len;
-if ( len >= sizeof(xen_version_str) )
-goto error_out;
-strcpy(p, xen_extra_version);
-p += tmp_len;
-
 xen_version_str[sizeof(xen_version_str)-1] = '\0';
 
 /* scratch_start is a safe large memory area for scratch. */
-- 
2.11.0



Re: [Xen-devel] [PATCH v4 7/7] x86/tlb: use Xen L0 assisted TLB flush when available

2020-02-11 Thread Roger Pau Monné
On Tue, Feb 11, 2020 at 10:34:24AM +, Wei Liu wrote:
> On Mon, Feb 10, 2020 at 06:28:29PM +0100, Roger Pau Monne wrote:
> [...]
> >  
> >  struct hypervisor_ops {
> > @@ -32,6 +34,8 @@ struct hypervisor_ops {
> >  void (*resume)(void);
> >  /* Fix up e820 map */
> >  void (*e820_fixup)(struct e820map *e820);
> > +/* L0 assisted TLB flush */
> > +int (*flush_tlb)(const cpumask_t *mask, const void *va, unsigned int 
> > order);
> >  };
> >  
> >  #ifdef CONFIG_GUEST
> > @@ -41,6 +45,14 @@ void hypervisor_setup(void);
> >  int hypervisor_ap_setup(void);
> >  void hypervisor_resume(void);
> >  void hypervisor_e820_fixup(struct e820map *e820);
> > +/*
> > + * L0 assisted TLB flush.
> > + * mask: cpumask of the dirty vCPUs that should be flushed.
> > + * va: linear address to flush, or NULL for global flushes.
> 
> I was in the middle of writing my patch and noticed this.
> 
> I think NULL means "flushing the entire address space" here?

Yes, that's right. I didn't add a way to differentiate between global
(ie: PGE mappings included) flushes and non-global flushes, so all
calls are assumed to imply flushes of global mappings.

It might be better if you adapt it yourself to whatever is more suited
for HyperV which has more selective flushes available. Xen only has an
hypercall to request a global flush on all vCPUs.
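To illustrate those semantics, a caller of the hook could be structured
roughly like this (a simplified sketch; the actual integration into
flush_area_mask() in the patch differs in detail):

/* Sketch: prefer the L0-assisted flush, fall back to IPI-based flushing. */
static void flush_guests_tlb(const cpumask_t *mask, const void *va,
                             unsigned int order)
{
    /*
     * va == NULL requests a flush of the entire address space, including
     * global (PGE) mappings, on every dirty vCPU in mask.
     */
    if ( hypervisor_flush_tlb(mask, va, order) )
    {
        unsigned int flags = FLUSH_TLB | FLUSH_TLB_GLOBAL;

        if ( va )
            flags |= FLUSH_VA_VALID | FLUSH_ORDER(order);

        flush_area_mask(mask, va, flags);
    }
}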

Thanks, Roger.


Re: [Xen-devel] Xen-unstable: pci-passthrough regression bisected to: x86/smp: use APIC ALLBUT destination shorthand when possible

2020-02-11 Thread Roger Pau Monné
On Mon, Feb 10, 2020 at 09:49:30PM +0100, Sander Eikelenboom wrote:
> On 03/02/2020 14:21, Roger Pau Monné wrote:
> > On Mon, Feb 03, 2020 at 01:44:06PM +0100, Sander Eikelenboom wrote:
> >> On 03/02/2020 13:41, Roger Pau Monné wrote:
> >>> On Mon, Feb 03, 2020 at 01:30:55PM +0100, Sander Eikelenboom wrote:
>  On 03/02/2020 13:23, Roger Pau Monné wrote:
> > On Mon, Feb 03, 2020 at 09:33:51AM +0100, Sander Eikelenboom wrote:
> >> Hi Roger,
> >>
> >> Last week I encountered an issue with the PCI-passthrough of a USB 
> >> controller. 
> >> In the guest I get:
> >> [ 1143.313756] xhci_hcd :00:05.0: xHCI host not responding to 
> >> stop endpoint command.
> >> [ 1143.334825] xhci_hcd :00:05.0: xHCI host controller not 
> >> responding, assume dead
> >> [ 1143.347364] xhci_hcd :00:05.0: HC died; cleaning up
> >> [ 1143.356407] usb 1-2: USB disconnect, device number 2
> >>
> >> Bisection turned up as the culprit: 
> >>commit 5500d265a2a8fa63d60c08beb549de8ec82ff7a5
> >>x86/smp: use APIC ALLBUT destination shorthand when possible
> >
> > Sorry to hear that, let see if we can figure out what's wrong.
> 
>  No problem, that is why I test stuff :)
> 
> >> I verified by reverting that commit and now it works fine again.
> >
> > Does the same controller work fine when used in dom0?
> 
>  Will test that, but as all other pci devices in dom0 work fine,
>  I assume this controller would also work fine in dom0 (as it has also
>  worked fine for ages with PCI-passthrough to that guest and still works
>  fine when reverting the referenced commit).
> >>>
> >>> Is this the only device that fails to work when doing pci-passthrough,
> >>> or other devices also don't work with the mentioned change applied?
> >>>
> >>> Have you tested on other boxes?
> >>>
>  I don't know if your change can somehow have a side effect
>  on latency around the processing of pci-passthrough ?
> >>>
> >>> Hm, the mentioned commit should speed up broadcast IPIs, but I don't
> >>> see how it could slow down other interrupts. Also I would think the
> >>> domain is not receiving interrupts from the device, rather than
> >>> interrupts being slow.
> >>>
> >>> Can you also paste the output of lspci -v for that xHCI device from
> >>> dom0?
> >>>
> >>> Thanks, Roger.
> >>
> >> Will do this evening including the testing in dom0 etc.
> >> Will also see if there is any pattern when observing /proc/interrupts in
> >> the guest.
> > 
> > Thanks! I also have some trivial patch that I would like you to try,
> > just to discard send_IPI_mask clearing the scratch_cpumask under
> > another function feet.
> > 
> > Roger.
> 
> Hi Roger,
> 
> Took a while, but I was able to run some tests now.
> 
> I also forgot a detail in the first report (probably still a bit tired from 
> FOSDEM), 
> namely: the device passedthrough works OK for a while before I get the kernel 
> message.
> 
> I tested the patch and it looks like it makes the issue go away,
> I tested for a day, while without the patch (or revert of the commit) the 
> device
> will give problems within a few hours.

Thanks, I have another patch for you to try, which will likely make
your system crash. Could you give it a try and paste the log output?

Thanks, Roger.
---8<---
commit 909880219efc4fe3c25536454d04f07bfe61e3b1
Author: Roger Pau Monne 
Date:   Tue Feb 11 11:14:48 2020 +0100

x86: add accessors for scratch cpu mask

Current usage of the per-CPU scratch cpumask is dangerous since
there's no way to figure out if the mask is already being used except
for manual code inspection of all the callers and possible call paths.

This is unsafe and not reliable, so introduce a minimal get/put
infrastructure to prevent nested usage of the scratch mask.

Signed-off-by: Roger Pau Monné 

diff --git a/xen/arch/x86/io_apic.c b/xen/arch/x86/io_apic.c
index e98e08e9c8..4ee261b632 100644
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -2236,10 +2236,11 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
 entry.vector = vector;
 
 if (cpumask_intersects(desc->arch.cpu_mask, TARGET_CPUS)) {
-cpumask_t *mask = this_cpu(scratch_cpumask);
+cpumask_t *mask = get_scratch_cpumask();
 
 cpumask_and(mask, desc->arch.cpu_mask, TARGET_CPUS);
 SET_DEST(entry, logical, cpu_mask_to_apicid(mask));
+put_scratch_cpumask();
 } else {
 printk(XENLOG_ERR "IRQ%d: no target CPU (%*pb vs %*pb)\n",
irq, CPUMASK_PR(desc->arch.cpu_mask), CPUMASK_PR(TARGET_CPUS));
@@ -2433,10 +2434,11 @@ int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val)
 
 if ( cpumask_intersects(desc->arch.cpu_mask, TARGET_CPUS) )
 {
-cpumask_t *mask = this_cpu(scratch_cpumask);
+cpumask_t *mask = 
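
For reference, the get/put pair this patch relies on could be implemented
along the following lines (a simplified sketch; the actual patch may track
the current user differently, e.g. by recording the caller for diagnostics):

DECLARE_PER_CPU(cpumask_var_t, scratch_cpumask);
DECLARE_PER_CPU(bool, scratch_cpumask_busy);

static inline cpumask_t *get_scratch_cpumask(void)
{
    /* Catch nested users: the mask must not already be handed out. */
    ASSERT(!this_cpu(scratch_cpumask_busy));
    this_cpu(scratch_cpumask_busy) = true;

    return this_cpu(scratch_cpumask);
}

static inline void put_scratch_cpumask(void)
{
    ASSERT(this_cpu(scratch_cpumask_busy));
    this_cpu(scratch_cpumask_busy) = false;
}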

Re: [Xen-devel] [PATCH v4 1/2] xsm: add Kconfig option for denied string

2020-02-11 Thread Andrew Cooper
On 11/02/2020 13:42, Sergey Dyasli wrote:
> Add Kconfig option to make it possible to configure the string returned
> to non-privileged guests instead of the default "<denied>" which could
> propagate to UI / logs after the subsequent patch that hides detailed
> Xen version information from unprivileged guests.
>
> Introduce XENVER_denied_string to allow guests to set up UI / logs
> filtering which depends on the new CONFIG_XSM_DENIED_STRING.

No.  This is even worse than other suggestions.

It is entirely unacceptable to expect guests to have to modify them to
figure out when they're being lied to.

And it is now possible *without source code modifications* to create a
Xen which reports one string in this hypercall, and has empty strings
elsewhere, which is even more chaotic for guests.

~Andrew


Re: [Xen-devel] PV Shim ballooning

2020-02-11 Thread Igor Druzhinin
On 11/02/2020 13:39, Andrew Cooper wrote:
> Ballooning inside PV shim is currently very broken.
> 
> From an instrumented Xen and 32bit PV XTF test:
> 
> (d3) (d3) --- Xen Test Framework ---
> (d3) (d3) Ballooning: PV 32bit (PAE 3 levels)
> (d3) (d3) mr { 0010a940, 1024, 0x7ff0 }
> (d3) (d3) About to decrease
> (d3) (XEN) *** D { 82008020, nr 1020, done 0 }
> (d3) (XEN) d3v0 failed to reserve 267 extents of order 0 for offlining
> (d3) (XEN) *** D { 82007fffe040, nr 1024, done 1020 }
> (d3) (XEN) d3v0 failed to reserve 1024 extents of order 0 for offlining
> (d3) (d3) => got 1024
> 
> This test takes 1024 frames and calls decrease reservation on them,
> before unmapping.  i.e. the decrease reservation should fail.  Shim
> successfully offlines 753 pages (nothing to do with the frames the guest
> selected), and fails to offline 1291, and despite this, returns success.
> 
> First of all, the "failed to reserve" is in pv_shim_offline_memory()
> which is a void function that has a semantically relevant failure case. 
> This obviously isn't ok.
> 
> Second, the way the compat code loops over the translated data is
> incompatible with how args.nr_done is used for the call into
> pv_shim_offline_memory().
> 
> Why is pv_shim_offline_memory() not in decrease_reservation() to begin with?

Could be moved, assuming it will just offline the frames that were already processed.

> Furthermore, there is a fundamental difference in ballooning behaviour
> between PV and HVM guests, which I don't think we can compensate for. 
> PV guests need to call decrease reservation once to release the frames,
> and unmap the frames (in any order).  HVM guests calling decrease
> reservation automatically make the frame unusable no matter how many
> outstanding references exist.
> 
> Shim can't decrease reservation (HVM with L0 Xen) on any frame whose
> reference count didn't drop to 0 from the PV guests' call, and there is
> nothing presently to check this condition.

It will allocate the pages from the allocator - yes, it checks that the
counter has dropped to 0.

Igor


[Xen-devel] [PATCH v4 2/2] xsm: hide detailed Xen version from unprivileged guests

2020-02-11 Thread Sergey Dyasli
Hide the following information that can help identify the running Xen
binary version: XENVER_[extraversion|compile_info|changeset]
This makes it harder for malicious guests to fingerprint Xen to identify
exploitable systems.

Introduce xsm_filter_denied() to hvmloader to remove the "<denied>" string
from guest's DMI tables that otherwise would be shown in tools like
dmidecode.

While at it, add explicit cases for XENVER_[commandline|build_id]
for better code readability. Add a default case with an ASSERT to make
sure that every case is explicitly listed as well.

Signed-off-by: Sergey Dyasli 
---
v3 --> v4:
- Updated commit message
- Re-add hvmloader filtering

v2 --> v3:
- Remove hvmloader filtering
- Add ASSERT_UNREACHABLE

v1 --> v2:
- Added xsm_filter_denied() to hvmloader instead of modifying xen_deny()
- Made behaviour the same for both Release and Debug builds
- XENVER_capabilities is no longer hided

---
 tools/firmware/hvmloader/hvmloader.c |  1 +
 tools/firmware/hvmloader/smbios.c|  1 +
 tools/firmware/hvmloader/util.c  | 11 +++
 tools/firmware/hvmloader/util.h  |  2 ++
 xen/include/xsm/dummy.h  | 15 +++
 5 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/tools/firmware/hvmloader/hvmloader.c 
b/tools/firmware/hvmloader/hvmloader.c
index 598a226278..b35899f2fb 100644
--- a/tools/firmware/hvmloader/hvmloader.c
+++ b/tools/firmware/hvmloader/hvmloader.c
@@ -147,6 +147,7 @@ static void init_hypercalls(void)
 /* Print version information. */
 cpuid(base + 1, &eax, &ebx, &ecx, &edx);
 hypercall_xen_version(XENVER_extraversion, extraversion);
+xsm_filter_denied(extraversion);
 printf("Detected Xen v%u.%u%s\n", eax >> 16, eax & 0x, extraversion);
 }
 
diff --git a/tools/firmware/hvmloader/smbios.c 
b/tools/firmware/hvmloader/smbios.c
index 97a054e9e3..a71bfe8392 100644
--- a/tools/firmware/hvmloader/smbios.c
+++ b/tools/firmware/hvmloader/smbios.c
@@ -275,6 +275,7 @@ hvm_write_smbios_tables(
 xen_minor_version = (uint16_t) xen_version;
 
 hypercall_xen_version(XENVER_extraversion, xen_extra_version);
+xsm_filter_denied(xen_extra_version);
 
 /* build up human-readable Xen version string */
 p = xen_version_str;
diff --git a/tools/firmware/hvmloader/util.c b/tools/firmware/hvmloader/util.c
index 0c3f2d24cd..49b4b321e3 100644
--- a/tools/firmware/hvmloader/util.c
+++ b/tools/firmware/hvmloader/util.c
@@ -28,6 +28,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 
@@ -995,6 +996,16 @@ void hvmloader_acpi_build_tables(struct acpi_config *config,
 hvm_param_set(HVM_PARAM_VM_GENERATION_ID_ADDR, config->vm_gid_addr);
 }
 
+void xsm_filter_denied(char *str)
+{
+xen_denied_string_t deny_str = "<denied>";
+
+hypercall_xen_version(XENVER_denied_string, deny_str);
+
+if ( strcmp(str, deny_str) == 0 )
+*str = '\0';
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/tools/firmware/hvmloader/util.h b/tools/firmware/hvmloader/util.h
index 7bca6418d2..e4fd26de9d 100644
--- a/tools/firmware/hvmloader/util.h
+++ b/tools/firmware/hvmloader/util.h
@@ -286,6 +286,8 @@ struct acpi_config;
 void hvmloader_acpi_build_tables(struct acpi_config *config,
  unsigned int physical);
 
+void xsm_filter_denied(char *str);
+
 #endif /* __HVMLOADER_UTIL_H__ */
 
 /*
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index 72a101b106..2567ccaa0a 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -751,16 +751,23 @@ static XSM_INLINE int xsm_xen_version (XSM_DEFAULT_ARG uint32_t op)
 case XENVER_denied_string:
 /* These sub-ops ignore the permission checks and return data. */
 return 0;
-case XENVER_extraversion:
-case XENVER_compile_info:
+
 case XENVER_capabilities:
-case XENVER_changeset:
 case XENVER_pagesize:
 case XENVER_guest_handle:
 /* These MUST always be accessible to any guest by default. */
 return xsm_default_action(XSM_HOOK, current->domain, NULL);
-default:
+
+case XENVER_extraversion:
+case XENVER_compile_info:
+case XENVER_changeset:
+case XENVER_commandline:
+case XENVER_build_id:
 return xsm_default_action(XSM_PRIV, current->domain, NULL);
+
+default:
+ASSERT_UNREACHABLE();
+return -EPERM;
 }
 }
 
-- 
2.17.1



[Xen-devel] [PATCH v4 1/2] xsm: add Kconfig option for denied string

2020-02-11 Thread Sergey Dyasli
Add Kconfig option to make it possible to configure the string returned
to non-privileged guests instead of the default "<denied>" which could
propagate to UI / logs after the subsequent patch that hides detailed
Xen version information from unprivileged guests.

Introduce XENVER_denied_string to allow guests to set up UI / logs
filtering which depends on the new CONFIG_XSM_DENIED_STRING.

Signed-off-by: Sergey Dyasli 
---
v3 --> v4:
- Updated kconfig prompt description
- Added XENVER_denied_string
- Added #ifdef to fix build when CONFIG_XSM is not set

v2 --> v3:
- new patch

---
 xen/common/Kconfig   |  8 
 xen/common/kernel.c  | 11 +++
 xen/common/version.c |  4 
 xen/include/public/version.h |  5 +
 xen/include/xsm/dummy.h  |  1 +
 5 files changed, 29 insertions(+)

diff --git a/xen/common/Kconfig b/xen/common/Kconfig
index a6914fcae9..4a1a9398cd 100644
--- a/xen/common/Kconfig
+++ b/xen/common/Kconfig
@@ -228,6 +228,14 @@ choice
bool "SILO" if XSM_SILO
 endchoice
 
+config XSM_DENIED_STRING
+   string "xen_version hypercall denied information replacement string"
+   default "<denied>"
+   depends on XSM
+   ---help---
+ A string which substitutes sensitive information returned via
+ xen_version hypercall to non-privileged guests
+
 config LATE_HWDOM
bool "Dedicated hardware domain"
default n
diff --git a/xen/common/kernel.c b/xen/common/kernel.c
index 22941cec94..1c22e5d167 100644
--- a/xen/common/kernel.c
+++ b/xen/common/kernel.c
@@ -561,6 +561,17 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 
 return sz;
 }
+
+case XENVER_denied_string:
+{
+xen_denied_string_t str;
+
+safe_strcpy(str, xen_deny());
+if ( copy_to_guest(arg, str, XEN_DENIED_STRING_LEN) )
+return -EFAULT;
+
+return 0;
+}
 }
 
 return -ENOSYS;
diff --git a/xen/common/version.c b/xen/common/version.c
index 937eb1281c..fbd0ef4668 100644
--- a/xen/common/version.c
+++ b/xen/common/version.c
@@ -67,7 +67,11 @@ const char *xen_banner(void)
 
 const char *xen_deny(void)
 {
+#ifdef CONFIG_XSM_DENIED_STRING
+return CONFIG_XSM_DENIED_STRING;
+#else
 return "<denied>";
+#endif
 }
 
 static const void *build_id_p __read_mostly;
diff --git a/xen/include/public/version.h b/xen/include/public/version.h
index 17a81e23cd..f65001d2d9 100644
--- a/xen/include/public/version.h
+++ b/xen/include/public/version.h
@@ -100,6 +100,11 @@ struct xen_build_id {
 };
 typedef struct xen_build_id xen_build_id_t;
 
+/* arg == xen_denied_string_t. */
+#define XENVER_denied_string 11
+typedef char xen_denied_string_t[64];
+#define XEN_DENIED_STRING_LEN (sizeof(xen_denied_string_t))
+
 #endif /* __XEN_PUBLIC_VERSION_H__ */
 
 /*
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index b8e185e6fa..72a101b106 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -748,6 +748,7 @@ static XSM_INLINE int xsm_xen_version (XSM_DEFAULT_ARG uint32_t op)
 case XENVER_version:
 case XENVER_platform_parameters:
 case XENVER_get_features:
+case XENVER_denied_string:
 /* These sub-ops ignore the permission checks and return data. */
 return 0;
 case XENVER_extraversion:
-- 
2.17.1
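
As an illustration of how a guest might consume the new sub-op (a
hypothetical sketch using Linux-style hypercall wrappers; HYPERVISOR_xen_version
and strscpy are the guest's own helpers, and the fallback value assumes the
pre-existing "<denied>" default):

/* Sketch: learn the replacement string so UI / log filters can match on it. */
static char xen_denied[XEN_DENIED_STRING_LEN];

static void __init xen_fetch_denied_string(void)
{
    /*
     * Hypervisors without XENVER_denied_string return an error;
     * fall back to the historic default in that case.
     */
    if (HYPERVISOR_xen_version(XENVER_denied_string, xen_denied) < 0)
        strscpy(xen_denied, "<denied>", sizeof(xen_denied));
}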



[Xen-devel] [PATCH v4 0/2] xsm: hide detailed Xen version

2020-02-11 Thread Sergey Dyasli
Now a proper 2-patch series.

Sergey Dyasli (2):
  xsm: add Kconfig option for denied string
  xsm: hide detailed Xen version from unprivileged guests

 tools/firmware/hvmloader/hvmloader.c |  1 +
 tools/firmware/hvmloader/smbios.c|  1 +
 tools/firmware/hvmloader/util.c  | 11 +++
 tools/firmware/hvmloader/util.h  |  2 ++
 xen/common/Kconfig   |  8 
 xen/common/kernel.c  | 11 +++
 xen/common/version.c |  4 
 xen/include/public/version.h |  5 +
 xen/include/xsm/dummy.h  | 16 
 9 files changed, 55 insertions(+), 4 deletions(-)

-- 
2.17.1



[Xen-devel] PV Shim ballooning

2020-02-11 Thread Andrew Cooper
Ballooning inside PV shim is currently very broken.

From an instrumented Xen and 32bit PV XTF test:

(d3) (d3) --- Xen Test Framework ---
(d3) (d3) Ballooning: PV 32bit (PAE 3 levels)
(d3) (d3) mr { 0010a940, 1024, 0x7ff0 }
(d3) (d3) About to decrease
(d3) (XEN) *** D { 82008020, nr 1020, done 0 }
(d3) (XEN) d3v0 failed to reserve 267 extents of order 0 for offlining
(d3) (XEN) *** D { 82007fffe040, nr 1024, done 1020 }
(d3) (XEN) d3v0 failed to reserve 1024 extents of order 0 for offlining
(d3) (d3) => got 1024

This test takes 1024 frames and calls decrease reservation on them,
before unmapping.  i.e. the decrease reservation should fail.  Shim
successfully offlines 753 pages (nothing to do with the frames the guest
selected), and fails to offline 1291, and despite this, returns success.
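
For reference, the guest side of such a test boils down to one
XENMEM_decrease_reservation call over the selected, still-mapped frames,
roughly (a sketch, not the actual XTF test code):

/* Sketch: ask Xen to release 1024 still-mapped 4k frames. */
static xen_pfn_t pfns[1024];

static unsigned long balloon_out(void)
{
    struct xen_memory_reservation mr = {
        .nr_extents   = ARRAY_SIZE(pfns),
        .extent_order = 0,
        .domid        = DOMID_SELF,
    };

    set_xen_guest_handle(mr.extent_start, pfns);

    /* Returns how many extents Xen claims to have released. */
    return hypercall_memory_op(XENMEM_decrease_reservation, &mr);
}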

First of all, the "failed to reserve" is in pv_shim_offline_memory()
which is a void function that has a semantically relevant failure case. 
This obviously isn't ok.

Second, the way the compat code loops over the translated data is
incompatible with how args.nr_done is used for the call into
pv_shim_offline_memory().

Why is pv_shim_offline_memory() not in decrease_reservation() to begin with?

Furthermore, there is a fundamental difference in ballooning behaviour
between PV and HVM guests, which I don't think we can compensate for. 
PV guests need to call decrease reservation once to release the frames,
and unmap the frames (in any order).  HVM guests calling decrease
reservation automatically make the frame unusable no matter how many
outstanding references exist.

Shim can't decrease reservation (HVM with L0 Xen) on any frame whose
reference count didn't drop to 0 from the PV guests' call, and there is
nothing presently to check this condition.

Short of a PGC bit and extra shim logic in free_domheap_page(), I can't
see any way to reconcile the behaviour, except to change the semantics
of decrease reservation for PV guests.  In practice, this would be far
more sensible behaviour, but we have no idea if existing PV guests would
manage.

~Andrew


Re: [Xen-devel] [PATCH v8 1/5] x86/p2m: Allow p2m_get_page_from_gfn to return shared entries

2020-02-11 Thread Tamas K Lengyel
On Tue, Feb 11, 2020 at 4:04 AM Jan Beulich  wrote:
>
> On 11.02.2020 11:29, Tamas K Lengyel wrote:
> > On Tue, Feb 11, 2020 at 2:17 AM Jan Beulich  wrote:
> >>
> >> On 10.02.2020 20:21, Tamas K Lengyel wrote:
> >>> The owner domain of shared pages is dom_cow, use that for get_page
> >>> otherwise the function fails to return the correct page under some
> >>> situations. The check if dom_cow should be used was only performed in
> >>> a subset of use-cases. Fixing the error and simplifying the existing check
> >>> since we can't have any shared entries with dom_cow being NULL.
> >>>
> >>> Signed-off-by: Tamas K Lengyel 
> >>
> >> I find it quite disappointing that the blank lines requested to be
> >> added ...
> >>
> >>> --- a/xen/arch/x86/mm/p2m.c
> >>> +++ b/xen/arch/x86/mm/p2m.c
> >>> @@ -574,11 +574,12 @@ struct page_info *p2m_get_page_from_gfn(
> >>>  if ( fdom == NULL )
> >>>  page = NULL;
> >>>  }
> >>> -else if ( !get_page(page, p2m->domain) &&
> >>> -  /* Page could be shared */
> >>> -  (!dom_cow || !p2m_is_shared(*t) ||
> >>> -   !get_page(page, dom_cow)) )
> >>> -page = NULL;
> >>> +else
> >>> +{
> >>> +struct domain *d = !p2m_is_shared(*t) ? p2m->domain : dom_cow;
> >>> +if ( !get_page(page, d) )
> >>
> >> .. above here and ...
> >>
> >>> @@ -594,8 +595,9 @@ struct page_info *p2m_get_page_from_gfn(
> >>>  mfn = get_gfn_type_access(p2m, gfn_x(gfn), t, a, q, NULL);
> >>>  if ( p2m_is_ram(*t) && mfn_valid(mfn) )
> >>>  {
> >>> +struct domain *d = !p2m_is_shared(*t) ? p2m->domain : dom_cow;
> >>>  page = mfn_to_page(mfn);
> >>
> >> ... above here still haven't appeared. No matter that it's easy to
> >> do so while committing, when you send a new version you should
> >> really address such remarks yourself, I think.
> >
> > Noted. I haven't addressed it since it appeared to me that this patch
> > has been ready to go in for like 3 revisions already as-is given the
> > blank-lines were non-blockers.
>
> The patch continues to lack a maintainer ack. Hence it hasn't been
> ready to go in at any point in time.

I meant there has been no comments or anything blocking noted for
three resends now.

Tamas


[Xen-devel] [qemu-mainline test] 146844: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146844 qemu-mainline real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146844/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 build-arm64                   6 xen-build                fail REGR. vs. 144861
 build-arm64-xsm               6 xen-build                fail REGR. vs. 144861
 build-armhf                   6 xen-build                fail REGR. vs. 144861
 build-amd64-xsm               6 xen-build                fail REGR. vs. 144861
 build-i386-xsm                6 xen-build                fail REGR. vs. 144861
 build-amd64                   6 xen-build                fail REGR. vs. 144861
 build-i386                    6 xen-build                fail REGR. vs. 144861

Tests which did not succeed, but are not blocking:
 test-amd64-amd64-qemuu-nested-amd  1 build-check(1)   blocked  n/a
 build-amd64-libvirt   1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl-xsm   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-debianhvm-amd64  1 build-check(1)blocked n/a
 test-amd64-i386-xl-qemuu-debianhvm-amd64  1 build-check(1) blocked n/a
 test-amd64-i386-xl-qemuu-dmrestrict-amd64-dmrestrict 1 build-check(1) blocked 
n/a
 test-amd64-amd64-xl-credit2   1 build-check(1)   blocked  n/a
 test-amd64-amd64-pair 1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-multivcpu  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qcow2 1 build-check(1)   blocked  n/a
 test-armhf-armhf-xl-multivcpu  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-pvhv2-amd  1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-xsm  1 build-check(1)   blocked  n/a
 test-armhf-armhf-xl-arndale   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-xsm   1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-debianhvm-amd64-shadow  1 build-check(1)  blocked n/a
 test-amd64-amd64-libvirt  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-win7-amd64  1 build-check(1) blocked n/a
 build-i386-libvirt1 build-check(1)   blocked  n/a
 test-amd64-i386-qemuu-rhel6hvm-intel  1 build-check(1) blocked n/a
 test-arm64-arm64-xl-thunderx  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-debianhvm-i386-xsm  1 build-check(1)  blocked n/a
 test-amd64-amd64-xl-qemuu-dmrestrict-amd64-dmrestrict 1 build-check(1) blocked 
n/a
 test-amd64-i386-libvirt   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-shadow1 build-check(1)   blocked  n/a
 test-amd64-i386-freebsd10-i386  1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-pair  1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl-credit1   1 build-check(1)   blocked  n/a
 test-amd64-i386-qemuu-rhel6hvm-amd  1 build-check(1)   blocked n/a
 test-amd64-amd64-amd64-pvgrub  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl   1 build-check(1)   blocked  n/a
 test-amd64-amd64-qemuu-nested-intel  1 build-check(1)  blocked n/a
 test-arm64-arm64-xl-credit2   1 build-check(1)   blocked  n/a
 test-arm64-arm64-libvirt-xsm  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-pvshim1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-rtds  1 build-check(1)   blocked  n/a
 test-amd64-amd64-i386-pvgrub  1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 test-amd64-amd64-xl-qemuu-ws16-amd64  1 build-check(1) blocked n/a
 test-amd64-amd64-libvirt-vhd  1 build-check(1)   blocked  n/a
 test-armhf-armhf-xl-rtds  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-xsm1 build-check(1)   blocked  n/a
 build-armhf-libvirt   1 build-check(1)   blocked  n/a
 test-amd64-i386-xl1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-xsm   1 build-check(1)   blocked  n/a
 test-amd64-amd64-pygrub   1 build-check(1)   blocked  n/a
 test-amd64-i386-pair  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-win7-amd64  1 build-check(1)  blocked n/a
 test-amd64-i386-xl-qemuu-ws16-amd64  1 build-check(1)  blocked n/a
 test-amd64-i386-freebsd10-amd64  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-pvhv2-intel  1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-pair  1 build-check(1)   blocked  n/a
 test-armhf-armhf-libvirt  1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl   1 build-check(1)   blocked  n/a
 

[Xen-devel] [linux-5.4 test] 146842: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146842 linux-5.4 real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146842/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-amd64-amd64-xl-qemuu-ovmf-amd64 10 debian-hvm-install fail REGR. vs. 146121
 test-amd64-i386-xl-qemuu-ovmf-amd64 10 debian-hvm-install fail REGR. vs. 146121
 test-amd64-amd64-qemuu-nested-intel 17 debian-hvm-install/l1/l2 fail REGR. vs. 146121

Tests which are failing intermittently (not blocking):
 test-armhf-armhf-xl-rtds 16 guest-start/debian.repeat fail in 146833 pass in 
146760
 test-amd64-i386-qemut-rhel6hvm-amd 12 guest-start/redhat.repeat fail pass in 
146833
 test-armhf-armhf-xl-rtds 12 guest-startfail pass in 146833

Regressions which are regarded as allowable (not blocking):
 test-amd64-amd64-xl-rtds 18 guest-localmigrate/x10   fail REGR. vs. 146121

Tests which did not succeed, but are not blocking:
 test-armhf-armhf-xl-rtds13 migrate-support-check fail in 146833 never pass
 test-armhf-armhf-xl-rtds 14 saverestore-support-check fail in 146833 never pass
 test-amd64-i386-xl-pvshim12 guest-start  fail   never pass
 test-amd64-amd64-libvirt-xsm 13 migrate-support-checkfail   never pass
 test-amd64-amd64-libvirt 13 migrate-support-checkfail   never pass
 test-amd64-i386-libvirt-xsm  13 migrate-support-checkfail   never pass
 test-amd64-i386-libvirt  13 migrate-support-checkfail   never pass
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 11 migrate-support-check 
fail never pass
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 11 migrate-support-check 
fail never pass
 test-amd64-amd64-qemuu-nested-amd 17 debian-hvm-install/l1/l2  fail never pass
 test-arm64-arm64-xl-thunderx 13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-credit2  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-credit2  14 saverestore-support-checkfail   never pass
 test-arm64-arm64-xl-thunderx 14 saverestore-support-checkfail   never pass
 test-arm64-arm64-xl-credit1  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-credit1  14 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-arndale  13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-arndale  14 saverestore-support-checkfail   never pass
 test-arm64-arm64-xl-xsm  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-xsm  14 saverestore-support-checkfail   never pass
 test-amd64-amd64-libvirt-vhd 12 migrate-support-checkfail   never pass
 test-amd64-amd64-xl-qemuu-win7-amd64 17 guest-stop fail never pass
 test-amd64-i386-xl-qemut-win7-amd64 17 guest-stop  fail never pass
 test-amd64-amd64-xl-qemut-win7-amd64 17 guest-stop fail never pass
 test-amd64-amd64-xl-qemuu-ws16-amd64 17 guest-stop fail never pass
 test-armhf-armhf-xl-multivcpu 13 migrate-support-checkfail  never pass
 test-armhf-armhf-xl-multivcpu 14 saverestore-support-checkfail  never pass
 test-armhf-armhf-libvirt 13 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt 14 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-credit2  13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-credit2  14 saverestore-support-checkfail   never pass
 test-amd64-i386-xl-qemuu-win7-amd64 17 guest-stop  fail never pass
 test-amd64-i386-xl-qemut-ws16-amd64 17 guest-stop  fail never pass
 test-arm64-arm64-libvirt-xsm 13 migrate-support-checkfail   never pass
 test-arm64-arm64-libvirt-xsm 14 saverestore-support-checkfail   never pass
 test-arm64-arm64-xl-seattle  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl  13 migrate-support-checkfail   never pass
 test-arm64-arm64-xl-seattle  14 saverestore-support-checkfail   never pass
 test-arm64-arm64-xl  14 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-cubietruck 13 migrate-support-checkfail never pass
 test-armhf-armhf-xl-cubietruck 14 saverestore-support-checkfail never pass
 test-armhf-armhf-xl-credit1  13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-credit1  14 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl  13 migrate-support-checkfail   never pass
 test-armhf-armhf-xl  14 saverestore-support-checkfail   never pass
 test-armhf-armhf-libvirt-raw 12 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt-raw 13 saverestore-support-checkfail   never pass
 test-amd64-i386-xl-qemuu-ws16-amd64 17 guest-stop  fail never pass
 test-armhf-armhf-xl-vhd  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-vhd  13 saverestore-support-checkfail   never pass
 

Re: [Xen-devel] [PATCH v2] xen/sched: add some diagnostic info in the run queue keyhandler

2020-02-11 Thread Jürgen Groß

On 11.02.20 14:01, Jan Beulich wrote:

On 11.02.2020 13:27, Juergen Gross wrote:

When dumping the run queue information add some more data regarding
current and (if known) previous vcpu for each physical cpu.

With core scheduling activated the printed data will be e.g.:

(XEN) CPUs info:
(XEN) CPU[00] current=d[IDLE]v0, curr=d[IDLE]v0, prev=NULL
(XEN) CPU[01] current=d[IDLE]v1
(XEN) CPU[02] current=d[IDLE]v2, curr=d[IDLE]v2, prev=NULL
(XEN) CPU[03] current=d[IDLE]v3

Signed-off-by: Juergen Gross 
---
V2: add proper locking


"Proper" is ambiguous in the context of dumping functions. In a
number of places we use try-lock, to avoid the dumping hanging
on something else monopolizing the lock. I'd like to suggest to
do so here, too.


All the scheduler related dumping functions are using the "real" locks.
So using trylock in this single case wouldn't help at all. Additionally
using trylock only would make a crash during dumping the data more
probable, so I'm not sure we want that.

Instead of unconditionally using trylock in dumping functions I could
imagine having a "dumplock" which uses proper locking by default and can
be toggled to trylock in case it is needed (or maybe automatically by
adding a timeout to the dumplock variant).
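
For illustration, a minimal sketch of the try-lock style dump path being
discussed here; the per-cpu lock array and the helper names are purely
hypothetical, this is not actual Xen code:

/* Sketch: skip the detailed dump instead of blocking on a busy lock. */
static spinlock_t dump_locks[NR_CPUS];        /* hypothetical */

static void dump_cpu_state_trylock(unsigned int cpu)
{
    spinlock_t *lock = &dump_locks[cpu];

    if ( !spin_trylock(lock) )
    {
        /* Somebody else monopolizes the lock - don't hang the keyhandler. */
        printk("CPU%u: lock busy, skipping detailed dump\n", cpu);
        return;
    }

    dump_one_cpu(cpu);        /* hypothetical: print the per-cpu data */

    spin_unlock(lock);
}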


Juergen

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] Xen fails to resume on AMD Fam15h (and Fam17h?) because of CPUID mismatch

2020-02-11 Thread Claudia
February 10, 2020 12:14 PM, "Marek Marczykowski-Górecki" 
 wrote:

> On Mon, Feb 10, 2020 at 11:17:34AM +, Andrew Cooper wrote:
> 
>> On 10/02/2020 08:55, Jan Beulich wrote:
>> On 10.02.2020 00:06, Marek Marczykowski-Górecki wrote:
>> Hi,
>> 
>> Multiple Qubes users have reported issues with resuming from S3 on AMD
>> systems (Ryzen 2500U, Ryzen Pro 3700U, maybe more). The error message
>> is:
>> 
>> (XEN) CPU0: cap[ 1] is 7ed8320b (expected f6d8320b)
>> 
>> If I read it right, this is:
>> - OSXSAVE: 0 -> 1
>> - HYPERVISOR: 1 -> 0
>> 
>> Commenting out the panic on a failed recheck_cpu_features() in power.c
>> makes the system work after resume, reportedly stable. But that doesn't
>> sounds like a good idea generally.
>> 
>> Is this difference a Xen fault (some missing MSR / other register
>> restore on resume)? Or BIOS vendor / AMD, that could be worked around in
>> Xen?
>> The transition of the HYPERVISOR bit is definitely a Xen issue,
>> with Andrew having sent a patch already (iirc).
>> 
>> https://lore.kernel.org/xen-devel/20200127202121.2961-1-andrew.coop...@citrix.com
>> 
>> Code is correct.  Commit message needs rework, including in light of
>> this discovery.  (I may eventually split it into two patches.)
> 
> Claudia, do you want to test with this patch?

I'm getting hunk failed in domctl.c applying to R4.1 default repo (fc31, Xen 
4.13). I'll see if I can fix it but bear with me, I'm new at this.

Marek: Would you by any chance be willing to merge this into a test branch on 
your repo, so the rest of us can pull it directly into qubes-builder? It'll 
take you a fraction of the time it'll take me, plus then zachm and awokd and 
anyone else can pull it also.

Jan Beulich: Yes, based on symptoms this appears to only affect AMD from what 
I've seen so far, although I'm not sure if any Intel users have tried the 
workaround patch. In my case, I experience the exact same symptoms on 4.8, 
4.12, and 4.13, and patching 4.8 resulted in successful resume and confirmed 
the cause of the issue, but I haven't tried patching any other versions. 

Thank you everyone for your attention to this issue so far. This is all over my 
head but I'm happy to provide any additional information I can. I have the 
Ryzen 2500U.
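
Aside: the differing bits in the two cap[1] words quoted above - 7ed8320b
observed vs f6d8320b expected - can be double-checked with a tiny standalone
program. This is only a verification sketch, completely independent of the
Xen code base, and it assumes (as the thread does) that cap[1] is the CPUID
leaf 1 ECX word:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t expected = 0xf6d8320b;   /* value recorded before suspend */
    uint32_t observed = 0x7ed8320b;   /* value re-read after resume */
    uint32_t diff = expected ^ observed;
    unsigned int bit;

    for ( bit = 0; bit < 32; bit++ )
        if ( diff & (1u << bit) )
            printf("bit %2u: %u -> %u\n", bit,
                   (expected >> bit) & 1, (observed >> bit) & 1);

    /* Prints bit 27 (OSXSAVE: 0 -> 1) and bit 31 (HYPERVISOR: 1 -> 0). */
    return 0;
}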

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH v2] xen/sched: add some diagnostic info in the run queue keyhandler

2020-02-11 Thread Jan Beulich
On 11.02.2020 13:27, Juergen Gross wrote:
> When dumping the run queue information add some more data regarding
> current and (if known) previous vcpu for each physical cpu.
> 
> With core scheduling activated the printed data will be e.g.:
> 
> (XEN) CPUs info:
> (XEN) CPU[00] current=d[IDLE]v0, curr=d[IDLE]v0, prev=NULL
> (XEN) CPU[01] current=d[IDLE]v1
> (XEN) CPU[02] current=d[IDLE]v2, curr=d[IDLE]v2, prev=NULL
> (XEN) CPU[03] current=d[IDLE]v3
> 
> Signed-off-by: Juergen Gross 
> ---
> V2: add proper locking

"Proper" is ambiguous in the context of dumping functions. In a
number of places we use try-lock, to avoid the dumping hanging
on something else monopolizing the lock. I'd like to suggest to
do so here, too.

Jan

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

[Xen-devel] UPDATE [Vote] For Xen Project Code of Conduct (deadline March 31st)

2020-02-11 Thread George Dunlap
FYI, if you have voted in private for this (by replying directly to Lars),
you'll need to re-send your vote to 
(which is currently being redirected to Ian Jackson and myself).

 -George

On Fri, Jan 17, 2020 at 7:13 PM Lars Kurth  wrote:

> I propose to tally the votes after March 31st. You can reply via
> +1: for proposal
> -1: against proposal
> in public or private.
>
> Votes will be tallied by subproject – aka the Hypervisor and XAPI project
> by %
> for the proposal - and then averaged across sub-projects that achieved the
> quorum. The vote needs to achieve a 2/3 majority to pass.
>
> Sub-project needs to achieve the following quorum of votes in favour for
> the
> sub-project’s vote to count
> Hypervisor: 3 + votes
> XAPI: 2 + votes
> Windows PV Drivers: 1 + votes
>
>
___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH] xen: remove empty softirq_init()

2020-02-11 Thread Andrew Cooper
On 11/02/2020 12:37, Juergen Gross wrote:
> softirq_init() is empty since Xen 4.1. Remove it together with its call
> sites.
>
> Signed-off-by: Juergen Gross 

Acked-by: Andrew Cooper 

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

[Xen-devel] [PATCH] xen: remove empty softirq_init()

2020-02-11 Thread Juergen Gross
softirq_init() is empty since Xen 4.1. Remove it together with its call
sites.

Signed-off-by: Juergen Gross 
---
 xen/arch/arm/setup.c  | 2 --
 xen/arch/x86/setup.c  | 1 -
 xen/common/softirq.c  | 4 
 xen/include/xen/softirq.h | 1 -
 4 files changed, 8 deletions(-)

diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 3c8ae11b73..7968cee47d 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -876,8 +876,6 @@ void __init start_xen(unsigned long boot_phys_offset,
 
 gic_init();
 
-softirq_init();
-
 tasklet_subsys_init();
 
 if ( xsm_dt_init() != 1 )
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index e50e1f86b3..3fbaee156d 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1533,7 +1533,6 @@ void __init noreturn __start_xen(unsigned long mbi_p)
 console_init_ring();
 vesa_init();
 
-softirq_init();
 tasklet_subsys_init();
 
 paging_init();
diff --git a/xen/common/softirq.c b/xen/common/softirq.c
index 2d66193203..b83ad96d6c 100644
--- a/xen/common/softirq.c
+++ b/xen/common/softirq.c
@@ -132,10 +132,6 @@ void raise_softirq(unsigned int nr)
 set_bit(nr, &softirq_pending(smp_processor_id()));
 }
 
-void __init softirq_init(void)
-{
-}
-
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/xen/softirq.h b/xen/include/xen/softirq.h
index d7273b389b..b4724f5c8b 100644
--- a/xen/include/xen/softirq.h
+++ b/xen/include/xen/softirq.h
@@ -25,7 +25,6 @@ typedef void (*softirq_handler)(void);
 
 void do_softirq(void);
 void open_softirq(int nr, softirq_handler handler);
-void softirq_init(void);
 
 void cpumask_raise_softirq(const cpumask_t *, unsigned int nr);
 void cpu_raise_softirq(unsigned int cpu, unsigned int nr);
-- 
2.16.4


___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

[Xen-devel] [PATCH v2] xen/sched: add some diagnostic info in the run queue keyhandler

2020-02-11 Thread Juergen Gross
When dumping the run queue information add some more data regarding
current and (if known) previous vcpu for each physical cpu.

With core scheduling activated the printed data will be e.g.:

(XEN) CPUs info:
(XEN) CPU[00] current=d[IDLE]v0, curr=d[IDLE]v0, prev=NULL
(XEN) CPU[01] current=d[IDLE]v1
(XEN) CPU[02] current=d[IDLE]v2, curr=d[IDLE]v2, prev=NULL
(XEN) CPU[03] current=d[IDLE]v3

Signed-off-by: Juergen Gross 
---
V2: add proper locking
---
 xen/common/sched/core.c | 26 --
 1 file changed, 20 insertions(+), 6 deletions(-)

diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index 2e43f8029f..6fbc30e678 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -3234,7 +3234,7 @@ void scheduler_free(struct scheduler *sched)
 
 void schedule_dump(struct cpupool *c)
 {
-unsigned int  i;
+unsigned int  i, j;
 struct scheduler *sched;
 cpumask_t*cpus;
 
@@ -3245,7 +3245,7 @@ void schedule_dump(struct cpupool *c)
 if ( c != NULL )
 {
 sched = c->sched;
-cpus = c->cpu_valid;
+cpus = c->res_valid;
 printk("Scheduler: %s (%s)\n", sched->name, sched->opt_name);
 sched_dump_settings(sched);
 }
@@ -3255,11 +3255,25 @@ void schedule_dump(struct cpupool *c)
 cpus = &cpupool_free_cpus;
 }
 
-if ( sched->dump_cpu_state != NULL )
+printk("CPUs info:\n");
+for_each_cpu (i, cpus)
 {
-printk("CPUs info:\n");
-for_each_cpu (i, cpus)
-sched_dump_cpu_state(sched, i);
+struct sched_resource *sr = get_sched_res(i);
+unsigned long flags;
+spinlock_t *lock;
+
+lock = pcpu_schedule_lock_irqsave(i, &flags);
+
+printk("CPU[%02d] current=%pv, curr=%pv, prev=%pv\n", i,
+   get_cpu_current(i), sr->curr ? sr->curr->vcpu_list : NULL,
+   sr->prev ? sr->prev->vcpu_list : NULL);
+for_each_cpu (j, sr->cpus)
+if ( i != j )
+printk("CPU[%02d] current=%pv\n", j, get_cpu_current(j));
+
+pcpu_schedule_unlock_irqrestore(lock, flags, i);
+
+sched_dump_cpu_state(sched, i);
 }
 
 rcu_read_unlock(&sched_res_rculock);
-- 
2.16.4


___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

[Xen-devel] [PATCH v2] xen/sched: remove sched_init_pdata()

2020-02-11 Thread Juergen Gross
sched_init_pdata() is used nowhere, it can be removed. Same applies to
the .init_pdata hook of the per-scheduler interface. The last caller
has been removed with commit f855dd962523b6cb47a92037bdd28b1485141abe
("sched: add minimalistic idle scheduler for free cpus").

With the idle scheduler introduction the switch_sched hook became the
only place where new cpus get added to a normal scheduler, so the
init_pdata functionality is performed inside that hook.

Adjust some comments as well to reflect reality. While at it correct a
typo in a comment next to a modified comment.

Signed-off-by: Juergen Gross 
---
 xen/common/sched/core.c|  9 -
 xen/common/sched/credit.c  | 12 
 xen/common/sched/credit2.c | 21 -
 xen/common/sched/null.c| 10 --
 xen/common/sched/private.h |  8 
 xen/common/sched/rt.c  | 31 ---
 6 files changed, 4 insertions(+), 87 deletions(-)

diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index d4e8944e0e..2e43f8029f 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -2775,10 +2775,10 @@ static int cpu_schedule_callback(
  * order. If no per-pCPU memory was allocated, there is no need to
  * provide an implementation of free_pdata. deinit_pdata may, however,
  * be necessary/useful in this case too (e.g., it can undo something done
- * on scheduler wide data structure during init_pdata). Both deinit_pdata
+ * on scheduler wide data structure during switch_sched). Both deinit_pdata
  * and free_pdata are called during CPU_DEAD.
  *
- * If someting goes wrong during bringup, we go to CPU_UP_CANCELLED.
+ * If something goes wrong during bringup, we go to CPU_UP_CANCELLED.
  */
 switch ( action )
 {
@@ -2968,9 +2968,8 @@ int schedule_cpu_add(unsigned int cpu, struct cpupool *c)
  * To setup the cpu for the new scheduler we need:
  *  - a valid instance of per-CPU scheduler specific data, as it is
  *allocated by sched_alloc_pdata(). Note that we do not want to
- *initialize it yet (i.e., we are not calling sched_init_pdata()).
- *That will be done by the target scheduler, in sched_switch_sched(),
- *in proper ordering and with locking.
+ *initialize it yet, as that will be done by the target scheduler,
+ *in sched_switch_sched(), in proper ordering and with locking.
  *  - a valid instance of per-vCPU scheduler specific data, for the idle
  *vCPU of cpu. That is what the target scheduler will use for the
  *sched_priv field of the per-vCPU info of the idle domain.
diff --git a/xen/common/sched/credit.c b/xen/common/sched/credit.c
index 05946eea6e..93d89da278 100644
--- a/xen/common/sched/credit.c
+++ b/xen/common/sched/credit.c
@@ -614,17 +614,6 @@ init_pdata(struct csched_private *prv, struct csched_pcpu 
*spc, int cpu)
 spc->nr_runnable = 0;
 }
 
-static void
-csched_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
-{
-unsigned long flags;
-struct csched_private *prv = CSCHED_PRIV(ops);
-
-spin_lock_irqsave(&prv->lock, flags);
-init_pdata(prv, pdata, cpu);
-spin_unlock_irqrestore(&prv->lock, flags);
-}
-
 /* Change the scheduler of cpu to us (Credit). */
 static spinlock_t *
 csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
@@ -2273,7 +2262,6 @@ static const struct scheduler sched_credit_def = {
 .alloc_udata= csched_alloc_udata,
 .free_udata = csched_free_udata,
 .alloc_pdata= csched_alloc_pdata,
-.init_pdata = csched_init_pdata,
 .deinit_pdata   = csched_deinit_pdata,
 .free_pdata = csched_free_pdata,
 .switch_sched   = csched_switch_sched,
diff --git a/xen/common/sched/credit2.c b/xen/common/sched/credit2.c
index f2752f27e2..7d104f15d0 100644
--- a/xen/common/sched/credit2.c
+++ b/xen/common/sched/credit2.c
@@ -3816,26 +3816,6 @@ init_pdata(struct csched2_private *prv, struct 
csched2_pcpu *spc,
 return spc->runq_id;
 }
 
-static void
-csched2_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
-{
-struct csched2_private *prv = csched2_priv(ops);
-spinlock_t *old_lock;
-unsigned long flags;
-unsigned rqi;
-
-write_lock_irqsave(&prv->lock, flags);
-old_lock = pcpu_schedule_lock(cpu);
-
-rqi = init_pdata(prv, pdata, cpu);
-/* Move the scheduler lock to the new runq lock. */
-get_sched_res(cpu)->schedule_lock = &prv->rqd[rqi].lock;
-
-/* _Not_ pcpu_schedule_unlock(): schedule_lock may have changed! */
-spin_unlock(old_lock);
-write_unlock_irqrestore(&prv->lock, flags);
-}
-
 /* Change the scheduler of cpu to us (Credit2). */
 static spinlock_t *
 csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
@@ -4083,7 +4063,6 @@ static const struct scheduler sched_credit2_def = {
 .alloc_udata= csched2_alloc_udata,
 .free_udata = csched2_free_udata,
 .alloc_pdata= csched2_alloc_pdata,

Re: [Xen-devel] [PATCH] AMD/IOMMU: Clean up the allocation helpers

2020-02-11 Thread Durrant, Paul
> -Original Message-
> From: Xen-devel  On Behalf Of
> Andrew Cooper
> Sent: 11 February 2020 12:27
> To: Xen-devel 
> Cc: Andrew Cooper ; Wei Liu ; Jan
> Beulich ; Roger Pau Monné 
> Subject: [Xen-devel] [PATCH] AMD/IOMMU: Clean up the allocation helpers
> 
> Conform to style, drop unnecessary local variables, and avoid opencoding
> clear_domain_page().
> 
> Signed-off-by: Andrew Cooper 

Reviewed-by: Paul Durrant 
___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

[Xen-devel] [PATCH] AMD/IOMMU: Clean up the allocation helpers

2020-02-11 Thread Andrew Cooper
Conform to style, drop unnecessary local variables, and avoid opencoding
clear_domain_page().

Signed-off-by: Andrew Cooper 
---
CC: Jan Beulich 
CC: Wei Liu 
CC: Roger Pau Monné 

Avoiding opencoding clear_domain_page() drops a surprising quantity of code.

  add/remove: 0/0 grow/shrink: 0/3 up/down: 0/-269 (-269)
  Function old new   delta
  amd_iommu_alloc_root.part167 116 -51
  iommu_pde_from_dfn  1061 955-106
  amd_iommu_quarantine_init694 582-112
  Total: Before=3019031, After=3018762, chg -0.01%
---
 xen/drivers/passthrough/amd/iommu.h | 26 ++
 1 file changed, 10 insertions(+), 16 deletions(-)

diff --git a/xen/drivers/passthrough/amd/iommu.h 
b/xen/drivers/passthrough/amd/iommu.h
index 1abfdc685a..16af40b8cd 100644
--- a/xen/drivers/passthrough/amd/iommu.h
+++ b/xen/drivers/passthrough/amd/iommu.h
@@ -341,34 +341,28 @@ static inline unsigned long region_to_pages(unsigned long 
addr, unsigned long si
 return (PAGE_ALIGN(addr + size) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
 }
 
-static inline struct page_info* alloc_amd_iommu_pgtable(void)
+static inline struct page_info *alloc_amd_iommu_pgtable(void)
 {
-struct page_info *pg;
-void *vaddr;
-
-pg = alloc_domheap_page(NULL, 0);
-if ( pg == NULL )
-return 0;
-vaddr = __map_domain_page(pg);
-memset(vaddr, 0, PAGE_SIZE);
-unmap_domain_page(vaddr);
+struct page_info *pg = alloc_domheap_page(NULL, 0);
+
+if ( pg )
+clear_domain_page(page_to_mfn(pg));
+
 return pg;
 }
 
 static inline void free_amd_iommu_pgtable(struct page_info *pg)
 {
-if ( pg != 0 )
+if ( pg )
 free_domheap_page(pg);
 }
 
-static inline void* __alloc_amd_iommu_tables(int order)
+static inline void *__alloc_amd_iommu_tables(unsigned int order)
 {
-void *buf;
-buf = alloc_xenheap_pages(order, 0);
-return buf;
+return alloc_xenheap_pages(order, 0);
 }
 
-static inline void __free_amd_iommu_tables(void *table, int order)
+static inline void __free_amd_iommu_tables(void *table, unsigned int order)
 {
 free_xenheap_pages(table, order);
 }
-- 
2.11.0


___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [XEN PATCH 0/3] Default to python3

2020-02-11 Thread Wei Liu
On Tue, Feb 11, 2020 at 11:19:02AM +, Anthony PERARD wrote:
> On Mon, Jan 27, 2020 at 02:40:40PM +, Wei Liu wrote:
> > On Mon, Jan 27, 2020 at 12:36:23PM +, Anthony PERARD wrote:
> > > On Mon, Jan 27, 2020 at 12:30:21PM +, Wei Liu wrote:
> > > > On Mon, Jan 20, 2020 at 11:52:17AM +, Anthony PERARD wrote:
> > > > > On Mon, Jan 20, 2020 at 11:50:50AM +, Anthony PERARD wrote:
> > > > > > Patch series available in this git branch:
> > > > > > https://xenbits.xen.org/git-http/people/aperard/xen-unstable.git 
> > > > > > br.python3-default-v1
> > > > > > 
> > > > > > Hi,
> > > > > > 
> > > > > > I think it's time for Xen to build with python3 by default.
> > > > > > 
> > > > > > The main reason for that is that QEMU upstream don't build with 
> > > > > > python 2.x
> > > > > > anymore, and the python binary selected by Xen build system is the 
> > > > > > one used
> > > > > > when building qemu-xen. So now osstest failed to build QEMU 
> > > > > > upstream.
> > > > > > 
> > > > > > Also, python2 is EOL.
> > > > > > 
> > > > > > FYI, the hypervisor build system already select python3 by default, 
> > > > > > this change
> > > > > > the tools side.
> > > > > 
> > > > > I forgot to say that there's a osstest patch as well:
> > > > > [OSSTEST PATCH] ts-xen-build-prep: Install python3-dev
> > > > 
> > > > AIUI I don't need to wait for that patch to be applied before applying
> > > > this series. Let me know if I'm wrong.
> > > 
> > > It just going to prevent a push :-). All build of staging will fail. So,
> > > the osstest patch is needed before applying the patch 3/3.
> > 
> > Ack. I will push the first two patches first.
> 
> osstest should be ready, could you push that last patch?

Pushed.

Wei.

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [XEN PATCH 0/3] Default to python3

2020-02-11 Thread Anthony PERARD
On Mon, Jan 27, 2020 at 02:40:40PM +, Wei Liu wrote:
> On Mon, Jan 27, 2020 at 12:36:23PM +, Anthony PERARD wrote:
> > On Mon, Jan 27, 2020 at 12:30:21PM +, Wei Liu wrote:
> > > On Mon, Jan 20, 2020 at 11:52:17AM +, Anthony PERARD wrote:
> > > > On Mon, Jan 20, 2020 at 11:50:50AM +, Anthony PERARD wrote:
> > > > > Patch series available in this git branch:
> > > > > https://xenbits.xen.org/git-http/people/aperard/xen-unstable.git 
> > > > > br.python3-default-v1
> > > > > 
> > > > > Hi,
> > > > > 
> > > > > I think it's time for Xen to build with python3 by default.
> > > > > 
> > > > > The main reason for that is that QEMU upstream don't build with 
> > > > > python 2.x
> > > > > anymore, and the python binary selected by Xen build system is the 
> > > > > one used
> > > > > when building qemu-xen. So now osstest failed to build QEMU upstream.
> > > > > 
> > > > > Also, python2 is EOL.
> > > > > 
> > > > > FYI, the hypervisor build system already select python3 by default, 
> > > > > this change
> > > > > the tools side.
> > > > 
> > > > I forgot to say that there's a osstest patch as well:
> > > > [OSSTEST PATCH] ts-xen-build-prep: Install python3-dev
> > > 
> > > AIUI I don't need to wait for that patch to be applied before applying
> > > this series. Let me know if I'm wrong.
> > 
> > It just going to prevent a push :-). All build of staging will fail. So,
> > the osstest patch is needed before applying the patch 3/3.
> 
> Ack. I will push the first two patches first.

osstest should be ready, could you push that last patch?

Thanks,

-- 
Anthony PERARD

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH v8 1/5] x86/p2m: Allow p2m_get_page_from_gfn to return shared entries

2020-02-11 Thread Jan Beulich
On 11.02.2020 11:29, Tamas K Lengyel wrote:
> On Tue, Feb 11, 2020 at 2:17 AM Jan Beulich  wrote:
>>
>> On 10.02.2020 20:21, Tamas K Lengyel wrote:
>>> The owner domain of shared pages is dom_cow, use that for get_page
>>> otherwise the function fails to return the correct page under some
>>> situations. The check if dom_cow should be used was only performed in
>>> a subset of use-cases. Fixing the error and simplifying the existing check
>>> since we can't have any shared entries with dom_cow being NULL.
>>>
>>> Signed-off-by: Tamas K Lengyel 
>>
>> I find it quite disappointing that the blank lines requested to be
>> added ...
>>
>>> --- a/xen/arch/x86/mm/p2m.c
>>> +++ b/xen/arch/x86/mm/p2m.c
>>> @@ -574,11 +574,12 @@ struct page_info *p2m_get_page_from_gfn(
>>>  if ( fdom == NULL )
>>>  page = NULL;
>>>  }
>>> -else if ( !get_page(page, p2m->domain) &&
>>> -  /* Page could be shared */
>>> -  (!dom_cow || !p2m_is_shared(*t) ||
>>> -   !get_page(page, dom_cow)) )
>>> -page = NULL;
>>> +else
>>> +{
>>> +struct domain *d = !p2m_is_shared(*t) ? p2m->domain : 
>>> dom_cow;
>>> +if ( !get_page(page, d) )
>>
>> .. above here and ...
>>
>>> @@ -594,8 +595,9 @@ struct page_info *p2m_get_page_from_gfn(
>>>  mfn = get_gfn_type_access(p2m, gfn_x(gfn), t, a, q, NULL);
>>>  if ( p2m_is_ram(*t) && mfn_valid(mfn) )
>>>  {
>>> +struct domain *d = !p2m_is_shared(*t) ? p2m->domain : dom_cow;
>>>  page = mfn_to_page(mfn);
>>
>> ... above here still haven't appeared. No matter that it's easy to
>> do so while committing, when you send a new version you should
>> really address such remarks yourself, I think.
> 
> Noted. I haven't addressed it since it appeared to me that this patch
> has been ready to go in for like 3 revisions already as-is given the
> blank-lines were non-blockers.

The patch continues to lack a maintainer ack. Hence it hasn't been
ready to go in at any point in time.

Jan

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH] xen/sched: remove sched_init_pdata()

2020-02-11 Thread Jürgen Groß

On 11.02.20 11:37, Dario Faggioli wrote:

On Mon, 2020-02-10 at 16:39 +0100, Juergen Gross wrote:

sched_init_pdata() is used nowhere, it can be removed. Same applies
to
the .init_pdata hook of the per-scheduler interface.


Right, and that appear to be the case since
f855dd962523b6cb47a92037bdd28b1485141abe ("sched: add minimalistic idle
scheduler for free cpus"), which removed all call sites.

And that is because switching to a scheduler always happens via
switch_sched from the idle scheduler, and it's there that we do all the
initializations, right?

This change is obviously doing the right thing, removing code that is
never called! :-)

Can we, though:
- add a mention to the commit above and a quick explanation of things
   in the changelog?


Okay.


- update the following comments too:
   1) in cpu_schedule_callback()

"* This happens by calling the deinit_pdata and free_pdata hooks, in this
  * order. If no per-pCPU memory was allocated, there is no need to
  * provide an implementation of free_pdata. deinit_pdata may, however,
  * be necessary/useful in this case too (e.g., it can undo something done
  * on scheduler wide data structure during init_pdata). Both deinit_pdata
  * and free_pdata are called during CPU_DEAD."

   2) schedule_cpu_add()

"*  - a valid instance of per-CPU scheduler specific data, as it is
  *allocated by sched_alloc_pdata(). Note that we do not want to
  *initialize it yet (i.e., we are not calling sched_init_pdata()).
  *That will be done by the target scheduler, in sched_switch_sched(),
  *in proper ordering and with locking."


Oh, I missed those. Will modify the comments.


Juergen

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH] xen/sched: remove sched_init_pdata()

2020-02-11 Thread Dario Faggioli
On Mon, 2020-02-10 at 16:39 +0100, Juergen Gross wrote:
> sched_init_pdata() is used nowhere, it can be removed. Same applies
> to
> the .init_pdata hook of the per-scheduler interface.
>
Right, and that appear to be the case since
f855dd962523b6cb47a92037bdd28b1485141abe ("sched: add minimalistic idle
scheduler for free cpus"), which removed all call sites.

And that is because switching to a scheduler always happens via
switch_sched from the idle scheduler, and it's there that we do all the
initializations, right?

This change is obviously doing the right thing, removing code that is
never called! :-)

Can we, though:
- add a mention to the commit above and a quick explanation of things 
  in the changelog?
- update the following comments too:
  1) in cpu_schedule_callback()

"* This happens by calling the deinit_pdata and free_pdata hooks, in this   
   
 * order. If no per-pCPU memory was allocated, there is no need to  
   
 * provide an implementation of free_pdata. deinit_pdata may, however,  
   
 * be necessary/useful in this case too (e.g., it can undo something done   
   
 * on scheduler wide data structure during init_pdata). Both deinit_pdata   
   
 * and free_pdata are called during CPU_DEAD."

  2) schedule_cpu_add()

"*  - a valid instance of per-CPU scheduler specific data, as it is
 *allocated by sched_alloc_pdata(). Note that we do not want to
 *initialize it yet (i.e., we are not calling sched_init_pdata()).
 *That will be done by the target scheduler, in sched_switch_sched(),
 *in proper ordering and with locking."

Regards
-- 
Dario Faggioli, Ph.D
http://about.me/dario.faggioli
Virtualization Software Engineer
SUSE Labs, SUSE https://www.suse.com/
---
<> (Raistlin Majere)



signature.asc
Description: This is a digitally signed message part
___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH v4 7/7] x86/tlb: use Xen L0 assisted TLB flush when available

2020-02-11 Thread Wei Liu
On Mon, Feb 10, 2020 at 06:28:29PM +0100, Roger Pau Monne wrote:
[...]
>  
>  struct hypervisor_ops {
> @@ -32,6 +34,8 @@ struct hypervisor_ops {
>  void (*resume)(void);
>  /* Fix up e820 map */
>  void (*e820_fixup)(struct e820map *e820);
> +/* L0 assisted TLB flush */
> +int (*flush_tlb)(const cpumask_t *mask, const void *va, unsigned int 
> order);
>  };
>  
>  #ifdef CONFIG_GUEST
> @@ -41,6 +45,14 @@ void hypervisor_setup(void);
>  int hypervisor_ap_setup(void);
>  void hypervisor_resume(void);
>  void hypervisor_e820_fixup(struct e820map *e820);
> +/*
> + * L0 assisted TLB flush.
> + * mask: cpumask of the dirty vCPUs that should be flushed.
> + * va: linear address to flush, or NULL for global flushes.

I was in the middle of writing my patch and noticed this.

I think NULL means "flushing the entire address space" here?

Wei.

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH v8 1/5] x86/p2m: Allow p2m_get_page_from_gfn to return shared entries

2020-02-11 Thread Tamas K Lengyel
On Tue, Feb 11, 2020 at 2:17 AM Jan Beulich  wrote:
>
> On 10.02.2020 20:21, Tamas K Lengyel wrote:
> > The owner domain of shared pages is dom_cow, use that for get_page
> > otherwise the function fails to return the correct page under some
> > situations. The check if dom_cow should be used was only performed in
> > a subset of use-cases. Fixing the error and simplifying the existing check
> > since we can't have any shared entries with dom_cow being NULL.
> >
> > Signed-off-by: Tamas K Lengyel 
>
> I find it quite disappointing that the blank lines requested to be
> added ...
>
> > --- a/xen/arch/x86/mm/p2m.c
> > +++ b/xen/arch/x86/mm/p2m.c
> > @@ -574,11 +574,12 @@ struct page_info *p2m_get_page_from_gfn(
> >  if ( fdom == NULL )
> >  page = NULL;
> >  }
> > -else if ( !get_page(page, p2m->domain) &&
> > -  /* Page could be shared */
> > -  (!dom_cow || !p2m_is_shared(*t) ||
> > -   !get_page(page, dom_cow)) )
> > -page = NULL;
> > +else
> > +{
> > +struct domain *d = !p2m_is_shared(*t) ? p2m->domain : 
> > dom_cow;
> > +if ( !get_page(page, d) )
>
> .. above here and ...
>
> > @@ -594,8 +595,9 @@ struct page_info *p2m_get_page_from_gfn(
> >  mfn = get_gfn_type_access(p2m, gfn_x(gfn), t, a, q, NULL);
> >  if ( p2m_is_ram(*t) && mfn_valid(mfn) )
> >  {
> > +struct domain *d = !p2m_is_shared(*t) ? p2m->domain : dom_cow;
> >  page = mfn_to_page(mfn);
>
> ... above here still haven't appeared. No matter that it's easy to
> do so while committing, when you send a new version you should
> really address such remarks yourself, I think.

Noted. I haven't addressed it since it appeared to me that this patch
has been ready to go in for like 3 revisions already as-is given the
blank-lines were non-blockers. By the time I get around rolling a new
one I simply forget nuisance style issues like this. I know we have
been having the discussion about having automated style-checks and
style-formatting added to Xen, this just further highlights to me the
need for it as we are wasting time and energy on stuff like this for
no real reason.

Tamas

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

[Xen-devel] [xen-unstable test] 146839: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146839 xen-unstable real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146839/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 build-amd64   6 xen-buildfail REGR. vs. 146787

Tests which did not succeed, but are not blocking:
 test-amd64-i386-qemuu-rhel6hvm-intel  1 build-check(1) blocked n/a
 test-amd64-amd64-i386-pvgrub  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-pvshim1 build-check(1)   blocked  n/a
 test-xtf-amd64-amd64-41 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemut-ws16-amd64  1 build-check(1) blocked n/a
 test-amd64-i386-xl-qemuu-debianhvm-amd64-shadow  1 build-check(1)  blocked n/a
 test-xtf-amd64-amd64-31 build-check(1)   blocked  n/a
 test-amd64-amd64-livepatch1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-pvshim 1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-vhd  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-credit2   1 build-check(1)   blocked  n/a
 test-amd64-amd64-pair 1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-win7-amd64  1 build-check(1) blocked n/a
 test-amd64-amd64-xl-qemut-win7-amd64  1 build-check(1) blocked n/a
 test-amd64-amd64-xl-qemut-debianhvm-amd64  1 build-check(1)blocked n/a
 test-amd64-amd64-xl-qemuu-dmrestrict-amd64-dmrestrict 1 build-check(1) blocked 
n/a
 test-amd64-i386-qemuu-rhel6hvm-amd  1 build-check(1)   blocked n/a
 test-amd64-i386-xl-qemut-debianhvm-amd64  1 build-check(1) blocked n/a
 test-amd64-i386-freebsd10-amd64  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemut-ws16-amd64  1 build-check(1)  blocked n/a
 test-amd64-i386-xl-qemuu-win7-amd64  1 build-check(1)  blocked n/a
 test-amd64-amd64-libvirt-pair  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-debianhvm-amd64-shadow  1 build-check(1) blocked n/a
 test-amd64-i386-migrupgrade   1 build-check(1)   blocked  n/a
 test-amd64-i386-livepatch 1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt  1 build-check(1)   blocked  n/a
 test-amd64-i386-freebsd10-i386  1 build-check(1)   blocked  n/a
 test-amd64-i386-examine   1 build-check(1)   blocked  n/a
 test-amd64-amd64-pygrub   1 build-check(1)   blocked  n/a
 build-amd64-libvirt   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-debianhvm-amd64  1 build-check(1)blocked n/a
 test-amd64-i386-libvirt-pair  1 build-check(1)   blocked  n/a
 test-amd64-amd64-amd64-pvgrub  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-debianhvm-amd64  1 build-check(1) blocked n/a
 test-amd64-i386-xl-qemuu-ovmf-amd64  1 build-check(1)  blocked n/a
 test-xtf-amd64-amd64-21 build-check(1)   blocked  n/a
 test-amd64-i386-xl-raw1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-ovmf-amd64  1 build-check(1) blocked n/a
 test-amd64-amd64-qemuu-nested-intel  1 build-check(1)  blocked n/a
 test-amd64-i386-xl-qemut-win7-amd64  1 build-check(1)  blocked n/a
 test-amd64-amd64-xl-shadow1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-multivcpu  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qcow2 1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 test-amd64-i386-pair  1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-pvhv2-amd  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-shadow 1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemuu-ws16-amd64  1 build-check(1) blocked n/a
 test-amd64-amd64-xl-credit1   1 build-check(1)   blocked  n/a
 test-amd64-i386-qemut-rhel6hvm-intel  1 build-check(1) blocked n/a
 test-xtf-amd64-amd64-51 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-pvhv2-intel  1 build-check(1)   blocked  n/a
 test-xtf-amd64-amd64-11 build-check(1)   blocked  n/a
 test-amd64-i386-qemut-rhel6hvm-amd  1 build-check(1)   blocked n/a
 test-amd64-amd64-libvirt-xsm  1 build-check(1)   blocked  n/a
 test-amd64-amd64-examine  1 build-check(1)   blocked  n/a
 test-amd64-i386-xl1 build-check(1)   blocked  n/a
 test-amd64-amd64-migrupgrade  1 build-check(1)   blocked  n/a
 test-amd64-amd64-qemuu-nested-amd  1 

Re: [Xen-devel] [PATCH v4 6/7] xen/guest: prepare hypervisor ops to use alternative calls

2020-02-11 Thread Durrant, Paul
> -Original Message-
> From: Roger Pau Monne 
> Sent: 10 February 2020 18:28
> To: xen-devel@lists.xenproject.org
> Cc: Roger Pau Monne ; Durrant, Paul
> ; Wei Liu ; Jan Beulich
> ; Andrew Cooper 
> Subject: [PATCH v4 6/7] xen/guest: prepare hypervisor ops to use
> alternative calls
> 
> Adapt the hypervisor ops framework so it can be used with the
> alternative calls framework. So far no hooks are modified to make use
> of the alternatives patching, as they are not in any hot path.
> 
> No functional change intended.
> 
> Signed-off-by: Roger Pau Monné 

Reviewed-by: Paul Durrant 

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH] xen/sched: remove pointless ASSERT() in credit2

2020-02-11 Thread Dario Faggioli
On Tue, 2020-02-11 at 10:36 +0100, Jürgen Groß wrote:
> On 11.02.20 10:29, Dario Faggioli wrote:
> > 
> > TBH, though, considering the nature of the check, I'd rather keep
> > the
> > ASSERT() and kill the BUG_ON().
> > 
> > I can do the patch myself if you don't want to respin it that way.
> 
> I'll respin.
>
Thanks!

On an not so related note... I have that other patch of yours, the one
about Credit2 runqueues on my list.

Sorry it's taking a while to review it properly. I'll try to get to it
ASAP.

Regards
-- 
Dario Faggioli, Ph.D
http://about.me/dario.faggioli
Virtualization Software Engineer
SUSE Labs, SUSE https://www.suse.com/
---
<> (Raistlin Majere)



signature.asc
Description: This is a digitally signed message part
___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH v2] xen/sched: remove pointless BUG_ON() in credit2

2020-02-11 Thread Dario Faggioli
On Tue, 2020-02-11 at 10:44 +0100, Juergen Gross wrote:
> The BUG_ON() at the top of csched2_context_saved() is completely
> pointless, as the ASSERT() just following it catches the same problem
> already.
> 
> Signed-off-by: Juergen Gross 
>
Reviewed-by: Dario Faggioli 

Thanks and Regards
-- 
Dario Faggioli, Ph.D
http://about.me/dario.faggioli
Virtualization Software Engineer
SUSE Labs, SUSE https://www.suse.com/
---
<> (Raistlin Majere)



signature.asc
Description: This is a digitally signed message part
___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

[Xen-devel] [PATCH v2] xen/sched: remove pointless BUG_ON() in credit2

2020-02-11 Thread Juergen Gross
The BUG_ON() at the top of csched2_context_saved() is completely
pointless, as the ASSERT() just following it catches the same problem
already.

Signed-off-by: Juergen Gross 
---
 xen/common/sched/credit2.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/xen/common/sched/credit2.c b/xen/common/sched/credit2.c
index b965cd1c7b..7d104f15d0 100644
--- a/xen/common/sched/credit2.c
+++ b/xen/common/sched/credit2.c
@@ -2167,8 +2167,6 @@ csched2_context_saved(const struct scheduler *ops, struct 
sched_unit *unit)
 s_time_t now = NOW();
 LIST_HEAD(were_parked);
 
-BUG_ON( !is_idle_unit(unit) &&
-svc->rqd != c2rqd(ops, sched_unit_master(unit)));
 ASSERT(is_idle_unit(unit) ||
svc->rqd == c2rqd(ops, sched_unit_master(unit)));
 
-- 
2.16.4


___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH] xen/sched: remove pointless ASSERT() in credit2

2020-02-11 Thread Jürgen Groß

On 11.02.20 10:29, Dario Faggioli wrote:

On Mon, 2020-02-10 at 17:45 +0100, Juergen Gross wrote:

The ASSERT() at the top of csched2_context_saved() is completely
pointless, as the BUG_ON() just in front of it catches the same
problem
already.


Yep, I went double checking and this is my fault. :-(

Apparently, in ccf2ead7f52 ("xen: credit2: make the code less
experimental"), for this specific case, I added the assert but forgot
to remove the BUG_ON().

Thanks for noticing and acting on this. :-)

TBH, though, considering the nature of the check, I'd rather keep the
ASSERT() and kill the BUG_ON().

I can do the patch myself if you don't want to respin it that way.


I'll respin.


Juergen

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

[Xen-devel] [PATCH] xen: make sure stop_machine_run() is always called in a tasklet

2020-02-11 Thread Juergen Gross
With core scheduling active it is mandatory for stop_machine_run() to
be called in a tasklet only, as otherwise a scheduling deadlock would
occur: stop_machine_run() does a cpu rendezvous by activating a tasklet
on all other cpus. In case stop_machine_run() was not called in an idle
vcpu it would block scheduling the idle vcpu on its siblings with core
scheduling being active, resulting in a hang.

Put a BUG_ON() into stop_machine_run() to test for being called in an
idle vcpu only and adapt the missing call site (ucode loading) to use a
tasklet for calling stop_machine_run().

Signed-off-by: Juergen Gross 
---
 xen/arch/x86/microcode.c  | 54 +--
 xen/common/stop_machine.c |  1 +
 2 files changed, 35 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/microcode.c b/xen/arch/x86/microcode.c
index c0fb690f79..8e61769377 100644
--- a/xen/arch/x86/microcode.c
+++ b/xen/arch/x86/microcode.c
@@ -561,30 +561,18 @@ static int do_microcode_update(void *patch)
 return ret;
 }
 
-int microcode_update(XEN_GUEST_HANDLE_PARAM(const_void) buf, unsigned long len)
+struct ucode_buf {
+unsigned int len;
+char buffer[];
+};
+
+static long microcode_update_helper(void *data)
 {
 int ret;
-void *buffer;
+struct ucode_buf *buffer = data;
 unsigned int cpu, updated;
 struct microcode_patch *patch;
 
-if ( len != (uint32_t)len )
-return -E2BIG;
-
-if ( microcode_ops == NULL )
-return -EINVAL;
-
-buffer = xmalloc_bytes(len);
-if ( !buffer )
-return -ENOMEM;
-
-ret = copy_from_guest(buffer, buf, len);
-if ( ret )
-{
-xfree(buffer);
-return -EFAULT;
-}
-
 /* cpu_online_map must not change during update */
 if ( !get_cpu_maps() )
 {
@@ -606,7 +594,7 @@ int microcode_update(XEN_GUEST_HANDLE_PARAM(const_void) 
buf, unsigned long len)
 return -EPERM;
 }
 
-patch = parse_blob(buffer, len);
+patch = parse_blob(buffer->buffer, buffer->len);
 xfree(buffer);
 if ( IS_ERR(patch) )
 {
@@ -699,6 +687,32 @@ int microcode_update(XEN_GUEST_HANDLE_PARAM(const_void) 
buf, unsigned long len)
 return ret;
 }
 
+int microcode_update(XEN_GUEST_HANDLE_PARAM(const_void) buf, unsigned long len)
+{
+int ret;
+struct ucode_buf *buffer;
+
+if ( len != (uint32_t)len )
+return -E2BIG;
+
+if ( microcode_ops == NULL )
+return -EINVAL;
+
+buffer = xmalloc_flex_struct(struct ucode_buf, buffer, len);
+if ( !buffer )
+return -ENOMEM;
+
+ret = copy_from_guest(buffer->buffer, buf, len);
+if ( ret )
+{
+xfree(buffer);
+return -EFAULT;
+}
+buffer->len = len;
+
+return continue_hypercall_on_cpu(0, microcode_update_helper, buffer);
+}
+
 static int __init microcode_init(void)
 {
 /*
diff --git a/xen/common/stop_machine.c b/xen/common/stop_machine.c
index 33d9602217..fe7f7d4447 100644
--- a/xen/common/stop_machine.c
+++ b/xen/common/stop_machine.c
@@ -74,6 +74,7 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned 
int cpu)
 int ret;
 
 BUG_ON(!local_irq_is_enabled());
+BUG_ON(!is_idle_vcpu(current));
 
 /* cpu_online_map must not change. */
 if ( !get_cpu_maps() )
-- 
2.16.4


___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

[Xen-devel] [PATCH] xen: do live patching only from main idle loop

2020-02-11 Thread Juergen Gross
One of the main design goals of core scheduling is to avoid actions
which are not directly related to the domain currently running on a
given cpu or core. Live patching is one of those actions which are
allowed to take place on a cpu only when the idle scheduling unit is
active on that cpu.

Unfortunately live patching tries to force the cpus into the idle loop
just by raising the schedule softirq, which will no longer be
guaranteed to work with core scheduling active. Additionally there are
still some places in the hypervisor calling check_for_livepatch_work()
without being in the idle loop.

It is easy to force a cpu into the main idle loop by scheduling a
tasklet on it. So switch live patching to use tasklets for switching to
idle and raising scheduling events. Additionally the calls of
check_for_livepatch_work() outside the main idle loop can be dropped.

As tasklets are only running on idle vcpus and stop_machine_run()
is activating tasklets on all cpus but the one it has been called on
to rendezvous, it is mandatory for stop_machine_run() to be called on
an idle vcpu, too, as otherwise there is no way for scheduling to
activate the idle vcpu for the tasklet on the sibling of the cpu
stop_machine_run() has been called on.

Signed-off-by: Juergen Gross 
---
 xen/arch/arm/domain.c   |  9 -
 xen/arch/arm/traps.c|  6 --
 xen/arch/x86/domain.c   |  9 -
 xen/arch/x86/hvm/svm/svm.c  |  2 +-
 xen/arch/x86/hvm/vmx/vmcs.c |  2 +-
 xen/arch/x86/pv/domain.c|  2 +-
 xen/arch/x86/setup.c|  2 +-
 xen/common/livepatch.c  | 39 ++-
 8 files changed, 46 insertions(+), 25 deletions(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index aa3df3b3ba..6627be2922 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -72,7 +72,11 @@ void idle_loop(void)
 
 /* Are we here for running vcpu context tasklets, or for idling? */
 if ( unlikely(tasklet_work_to_do(cpu)) )
+{
 do_tasklet();
+/* Livepatch work is always kicked off via a tasklet. */
+check_for_livepatch_work();
+}
 /*
  * Test softirqs twice --- first to see if should even try scrubbing
  * and then, after it is done, whether softirqs became pending
@@ -83,11 +87,6 @@ void idle_loop(void)
 do_idle();
 
 do_softirq();
-/*
- * We MUST be last (or before dsb, wfi). Otherwise after we get the
- * softirq we would execute dsb,wfi (and sleep) and not patch.
- */
-check_for_livepatch_work();
 }
 }
 
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 6f9bec22d3..30c4c1830b 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -23,7 +23,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -2239,11 +2238,6 @@ static void check_for_pcpu_work(void)
 {
 local_irq_enable();
 do_softirq();
-/*
- * Must be the last one - as the IPI will trigger us to come here
- * and we want to patch the hypervisor with almost no stack.
- */
-check_for_livepatch_work();
 local_irq_disable();
 }
 }
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index f53ae5ff86..2bc7c4fb2d 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -141,7 +141,11 @@ static void idle_loop(void)
 
 /* Are we here for running vcpu context tasklets, or for idling? */
 if ( unlikely(tasklet_work_to_do(cpu)) )
+{
 do_tasklet();
+/* Livepatch work is always kicked off via a tasklet. */
+check_for_livepatch_work();
+}
 /*
  * Test softirqs twice --- first to see if should even try scrubbing
  * and then, after it is done, whether softirqs became pending
@@ -151,11 +155,6 @@ static void idle_loop(void)
 !softirq_pending(cpu) )
 pm_idle();
 do_softirq();
-/*
- * We MUST be last (or before pm_idle). Otherwise after we get the
- * softirq we would execute pm_idle (and sleep) and not patch.
- */
-check_for_livepatch_work();
 }
 }
 
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index b7f67f9f03..32d8d847f2 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1032,7 +1032,7 @@ static void noreturn svm_do_resume(struct vcpu *v)
 
 hvm_do_resume(v);
 
-reset_stack_and_jump(svm_asm_do_resume);
+reset_stack_and_jump_nolp(svm_asm_do_resume);
 }
 
 void svm_vmenter_helper(const struct cpu_user_regs *regs)
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 65445afeb0..4c23645454 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1890,7 +1890,7 @@ void vmx_do_resume(struct vcpu *v)
 if ( host_cr4 != read_cr4() )
 __vmwrite(HOST_CR4, 

Re: [Xen-devel] [PATCH] xen/sched: remove pointless ASSERT() in credit2

2020-02-11 Thread Dario Faggioli
On Mon, 2020-02-10 at 17:45 +0100, Juergen Gross wrote:
> The ASSERT() at the top of csched2_context_saved() is completely
> pointless, as the BUG_ON() just in front of it catches the same
> problem
> already.
> 
Yep, I went double checking and this is my fault. :-(

Apparently, in ccf2ead7f52 ("xen: credit2: make the code less
experimental"), for this specific case, I added the assert but forgot
to remove the BUG_ON().

Thanks for noticing and acting on this. :-)

TBH, though, considering the nature of the check, I'd rather keep the
ASSERT() and kill the BUG_ON().

I can do the patch myself if you don't want to respin it that way.
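
As background on why this is a judgement call about the nature of the check:
the usual difference between the two macros is sketched below. This is a
rough, generic illustration with hypothetical names (MY_ASSERT, MY_BUG_ON,
my_panic), deliberately not Xen's exact definitions.

void my_panic(const char *what);              /* hypothetical helper */

#ifndef NDEBUG
#define MY_ASSERT(cond)  do { if ( !(cond) ) my_panic(#cond); } while (0)
#else
#define MY_ASSERT(cond)  ((void)0)    /* compiled out of non-debug builds */
#endif

#define MY_BUG_ON(cond)  do { if ( cond ) my_panic(#cond); } while (0)

An ASSERT-style check typically disappears from non-debug builds, while a
BUG_ON-style check is always active, so keeping one or the other says
something about how the condition should be treated in production.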

Thanks again and Regards
-- 
Dario Faggioli, Ph.D
http://about.me/dario.faggioli
Virtualization Software Engineer
SUSE Labs, SUSE https://www.suse.com/
---
<> (Raistlin Majere)



signature.asc
Description: This is a digitally signed message part
___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH v2 4/4] AMD/IOMMU: Treat head/tail pointers as byte offsets

2020-02-11 Thread Jan Beulich
On 10.02.2020 18:33, Andrew Cooper wrote:
> The MMIO registers as already byte offsets.  Using them in this form removes
> the need to shift their values for use.
> 
> It is also inefficient to store both entries and alloc_size (which only differ
> by entry_size).  Rename alloc_size to size, and drop entries entirely, which
> simplifies the allocation/deallocation helpers slightly.
> 
> Mark send_iommu_command() and invalidate_iommu_all() as static, as they have
> no external declaration or callers.
> 
> Signed-off-by: Andrew Cooper 

Reviewed-by: Jan Beulich 

> ---
> CC: Jan Beulich 
> CC: Wei Liu 
> CC: Roger Pau Monné 
> 
> v2:
>  * Mask head/tail pointers
>  * Drop unnecessary cast.

Thanks for adjusting these.

Jan

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] Live-Patch application failure in core-scheduling mode

2020-02-11 Thread Jürgen Groß

On 11.02.20 10:07, Sergey Dyasli wrote:

On 07/02/2020 08:04, Jürgen Groß wrote:

On 06.02.20 15:02, Sergey Dyasli wrote:

On 06/02/2020 11:05, Sergey Dyasli wrote:

On 06/02/2020 09:57, Jürgen Groß wrote:

On 05.02.20 17:03, Sergey Dyasli wrote:

Hello,

I'm currently investigating a Live-Patch application failure in core-
scheduling mode and this is an example of what I usually get:
(it's easily reproducible)

   (XEN) [  342.528305] livepatch: lp: CPU8 - IPIing the other 15 CPUs
   (XEN) [  342.558340] livepatch: lp: Timed out on semaphore in CPU 
quiesce phase 13/15
   (XEN) [  342.558343] bad cpus: 6 9

   (XEN) [  342.559293] CPU:6
   (XEN) [  342.559562] Xen call trace:
   (XEN) [  342.559565][] R 
common/schedule.c#sched_wait_rendezvous_in+0xa4/0x270
   (XEN) [  342.559568][] F 
common/schedule.c#schedule+0x17a/0x260
   (XEN) [  342.559571][] F 
common/softirq.c#__do_softirq+0x5a/0x90
   (XEN) [  342.559574][] F 
arch/x86/domain.c#guest_idle_loop+0x35/0x60

   (XEN) [  342.559761] CPU:9
   (XEN) [  342.560026] Xen call trace:
   (XEN) [  342.560029][] R _spin_lock_irq+0x11/0x40
   (XEN) [  342.560032][] F 
common/schedule.c#sched_wait_rendezvous_in+0xc3/0x270
   (XEN) [  342.560036][] F 
common/schedule.c#schedule+0x17a/0x260
   (XEN) [  342.560039][] F 
common/softirq.c#__do_softirq+0x5a/0x90
   (XEN) [  342.560042][] F 
arch/x86/domain.c#idle_loop+0x55/0xb0

The first HT sibling is waiting for the second in the LP-application
context while the second waits for the first in the scheduler context.

Any suggestions on how to improve this situation are welcome.


Can you test the attached patch, please? It is only tested to boot, so
I did no livepatch tests with it.


Thank you for the patch! It seems to fix the issue in my manual testing.
I'm going to submit automatic LP testing for both thread/core modes.


Andrew suggested to test late ucode loading as well and so I did.
It uses stop_machine() to rendezvous cpus and it failed with a similar
backtrace for a problematic CPU. But in this case the system crashed
since there is no timeout involved:

  (XEN) [  155.025168] Xen call trace:
  (XEN) [  155.040095][] R _spin_unlock_irq+0x22/0x30
  (XEN) [  155.069549][] S 
common/schedule.c#sched_wait_rendezvous_in+0xa2/0x270
  (XEN) [  155.109696][] F 
common/schedule.c#sched_slave+0x198/0x260
  (XEN) [  155.145521][] F 
common/softirq.c#__do_softirq+0x5a/0x90
  (XEN) [  155.180223][] F 
x86_64/entry.S#process_softirqs+0x6/0x20

It looks like your patch provides a workaround for LP case, but other
cases like stop_machine() remain broken since the underlying issue with
the scheduler is still there.


And here is the fix for ucode loading (that was in fact the only case
where stop_machine_run() wasn't already called in a tasklet).

I have done a manual test loading new ucode with core scheduling
active.


The patch seems to fix the issue, thanks!
Do you plan to post the 2 patches to the ML now for proper review?


Yes.


Juergen


___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH] x86/pvh: Adjust dom0's starting state

2020-02-11 Thread Jan Beulich
On 10.02.2020 21:09, Wei Liu wrote:
> On Mon, Feb 10, 2020 at 06:39:21PM +, Andrew Cooper wrote:
>> Fixes: b25fb1a04e "xen/pvh: Fix segment selector ABI"
>> Signed-off-by: Andrew Cooper 
> 
> Reviewed-by: Wei Liu 

Acked-by: Jan Beulich 


___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH v8 1/5] x86/p2m: Allow p2m_get_page_from_gfn to return shared entries

2020-02-11 Thread Jan Beulich
On 10.02.2020 20:21, Tamas K Lengyel wrote:
> The owner domain of shared pages is dom_cow, use that for get_page
> otherwise the function fails to return the correct page under some
> situations. The check if dom_cow should be used was only performed in
> a subset of use-cases. Fixing the error and simplifying the existing check
> since we can't have any shared entries with dom_cow being NULL.
> 
> Signed-off-by: Tamas K Lengyel 

I find it quite disappointing that the blank lines requested to be
added ...

> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -574,11 +574,12 @@ struct page_info *p2m_get_page_from_gfn(
>  if ( fdom == NULL )
>  page = NULL;
>  }
> -else if ( !get_page(page, p2m->domain) &&
> -  /* Page could be shared */
> -  (!dom_cow || !p2m_is_shared(*t) ||
> -   !get_page(page, dom_cow)) )
> -page = NULL;
> +else
> +{
> +struct domain *d = !p2m_is_shared(*t) ? p2m->domain : dom_cow;
> +if ( !get_page(page, d) )

.. above here and ...

> @@ -594,8 +595,9 @@ struct page_info *p2m_get_page_from_gfn(
>  mfn = get_gfn_type_access(p2m, gfn_x(gfn), t, a, q, NULL);
>  if ( p2m_is_ram(*t) && mfn_valid(mfn) )
>  {
> +struct domain *d = !p2m_is_shared(*t) ? p2m->domain : dom_cow;
>  page = mfn_to_page(mfn);

... above here still haven't appeared. Even though it's easy to add
them while committing, when you send a new version you should really
address such remarks yourself, I think.
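
For the avoidance of doubt, the request is presumably just a blank line
between the declaration and the first statement of the block, i.e.
something along these lines (reconstructed from the quoted hunk, so the
trailing statement is an assumption):

else
{
    struct domain *d = !p2m_is_shared(*t) ? p2m->domain : dom_cow;

    if ( !get_page(page, d) )
        page = NULL;
}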

Jan

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] Live-Patch application failure in core-scheduling mode

2020-02-11 Thread Sergey Dyasli
On 07/02/2020 08:04, Jürgen Groß wrote:
> On 06.02.20 15:02, Sergey Dyasli wrote:
>> On 06/02/2020 11:05, Sergey Dyasli wrote:
>>> On 06/02/2020 09:57, Jürgen Groß wrote:
 On 05.02.20 17:03, Sergey Dyasli wrote:
> Hello,
>
> I'm currently investigating a Live-Patch application failure in core-
> scheduling mode and this is an example of what I usually get:
> (it's easily reproducible)
>
>   (XEN) [  342.528305] livepatch: lp: CPU8 - IPIing the other 15 CPUs
>   (XEN) [  342.558340] livepatch: lp: Timed out on semaphore in CPU 
> quiesce phase 13/15
>   (XEN) [  342.558343] bad cpus: 6 9
>
>   (XEN) [  342.559293] CPU:6
>   (XEN) [  342.559562] Xen call trace:
>   (XEN) [  342.559565][] R 
> common/schedule.c#sched_wait_rendezvous_in+0xa4/0x270
>   (XEN) [  342.559568][] F 
> common/schedule.c#schedule+0x17a/0x260
>   (XEN) [  342.559571][] F 
> common/softirq.c#__do_softirq+0x5a/0x90
>   (XEN) [  342.559574][] F 
> arch/x86/domain.c#guest_idle_loop+0x35/0x60
>
>   (XEN) [  342.559761] CPU:9
>   (XEN) [  342.560026] Xen call trace:
>   (XEN) [  342.560029][] R 
> _spin_lock_irq+0x11/0x40
>   (XEN) [  342.560032][] F 
> common/schedule.c#sched_wait_rendezvous_in+0xc3/0x270
>   (XEN) [  342.560036][] F 
> common/schedule.c#schedule+0x17a/0x260
>   (XEN) [  342.560039][] F 
> common/softirq.c#__do_softirq+0x5a/0x90
>   (XEN) [  342.560042][] F 
> arch/x86/domain.c#idle_loop+0x55/0xb0
>
> The first HT sibling is waiting for the second in the LP-application
> context while the second waits for the first in the scheduler context.
>
> Any suggestions on how to improve this situation are welcome.

 Can you test the attached patch, please? It is only tested to boot, so
 I did no livepatch tests with it.
>>>
>>> Thank you for the patch! It seems to fix the issue in my manual testing.
>>> I'm going to submit automatic LP testing for both thread/core modes.
>>
>> Andrew suggested to test late ucode loading as well and so I did.
>> It uses stop_machine() to rendezvous cpus and it failed with a similar
>> backtrace for a problematic CPU. But in this case the system crashed
>> since there is no timeout involved:
>>
>>  (XEN) [  155.025168] Xen call trace:
>>  (XEN) [  155.040095][] R 
>> _spin_unlock_irq+0x22/0x30
>>  (XEN) [  155.069549][] S 
>> common/schedule.c#sched_wait_rendezvous_in+0xa2/0x270
>>  (XEN) [  155.109696][] F 
>> common/schedule.c#sched_slave+0x198/0x260
>>  (XEN) [  155.145521][] F 
>> common/softirq.c#__do_softirq+0x5a/0x90
>>  (XEN) [  155.180223][] F 
>> x86_64/entry.S#process_softirqs+0x6/0x20
>>
>> It looks like your patch provides a workaround for LP case, but other
>> cases like stop_machine() remain broken since the underlying issue with
>> the scheduler is still there.
>
> And here is the fix for ucode loading (that was in fact the only case
> where stop_machine_run() wasn't already called in a tasklet).
>
> I have done a manual test loading new ucode with core scheduling
> active.

The patch seems to fix the issue, thanks!
Do you plan to post the 2 patches to the ML now for proper review?

--
Sergey

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

[Xen-devel] [ovmf test] 146841: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146841 ovmf real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146841/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-amd64-amd64-xl-qemuu-ovmf-amd64 10 debian-hvm-install fail REGR. vs. 145767
 build-i386                    6 xen-build                fail REGR. vs. 145767

Tests which did not succeed, but are not blocking:
 build-i386-libvirt            1 build-check(1)   blocked  n/a
 test-amd64-i386-xl-qemuu-ovmf-amd64  1 build-check(1)  blocked n/a

version targeted for testing:
 ovmf ccb4c38a505cc830625d9ba10622cd910f03490c
baseline version:
 ovmf 70911f1f4aee0366b6122f2b90d367ec0f066beb

Last test of basis   145767  2020-01-08 00:39:09 Z   34 days
Failing since        145774  2020-01-08 02:50:20 Z   34 days  119 attempts
Testing same since   146841  2020-02-11 03:00:51 Z0 days1 attempts


People who touched revisions under test:
  Aaron Li 
  Albecki, Mateusz 
  Amol N Sukerkar 
  Anthony PERARD 
  Antoine Coeur 
  Ard Biesheuvel 
  Ashish Singhal 
  Bob Feng 
  Bret Barkelew 
  Brian R Haug 
  Eric Dong 
  Fan, ZhijuX 
  Guo Dong 
  Hao A Wu 
  Heng Luo 
  Jason Voelz 
  Jeff Brasen 
  Jian J Wang 
  Kinney, Michael D 
  Krzysztof Koch 
  Laszlo Ersek 
  Leif Lindholm 
  Li, Aaron 
  Liming Gao 
  Liu, Zhiguang 
  Mateusz Albecki 
  Michael D Kinney 
  Michael Kubacki 
  Pavana.K 
  Philippe Mathieu-Daudé 
  Philippe Mathieu-Daude 
  Philippe Mathieu-Daudé 
  Philippe Mathieu-Daudé 
  Pierre Gondois 
  Sean Brogan 
  Siyuan Fu 
  Siyuan, Fu 
  Steven 
  Steven Shi 
  Sudipto Paul 
  Vitaly Cheptsov 
  Vitaly Cheptsov via Groups.Io 
  Wei6 Xu 
  Xu, Wei6 
  Zhichao Gao 
  Zhiguang Liu 
  Zhiju.Fan 

jobs:
 build-amd64-xsm  pass
 build-i386-xsm   pass
 build-amd64  pass
 build-i386   fail
 build-amd64-libvirt  pass
 build-i386-libvirt   blocked 
 build-amd64-pvopspass
 build-i386-pvops pass
 test-amd64-amd64-xl-qemuu-ovmf-amd64 fail
 test-amd64-i386-xl-qemuu-ovmf-amd64  blocked 



sg-report-flight on osstest.test-lab.xenproject.org
logs: /home/logs/logs
images: /home/logs/images

Logs, config files, etc. are available at
http://logs.test-lab.xenproject.org/osstest/logs

Explanation of these reports, and of osstest in general, is at
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README.email;hb=master
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README;hb=master

Test harness code can be found at
http://xenbits.xen.org/gitweb?p=osstest.git;a=summary


Not pushing.

(No revision log; it would be 4653 lines long.)

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

[Xen-devel] [libvirt test] 146843: regressions - FAIL

2020-02-11 Thread osstest service owner
flight 146843 libvirt real [real]
http://logs.test-lab.xenproject.org/osstest/logs/146843/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 build-i386-libvirt            6 libvirt-build            fail REGR. vs. 146182
 build-arm64-libvirt           6 libvirt-build            fail REGR. vs. 146182
 build-armhf-libvirt           6 libvirt-build            fail REGR. vs. 146182
 build-amd64                   6 xen-build                fail REGR. vs. 146182
 build-amd64-xsm               6 xen-build                fail REGR. vs. 146182
 build-i386-xsm                6 xen-build                fail REGR. vs. 146182

Tests which did not succeed, but are not blocking:
 test-amd64-i386-libvirt-xsm   1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 test-amd64-i386-libvirt   1 build-check(1)   blocked  n/a
 test-arm64-arm64-libvirt-qcow2  1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-pair  1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 test-amd64-amd64-libvirt  1 build-check(1)   blocked  n/a
 test-armhf-armhf-libvirt-raw  1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-xsm  1 build-check(1)   blocked  n/a
 test-arm64-arm64-libvirt-xsm  1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-vhd  1 build-check(1)   blocked  n/a
 build-amd64-libvirt   1 build-check(1)   blocked  n/a
 test-arm64-arm64-libvirt  1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-pair  1 build-check(1)   blocked  n/a
 test-armhf-armhf-libvirt  1 build-check(1)   blocked  n/a

version targeted for testing:
 libvirt  0d0d60ddc5e58359cff5be8dfd6dd27e98da0282
baseline version:
 libvirt  a1cd25b919509be2645dbe6f952d5263e0d4e4e5

Last test of basis   146182  2020-01-17 06:00:23 Z   25 days
Failing since        146211  2020-01-18 04:18:52 Z   24 days   25 attempts
Testing same since   146843  2020-02-11 04:18:44 Z0 days1 attempts


People who touched revisions under test:
  Andrea Bolognani 
  Boris Fiuczynski 
  Christian Ehrhardt 
  Daniel Henrique Barboza 
  Daniel P. Berrangé 
  Dario Faggioli 
  Erik Skultety 
  Han Han 
  Jim Fehlig 
  Jiri Denemark 
  Jonathon Jongsma 
  Julio Faracco 
  Ján Tomko 
  Laine Stump 
  Marek Marczykowski-Górecki 
  Michal Privoznik 
  Nikolay Shirokovskiy 
  Pavel Hrdina 
  Peter Krempa 
  Richard W.M. Jones 
  Sahid Orentino Ferdjaoui 
  Stefan Berger 
  Stefan Berger 
  Thomas Huth 
  zhenwei pi 

jobs:
 build-amd64-xsm  fail
 build-arm64-xsm  pass
 build-i386-xsm   fail
 build-amd64  fail
 build-arm64  pass
 build-armhf  pass
 build-i386   pass
 build-amd64-libvirt  blocked 
 build-arm64-libvirt  fail
 build-armhf-libvirt  fail
 build-i386-libvirt   fail
 build-amd64-pvopspass
 build-arm64-pvopspass
 build-armhf-pvopspass
 build-i386-pvops pass
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm   blocked 
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsmblocked 
 test-amd64-amd64-libvirt-xsm blocked 
 test-arm64-arm64-libvirt-xsm blocked 
 test-amd64-i386-libvirt-xsm  blocked 
 test-amd64-amd64-libvirt blocked 
 test-arm64-arm64-libvirt blocked 
 test-armhf-armhf-libvirt blocked 
 test-amd64-i386-libvirt  blocked 
 test-amd64-amd64-libvirt-pairblocked 
 test-amd64-i386-libvirt-pair blocked 
 test-arm64-arm64-libvirt-qcow2   blocked 
 test-armhf-armhf-libvirt-raw blocked 
 test-amd64-amd64-libvirt-vhd blocked 



sg-report-flight on