>>> On 02.06.16 at 13:03, <wei.l...@citrix.com> wrote:
> On Thu, Jun 02, 2016 at 04:38:47AM -0600, Jan Beulich wrote:
>> >>> On 02.06.16 at 12:22, <wei.l...@citrix.com> wrote:
>> > On Thu, Jun 02, 2016 at 07:31:06AM +0000, Xu, Quan wrote:
>> >> On May 27, 2016 10:06 PM, Jan Beulich <jbeul...@suse.com> wrote:
>> >> > >>> On 27.05.16 at 15:34, <wei.l...@citrix.com> wrote:
>> >> > > On Fri, May 27, 2016 at 06:16:30AM -0600, Jan Beulich wrote:
>> >> > >> >>> On 27.05.16 at 12:39, <wei.l...@citrix.com> wrote:
>> >> > >> > Is this a regression? Does it work on previous versions of Xen?
>> >> > >>
>> >> > >> I think this is what was already reported by other Intel people, see
>> >> > >> e.g. Quan's most recent reply:
>> >> > >> http://lists.xenproject.org/archives/html/xen-devel/2016-05/msg01896.html
>> >> > >> It is not clear where the problem is, and not seeing the issue
>> >> > >> myself makes it hard to analyze. In any event this quite likely is a
>> >> > >> regression.
>> >> > >>
>> >> > >
>> >> > > My reading of that email thread and all relevant links (including the
>> >> > > KVM bug report) is that there is a regression in the VF driver, but
>> >> > > not in Xen.
>> >> > 
>> >> > Just from reading that I would tend to agree. But the report here is
>> >> > about Win2K8.
>> >> 
>> >> Do you know which commit introduced the regression? I am trying to find
>> >> the offending commit, which may help to identify the root cause.
>> >> 
>> >> Btw, some feedback from the QA team: a RHEL 6.4 VM doesn't work, but a
>> >> RHEL 7.2 VM does.
>> > 
>> > Isn't this at least an indication that the guest could be buggy here?
>> > It could also be that both the hypervisor and the guest have bugs. But
>> > we're just not sure at this point.
>> 
>> Indeed, and (with the many fixes that went in already) I really
>> suspect a combination of both, or some of the involved hypervisor
>> changes having unmasked some guest issue. Regardless, I'm
>> afraid this ought to be treated as a blocker for the release at
>> least until we understand what the issue is. But otoh making it a
>> blocker probably makes sense only if we can expect progress
>> (which we haven't really made for quite a long time).
>> 
> 
> This issue is on my list, but the information gathered so far isn't
> convincing enough to make it a blocker.
> 
> And yes, we need meaningful progress to make it a blocker. To make it
> so, commitment from various parties is needed. Let's start with setting
> out things to look at, who is going to investigate what, and a possible
> timeline for each item.
> 
> Jan, can you come up with a list of what sort of information you need?

Well, I had hoped to avoid that. But now that you ask for it,
providing an initial debugging patch seems better than a
description which may get misunderstood. Attached are both a
hypervisor and a qemu patch. Their output, plus that of debug
keys 'M' and 'i', is what I'd like to start with.
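
Something along these lines should do for collecting the key output
once the patched hypervisor is running (the log file name is of
course just an example):

  xl debug-keys M    # dump MSI state
  xl debug-keys i    # dump interrupt bindings
  xl dmesg > /tmp/xen-debug-keys.log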

Jan

> And then maybe Quan and Pengtao can give an estimate of how long it
> will take to gather all necessary information and move on to the next
> stage.
> 
> Wei.
> 
>> Jan
>> 



--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -276,6 +276,7 @@ static int msixtbl_write(struct vcpu *v,
     if ( !entry )
         goto out;
     nr_entry = (address - entry->gtable) / PCI_MSIX_ENTRY_SIZE;
+printk("%pv: write MSI-X#%u: [%lx]=%0*lx\n", v, nr_entry, address, (int)len * 
2, val);//temp
 
     offset = address & (PCI_MSIX_ENTRY_SIZE - 1);
     if ( offset != PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET )
@@ -321,7 +322,17 @@ static int msixtbl_write(struct vcpu *v,
 
     ASSERT(msi_desc == desc->msi_desc);
    
+{//temp
+ bool_t h = msi_desc->msi_attrib.host_masked;
+ bool_t g = msi_desc->msi_attrib.guest_masked;
+ bool_t ha = entry->pdev->msix->host_maskall;
+ bool_t ga = entry->pdev->msix->guest_maskall;
     guest_mask_msi_irq(desc, !!(val & PCI_MSIX_VECTOR_BITMASK));
+ printk("%pv: MSI-X#%u %d(%d) / %d(%d) -> %d(%d) / %d(%d)\n",
+        v, nr_entry, h, ha, g, ga,
+        msi_desc->msi_attrib.host_masked, entry->pdev->msix->host_maskall,
+        msi_desc->msi_attrib.guest_masked, entry->pdev->msix->guest_maskall);
+}
 
 unlock:
     spin_unlock_irqrestore(&desc->lock, flags);
@@ -330,6 +341,7 @@ unlock:
 
 out:
     rcu_read_unlock(&msixtbl_rcu_lock);
+printk("%pv: write MSI-X [%lx] -> %d\n", v, address, r);//temp
     return r;
 }
 
--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -438,6 +438,10 @@ static bool_t msi_set_mask_bit(struct ir
             if ( likely(control & PCI_MSIX_FLAGS_ENABLE) )
                 break;
 
+if(pdev->info.is_virtfn) {//temp
+ printk("%04x:%02x:%02x.%o#%u: %d/%d >> %d/%d [%p]\n", seg, bus, slot, func, 
entry->msi_attrib.entry_nr,
+        entry->msi_attrib.host_masked, entry->msi_attrib.guest_masked, host, 
guest, __builtin_return_address(0));
+}
             entry->msi_attrib.host_masked = host;
             entry->msi_attrib.guest_masked = guest;
 
@@ -1305,6 +1309,11 @@ int pci_msi_conf_write_intercept(struct
             pdev->msix->guest_maskall = !!(*data & PCI_MSIX_FLAGS_MASKALL);
             if ( pdev->msix->host_maskall )
                 *data |= PCI_MSIX_FLAGS_MASKALL;
+if(pdev->info.is_virtfn) {//temp
+ printk("%04x:%02x:%02x.%o: ctrl -> %04x (d%d:%lx,d%d)\n", seg, bus, slot, 
func,
+        (uint16_t)*data, current->domain->domain_id, 
guest_cpu_user_regs()->eip,
+        pdev->domain ? pdev->domain->domain_id : -1);
+}
 
             return 1;
         }
--- a/hw/xen/xen_pt.h
+++ b/hw/xen/xen_pt.h
@@ -10,6 +10,7 @@ void xen_pt_log(const PCIDevice *d, cons
 
 #define XEN_PT_ERR(d, _f, _a...) xen_pt_log(d, "%s: Error: "_f, __func__, ##_a)
 
+#define XEN_PT_LOGGING_ENABLED//temp
 #ifdef XEN_PT_LOGGING_ENABLED
 #  define XEN_PT_LOG(d, _f, _a...)  xen_pt_log(d, "%s: " _f, __func__, ##_a)
 #  define XEN_PT_WARN(d, _f, _a...) \
--- a/hw/xen/xen_pt_config_init.c
+++ b/hw/xen/xen_pt_config_init.c
@@ -1474,6 +1474,8 @@ static int xen_pt_msixctrl_reg_write(Xen
     writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
     *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
 
+XEN_PT_LOG(&s->dev, "MSI-X ctrl %04x (%04x/%04x)\n",//temp
+           *val, *data, XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask));//temp
     /* create value for writing to I/O device register */
     *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
 
@@ -2027,7 +2029,7 @@ int xen_pt_config_init(XenPCIPassthrough
                                                   reg_grp_offset,
                                                   &reg_grp_entry->size);
             if (rc < 0) {
-                XEN_PT_LOG(&s->dev, "Failed to initialize %d/%ld, type=0x%x, rc:%d\n",
+                XEN_PT_LOG(&s->dev, "Failed to initialize %d/%zd, type=0x%x, rc:%d\n",
                            i, ARRAY_SIZE(xen_pt_emu_reg_grps),
                            xen_pt_emu_reg_grps[i].grp_type, rc);
                 xen_pt_config_delete(s);
@@ -2044,7 +2046,7 @@ int xen_pt_config_init(XenPCIPassthrough
                     /* initialize capability register */
                     rc = xen_pt_config_reg_init(s, reg_grp_entry, regs);
                     if (rc < 0) {
-                        XEN_PT_LOG(&s->dev, "Failed to initialize %d/%ld reg 0x%x in grp_type=0x%x (%d/%ld), rc=%d\n",
+                        XEN_PT_LOG(&s->dev, "Failed to initialize %d/%zd reg 0x%x in grp_type=0x%x (%d/%zd), rc=%d\n",
                                   j, ARRAY_SIZE(xen_pt_emu_reg_grps[i].emu_regs),
                                   regs->offset, xen_pt_emu_reg_grps[i].grp_type,
                                    i, ARRAY_SIZE(xen_pt_emu_reg_grps), rc);
--- a/hw/xen/xen_pt_msi.c
+++ b/hw/xen/xen_pt_msi.c
@@ -444,6 +444,7 @@ static void pci_msix_write(void *opaque,
     entry = &msix->msix_entry[entry_nr];
     offset = addr % PCI_MSIX_ENTRY_SIZE;
 
+XEN_PT_LOG(&s->dev, "[%"PRIx64"]=%08"PRIx64" (%u,%x)\n", addr, val, entry_nr, offset);//temp
     if (offset != PCI_MSIX_ENTRY_VECTOR_CTRL) {
         if (get_entry_value(entry, offset) == val
             && entry->pirq != XEN_PT_UNASSIGNED_PIRQ) {
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
