Re: [PATCH v2 10/37] iommu/amd: Introduce per PCI segment last_bdf

2022-05-05 Thread Vasant Hegde via iommu
Hi Joerg,


On 5/2/2022 4:24 PM, Joerg Roedel wrote:
> Hi Vasant,
> 
> On Fri, Apr 29, 2022 at 08:15:49PM +0530, Vasant Hegde wrote:
>> We still need to parse IVHD to find max devices supported by each PCI segment
>> (same as the way it's doing it today). Hence we need all these variables.
> 
> From what I have seen since a few years the IVRS tables enumerate the
> whole PCI segment, up to device ff:1f.7. This results in the maximum
> being allocated for all data structures anyway. Therefore we can
> probably think about skipping the scan to find the largest bdf and just
> assume it is ff:1f.7, saving us all the size-tracking variables?

With PCI segment, I think we will have segments with less than ff:1f.7.
Hence we need these variables.

-Vasant

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH v2 10/37] iommu/amd: Introduce per PCI segment last_bdf

2022-05-02 Thread Joerg Roedel
Hi Vasant,

On Fri, Apr 29, 2022 at 08:15:49PM +0530, Vasant Hegde wrote:
> We still need to parse IVHD to find max devices supported by each PCI segment
> (same as the way it's doing it today). Hence we need all these variables.

From what I have seen since a few years the IVRS tables enumerate the
whole PCI segment, up to device ff:1f.7. This results in the maximum
being allocated for all data structures anyway. Therefore we can
probably think about skipping the scan to find the largest bdf and just
assume it is ff:1f.7, saving us all the size-tracking variables?

Regards,

Joerg
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH v2 10/37] iommu/amd: Introduce per PCI segment last_bdf

2022-04-29 Thread Vasant Hegde via iommu
Joerg,


On 4/28/2022 3:40 PM, Joerg Roedel wrote:
> On Mon, Apr 25, 2022 at 05:03:48PM +0530, Vasant Hegde wrote:
>> +/* Largest PCI device id we expect translation requests for */
>> +u16 last_bdf;
> 
> How does the IVRS table look like on these systems? Do they still
> enumerate the whole PCI Bus/Dev/Fn space? If so I am fine with getting
> rid of last_bdf altogether and just allocate the data structures with
> their maximum size.
> 

We still need to parse IVHD to find max devices supported by each PCI segment
(same as the way it's doing it today). Hence we need all these variables.


-Vasant
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH v2 10/37] iommu/amd: Introduce per PCI segment last_bdf

2022-04-28 Thread Joerg Roedel
On Mon, Apr 25, 2022 at 05:03:48PM +0530, Vasant Hegde wrote:
> + /* Largest PCI device id we expect translation requests for */
> + u16 last_bdf;

How does the IVRS table look like on these systems? Do they still
enumerate the whole PCI Bus/Dev/Fn space? If so I am fine with getting
rid of last_bdf altogether and just allocate the data structures with
their maximum size.

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH v2 10/37] iommu/amd: Introduce per PCI segment last_bdf

2022-04-25 Thread Vasant Hegde via iommu
Current code uses global "amd_iommu_last_bdf" to track the last bdf
supported by the system. This value is used for various memory
allocation, device data flushing, etc.

Introduce per PCI segment last_bdf which will be used to track last bdf
supported by the given PCI segment and use this value for all per
segment memory allocations. Eventually it will replace global
"amd_iommu_last_bdf".

Co-developed-by: Suravee Suthikulpanit 
Signed-off-by: Suravee Suthikulpanit 
Signed-off-by: Vasant Hegde 
---
 drivers/iommu/amd/amd_iommu_types.h |  3 ++
 drivers/iommu/amd/init.c| 68 ++---
 2 files changed, 45 insertions(+), 26 deletions(-)

diff --git a/drivers/iommu/amd/amd_iommu_types.h 
b/drivers/iommu/amd/amd_iommu_types.h
index c4c9c35e2bf7..e39e7db54e69 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -544,6 +544,9 @@ struct amd_iommu_pci_seg {
/* PCI segment number */
u16 id;
 
+   /* Largest PCI device id we expect translation requests for */
+   u16 last_bdf;
+
/*
 * device table virtual address
 *
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index d613e20ea013..71f39551a83a 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -550,6 +550,7 @@ static int __init find_last_devid_from_ivhd(struct 
ivhd_header *h)
 {
u8 *p = (void *)h, *end = (void *)h;
struct ivhd_entry *dev;
+   int last_devid = -EINVAL;
 
u32 ivhd_size = get_ivhd_header_size(h);
 
@@ -567,6 +568,7 @@ static int __init find_last_devid_from_ivhd(struct 
ivhd_header *h)
case IVHD_DEV_ALL:
/* Use maximum BDF value for DEV_ALL */
update_last_devid(0xffff);
+   return 0xffff;
break;
case IVHD_DEV_SELECT:
case IVHD_DEV_RANGE_END:
@@ -574,6 +576,8 @@ static int __init find_last_devid_from_ivhd(struct 
ivhd_header *h)
case IVHD_DEV_EXT_SELECT:
/* all the above subfield types refer to device ids */
update_last_devid(dev->devid);
+   if (dev->devid > last_devid)
+   last_devid = dev->devid;
break;
default:
break;
@@ -583,7 +587,7 @@ static int __init find_last_devid_from_ivhd(struct 
ivhd_header *h)
 
WARN_ON(p != end);
 
-   return 0;
+   return last_devid;
 }
 
 static int __init check_ivrs_checksum(struct acpi_table_header *table)
@@ -607,27 +611,31 @@ static int __init check_ivrs_checksum(struct 
acpi_table_header *table)
  * id which we need to handle. This is the first of three functions which parse
  * the ACPI table. So we check the checksum here.
  */
-static int __init find_last_devid_acpi(struct acpi_table_header *table)
+static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 
pci_seg)
 {
u8 *p = (u8 *)table, *end = (u8 *)table;
struct ivhd_header *h;
+   int last_devid, last_bdf = 0;
 
p += IVRS_HEADER_LENGTH;
 
end += table->length;
while (p < end) {
h = (struct ivhd_header *)p;
-   if (h->type == amd_iommu_target_ivhd_type) {
-   int ret = find_last_devid_from_ivhd(h);
-
-   if (ret)
-   return ret;
+   if (h->pci_seg == pci_seg &&
+   h->type == amd_iommu_target_ivhd_type) {
+   last_devid = find_last_devid_from_ivhd(h);
+
+   if (last_devid < 0)
+   return -EINVAL;
+   if (last_devid > last_bdf)
+   last_bdf = last_devid;
}
p += h->length;
}
WARN_ON(p != end);
 
-   return 0;
+   return last_bdf;
 }
 
 /
@@ -1551,14 +1559,28 @@ static int __init init_iommu_from_acpi(struct amd_iommu 
*iommu,
 }
 
 /* Allocate PCI segment data structure */
-static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id)
+static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
+ struct acpi_table_header *ivrs_base)
 {
struct amd_iommu_pci_seg *pci_seg;
+   int last_bdf;
+
+   /*
+* First parse ACPI tables to find the largest Bus/Dev/Func we need to
+* handle in this PCI segment. Upon this information the shared data
+* structures for the PCI segments in the system will be allocated.
+*/
+   last_bdf = find_last_devid_acpi(ivrs_base, id);
+   if (last_bdf < 0)
+   return NULL;
 
pci_seg = kzalloc(sizeof(struct amd_iommu_pci_seg), GFP_KERNEL);
if (pci_seg == NULL)