Re: [PATCH 5/9] iommu/vt-d: Per domain pasid table interfaces
Hi Yi, Thank you very much for reviewing my patches. On 05/01/2018 05:22 PM, Liu, Yi L wrote: >> From: Lu Baolu [mailto:baolu...@linux.intel.com] >> Sent: Tuesday, April 17, 2018 11:03 AM >> >> This patch adds the interfaces for per domain pasid table >> management. Currently we allocate one pasid table for all >> devices under the scope of an IOMMU. It's insecure in the >> cases where multiple devices under one single IOMMU unit >> support PASID feature. With per domain pasid table, we can >> achieve finer protection and isolation granularity. >> >> Cc: Ashok Raj>> Cc: Jacob Pan >> Cc: Kevin Tian >> Cc: Liu Yi L >> Suggested-by: Ashok Raj >> Signed-off-by: Lu Baolu >> --- >> drivers/iommu/intel-pasid.c | 75 >> + >> drivers/iommu/intel-pasid.h | 4 +++ >> include/linux/intel-iommu.h | 5 +++ >> 3 files changed, 84 insertions(+) >> >> diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c >> index 0690f39..b8691a6 100644 >> --- a/drivers/iommu/intel-pasid.c >> +++ b/drivers/iommu/intel-pasid.c >> @@ -13,6 +13,7 @@ >> #include >> #include >> #include >> +#include >> #include >> >> #include "intel-pasid.h" >> @@ -58,3 +59,77 @@ void *intel_pasid_lookup_id(int pasid) >> >> return p; >> } >> + >> +/* >> + * Interfaces for per domain pasid table management: >> + */ >> +int intel_pasid_alloc_table(struct device *dev, size_t entry_size, >> +size_t entry_count) >> +{ >> +struct device_domain_info *info; >> +struct dmar_domain *domain; >> +struct page *pages; >> +int order; >> + >> +info = dev->archdata.iommu; >> +if (WARN_ON(!info || !dev_is_pci(dev) || >> +!info->pasid_supported || >> +!info->domain)) >> +return -EINVAL; >> + >> +domain = info->domain; >> + >> +if (entry_count > intel_pasid_max_id) >> +entry_count = intel_pasid_max_id; >> + >> +order = get_order(entry_size * entry_count); >> +pages = alloc_pages_node(domain->nid, GFP_KERNEL | __GFP_ZERO, order); >> +if (!pages) >> +return -ENOMEM; >> + >> +spin_lock(_lock); >> +if (domain->pasid_table) { > 
Can the check be moved prior to the page allocation? I chose allocation and then assignment with lock to avoid race and possible page allocation failure. > >> +__free_pages(pages, order); >> } else { >> +domain->pasid_table = page_address(pages); >> +domain->order = order; >> +domain->max_pasid = entry_count; >> +} >> +domain->pasid_users++; >> +spin_unlock(_lock); >> + >> +return 0; >> +} >> + >> +void intel_pasid_free_table(struct device *dev) >> +{ >> +struct dmar_domain *domain; >> + >> +domain = get_valid_domain_for_dev(dev); >> +if (!domain || !dev_is_pci(dev)) >> +return; >> + >> +spin_lock(_lock); >> +if (domain->pasid_table) { >> +domain->pasid_users--; >> +if (!domain->pasid_users) { >> +free_pages((unsigned long)domain->pasid_table, >> + domain->order); >> +domain->pasid_table = NULL; >> +domain->order = 0; >> +domain->max_pasid = 0; >> +} >> +} >> +spin_unlock(_lock); >> +} >> + >> +void *intel_pasid_get_table(struct device *dev) > Will intel_iommu_get_pasid_table() be more accurate? This function is defined in intel-pasid.c. The naming pattern for global functions defined in this file is intel_pasid_xxx(). > > Regards, > Yi Liu > Best regards, Lu Baolu
Re: [PATCH 5/9] iommu/vt-d: Per domain pasid table interfaces
Hi Yi, Thank you very much for reviewing my patches. On 05/01/2018 05:22 PM, Liu, Yi L wrote: >> From: Lu Baolu [mailto:baolu...@linux.intel.com] >> Sent: Tuesday, April 17, 2018 11:03 AM >> >> This patch adds the interfaces for per domain pasid table >> management. Currently we allocate one pasid table for all >> devices under the scope of an IOMMU. It's insecure in the >> cases where multiple devices under one single IOMMU unit >> support PASID feature. With per domain pasid table, we can >> achieve finer protection and isolation granularity. >> >> Cc: Ashok Raj >> Cc: Jacob Pan >> Cc: Kevin Tian >> Cc: Liu Yi L >> Suggested-by: Ashok Raj >> Signed-off-by: Lu Baolu >> --- >> drivers/iommu/intel-pasid.c | 75 >> + >> drivers/iommu/intel-pasid.h | 4 +++ >> include/linux/intel-iommu.h | 5 +++ >> 3 files changed, 84 insertions(+) >> >> diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c >> index 0690f39..b8691a6 100644 >> --- a/drivers/iommu/intel-pasid.c >> +++ b/drivers/iommu/intel-pasid.c >> @@ -13,6 +13,7 @@ >> #include >> #include >> #include >> +#include >> #include >> >> #include "intel-pasid.h" >> @@ -58,3 +59,77 @@ void *intel_pasid_lookup_id(int pasid) >> >> return p; >> } >> + >> +/* >> + * Interfaces for per domain pasid table management: >> + */ >> +int intel_pasid_alloc_table(struct device *dev, size_t entry_size, >> +size_t entry_count) >> +{ >> +struct device_domain_info *info; >> +struct dmar_domain *domain; >> +struct page *pages; >> +int order; >> + >> +info = dev->archdata.iommu; >> +if (WARN_ON(!info || !dev_is_pci(dev) || >> +!info->pasid_supported || >> +!info->domain)) >> +return -EINVAL; >> + >> +domain = info->domain; >> + >> +if (entry_count > intel_pasid_max_id) >> +entry_count = intel_pasid_max_id; >> + >> +order = get_order(entry_size * entry_count); >> +pages = alloc_pages_node(domain->nid, GFP_KERNEL | __GFP_ZERO, order); >> +if (!pages) >> +return -ENOMEM; >> + >> +spin_lock(_lock); >> +if (domain->pasid_table) { > 
Can the check be moved prior to the page allocation? I chose allocation and then assignment with lock to avoid race and possible page allocation failure. > >> +__free_pages(pages, order); >> } else { >> +domain->pasid_table = page_address(pages); >> +domain->order = order; >> +domain->max_pasid = entry_count; >> +} >> +domain->pasid_users++; >> +spin_unlock(_lock); >> + >> +return 0; >> +} >> + >> +void intel_pasid_free_table(struct device *dev) >> +{ >> +struct dmar_domain *domain; >> + >> +domain = get_valid_domain_for_dev(dev); >> +if (!domain || !dev_is_pci(dev)) >> +return; >> + >> +spin_lock(_lock); >> +if (domain->pasid_table) { >> +domain->pasid_users--; >> +if (!domain->pasid_users) { >> +free_pages((unsigned long)domain->pasid_table, >> + domain->order); >> +domain->pasid_table = NULL; >> +domain->order = 0; >> +domain->max_pasid = 0; >> +} >> +} >> +spin_unlock(_lock); >> +} >> + >> +void *intel_pasid_get_table(struct device *dev) > Will intel_iommu_get_pasid_table() be more accurate? This function is defined in intel-pasid.c. The naming pattern for global functions defined in this file is intel_pasid_xxx(). > > Regards, > Yi Liu > Best regards, Lu Baolu
RE: [PATCH 5/9] iommu/vt-d: Per domain pasid table interfaces
> From: Lu Baolu [mailto:baolu...@linux.intel.com] > Sent: Tuesday, April 17, 2018 11:03 AM > > This patch adds the interfaces for per domain pasid table > management. Currently we allocate one pasid table for all > devices under the scope of an IOMMU. It's insecure in the > cases where multiple devices under one single IOMMU unit > support PASID feature. With per domain pasid table, we can > achieve finer protection and isolation granularity. > > Cc: Ashok Raj> Cc: Jacob Pan > Cc: Kevin Tian > Cc: Liu Yi L > Suggested-by: Ashok Raj > Signed-off-by: Lu Baolu > --- > drivers/iommu/intel-pasid.c | 75 > + > drivers/iommu/intel-pasid.h | 4 +++ > include/linux/intel-iommu.h | 5 +++ > 3 files changed, 84 insertions(+) > > diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c > index 0690f39..b8691a6 100644 > --- a/drivers/iommu/intel-pasid.c > +++ b/drivers/iommu/intel-pasid.c > @@ -13,6 +13,7 @@ > #include > #include > #include > +#include > #include > > #include "intel-pasid.h" > @@ -58,3 +59,77 @@ void *intel_pasid_lookup_id(int pasid) > > return p; > } > + > +/* > + * Interfaces for per domain pasid table management: > + */ > +int intel_pasid_alloc_table(struct device *dev, size_t entry_size, > + size_t entry_count) > +{ > + struct device_domain_info *info; > + struct dmar_domain *domain; > + struct page *pages; > + int order; > + > + info = dev->archdata.iommu; > + if (WARN_ON(!info || !dev_is_pci(dev) || > + !info->pasid_supported || > + !info->domain)) > + return -EINVAL; > + > + domain = info->domain; > + > + if (entry_count > intel_pasid_max_id) > + entry_count = intel_pasid_max_id; > + > + order = get_order(entry_size * entry_count); > + pages = alloc_pages_node(domain->nid, GFP_KERNEL | __GFP_ZERO, order); > + if (!pages) > + return -ENOMEM; > + > + spin_lock(_lock); > + if (domain->pasid_table) { Can the check be moved prior to the page allocation? 
> + __free_pages(pages, order); > + } else { > + domain->pasid_table = page_address(pages); > + domain->order = order; > + domain->max_pasid = entry_count; > + } > + domain->pasid_users++; > + spin_unlock(_lock); > + > + return 0; > +} > + > +void intel_pasid_free_table(struct device *dev) > +{ > + struct dmar_domain *domain; > + > + domain = get_valid_domain_for_dev(dev); > + if (!domain || !dev_is_pci(dev)) > + return; > + > + spin_lock(_lock); > + if (domain->pasid_table) { > + domain->pasid_users--; > + if (!domain->pasid_users) { > + free_pages((unsigned long)domain->pasid_table, > +domain->order); > + domain->pasid_table = NULL; > + domain->order = 0; > + domain->max_pasid = 0; > + } > + } > + spin_unlock(_lock); > +} > + > +void *intel_pasid_get_table(struct device *dev) Will intel_iommu_get_pasid_table() more accurate? Regards, Yi Liu > +{ > + struct dmar_domain *domain; > + > + domain = get_valid_domain_for_dev(dev); > + if (!domain) > + return NULL; > + > + return domain->pasid_table; > +} > diff --git a/drivers/iommu/intel-pasid.h b/drivers/iommu/intel-pasid.h > index 0c36af0..a90c60b 100644 > --- a/drivers/iommu/intel-pasid.h > +++ b/drivers/iommu/intel-pasid.h > @@ -26,5 +26,9 @@ extern u32 intel_pasid_max_id; > int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp); > void intel_pasid_free_id(int pasid); > void *intel_pasid_lookup_id(int pasid); > +int intel_pasid_alloc_table(struct device *dev, size_t entry_size, > + size_t entry_count); > +void intel_pasid_free_table(struct device *dev); > +void *intel_pasid_get_table(struct device *dev); > > #endif /* __INTEL_PASID_H */ > diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h > index a4463f0..bee7a3f 100644 > --- a/include/linux/intel-iommu.h > +++ b/include/linux/intel-iommu.h > @@ -424,6 +424,11 @@ struct dmar_domain { >*/ > u64 max_addr; /* maximum mapped address */ > > + void*pasid_table; /* pointer of pasid table */ > + unsigned intpasid_users;/* User number of 
pasid table */ > + int order; /* the page order of tables */ > + int max_pasid; /* max pasid */ > + > struct iommu_domain domain; /* >* generic domain data structure for >
RE: [PATCH 5/9] iommu/vt-d: Per domain pasid table interfaces
> From: Lu Baolu [mailto:baolu...@linux.intel.com] > Sent: Tuesday, April 17, 2018 11:03 AM > > This patch adds the interfaces for per domain pasid table > management. Currently we allocate one pasid table for all > devices under the scope of an IOMMU. It's insecure in the > cases where multiple devices under one single IOMMU unit > support PASID feature. With per domain pasid table, we can > achieve finer protection and isolation granularity. > > Cc: Ashok Raj > Cc: Jacob Pan > Cc: Kevin Tian > Cc: Liu Yi L > Suggested-by: Ashok Raj > Signed-off-by: Lu Baolu > --- > drivers/iommu/intel-pasid.c | 75 > + > drivers/iommu/intel-pasid.h | 4 +++ > include/linux/intel-iommu.h | 5 +++ > 3 files changed, 84 insertions(+) > > diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c > index 0690f39..b8691a6 100644 > --- a/drivers/iommu/intel-pasid.c > +++ b/drivers/iommu/intel-pasid.c > @@ -13,6 +13,7 @@ > #include > #include > #include > +#include > #include > > #include "intel-pasid.h" > @@ -58,3 +59,77 @@ void *intel_pasid_lookup_id(int pasid) > > return p; > } > + > +/* > + * Interfaces for per domain pasid table management: > + */ > +int intel_pasid_alloc_table(struct device *dev, size_t entry_size, > + size_t entry_count) > +{ > + struct device_domain_info *info; > + struct dmar_domain *domain; > + struct page *pages; > + int order; > + > + info = dev->archdata.iommu; > + if (WARN_ON(!info || !dev_is_pci(dev) || > + !info->pasid_supported || > + !info->domain)) > + return -EINVAL; > + > + domain = info->domain; > + > + if (entry_count > intel_pasid_max_id) > + entry_count = intel_pasid_max_id; > + > + order = get_order(entry_size * entry_count); > + pages = alloc_pages_node(domain->nid, GFP_KERNEL | __GFP_ZERO, order); > + if (!pages) > + return -ENOMEM; > + > + spin_lock(_lock); > + if (domain->pasid_table) { Can the check be moved prior to the page allocation? 
> + __free_pages(pages, order); > + } else { > + domain->pasid_table = page_address(pages); > + domain->order = order; > + domain->max_pasid = entry_count; > + } > + domain->pasid_users++; > + spin_unlock(_lock); > + > + return 0; > +} > + > +void intel_pasid_free_table(struct device *dev) > +{ > + struct dmar_domain *domain; > + > + domain = get_valid_domain_for_dev(dev); > + if (!domain || !dev_is_pci(dev)) > + return; > + > + spin_lock(_lock); > + if (domain->pasid_table) { > + domain->pasid_users--; > + if (!domain->pasid_users) { > + free_pages((unsigned long)domain->pasid_table, > +domain->order); > + domain->pasid_table = NULL; > + domain->order = 0; > + domain->max_pasid = 0; > + } > + } > + spin_unlock(_lock); > +} > + > +void *intel_pasid_get_table(struct device *dev) Will intel_iommu_get_pasid_table() more accurate? Regards, Yi Liu > +{ > + struct dmar_domain *domain; > + > + domain = get_valid_domain_for_dev(dev); > + if (!domain) > + return NULL; > + > + return domain->pasid_table; > +} > diff --git a/drivers/iommu/intel-pasid.h b/drivers/iommu/intel-pasid.h > index 0c36af0..a90c60b 100644 > --- a/drivers/iommu/intel-pasid.h > +++ b/drivers/iommu/intel-pasid.h > @@ -26,5 +26,9 @@ extern u32 intel_pasid_max_id; > int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp); > void intel_pasid_free_id(int pasid); > void *intel_pasid_lookup_id(int pasid); > +int intel_pasid_alloc_table(struct device *dev, size_t entry_size, > + size_t entry_count); > +void intel_pasid_free_table(struct device *dev); > +void *intel_pasid_get_table(struct device *dev); > > #endif /* __INTEL_PASID_H */ > diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h > index a4463f0..bee7a3f 100644 > --- a/include/linux/intel-iommu.h > +++ b/include/linux/intel-iommu.h > @@ -424,6 +424,11 @@ struct dmar_domain { >*/ > u64 max_addr; /* maximum mapped address */ > > + void*pasid_table; /* pointer of pasid table */ > + unsigned intpasid_users;/* User number of 
pasid table */ > + int order; /* the page order of tables */ > + int max_pasid; /* max pasid */ > + > struct iommu_domain domain; /* >* generic domain data structure for >* iommu core > -- > 2.7.4