Re: [PATCH v2 6/9] iommu/ioasid: Introduce notification APIs
Hi Jacob, On 9/10/20 12:58 AM, Jacob Pan wrote: > On Tue, 1 Sep 2020 18:49:38 +0200 > Auger Eric wrote: > >> Hi Jacob, >> >> On 8/22/20 6:35 AM, Jacob Pan wrote: >>> Relations among IOASID users largely follow a publisher-subscriber >>> pattern. E.g. to support guest SVA on Intel Scalable I/O >>> Virtualization (SIOV) enabled platforms, VFIO, IOMMU, device >>> drivers, KVM are all users of IOASIDs. When a state change occurs, >>> VFIO publishes the change event that needs to be processed by other >>> users/subscribers. >>> >>> This patch introduced two types of notifications: global and per >>> ioasid_set. The latter is intended for users who only needs to >>> handle events related to the IOASID of a given set. >>> For more information, refer to the kernel documentation at >>> Documentation/ioasid.rst. >>> >>> Signed-off-by: Liu Yi L >>> Signed-off-by: Wu Hao >>> Signed-off-by: Jacob Pan >>> --- >>> drivers/iommu/ioasid.c | 280 >>> - >>> include/linux/ioasid.h | 70 + 2 files changed, 348 >>> insertions(+), 2 deletions(-) >>> >>> diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c >>> index c0aef38a4fde..6ddc09a7fe74 100644 >>> --- a/drivers/iommu/ioasid.c >>> +++ b/drivers/iommu/ioasid.c >>> @@ -9,8 +9,35 @@ >>> #include >>> #include >>> #include >>> +#include >>> >>> static DEFINE_XARRAY_ALLOC(ioasid_sets); >>> +/* >>> + * An IOASID could have multiple consumers where each consumeer >>> may have >> can have multiple consumers > Sounds good, I used past tense to describe a possibility :) > >>> + * hardware contexts associated with IOASIDs. >>> + * When a status change occurs, such as IOASID is being freed, >>> notifier chains >> s/such as IOASID is being freed/, like on IOASID deallocation, > Better, will do. > >>> + * are used to keep the consumers in sync. >>> + * This is a publisher-subscriber pattern where publisher can >>> change the >>> + * state of each IOASID, e.g. alloc/free, bind IOASID to a device >>> and mm. 
>>> + * On the other hand, subscribers gets notified for the state >>> change and >>> + * keep local states in sync. >>> + * >>> + * Currently, the notifier is global. A further optimization could >>> be per >>> + * IOASID set notifier chain. >>> + */ >>> +static ATOMIC_NOTIFIER_HEAD(ioasid_chain); >>> + >>> +/* List to hold pending notification block registrations */ >>> +static LIST_HEAD(ioasid_nb_pending_list); >>> +static DEFINE_SPINLOCK(ioasid_nb_lock); >>> +struct ioasid_set_nb { >>> + struct list_headlist; >>> + struct notifier_block *nb; >>> + void*token; >>> + struct ioasid_set *set; >>> + boolactive; >>> +}; >>> + >>> enum ioasid_state { >>> IOASID_STATE_INACTIVE, >>> IOASID_STATE_ACTIVE, >>> @@ -394,6 +421,7 @@ EXPORT_SYMBOL_GPL(ioasid_find_by_spid); >>> ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, >>> ioasid_t max, void *private) >>> { >>> + struct ioasid_nb_args args; >>> struct ioasid_data *data; >>> void *adata; >>> ioasid_t id = INVALID_IOASID; >>> @@ -445,8 +473,14 @@ ioasid_t ioasid_alloc(struct ioasid_set *set, >>> ioasid_t min, ioasid_t max, goto exit_free; >>> } >>> set->nr_ioasids++; >>> - goto done_unlock; >>> + args.id = id; >>> + /* Set private ID is not attached during allocation */ >>> + args.spid = INVALID_IOASID; >>> + args.set = set; >>> + atomic_notifier_call_chain(&set->nh, IOASID_ALLOC, &args); >>> >>> + spin_unlock(&ioasid_allocator_lock); >>> + return id; >> spurious change > Good catch. should just goto done_unlock. 
> >>> exit_free: >>> kfree(data); >>> done_unlock: >>> @@ -479,6 +513,7 @@ static void ioasid_do_free(struct ioasid_data >>> *data) >>> static void ioasid_free_locked(struct ioasid_set *set, ioasid_t >>> ioasid) { >>> + struct ioasid_nb_args args; >>> struct ioasid_data *data; >>> >>> data = xa_load(&active_allocator->xa, ioasid); >>> @@ -491,7 +526,16 @@ static void ioasid_free_locked(struct >>> ioasid_set *set, ioasid_t ioasid) pr_warn("Cannot free IOASID %u >>> due to set ownership\n", ioasid); return; >>> } >>> + >> spurious new line > got it > >>> data->state = IOASID_STATE_FREE_PENDING; >>> + /* Notify all users that this IOASID is being freed */ >>> + args.id = ioasid; >>> + args.spid = data->spid; >>> + args.pdata = data->private; >>> + args.set = data->set; >>> + atomic_notifier_call_chain(&ioasid_chain, IOASID_FREE, >>> &args); >>> + /* Notify the ioasid_set for per set users */ >>> + atomic_notifier_call_chain(&set->nh, IOASID_FREE, &args); >>> >>> if (!refcount_dec_and_test(&data->users)) >>> return; >> Shouldn't we call the notifier only when ref count == 0? > Not in the current scheme. The idea is to notify all users the PASID is > being freed, then each user can drop its reference. When refcount == 0, > the PASID will be returned to the pool.
Re: [PATCH v2 6/9] iommu/ioasid: Introduce notification APIs
On Tue, 1 Sep 2020 18:49:38 +0200 Auger Eric wrote: > Hi Jacob, > > On 8/22/20 6:35 AM, Jacob Pan wrote: > > Relations among IOASID users largely follow a publisher-subscriber > > pattern. E.g. to support guest SVA on Intel Scalable I/O > > Virtualization (SIOV) enabled platforms, VFIO, IOMMU, device > > drivers, KVM are all users of IOASIDs. When a state change occurs, > > VFIO publishes the change event that needs to be processed by other > > users/subscribers. > > > > This patch introduced two types of notifications: global and per > > ioasid_set. The latter is intended for users who only needs to > > handle events related to the IOASID of a given set. > > For more information, refer to the kernel documentation at > > Documentation/ioasid.rst. > > > > Signed-off-by: Liu Yi L > > Signed-off-by: Wu Hao > > Signed-off-by: Jacob Pan > > --- > > drivers/iommu/ioasid.c | 280 > > - > > include/linux/ioasid.h | 70 + 2 files changed, 348 > > insertions(+), 2 deletions(-) > > > > diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c > > index c0aef38a4fde..6ddc09a7fe74 100644 > > --- a/drivers/iommu/ioasid.c > > +++ b/drivers/iommu/ioasid.c > > @@ -9,8 +9,35 @@ > > #include > > #include > > #include > > +#include > > > > static DEFINE_XARRAY_ALLOC(ioasid_sets); > > +/* > > + * An IOASID could have multiple consumers where each consumeer > > may have > can have multiple consumers Sounds good, I used past tense to describe a possibility :) > > + * hardware contexts associated with IOASIDs. > > + * When a status change occurs, such as IOASID is being freed, > > notifier chains > s/such as IOASID is being freed/, like on IOASID deallocation, Better, will do. > > + * are used to keep the consumers in sync. > > + * This is a publisher-subscriber pattern where publisher can > > change the > > + * state of each IOASID, e.g. alloc/free, bind IOASID to a device > > and mm. 
> > + * On the other hand, subscribers gets notified for the state > > change and > > + * keep local states in sync. > > + * > > + * Currently, the notifier is global. A further optimization could > > be per > > + * IOASID set notifier chain. > > + */ > > +static ATOMIC_NOTIFIER_HEAD(ioasid_chain); > > + > > +/* List to hold pending notification block registrations */ > > +static LIST_HEAD(ioasid_nb_pending_list); > > +static DEFINE_SPINLOCK(ioasid_nb_lock); > > +struct ioasid_set_nb { > > + struct list_headlist; > > + struct notifier_block *nb; > > + void*token; > > + struct ioasid_set *set; > > + boolactive; > > +}; > > + > > enum ioasid_state { > > IOASID_STATE_INACTIVE, > > IOASID_STATE_ACTIVE, > > @@ -394,6 +421,7 @@ EXPORT_SYMBOL_GPL(ioasid_find_by_spid); > > ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, > > ioasid_t max, void *private) > > { > > + struct ioasid_nb_args args; > > struct ioasid_data *data; > > void *adata; > > ioasid_t id = INVALID_IOASID; > > @@ -445,8 +473,14 @@ ioasid_t ioasid_alloc(struct ioasid_set *set, > > ioasid_t min, ioasid_t max, goto exit_free; > > } > > set->nr_ioasids++; > > - goto done_unlock; > > + args.id = id; > > + /* Set private ID is not attached during allocation */ > > + args.spid = INVALID_IOASID; > > + args.set = set; > > + atomic_notifier_call_chain(&set->nh, IOASID_ALLOC, &args); > > > > + spin_unlock(&ioasid_allocator_lock); > > + return id; > spurious change Good catch. should just goto done_unlock. 
> > exit_free: > > kfree(data); > > done_unlock: > > @@ -479,6 +513,7 @@ static void ioasid_do_free(struct ioasid_data > > *data) > > static void ioasid_free_locked(struct ioasid_set *set, ioasid_t > > ioasid) { > > + struct ioasid_nb_args args; > > struct ioasid_data *data; > > > > data = xa_load(&active_allocator->xa, ioasid); > > @@ -491,7 +526,16 @@ static void ioasid_free_locked(struct > > ioasid_set *set, ioasid_t ioasid) pr_warn("Cannot free IOASID %u > > due to set ownership\n", ioasid); return; > > } > > + > spurious new line got it > > data->state = IOASID_STATE_FREE_PENDING; > > + /* Notify all users that this IOASID is being freed */ > > + args.id = ioasid; > > + args.spid = data->spid; > > + args.pdata = data->private; > > + args.set = data->set; > > + atomic_notifier_call_chain(&ioasid_chain, IOASID_FREE, > > &args); > > + /* Notify the ioasid_set for per set users */ > > + atomic_notifier_call_chain(&set->nh, IOASID_FREE, &args); > > > > if (!refcount_dec_and_test(&data->users)) > > return; > Shouldn't we call the notifier only when ref count == 0? Not in the current scheme. The idea is to notify all users the PASID is being freed, then each user can drop its reference. When refcount == 0, the PASID will be returned to the pool. > > @@ -514,6 +558,28 @@ void ioasid_free(struct ioasid_set *set, > > ioasid
Re: [PATCH v2 6/9] iommu/ioasid: Introduce notification APIs
On Tue, 25 Aug 2020 12:26:17 +0200 Jean-Philippe Brucker wrote: > On Fri, Aug 21, 2020 at 09:35:15PM -0700, Jacob Pan wrote: > > Relations among IOASID users largely follow a publisher-subscriber > > pattern. E.g. to support guest SVA on Intel Scalable I/O > > Virtualization (SIOV) enabled platforms, VFIO, IOMMU, device > > drivers, KVM are all users of IOASIDs. When a state change occurs, > > VFIO publishes the change event that needs to be processed by other > > users/subscribers. > > > > This patch introduced two types of notifications: global and per > > ioasid_set. The latter is intended for users who only needs to > > handle events related to the IOASID of a given set. > > For more information, refer to the kernel documentation at > > Documentation/ioasid.rst. > > > > Signed-off-by: Liu Yi L > > Signed-off-by: Wu Hao > > Signed-off-by: Jacob Pan > > --- > > drivers/iommu/ioasid.c | 280 > > - > > include/linux/ioasid.h | 70 + 2 files changed, 348 > > insertions(+), 2 deletions(-) > > > > diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c > > index c0aef38a4fde..6ddc09a7fe74 100644 > > --- a/drivers/iommu/ioasid.c > > +++ b/drivers/iommu/ioasid.c > > @@ -9,8 +9,35 @@ > > #include > > #include > > #include > > +#include > > > > static DEFINE_XARRAY_ALLOC(ioasid_sets); > > +/* > > + * An IOASID could have multiple consumers where each consumeer > > may have > > consumer > got it > > + * hardware contexts associated with IOASIDs. > > + * When a status change occurs, such as IOASID is being freed, > > notifier chains > > + * are used to keep the consumers in sync. > > + * This is a publisher-subscriber pattern where publisher can > > change the > > + * state of each IOASID, e.g. alloc/free, bind IOASID to a device > > and mm. > > + * On the other hand, subscribers gets notified for the state > > change and > > + * keep local states in sync. > > + * > > + * Currently, the notifier is global. 
A further optimization could > > be per > > + * IOASID set notifier chain. > > The patch adds both > right, the comment is old. I will remove the paragraph since it is in the doc. > > + */ > > +static ATOMIC_NOTIFIER_HEAD(ioasid_chain); > > "ioasid_notifier" may be clearer > will do. > > + > > +/* List to hold pending notification block registrations */ > > +static LIST_HEAD(ioasid_nb_pending_list); > > +static DEFINE_SPINLOCK(ioasid_nb_lock); > > +struct ioasid_set_nb { > > + struct list_headlist; > > + struct notifier_block *nb; > > + void*token; > > + struct ioasid_set *set; > > + boolactive; > > +}; > > + > > enum ioasid_state { > > IOASID_STATE_INACTIVE, > > IOASID_STATE_ACTIVE, > > @@ -394,6 +421,7 @@ EXPORT_SYMBOL_GPL(ioasid_find_by_spid); > > ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, > > ioasid_t max, void *private) > > { > > + struct ioasid_nb_args args; > > struct ioasid_data *data; > > void *adata; > > ioasid_t id = INVALID_IOASID; > > @@ -445,8 +473,14 @@ ioasid_t ioasid_alloc(struct ioasid_set *set, > > ioasid_t min, ioasid_t max, goto exit_free; > > } > > set->nr_ioasids++; > > - goto done_unlock; > > + args.id = id; > > + /* Set private ID is not attached during allocation */ > > + args.spid = INVALID_IOASID; > > + args.set = set; > > args.pdata is uninitialized > right, it should be args.pdata = data->private; > > + atomic_notifier_call_chain(&set->nh, IOASID_ALLOC, > > &args); > > No global notification? > There hasn't been a need since the only global notifier listener is vt-d driver which cares about FREE event only. 
> > > > + spin_unlock(&ioasid_allocator_lock); > > + return id; > > exit_free: > > kfree(data); > > done_unlock: > > @@ -479,6 +513,7 @@ static void ioasid_do_free(struct ioasid_data > > *data) > > static void ioasid_free_locked(struct ioasid_set *set, ioasid_t > > ioasid) { > > + struct ioasid_nb_args args; > > struct ioasid_data *data; > > > > data = xa_load(&active_allocator->xa, ioasid); > > @@ -491,7 +526,16 @@ static void ioasid_free_locked(struct > > ioasid_set *set, ioasid_t ioasid) pr_warn("Cannot free IOASID %u > > due to set ownership\n", ioasid); return; > > } > > + > > data->state = IOASID_STATE_FREE_PENDING; > > + /* Notify all users that this IOASID is being freed */ > > + args.id = ioasid; > > + args.spid = data->spid; > > + args.pdata = data->private; > > + args.set = data->set; > > + atomic_notifier_call_chain(&ioasid_chain, IOASID_FREE, > > &args); > > + /* Notify the ioasid_set for per set users */ > > + atomic_notifier_call_chain(&set->nh, IOASID_FREE, &args); > > > > if (!refcount_dec_and_test(&data->users)) > > return; > > @@ -514,6 +558,28 @@ void ioasid_free(struct ioasid_set *set, > > ioasid_t ioasid) } > > EXPORT_SYMBOL_GPL(ioasid_free); > > > > +static
Re: [PATCH v2 6/9] iommu/ioasid: Introduce notification APIs
Hi Jacob, On 8/22/20 6:35 AM, Jacob Pan wrote: > Relations among IOASID users largely follow a publisher-subscriber > pattern. E.g. to support guest SVA on Intel Scalable I/O Virtualization > (SIOV) enabled platforms, VFIO, IOMMU, device drivers, KVM are all users > of IOASIDs. When a state change occurs, VFIO publishes the change event > that needs to be processed by other users/subscribers. > > This patch introduced two types of notifications: global and per > ioasid_set. The latter is intended for users who only needs to handle > events related to the IOASID of a given set. > For more information, refer to the kernel documentation at > Documentation/ioasid.rst. > > Signed-off-by: Liu Yi L > Signed-off-by: Wu Hao > Signed-off-by: Jacob Pan > --- > drivers/iommu/ioasid.c | 280 > - > include/linux/ioasid.h | 70 + > 2 files changed, 348 insertions(+), 2 deletions(-) > > diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c > index c0aef38a4fde..6ddc09a7fe74 100644 > --- a/drivers/iommu/ioasid.c > +++ b/drivers/iommu/ioasid.c > @@ -9,8 +9,35 @@ > #include > #include > #include > +#include > > static DEFINE_XARRAY_ALLOC(ioasid_sets); > +/* > + * An IOASID could have multiple consumers where each consumeer may have can have multiple consumers > + * hardware contexts associated with IOASIDs. > + * When a status change occurs, such as IOASID is being freed, notifier > chains s/such as IOASID is being freed/, like on IOASID deallocation, > + * are used to keep the consumers in sync. > + * This is a publisher-subscriber pattern where publisher can change the > + * state of each IOASID, e.g. alloc/free, bind IOASID to a device and mm. > + * On the other hand, subscribers gets notified for the state change and > + * keep local states in sync. > + * > + * Currently, the notifier is global. A further optimization could be per > + * IOASID set notifier chain. 
> + */ > +static ATOMIC_NOTIFIER_HEAD(ioasid_chain); > + > +/* List to hold pending notification block registrations */ > +static LIST_HEAD(ioasid_nb_pending_list); > +static DEFINE_SPINLOCK(ioasid_nb_lock); > +struct ioasid_set_nb { > + struct list_headlist; > + struct notifier_block *nb; > + void*token; > + struct ioasid_set *set; > + boolactive; > +}; > + > enum ioasid_state { > IOASID_STATE_INACTIVE, > IOASID_STATE_ACTIVE, > @@ -394,6 +421,7 @@ EXPORT_SYMBOL_GPL(ioasid_find_by_spid); > ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max, > void *private) > { > + struct ioasid_nb_args args; > struct ioasid_data *data; > void *adata; > ioasid_t id = INVALID_IOASID; > @@ -445,8 +473,14 @@ ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t > min, ioasid_t max, > goto exit_free; > } > set->nr_ioasids++; > - goto done_unlock; > + args.id = id; > + /* Set private ID is not attached during allocation */ > + args.spid = INVALID_IOASID; > + args.set = set; > + atomic_notifier_call_chain(&set->nh, IOASID_ALLOC, &args); > > + spin_unlock(&ioasid_allocator_lock); > + return id; spurious change > exit_free: > kfree(data); > done_unlock: > @@ -479,6 +513,7 @@ static void ioasid_do_free(struct ioasid_data *data) > > static void ioasid_free_locked(struct ioasid_set *set, ioasid_t ioasid) > { > + struct ioasid_nb_args args; > struct ioasid_data *data; > > data = xa_load(&active_allocator->xa, ioasid); > @@ -491,7 +526,16 @@ static void ioasid_free_locked(struct ioasid_set *set, > ioasid_t ioasid) > pr_warn("Cannot free IOASID %u due to set ownership\n", ioasid); > return; > } > + spurious new line > data->state = IOASID_STATE_FREE_PENDING; > + /* Notify all users that this IOASID is being freed */ > + args.id = ioasid; > + args.spid = data->spid; > + args.pdata = data->private; > + args.set = data->set; > + atomic_notifier_call_chain(&ioasid_chain, IOASID_FREE, &args); > + /* Notify the ioasid_set for per set users */ > + 
atomic_notifier_call_chain(&set->nh, IOASID_FREE, &args); > > if (!refcount_dec_and_test(&data->users)) > return; Shouldn't we call the notifier only when ref count == 0? > @@ -514,6 +558,28 @@ void ioasid_free(struct ioasid_set *set, ioasid_t ioasid) > } > EXPORT_SYMBOL_GPL(ioasid_free); > > +static void ioasid_add_pending_nb(struct ioasid_set *set) > +{ > + struct ioasid_set_nb *curr; > + > + if (set->type != IOASID_SET_TYPE_MM) > + return; > + > + /* > + * Check if there are any pending nb requests for the given token, if so > + * add them to the notifier chain. > + */ > + spin_lock(&ioasid_nb_lock); > + list_for_each_entry(curr, &ioasid_nb_pending_list, list) { > + if
Re: [PATCH v2 6/9] iommu/ioasid: Introduce notification APIs
On Fri, Aug 21, 2020 at 09:35:15PM -0700, Jacob Pan wrote: > Relations among IOASID users largely follow a publisher-subscriber > pattern. E.g. to support guest SVA on Intel Scalable I/O Virtualization > (SIOV) enabled platforms, VFIO, IOMMU, device drivers, KVM are all users > of IOASIDs. When a state change occurs, VFIO publishes the change event > that needs to be processed by other users/subscribers. > > This patch introduced two types of notifications: global and per > ioasid_set. The latter is intended for users who only needs to handle > events related to the IOASID of a given set. > For more information, refer to the kernel documentation at > Documentation/ioasid.rst. > > Signed-off-by: Liu Yi L > Signed-off-by: Wu Hao > Signed-off-by: Jacob Pan > --- > drivers/iommu/ioasid.c | 280 > - > include/linux/ioasid.h | 70 + > 2 files changed, 348 insertions(+), 2 deletions(-) > > diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c > index c0aef38a4fde..6ddc09a7fe74 100644 > --- a/drivers/iommu/ioasid.c > +++ b/drivers/iommu/ioasid.c > @@ -9,8 +9,35 @@ > #include > #include > #include > +#include > > static DEFINE_XARRAY_ALLOC(ioasid_sets); > +/* > + * An IOASID could have multiple consumers where each consumeer may have consumer > + * hardware contexts associated with IOASIDs. > + * When a status change occurs, such as IOASID is being freed, notifier > chains > + * are used to keep the consumers in sync. > + * This is a publisher-subscriber pattern where publisher can change the > + * state of each IOASID, e.g. alloc/free, bind IOASID to a device and mm. > + * On the other hand, subscribers gets notified for the state change and > + * keep local states in sync. > + * > + * Currently, the notifier is global. A further optimization could be per > + * IOASID set notifier chain. 
The patch adds both > + */ > +static ATOMIC_NOTIFIER_HEAD(ioasid_chain); "ioasid_notifier" may be clearer > + > +/* List to hold pending notification block registrations */ > +static LIST_HEAD(ioasid_nb_pending_list); > +static DEFINE_SPINLOCK(ioasid_nb_lock); > +struct ioasid_set_nb { > + struct list_headlist; > + struct notifier_block *nb; > + void*token; > + struct ioasid_set *set; > + boolactive; > +}; > + > enum ioasid_state { > IOASID_STATE_INACTIVE, > IOASID_STATE_ACTIVE, > @@ -394,6 +421,7 @@ EXPORT_SYMBOL_GPL(ioasid_find_by_spid); > ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max, > void *private) > { > + struct ioasid_nb_args args; > struct ioasid_data *data; > void *adata; > ioasid_t id = INVALID_IOASID; > @@ -445,8 +473,14 @@ ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t > min, ioasid_t max, > goto exit_free; > } > set->nr_ioasids++; > - goto done_unlock; > + args.id = id; > + /* Set private ID is not attached during allocation */ > + args.spid = INVALID_IOASID; > + args.set = set; args.pdata is uninitialized > + atomic_notifier_call_chain(&set->nh, IOASID_ALLOC, &args); No global notification? 
> > + spin_unlock(&ioasid_allocator_lock); > + return id; > exit_free: > kfree(data); > done_unlock: > @@ -479,6 +513,7 @@ static void ioasid_do_free(struct ioasid_data *data) > > static void ioasid_free_locked(struct ioasid_set *set, ioasid_t ioasid) > { > + struct ioasid_nb_args args; > struct ioasid_data *data; > > data = xa_load(&active_allocator->xa, ioasid); > @@ -491,7 +526,16 @@ static void ioasid_free_locked(struct ioasid_set *set, > ioasid_t ioasid) > pr_warn("Cannot free IOASID %u due to set ownership\n", ioasid); > return; > } > + > data->state = IOASID_STATE_FREE_PENDING; > + /* Notify all users that this IOASID is being freed */ > + args.id = ioasid; > + args.spid = data->spid; > + args.pdata = data->private; > + args.set = data->set; > + atomic_notifier_call_chain(&ioasid_chain, IOASID_FREE, &args); > + /* Notify the ioasid_set for per set users */ > + atomic_notifier_call_chain(&set->nh, IOASID_FREE, &args); > > if (!refcount_dec_and_test(&data->users)) > return; > @@ -514,6 +558,28 @@ void ioasid_free(struct ioasid_set *set, ioasid_t ioasid) > } > EXPORT_SYMBOL_GPL(ioasid_free); > > +static void ioasid_add_pending_nb(struct ioasid_set *set) > +{ > + struct ioasid_set_nb *curr; > + > + if (set->type != IOASID_SET_TYPE_MM) > + return; > + > + /* > + * Check if there are any pending nb requests for the given token, if so > + * add them to the notifier chain. > + */ > + spin_lock(&ioasid_nb_lock); > + list_for_each_entry(curr, &ioasid_nb_pending_list, list) { > + if (curr->token == set->token && !curr->active) {
[PATCH v2 6/9] iommu/ioasid: Introduce notification APIs
Relations among IOASID users largely follow a publisher-subscriber pattern. E.g. to support guest SVA on Intel Scalable I/O Virtualization (SIOV) enabled platforms, VFIO, IOMMU, device drivers, KVM are all users of IOASIDs. When a state change occurs, VFIO publishes the change event that needs to be processed by other users/subscribers. This patch introduced two types of notifications: global and per ioasid_set. The latter is intended for users who only needs to handle events related to the IOASID of a given set. For more information, refer to the kernel documentation at Documentation/ioasid.rst. Signed-off-by: Liu Yi L Signed-off-by: Wu Hao Signed-off-by: Jacob Pan --- drivers/iommu/ioasid.c | 280 - include/linux/ioasid.h | 70 + 2 files changed, 348 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c index c0aef38a4fde..6ddc09a7fe74 100644 --- a/drivers/iommu/ioasid.c +++ b/drivers/iommu/ioasid.c @@ -9,8 +9,35 @@ #include #include #include +#include static DEFINE_XARRAY_ALLOC(ioasid_sets); +/* + * An IOASID could have multiple consumers where each consumeer may have + * hardware contexts associated with IOASIDs. + * When a status change occurs, such as IOASID is being freed, notifier chains + * are used to keep the consumers in sync. + * This is a publisher-subscriber pattern where publisher can change the + * state of each IOASID, e.g. alloc/free, bind IOASID to a device and mm. + * On the other hand, subscribers gets notified for the state change and + * keep local states in sync. + * + * Currently, the notifier is global. A further optimization could be per + * IOASID set notifier chain. 
+ */ +static ATOMIC_NOTIFIER_HEAD(ioasid_chain); + +/* List to hold pending notification block registrations */ +static LIST_HEAD(ioasid_nb_pending_list); +static DEFINE_SPINLOCK(ioasid_nb_lock); +struct ioasid_set_nb { + struct list_headlist; + struct notifier_block *nb; + void*token; + struct ioasid_set *set; + boolactive; +}; + enum ioasid_state { IOASID_STATE_INACTIVE, IOASID_STATE_ACTIVE, @@ -394,6 +421,7 @@ EXPORT_SYMBOL_GPL(ioasid_find_by_spid); ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max, void *private) { + struct ioasid_nb_args args; struct ioasid_data *data; void *adata; ioasid_t id = INVALID_IOASID; @@ -445,8 +473,14 @@ ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max, goto exit_free; } set->nr_ioasids++; - goto done_unlock; + args.id = id; + /* Set private ID is not attached during allocation */ + args.spid = INVALID_IOASID; + args.set = set; + atomic_notifier_call_chain(&set->nh, IOASID_ALLOC, &args); + spin_unlock(&ioasid_allocator_lock); + return id; exit_free: kfree(data); done_unlock: @@ -479,6 +513,7 @@ static void ioasid_do_free(struct ioasid_data *data) static void ioasid_free_locked(struct ioasid_set *set, ioasid_t ioasid) { + struct ioasid_nb_args args; struct ioasid_data *data; data = xa_load(&active_allocator->xa, ioasid); @@ -491,7 +526,16 @@ static void ioasid_free_locked(struct ioasid_set *set, ioasid_t ioasid) pr_warn("Cannot free IOASID %u due to set ownership\n", ioasid); return; } + data->state = IOASID_STATE_FREE_PENDING; + /* Notify all users that this IOASID is being freed */ + args.id = ioasid; + args.spid = data->spid; + args.pdata = data->private; + args.set = data->set; + atomic_notifier_call_chain(&ioasid_chain, IOASID_FREE, &args); + /* Notify the ioasid_set for per set users */ + atomic_notifier_call_chain(&set->nh, IOASID_FREE, &args); if (!refcount_dec_and_test(&data->users)) return; @@ -514,6 +558,28 @@ void ioasid_free(struct ioasid_set *set, ioasid_t ioasid) } 
EXPORT_SYMBOL_GPL(ioasid_free); +static void ioasid_add_pending_nb(struct ioasid_set *set) +{ + struct ioasid_set_nb *curr; + + if (set->type != IOASID_SET_TYPE_MM) + return; + + /* +* Check if there are any pending nb requests for the given token, if so +* add them to the notifier chain. +*/ + spin_lock(&ioasid_nb_lock); + list_for_each_entry(curr, &ioasid_nb_pending_list, list) { + if (curr->token == set->token && !curr->active) { + atomic_notifier_chain_register(&set->nh, curr->nb); + curr->set = set; + curr->active = true; + } + } + spin_unlock(&ioasid_nb_lock); +} + /** * ioasid_alloc_set - Allocate a new IOASID set for a given token * @@ -601,6 +667,13 @@ struct ioasid_s