Re: [PATCH v3 10/14] iommu/ioasid: Introduce notification APIs

2020-10-23 Thread Jean-Philippe Brucker
On Mon, Sep 28, 2020 at 02:38:37PM -0700, Jacob Pan wrote:
> Relations among IOASID users largely follow a publisher-subscriber
> pattern. E.g. to support guest SVA on Intel Scalable I/O Virtualization
> (SIOV) enabled platforms, VFIO, IOMMU, device drivers, and KVM are all users
> of IOASIDs. When a state change occurs, VFIO publishes the change event
> that needs to be processed by other users/subscribers.
> 
> This patch introduces two types of notifications: global and per
> ioasid_set. The latter is intended for users who only need to handle
> events related to the IOASIDs of a given set.
> For more information, refer to the kernel documentation at
> Documentation/ioasid.rst.
> 
> Signed-off-by: Liu Yi L 
> Signed-off-by: Wu Hao 
> Signed-off-by: Jacob Pan 
> ---
>  drivers/iommu/ioasid.c | 141 +
>  include/linux/ioasid.h |  57 +++-
>  2 files changed, 197 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c
> index 378fef8f23d9..894b17c06ead 100644
> --- a/drivers/iommu/ioasid.c
> +++ b/drivers/iommu/ioasid.c
> @@ -10,12 +10,35 @@
>  #include 
>  #include 
>  
> +/*
> + * An IOASID can have multiple consumers where each consumer may have
> + * hardware contexts associated with the IOASID.
> + * When a status change occurs, like on IOASID deallocation, notifier chains
> + * are used to keep the consumers in sync.
> + * This is a publisher-subscriber pattern where the publisher can change the
> + * state of each IOASID, e.g. alloc/free, or bind an IOASID to a device and mm.
> + * On the other hand, subscribers get notified of the state change and
> + * keep their local state in sync.
> + */
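
(Checking that I read the model right: a subscriber here is a plain
notifier block whose callback receives the struct ioasid_nb_args,
roughly like the untested sketch below? my_ioasid_event is a made-up
name for illustration; this only shows the callback side, not the
registration API, which isn't in this hunk.)

static int my_ioasid_event(struct notifier_block *nb, unsigned long cmd,
                           void *data)
{
        struct ioasid_nb_args *args = data;

        switch (cmd) {
        case IOASID_NOTIFY_UNBIND:
                pr_debug("IOASID %u unbound\n", args->id);
                /* tear down any local state keyed by args->id / args->spid */
                break;
        default:
                break;
        }

        return NOTIFY_DONE;
}
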
> +static ATOMIC_NOTIFIER_HEAD(ioasid_notifier);
> +/* List to hold pending notification block registrations */
> +static LIST_HEAD(ioasid_nb_pending_list);

nit: it might be tidier to deal with the pending list in the next patch,
since it's only relevant for mm set notifier

> +static DEFINE_SPINLOCK(ioasid_nb_lock);
> +
>  /* Default to PCIe standard 20 bit PASID */
>  #define PCI_PASID_MAX 0x100000
>  static ioasid_t ioasid_capacity = PCI_PASID_MAX;
>  static ioasid_t ioasid_capacity_avail = PCI_PASID_MAX;
>  static DEFINE_XARRAY_ALLOC(ioasid_sets);
>  
> +struct ioasid_set_nb {
> + struct list_headlist;
> + struct notifier_block   *nb;
> + void*token;
> + struct ioasid_set   *set;
> + boolactive;
> +};
> +
>  enum ioasid_state {
>   IOASID_STATE_INACTIVE,
>   IOASID_STATE_ACTIVE,
> @@ -365,6 +388,42 @@ void ioasid_detach_data(ioasid_t ioasid)
>  }
>  EXPORT_SYMBOL_GPL(ioasid_detach_data);
>  
> +/**
> + * ioasid_notify - Send notification on a given IOASID for status change.
> + *
> + * @data:The IOASID data about which the notification is sent
> + * @cmd: Notification event sent by IOASID external users, can be
> + *   IOASID_NOTIFY_BIND or IOASID_NOTIFY_UNBIND.
> + * @flags:   Special instructions, e.g. notify within a set or globally via
> + *   IOASID_NOTIFY_FLAG_SET or IOASID_NOTIFY_FLAG_ALL
> + *
> + * Caller must hold ioasid_allocator_lock and a reference to the IOASID.
> + */
> +static int ioasid_notify(struct ioasid_data *data,
> +  enum ioasid_notify_val cmd, unsigned int flags)
> +{
> + struct ioasid_nb_args args = { 0 };
> + int ret = 0;
> +
> + /* IOASID_FREE/ALLOC are internal events emitted by IOASID core only */
> + if (cmd <= IOASID_NOTIFY_FREE)
> + return -EINVAL;

This function is now called with ALLOC and FREE
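
Since ioasid_notify() is static and the core is now the only caller for
all of these events, maybe the cmd filter can simply be dropped and only
the flags sanity check kept? I.e. the top of the function would be just
(untested, only meant to illustrate):

        if (flags & ~(IOASID_NOTIFY_FLAG_ALL | IOASID_NOTIFY_FLAG_SET))
                return -EINVAL;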

> +
> + if (flags & ~(IOASID_NOTIFY_FLAG_ALL | IOASID_NOTIFY_FLAG_SET))
> + return -EINVAL;
> +
> + args.id = data->id;
> + args.set = data->set;
> + args.pdata = data->private;
> + args.spid = data->spid;
> + if (flags & IOASID_NOTIFY_FLAG_ALL)
> + ret = atomic_notifier_call_chain(&ioasid_notifier, cmd, &args);
> + if (flags & IOASID_NOTIFY_FLAG_SET)
> + ret = atomic_notifier_call_chain(&data->set->nh, cmd, &args);

If both flags are set, we'll miss errors within the global notification.
Is that a problem?
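
If it is, one option (untested sketch, assuming subscribers that want to
veto an event return notifier_from_errno()) would be to bail out when the
global chain reports a failure before walking the per-set chain:

        if (flags & IOASID_NOTIFY_FLAG_ALL) {
                ret = atomic_notifier_call_chain(&ioasid_notifier, cmd, &args);
                if (notifier_to_errno(ret))
                        return ret;
        }
        if (flags & IOASID_NOTIFY_FLAG_SET)
                ret = atomic_notifier_call_chain(&data->set->nh, cmd, &args);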

> +
> + return ret;
> +}
> +
>  static ioasid_t ioasid_find_by_spid_locked(struct ioasid_set *set, ioasid_t spid)
>  {
>   ioasid_t ioasid = INVALID_IOASID;
> @@ -417,6 +476,7 @@ int ioasid_attach_spid(ioasid_t ioasid, ioasid_t spid)
>   goto done_unlock;
>   }
>   data->spid = spid;
> + ioasid_notify(data, IOASID_NOTIFY_BIND, IOASID_NOTIFY_FLAG_SET);
>  
>  done_unlock:
>   spin_unlock(&ioasid_allocator_lock);
> @@ -436,6 +496,7 @@ void ioasid_detach_spid(ioasid_t ioasid)
>   goto done_unlock;
>   }
>   data->spid = INVALID_IOASID;
> + ioasid_notify(data, IOASID_NOTIFY_UNBIND, IOASID_NOTIFY_FLAG_SET);
>  
>  done_unlock:
>   spin_unlock(&ioasid_allocator_lock);
> @@ -469,6 +530,28 @@ static inline bool ioasid_set_is_valid(struct ioasid_set *set)

[PATCH v3 10/14] iommu/ioasid: Introduce notification APIs

2020-09-28 Thread Jacob Pan
Relations among IOASID users largely follow a publisher-subscriber
pattern. E.g. to support guest SVA on Intel Scalable I/O Virtualization
(SIOV) enabled platforms, VFIO, IOMMU, device drivers, and KVM are all users
of IOASIDs. When a state change occurs, VFIO publishes the change event
that needs to be processed by other users/subscribers.

This patch introduces two types of notifications: global and per
ioasid_set. The latter is intended for users who only need to handle
events related to the IOASIDs of a given set.
For more information, refer to the kernel documentation at
Documentation/ioasid.rst.

Signed-off-by: Liu Yi L 
Signed-off-by: Wu Hao 
Signed-off-by: Jacob Pan 
---
 drivers/iommu/ioasid.c | 141 +
 include/linux/ioasid.h |  57 +++-
 2 files changed, 197 insertions(+), 1 deletion(-)

diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c
index 378fef8f23d9..894b17c06ead 100644
--- a/drivers/iommu/ioasid.c
+++ b/drivers/iommu/ioasid.c
@@ -10,12 +10,35 @@
 #include 
 #include 
 
+/*
+ * An IOASID can have multiple consumers where each consumer may have
+ * hardware contexts associated with the IOASID.
+ * When a status change occurs, like on IOASID deallocation, notifier chains
+ * are used to keep the consumers in sync.
+ * This is a publisher-subscriber pattern where the publisher can change the
+ * state of each IOASID, e.g. alloc/free, or bind an IOASID to a device and mm.
+ * On the other hand, subscribers get notified of the state change and
+ * keep their local state in sync.
+ */
+static ATOMIC_NOTIFIER_HEAD(ioasid_notifier);
+/* List to hold pending notification block registrations */
+static LIST_HEAD(ioasid_nb_pending_list);
+static DEFINE_SPINLOCK(ioasid_nb_lock);
+
 /* Default to PCIe standard 20 bit PASID */
 #define PCI_PASID_MAX 0x100000
 static ioasid_t ioasid_capacity = PCI_PASID_MAX;
 static ioasid_t ioasid_capacity_avail = PCI_PASID_MAX;
 static DEFINE_XARRAY_ALLOC(ioasid_sets);
 
+struct ioasid_set_nb {
+   struct list_headlist;
+   struct notifier_block   *nb;
+   void*token;
+   struct ioasid_set   *set;
+   boolactive;
+};
+
 enum ioasid_state {
IOASID_STATE_INACTIVE,
IOASID_STATE_ACTIVE,
@@ -365,6 +388,42 @@ void ioasid_detach_data(ioasid_t ioasid)
 }
 EXPORT_SYMBOL_GPL(ioasid_detach_data);
 
+/**
+ * ioasid_notify - Send notification on a given IOASID for status change.
+ *
+ * @data:  The IOASID data about which the notification is sent
+ * @cmd:   Notification event sent by IOASID external users, can be
+ *         IOASID_NOTIFY_BIND or IOASID_NOTIFY_UNBIND.
+ * @flags: Special instructions, e.g. notify within a set or globally via
+ *         IOASID_NOTIFY_FLAG_SET or IOASID_NOTIFY_FLAG_ALL
+ *
+ * Caller must hold ioasid_allocator_lock and a reference to the IOASID.
+ */
+static int ioasid_notify(struct ioasid_data *data,
+enum ioasid_notify_val cmd, unsigned int flags)
+{
+   struct ioasid_nb_args args = { 0 };
+   int ret = 0;
+
+   /* IOASID_FREE/ALLOC are internal events emitted by IOASID core only */
+   if (cmd <= IOASID_NOTIFY_FREE)
+   return -EINVAL;
+
+   if (flags & ~(IOASID_NOTIFY_FLAG_ALL | IOASID_NOTIFY_FLAG_SET))
+   return -EINVAL;
+
+   args.id = data->id;
+   args.set = data->set;
+   args.pdata = data->private;
+   args.spid = data->spid;
+   if (flags & IOASID_NOTIFY_FLAG_ALL)
+   ret = atomic_notifier_call_chain(&ioasid_notifier, cmd, &args);
+   if (flags & IOASID_NOTIFY_FLAG_SET)
+   ret = atomic_notifier_call_chain(&data->set->nh, cmd, &args);
+
+   return ret;
+}
+
 static ioasid_t ioasid_find_by_spid_locked(struct ioasid_set *set, ioasid_t spid)
 {
ioasid_t ioasid = INVALID_IOASID;
@@ -417,6 +476,7 @@ int ioasid_attach_spid(ioasid_t ioasid, ioasid_t spid)
goto done_unlock;
}
data->spid = spid;
+   ioasid_notify(data, IOASID_NOTIFY_BIND, IOASID_NOTIFY_FLAG_SET);
 
 done_unlock:
spin_unlock(&ioasid_allocator_lock);
@@ -436,6 +496,7 @@ void ioasid_detach_spid(ioasid_t ioasid)
goto done_unlock;
}
data->spid = INVALID_IOASID;
+   ioasid_notify(data, IOASID_NOTIFY_UNBIND, IOASID_NOTIFY_FLAG_SET);
 
 done_unlock:
spin_unlock(&ioasid_allocator_lock);
@@ -469,6 +530,28 @@ static inline bool ioasid_set_is_valid(struct ioasid_set *set)
return xa_load(&ioasid_sets, set->id) == set;
 }
 
+static void ioasid_add_pending_nb(struct ioasid_set *set)
+{
+   struct ioasid_set_nb *curr;
+
+   if (set->type != IOASID_SET_TYPE_MM)
+   return;
+
+   /*
+* Check if there are any pending nb requests for the given token, if so
+* add them to the notifier chain.
+*/
+   spin_lock(&ioasid_nb_lock);
+   list_for_each_entry(curr, &ioasid_nb_pending_lis