With the nested g-stage page table now emulated for the guest, implement
the remote HFENCE SBI calls as operations on that nested g-stage page table.

Signed-off-by: Anup Patel <[email protected]>
---
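Note for reviewers (not part of the patch): the sketch below only illustrates
how an L1 guest hypervisor ends up in kvm_sbi_ext_rfence_handler() in the
first place, assuming the standard SBI calling convention (EID in a7, FID in
a6, arguments in a0-a4, error code returned in a0) and the RFENCE extension
and function IDs from the SBI specification. The helper sbi_rfence_ecall()
and the example function are made-up names for illustration only; they are
not kernel or SBI firmware APIs.

/* Hypothetical guest-side helper, shown only to illustrate the call flow. */
#define SBI_EXT_RFENCE                          0x52464E43
#define SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID  3

static long sbi_rfence_ecall(unsigned long fid, unsigned long hmask,
                             unsigned long hbase, unsigned long addr,
                             unsigned long size, unsigned long arg4)
{
        register unsigned long a0 asm("a0") = hmask;
        register unsigned long a1 asm("a1") = hbase;
        register unsigned long a2 asm("a2") = addr;
        register unsigned long a3 asm("a3") = size;
        register unsigned long a4 asm("a4") = arg4;
        register unsigned long a6 asm("a6") = fid;
        register unsigned long a7 asm("a7") = SBI_EXT_RFENCE;

        /* The ecall traps to KVM, which dispatches it to the RFENCE handler. */
        asm volatile("ecall"
                     : "+r" (a0), "+r" (a1)
                     : "r" (a2), "r" (a3), "r" (a4), "r" (a6), "r" (a7)
                     : "memory");
        return a0;      /* SBI error code */
}

static void flush_guest_gpa_range_example(void)
{
        /* Flush GPA range [0x80000000, +2MB) for VMID 1 on all harts     */
        /* (hbase == -1UL selects all harts per the SBI hart-mask rules). */
        sbi_rfence_ecall(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
                         0, -1UL, 0x80000000UL, 0x200000UL, 1);
}

With this patch, the handler maps such a call onto the nested HFENCE helpers
(kvm_riscv_nested_hfence_gvma_vmid_gpa() and friends) instead of returning
SBI_ERR_NOT_SUPPORTED.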
 arch/riscv/include/asm/kvm_host.h        |   2 +
 arch/riscv/include/asm/kvm_tlb.h         |  37 ++++++-
 arch/riscv/include/asm/kvm_vcpu_nested.h |  14 +++
 arch/riscv/kvm/tlb.c                     | 124 +++++++++++++++++++++++
 arch/riscv/kvm/vcpu_nested_swtlb.c       |  76 ++++++++++++++
 arch/riscv/kvm/vcpu_sbi_replace.c        |  63 +++++++++++-
 6 files changed, 310 insertions(+), 6 deletions(-)

diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index c510564a09a2..2f097459ee14 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -47,6 +47,8 @@
        KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_STEAL_UPDATE           KVM_ARCH_REQ(6)
 #define KVM_REQ_NESTED_SWTLB           KVM_ARCH_REQ(7)
+#define KVM_REQ_NESTED_HFENCE_GVMA_ALL KVM_ARCH_REQ(8)
+#define KVM_REQ_NESTED_HFENCE_VVMA_ALL KVM_ARCH_REQ(9)
 
 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
 
diff --git a/arch/riscv/include/asm/kvm_tlb.h b/arch/riscv/include/asm/kvm_tlb.h
index a0e7099bcb85..591b8735000f 100644
--- a/arch/riscv/include/asm/kvm_tlb.h
+++ b/arch/riscv/include/asm/kvm_tlb.h
@@ -15,7 +15,11 @@ enum kvm_riscv_hfence_type {
        KVM_RISCV_HFENCE_VVMA_ASID_GVA,
        KVM_RISCV_HFENCE_VVMA_ASID_ALL,
        KVM_RISCV_HFENCE_VVMA_GVA,
-       KVM_RISCV_HFENCE_VVMA_ALL
+       KVM_RISCV_HFENCE_VVMA_ALL,
+       KVM_RISCV_NESTED_HFENCE_GVMA_GPA,
+       KVM_RISCV_NESTED_HFENCE_GVMA_VMID_GPA,
+       KVM_RISCV_NESTED_HFENCE_VVMA_GVA,
+       KVM_RISCV_NESTED_HFENCE_VVMA_ASID_GVA,
 };
 
 struct kvm_riscv_hfence {
@@ -56,6 +60,8 @@ void kvm_riscv_tlb_flush_process(struct kvm_vcpu *vcpu);
 void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
 void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
 void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);
+void kvm_riscv_nested_hfence_gvma_all_process(struct kvm_vcpu *vcpu);
+void kvm_riscv_nested_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
 
 void kvm_riscv_fence_i(struct kvm *kvm,
                       unsigned long hbase, unsigned long hmask);
@@ -82,4 +88,33 @@ void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
                               unsigned long hbase, unsigned long hmask,
                               unsigned long vmid);
 
+void kvm_riscv_nested_hfence_gvma_gpa(struct kvm *kvm,
+                                     unsigned long hbase, unsigned long hmask,
+                                     gpa_t gpa, gpa_t gpsz,
+                                     unsigned long order);
+void kvm_riscv_nested_hfence_gvma_all(struct kvm *kvm,
+                                     unsigned long hbase, unsigned long hmask);
+void kvm_riscv_nested_hfence_gvma_vmid_gpa(struct kvm *kvm,
+                                          unsigned long hbase, unsigned long hmask,
+                                          gpa_t gpa, gpa_t gpsz,
+                                          unsigned long order, unsigned long vmid);
+void kvm_riscv_nested_hfence_gvma_vmid_all(struct kvm *kvm,
+                                          unsigned long hbase, unsigned long hmask,
+                                          unsigned long vmid);
+void kvm_riscv_nested_hfence_vvma_gva(struct kvm *kvm,
+                                     unsigned long hbase, unsigned long hmask,
+                                     unsigned long gva, unsigned long gvsz,
+                                     unsigned long order, unsigned long vmid);
+void kvm_riscv_nested_hfence_vvma_all(struct kvm *kvm,
+                                     unsigned long hbase, unsigned long hmask,
+                                     unsigned long vmid);
+void kvm_riscv_nested_hfence_vvma_asid_gva(struct kvm *kvm,
+                                          unsigned long hbase, unsigned long hmask,
+                                          unsigned long gva, unsigned long gvsz,
+                                          unsigned long order, unsigned long asid,
+                                          unsigned long vmid);
+void kvm_riscv_nested_hfence_vvma_asid_all(struct kvm *kvm,
+                                          unsigned long hbase, unsigned long hmask,
+                                          unsigned long asid, unsigned long vmid);
+
 #endif
diff --git a/arch/riscv/include/asm/kvm_vcpu_nested.h b/arch/riscv/include/asm/kvm_vcpu_nested.h
index 665c60f09ee6..4935ab0db1a2 100644
--- a/arch/riscv/include/asm/kvm_vcpu_nested.h
+++ b/arch/riscv/include/asm/kvm_vcpu_nested.h
@@ -69,6 +69,20 @@ int kvm_riscv_vcpu_nested_swtlb_xlate(struct kvm_vcpu *vcpu,
                                      const struct kvm_cpu_trap *trap,
                                      struct kvm_gstage_mapping *out_map,
                                      struct kvm_cpu_trap *out_trap);
+void kvm_riscv_vcpu_nested_swtlb_vvma_flush(struct kvm_vcpu *vcpu,
+                                           unsigned long vaddr, unsigned long size,
+                                           unsigned long order, unsigned long vmid);
+void kvm_riscv_vcpu_nested_swtlb_vvma_flush_asid(struct kvm_vcpu *vcpu,
+                                                unsigned long vaddr, unsigned long size,
+                                                unsigned long order, unsigned long vmid,
+                                                unsigned long asid);
+void kvm_riscv_vcpu_nested_swtlb_gvma_flush(struct kvm_vcpu *vcpu,
+                                           gpa_t addr, gpa_t size, unsigned long order);
+void kvm_riscv_vcpu_nested_swtlb_gvma_flush_vmid(struct kvm_vcpu *vcpu,
+                                                gpa_t addr, gpa_t size, unsigned long order,
+                                                unsigned long vmid);
+void kvm_riscv_vcpu_nested_swtlb_host_flush(struct kvm_vcpu *vcpu,
+                                           gpa_t addr, gpa_t size, unsigned long order);
 void kvm_riscv_vcpu_nested_swtlb_process(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_nested_swtlb_request(struct kvm_vcpu *vcpu,
                                          const struct kvm_gstage_mapping *guest_map,
diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c
index a95aa5336560..1b48a5ff81d1 100644
--- a/arch/riscv/kvm/tlb.c
+++ b/arch/riscv/kvm/tlb.c
@@ -210,6 +210,7 @@ void kvm_riscv_tlb_flush_process(struct kvm_vcpu *vcpu)
                nacl_hfence_gvma_vmid_all(nacl_shmem(), vmid);
        else
                kvm_riscv_local_hfence_gvma_vmid_all(vmid);
+       kvm_riscv_vcpu_nested_swtlb_host_flush(vcpu, 0, 0, 0);
 }
 
 void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
@@ -223,6 +224,16 @@ void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
                kvm_riscv_local_hfence_vvma_all(vmid);
 }
 
+void kvm_riscv_nested_hfence_gvma_all_process(struct kvm_vcpu *vcpu)
+{
+       kvm_riscv_vcpu_nested_swtlb_gvma_flush(vcpu, 0, 0, 0);
+}
+
+void kvm_riscv_nested_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
+{
+       kvm_riscv_vcpu_nested_swtlb_vvma_flush(vcpu, 0, 0, 0, -1UL);
+}
+
 static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
                                struct kvm_riscv_hfence *out_data)
 {
@@ -287,12 +298,14 @@ void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
                        else
                                kvm_riscv_local_hfence_gvma_vmid_gpa(d.vmid, d.addr,
                                                                     d.size, d.order);
+                       kvm_riscv_vcpu_nested_swtlb_host_flush(vcpu, d.addr, d.size, d.order);
                        break;
                case KVM_RISCV_HFENCE_GVMA_VMID_ALL:
                        if (kvm_riscv_nacl_available())
                                nacl_hfence_gvma_vmid_all(nacl_shmem(), d.vmid);
                        else
                                kvm_riscv_local_hfence_gvma_vmid_all(d.vmid);
+                       kvm_riscv_vcpu_nested_swtlb_host_flush(vcpu, 0, 0, 0);
                        break;
                case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
                        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
@@ -464,6 +477,117 @@ void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
                            KVM_REQ_HFENCE_VVMA_ALL, &data);
 }
 
+void kvm_riscv_nested_hfence_gvma_gpa(struct kvm *kvm,
+                                     unsigned long hbase, unsigned long hmask,
+                                     gpa_t gpa, gpa_t gpsz,
+                                     unsigned long order)
+{
+       struct kvm_riscv_hfence data = {0};
+
+       data.type = KVM_RISCV_NESTED_HFENCE_GVMA_GPA;
+       data.addr = gpa;
+       data.size = gpsz;
+       data.order = order;
+       make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+                           KVM_REQ_NESTED_HFENCE_GVMA_ALL, &data);
+}
+
+void kvm_riscv_nested_hfence_gvma_all(struct kvm *kvm,
+                                     unsigned long hbase, unsigned long hmask)
+{
+       make_xfence_request(kvm, hbase, hmask, KVM_REQ_NESTED_HFENCE_GVMA_ALL,
+                           KVM_REQ_NESTED_HFENCE_GVMA_ALL, NULL);
+}
+
+void kvm_riscv_nested_hfence_gvma_vmid_gpa(struct kvm *kvm,
+                                          unsigned long hbase, unsigned long hmask,
+                                          gpa_t gpa, gpa_t gpsz,
+                                          unsigned long order, unsigned long vmid)
+{
+       struct kvm_riscv_hfence data;
+
+       data.type = KVM_RISCV_NESTED_HFENCE_GVMA_VMID_GPA;
+       data.asid = 0;
+       data.vmid = vmid;
+       data.addr = gpa;
+       data.size = gpsz;
+       data.order = order;
+       make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+                           KVM_REQ_NESTED_HFENCE_GVMA_ALL, &data);
+}
+
+void kvm_riscv_nested_hfence_gvma_vmid_all(struct kvm *kvm,
+                                          unsigned long hbase, unsigned long hmask,
+                                          unsigned long vmid)
+{
+       struct kvm_riscv_hfence data = {0};
+
+       data.type = KVM_RISCV_NESTED_HFENCE_GVMA_VMID_GPA;
+       data.vmid = vmid;
+       make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+                           KVM_REQ_NESTED_HFENCE_GVMA_ALL, &data);
+}
+
+void kvm_riscv_nested_hfence_vvma_gva(struct kvm *kvm,
+                                     unsigned long hbase, unsigned long hmask,
+                                     unsigned long gva, unsigned long gvsz,
+                                     unsigned long order, unsigned long vmid)
+{
+       struct kvm_riscv_hfence data;
+
+       data.type = KVM_RISCV_NESTED_HFENCE_VVMA_GVA;
+       data.asid = 0;
+       data.vmid = vmid;
+       data.addr = gva;
+       data.size = gvsz;
+       data.order = order;
+       make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+                           KVM_REQ_NESTED_HFENCE_VVMA_ALL, &data);
+}
+
+void kvm_riscv_nested_hfence_vvma_all(struct kvm *kvm,
+                                     unsigned long hbase, unsigned long hmask,
+                                     unsigned long vmid)
+{
+       struct kvm_riscv_hfence data = {0};
+
+       data.type = KVM_RISCV_NESTED_HFENCE_VVMA_GVA;
+       data.vmid = vmid;
+       make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+                           KVM_REQ_NESTED_HFENCE_VVMA_ALL, &data);
+}
+
+void kvm_riscv_nested_hfence_vvma_asid_gva(struct kvm *kvm,
+                                          unsigned long hbase, unsigned long hmask,
+                                          unsigned long gva, unsigned long gvsz,
+                                          unsigned long order, unsigned long asid,
+                                          unsigned long vmid)
+{
+       struct kvm_riscv_hfence data;
+
+       data.type = KVM_RISCV_NESTED_HFENCE_VVMA_ASID_GVA;
+       data.asid = asid;
+       data.vmid = vmid;
+       data.addr = gva;
+       data.size = gvsz;
+       data.order = order;
+       make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+                           KVM_REQ_NESTED_HFENCE_VVMA_ALL, &data);
+}
+
+void kvm_riscv_nested_hfence_vvma_asid_all(struct kvm *kvm,
+                                          unsigned long hbase, unsigned long hmask,
+                                          unsigned long asid, unsigned long vmid)
+{
+       struct kvm_riscv_hfence data = {0};
+
+       data.type = KVM_RISCV_NESTED_HFENCE_VVMA_ASID_GVA;
+       data.asid = asid;
+       data.vmid = vmid;
+       make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+                           KVM_REQ_NESTED_HFENCE_VVMA_ALL, &data);
+}
+
 int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
 {
        kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0,
diff --git a/arch/riscv/kvm/vcpu_nested_swtlb.c b/arch/riscv/kvm/vcpu_nested_swtlb.c
index 1d9faf50a61f..7dabfc1c3e16 100644
--- a/arch/riscv/kvm/vcpu_nested_swtlb.c
+++ b/arch/riscv/kvm/vcpu_nested_swtlb.c
@@ -4,6 +4,7 @@
  */
 
 #include <linux/kvm_host.h>
+#include <asm/kvm_nacl.h>
 
 int kvm_riscv_vcpu_nested_swtlb_xlate(struct kvm_vcpu *vcpu,
                                      const struct kvm_cpu_trap *trap,
@@ -14,6 +15,81 @@ int kvm_riscv_vcpu_nested_swtlb_xlate(struct kvm_vcpu *vcpu,
        return 0;
 }
 
+void kvm_riscv_vcpu_nested_swtlb_vvma_flush(struct kvm_vcpu *vcpu,
+                                           unsigned long vaddr, unsigned long size,
+                                           unsigned long order, unsigned long vmid)
+{
+       struct kvm_vcpu_nested *ns = &vcpu->arch.nested;
+       struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
+
+       if (vmid != -1UL && ((ns->csr.hgatp & HGATP_VMID) >> HGATP_VMID_SHIFT) != vmid)
+               return;
+
+       vmid = kvm_riscv_gstage_nested_vmid(READ_ONCE(v->vmid));
+       if (!vaddr && !size && !order) {
+               if (kvm_riscv_nacl_available())
+                       nacl_hfence_vvma_all(nacl_shmem(), vmid);
+               else
+                       kvm_riscv_local_hfence_vvma_all(vmid);
+       } else {
+               if (kvm_riscv_nacl_available())
+                       nacl_hfence_vvma(nacl_shmem(), vmid, vaddr, size, order);
+               else
+                       kvm_riscv_local_hfence_vvma_gva(vmid, vaddr, size, order);
+       }
+}
+
+void kvm_riscv_vcpu_nested_swtlb_vvma_flush_asid(struct kvm_vcpu *vcpu,
+                                                unsigned long vaddr, unsigned long size,
+                                                unsigned long order, unsigned long vmid,
+                                                unsigned long asid)
+{
+       struct kvm_vcpu_nested *ns = &vcpu->arch.nested;
+       struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
+
+       if (vmid != -1UL && ((ns->csr.hgatp & HGATP_VMID) >> HGATP_VMID_SHIFT) != vmid)
+               return;
+
+       vmid = kvm_riscv_gstage_nested_vmid(READ_ONCE(v->vmid));
+       if (!vaddr && !size && !order) {
+               if (kvm_riscv_nacl_available())
+                       nacl_hfence_vvma_asid_all(nacl_shmem(), vmid, asid);
+               else
+                       kvm_riscv_local_hfence_vvma_asid_all(vmid, asid);
+       } else {
+               if (kvm_riscv_nacl_available())
+                       nacl_hfence_vvma_asid(nacl_shmem(), vmid, asid,
+                                             vaddr, size, order);
+               else
+                       kvm_riscv_local_hfence_vvma_asid_gva(vmid, asid, vaddr,
+                                                            size, order);
+       }
+}
+
+void kvm_riscv_vcpu_nested_swtlb_gvma_flush(struct kvm_vcpu *vcpu,
+                                           gpa_t addr, gpa_t size, unsigned long order)
+{
+       /* TODO: */
+}
+
+void kvm_riscv_vcpu_nested_swtlb_gvma_flush_vmid(struct kvm_vcpu *vcpu,
+                                                gpa_t addr, gpa_t size, unsigned long order,
+                                                unsigned long vmid)
+{
+       struct kvm_vcpu_nested *ns = &vcpu->arch.nested;
+
+       if (vmid != -1UL && ((ns->csr.hgatp & HGATP_VMID) >> HGATP_VMID_SHIFT) != vmid)
+               return;
+
+       kvm_riscv_vcpu_nested_swtlb_gvma_flush(vcpu, addr, size, order);
+}
+
+void kvm_riscv_vcpu_nested_swtlb_host_flush(struct kvm_vcpu *vcpu,
+                                           gpa_t addr, gpa_t size, unsigned long order)
+{
+       /* TODO: */
+}
+
 void kvm_riscv_vcpu_nested_swtlb_process(struct kvm_vcpu *vcpu)
 {
        struct kvm_vcpu_nested_swtlb *nst = &vcpu->arch.nested.swtlb;
diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
index 506a510b6bff..d60c7b05cd02 100644
--- a/arch/riscv/kvm/vcpu_sbi_replace.c
+++ b/arch/riscv/kvm/vcpu_sbi_replace.c
@@ -123,14 +123,67 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
                kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_SENT);
                break;
        case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
+               /* Not supported if VCPU does not have H-extension */
+               if (!riscv_isa_extension_available(vcpu->arch.isa, h)) {
+                       retdata->err_val = SBI_ERR_NOT_SUPPORTED;
+                       break;
+               }
+
+               if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
+                       kvm_riscv_nested_hfence_gvma_all(vcpu->kvm, hbase, hmask);
+               else
+                       kvm_riscv_nested_hfence_gvma_gpa(vcpu->kvm, hbase, hmask,
+                                                        cp->a2, cp->a3, PAGE_SHIFT);
+               kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_GVMA_SENT);
+               break;
        case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
+               /* Not supported if VCPU does not have H-extension */
+               if (!riscv_isa_extension_available(vcpu->arch.isa, h)) {
+                       retdata->err_val = SBI_ERR_NOT_SUPPORTED;
+                       break;
+               }
+
+               if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
+                       kvm_riscv_nested_hfence_gvma_vmid_all(vcpu->kvm,
+                                                             hbase, hmask, cp->a4);
+               else
+                       kvm_riscv_nested_hfence_gvma_vmid_gpa(vcpu->kvm, hbase, hmask,
+                                                             cp->a2, cp->a3,
+                                                             PAGE_SHIFT, cp->a4);
+               kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_GVMA_VMID_SENT);
+               break;
        case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
+               /* Not supported if VCPU does not have H-extension */
+               if (!riscv_isa_extension_available(vcpu->arch.isa, h)) {
+                       retdata->err_val = SBI_ERR_NOT_SUPPORTED;
+                       break;
+               }
+
+               vmid = (vcpu->arch.nested.csr.hgatp & HGATP_VMID) >> HGATP_VMID_SHIFT;
+               if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
+                       kvm_riscv_nested_hfence_vvma_all(vcpu->kvm, hbase, hmask, vmid);
+               else
+                       kvm_riscv_nested_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
+                                                        cp->a2, cp->a3, PAGE_SHIFT, vmid);
+               kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT);
+               break;
        case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
-               /*
-                * Until nested virtualization is implemented, the
-                * SBI HFENCE calls should return not supported
-                * hence fallthrough.
-                */
+               /* Not supported if VCPU does not have H-extension */
+               if (!riscv_isa_extension_available(vcpu->arch.isa, h)) {
+                       retdata->err_val = SBI_ERR_NOT_SUPPORTED;
+                       break;
+               }
+
+               vmid = (vcpu->arch.nested.csr.hgatp & HGATP_VMID) >> HGATP_VMID_SHIFT;
+               if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
+                       kvm_riscv_nested_hfence_vvma_asid_all(vcpu->kvm, hbase, hmask,
+                                                             cp->a4, vmid);
+               else
+                       kvm_riscv_nested_hfence_vvma_asid_gva(vcpu->kvm, hbase, hmask,
+                                                             cp->a2, cp->a3, PAGE_SHIFT,
+                                                             cp->a4, vmid);
+               kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_SENT);
+               break;
        default:
                retdata->err_val = SBI_ERR_NOT_SUPPORTED;
        }
-- 
2.43.0

