For demonstration purposes only.
Signed-off-by: Ralf Ramsauer <[email protected]>
---
hypervisor/arch/riscv/include/asm/cell.h | 1 +
hypervisor/arch/riscv/include/asm/ivshmem.h | 1 +
hypervisor/arch/riscv/include/asm/percpu.h | 4 +-
hypervisor/arch/riscv/include/asm/plic.h | 7 +
hypervisor/arch/riscv/ivshmem.c | 36 ++++-
hypervisor/arch/riscv/plic.c | 138 +++++++++++++++++++-
hypervisor/arch/riscv/traps.c | 1 +
7 files changed, 181 insertions(+), 7 deletions(-)
diff --git a/hypervisor/arch/riscv/include/asm/cell.h b/hypervisor/arch/riscv/include/asm/cell.h
index 58412414..fcfdaa32 100644
--- a/hypervisor/arch/riscv/include/asm/cell.h
+++ b/hypervisor/arch/riscv/include/asm/cell.h
@@ -23,6 +23,7 @@ struct arch_cell {
struct paging_structures mm;
u32 irq_bitmap[PLIC_MAX_IRQS / (sizeof(u32) * 8)];
+ u32 virq_present_bitmap[PLIC_MAX_IRQS / (sizeof(u32) * 8)];
};
#endif /* !_JAILHOUSE_ASM_CELL_H */
diff --git a/hypervisor/arch/riscv/include/asm/ivshmem.h b/hypervisor/arch/riscv/include/asm/ivshmem.h
index 03251590..8b193947 100644
--- a/hypervisor/arch/riscv/include/asm/ivshmem.h
+++ b/hypervisor/arch/riscv/include/asm/ivshmem.h
@@ -11,4 +11,5 @@
*/
struct arch_ivshmem_irq_cache {
+ u16 id[IVSHMEM_MSIX_VECTORS];
};
diff --git a/hypervisor/arch/riscv/include/asm/percpu.h b/hypervisor/arch/riscv/include/asm/percpu.h
index 4eda15b6..bcafff51 100644
--- a/hypervisor/arch/riscv/include/asm/percpu.h
+++ b/hypervisor/arch/riscv/include/asm/percpu.h
@@ -43,6 +43,8 @@ enum sbi_hart_state {
} hsm; \
bool wait_for_power_on; \
bool reset; \
- bool park;
+ bool park; \
+ u32 virq_enabled_bitmap[PLIC_MAX_IRQS / (sizeof(u32) * 8)]; \
+ u32 virq_pending_bitmap[PLIC_MAX_IRQS / (sizeof(u32) * 8)];
#define ARCH_PERCPU_FIELDS
diff --git a/hypervisor/arch/riscv/include/asm/plic.h b/hypervisor/arch/riscv/include/asm/plic.h
index c5414e9e..016e9b99 100644
--- a/hypervisor/arch/riscv/include/asm/plic.h
+++ b/hypervisor/arch/riscv/include/asm/plic.h
@@ -18,4 +18,11 @@
extern int plic_set_pending(void);
bool irqchip_irq_in_cell(struct cell *cell, unsigned int irq);
+void plic_register_virq(unsigned int irq);
+void plic_unregister_virq(unsigned int irq);
+
+void plic_send_virq(struct cell *cell, unsigned int irq);
+
+void plic_process_pending_virqs(void);
+
#endif /* _PLIC_H */
diff --git a/hypervisor/arch/riscv/ivshmem.c b/hypervisor/arch/riscv/ivshmem.c
index e5dd7973..3c645123 100644
--- a/hypervisor/arch/riscv/ivshmem.c
+++ b/hypervisor/arch/riscv/ivshmem.c
@@ -1,21 +1,35 @@
/*
* Jailhouse, a Linux-based partitioning hypervisor
*
- * Copyright (c) Siemens AG, 2020
+ * Copyright (c) Siemens AG, 2016-2019
+ * Copyright (c) OTH Regensburg, 2022
*
* Authors:
* Jan Kiszka <[email protected]>
+ * Ralf Ramsauer <[email protected]>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*/
-#include <jailhouse/entry.h>
#include <jailhouse/ivshmem.h>
+#include <jailhouse/cell.h>
+#include <asm/processor.h>
void arch_ivshmem_trigger_interrupt(struct ivshmem_endpoint *ive,
unsigned int vector)
{
+ unsigned int irq_id = ive->irq_cache.id[vector];
+
+ if (irq_id) {
+ /*
+ * Ensure that all data written by the sending guest is visible
+ * to the target before triggering the interrupt.
+ */
+ memory_barrier();
+
+ plic_send_virq(ive->device->cell, irq_id);
+ }
}
int arch_ivshmem_update_msix(struct ivshmem_endpoint *ive, unsigned int vector,
@@ -26,4 +40,22 @@ int arch_ivshmem_update_msix(struct ivshmem_endpoint *ive, unsigned int vector,
void arch_ivshmem_update_intx(struct ivshmem_endpoint *ive, bool enabled)
{
+ u8 pin = ive->cspace[PCI_CFG_INT/4] >> 8;
+ struct pci_device *device = ive->device;
+ unsigned int virq;
+
+ /*
+ * Lock used as barrier, ensuring all interrupts triggered after return
+ * use the new setting.
+ */
+ virq = device->cell->config->vpci_irq_base + pin - 1;
+ spin_lock(&ive->irq_lock);
+ if (enabled) {
+ ive->irq_cache.id[0] = virq;
+ plic_register_virq(virq);
+ } else {
+ ive->irq_cache.id[0] = 0;
+ plic_unregister_virq(virq);
+ }
+ spin_unlock(&ive->irq_lock);
}
diff --git a/hypervisor/arch/riscv/plic.c b/hypervisor/arch/riscv/plic.c
index 84f95c0b..1b9e0c3e 100644
--- a/hypervisor/arch/riscv/plic.c
+++ b/hypervisor/arch/riscv/plic.c
@@ -193,6 +193,11 @@ inline bool irqchip_irq_in_cell(struct cell *cell, unsigned int irq)
return irq_bitmap_test(cell->arch.irq_bitmap, irq);
}
+static inline bool irqchip_virq_in_cell(struct cell *cell, unsigned int irq)
+{
+ return irq_bitmap_test(cell->arch.virq_present_bitmap, irq);
+}
+
int plic_set_pending(void)
{
int my_context;
@@ -239,9 +244,38 @@ static inline void plic_passthru(const struct mmio_access *access)
plic_write(access->address, access->value);
}
+static bool plic_inject_pending_virqs(void)
+{
+ struct public_per_cpu *pcpu = this_cpu_public();
+ u32 idx, irq = 0;
+
+ for (idx = 0; idx < ARRAY_SIZE(pcpu->virq_pending_bitmap); idx++) {
+ irq = pcpu->virq_pending_bitmap[idx];
+ if (!irq)
+ continue;
+
+ /*
+ * FIXME: For the moment, simply inject the first pending IRQ.
+ * Later, we need to prioritise those IRQs. Haha. Per call of
+ * this routine, we can only inject ONE single IRQ. That's not
+ * an issue, as the guest will trap again after acknowledging
+ * the last irq. So there will be no misses of pending IRQs.
+ */
+
+ irq = ffsl(irq) + idx * 32;
+
+ pending[pcpu->phys_id] = irq;
+ irq_bitmap_clear(pcpu->virq_pending_bitmap, irq);
+ return true;
+ }
+
+ return false;
+}
+
static inline enum mmio_result
plic_handle_context_claim(struct mmio_access *access, unsigned long hart)
{
+ /* clear pending bit */
if (!access->is_write) {
access->value = pending[hart];
return MMIO_HANDLED;
@@ -254,7 +288,9 @@ plic_handle_context_claim(struct mmio_access *access, unsigned long hart)
return MMIO_ERROR;
}
- plic_write(access->address, access->value);
+ /* TODO: vIRQ could have been disabled before acknowledgement */
+ if (!irq_bitmap_test(this_cell()->arch.virq_present_bitmap, access->value))
+ plic_write(access->address, access->value);
/* Check if there's another physical IRQ pending */
/* TODO: This is where we would need to prioritise vIRQs */
@@ -262,6 +298,11 @@ plic_handle_context_claim(struct mmio_access *access, unsigned long hart)
if (pending[hart])
return MMIO_HANDLED;
+ /* TODO: vIRQ has the lowest prio at the moment */
+ plic_inject_pending_virqs();
+ if (pending[hart])
+ return MMIO_HANDLED;
+
guest_clear_ext();
ext_enable();
@@ -322,6 +363,12 @@ static enum mmio_result plic_handle_prio(struct mmio_access *access)
irq = access->address / REG_SZ;
+ if (irqchip_virq_in_cell(this_cell(), irq)) {
+ // TODO: Allow priorities
+ printk("PLIC: virq priorities not supported!\n");
+ return MMIO_HANDLED;
+ }
+
if (!irqchip_irq_in_cell(this_cell(), irq))
return MMIO_ERROR;
@@ -338,8 +385,8 @@ static enum mmio_result plic_handle_prio(struct mmio_access *access)
static enum mmio_result plic_handle_enable(struct mmio_access *access)
{
+ u32 *virq_enabled, irq_allowed_bitmap, virq_allowed_bitmap;
struct public_per_cpu *pc;
- u32 irq_allowed_bitmap;
unsigned int idx, cpu;
short int ctx;
@@ -389,20 +436,28 @@ allowed:
*/
idx = ((access->address - PLIC_ENABLE_BASE) % PLIC_ENABLE_OFF)
* 8 / PLIC_BITS_PER_REG;
+ // TODO: Should this be locked? virq_allowed_bitmap could be changed
+ // during execution
+ virq_enabled = &pc->virq_enabled_bitmap[idx];
if (!access->is_write) {
- access->value = plic_read(access->address);
+ access->value = plic_read(access->address) | *virq_enabled;
return MMIO_HANDLED;
}
/* write case */
irq_allowed_bitmap = this_cell()->arch.irq_bitmap[idx];
+ virq_allowed_bitmap = this_cell()->arch.virq_present_bitmap[idx];
- if (access->value & ~irq_allowed_bitmap) {
+ if (access->value & ~(irq_allowed_bitmap | virq_allowed_bitmap)) {
printk("FATAL: Cell enabled non-assigned IRQ\n");
return MMIO_ERROR;
}
+ *virq_enabled = access->value & virq_allowed_bitmap;
+
+ /* Only forward physical IRQs to the PLIC */
+ access->value &= irq_allowed_bitmap;
plic_passthru(access);
return MMIO_HANDLED;
@@ -443,6 +498,14 @@ static int plic_cell_init(struct cell *cell)
mmio_region_register(cell, plic_phys(), plic_size(), plic_handler,
cell);
+ /*
+ * TODO: Do we need that, or can we assume that this arrives already
+ * zeroed?
+ */
+ memset(cell->arch.irq_bitmap, 0, sizeof(cell->arch.irq_bitmap));
+ memset(cell->arch.virq_present_bitmap, 0,
+ sizeof(cell->arch.virq_present_bitmap));
+
for_each_irqchip(chip, cell->config, n) {
/* Only support one single PLIC at the moment */
if (chip->address !=
@@ -573,4 +636,71 @@ static void plic_config_commit(struct cell *cell)
}
}
+void plic_process_pending_virqs(void)
+{
+ /*
+ * We can only inject IRQs if there's no other IRQ waiting. No problem:
+ * If other IRQs are currently being handled, the cell must somewhen
+ * acknowledge the interrupt. On acknowledgement, this routine is
+ * called again, so we won't miss the IRQ.
+ */
+ if (guest_ext_pending())
+ return;
+
+ if (!plic_inject_pending_virqs())
+ return;
+
+ ext_disable();
+ guest_inject_ext();
+}
+
+void plic_send_virq(struct cell *cell, unsigned int irq)
+{
+ struct public_per_cpu *pcpu;
+ unsigned int cpu;
+
+ //printk("PLIC: sending vIRQ %u from %s to %s\n", irq, this_cell()->config->name, cell->config->name);
+
+ if (!irq_bitmap_test(cell->arch.virq_present_bitmap, irq)) {
+ printk("vIRQ not present in destination\n");
+ return;
+ }
+
+ // Do we need to lock this section? A vIRQ could be disabled during injection
+ for_each_cpu(cpu, &cell->cpu_set) {
+ pcpu = public_per_cpu(cpu);
+ if (irq_bitmap_test(pcpu->virq_enabled_bitmap, irq)) {
+ irq_bitmap_set(pcpu->virq_pending_bitmap, irq);
+ memory_barrier();
+ arch_send_event(pcpu);
+ break;
+ }
+ }
+}
+
+void plic_register_virq(unsigned int irq)
+{
+ struct cell *cell = this_cell();
+
+ if (irqchip_irq_in_cell(cell, irq)) {
+ printk("FATAL: plic: Unable to register vIRQ %u\n", irq);
+ panic_stop();
+ }
+
+ irq_bitmap_set(cell->arch.virq_present_bitmap, irq);
+}
+
+void plic_unregister_virq(unsigned int irq)
+{
+ struct cell *cell = this_cell();
+ unsigned int cpu;
+
+ if (!irq_bitmap_test(cell->arch.virq_present_bitmap, irq))
+ return;
+
+ irq_bitmap_clear(cell->arch.virq_present_bitmap, irq);
+ for_each_cpu(cpu, &cell->cpu_set)
+ irq_bitmap_clear(public_per_cpu(cpu)->virq_enabled_bitmap, irq);
+}
+
DEFINE_UNIT (plic, "RISC-V PLIC");
diff --git a/hypervisor/arch/riscv/traps.c b/hypervisor/arch/riscv/traps.c
index 8f59a675..6b768c9b 100644
--- a/hypervisor/arch/riscv/traps.c
+++ b/hypervisor/arch/riscv/traps.c
@@ -154,6 +154,7 @@ static int handle_ipi(void)
* IPI is acknowledged here, as from now on, further IPIs might already
* be sent by remote CPUs.
*/
+ plic_process_pending_virqs();
spin_unlock(&pcpu->control_lock);
if (check_events)
--
2.36.1
--
You received this message because you are subscribed to the Google Groups
"Jailhouse" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
To view this discussion on the web visit
https://groups.google.com/d/msgid/jailhouse-dev/20220627132905.4338-41-ralf.ramsauer%40oth-regensburg.de.