Signed-off-by: Andrea Bastoni <[email protected]>
---
hypervisor/arch/x86/amd_iommu.c | 14 ++++++++++++++
hypervisor/arch/x86/ivshmem.c | 2 ++
hypervisor/arch/x86/setup.c | 1 +
hypervisor/arch/x86/vmx.c | 1 +
4 files changed, 18 insertions(+)
diff --git a/hypervisor/arch/x86/amd_iommu.c b/hypervisor/arch/x86/amd_iommu.c
index 40ec4e20..8ee8031d 100644
--- a/hypervisor/arch/x86/amd_iommu.c
+++ b/hypervisor/arch/x86/amd_iommu.c
@@ -151,6 +151,7 @@ static unsigned int iommu_units_count;
bool iommu_cell_emulates_ir(struct cell *cell)
{
+ (void)cell;
return false;
}
@@ -211,6 +212,7 @@ u64 amd_iommu_get_memory_region_flags(const struct jailhouse_memory *mem)
int iommu_map_memory_region(struct cell *cell,
const struct jailhouse_memory *mem)
{
+ (void)cell;
/*
* Check that the address is not outside the scope of the page tables.
* With 4 levels, we only support 48 address bits.
@@ -225,6 +227,8 @@ int iommu_map_memory_region(struct cell *cell,
int iommu_unmap_memory_region(struct cell *cell,
const struct jailhouse_memory *mem)
{
+ (void)cell;
+ (void)mem;
/* vcpu_map_memory_region already did the actual work. */
return 0;
}
@@ -380,6 +384,7 @@ void iommu_remove_pci_device(struct pci_device *device)
static void amd_iommu_cell_exit(struct cell *cell)
{
+ (void)cell;
}
static void wait_for_zero(volatile u64 *sem, unsigned long mask)
@@ -504,6 +509,10 @@ struct apic_irq_message
iommu_get_remapped_root_int(unsigned int iommu,
{
struct apic_irq_message dummy = { .valid = 0 };
+ (void)iommu;
+ (void)device_id;
+ (void)vector;
+ (void)remap_index;
/* TODO: Implement */
return dummy;
}
@@ -511,6 +520,10 @@ struct apic_irq_message
iommu_get_remapped_root_int(unsigned int iommu,
int iommu_map_interrupt(struct cell *cell, u16 device_id, unsigned int vector,
struct apic_irq_message irq_msg)
{
+ (void)cell;
+ (void)device_id;
+ (void)vector;
+ (void)irq_msg;
/* TODO: Implement */
return -ENOSYS;
}
@@ -740,6 +753,7 @@ static int amd_iommu_init_features(struct amd_iommu *entry,
static int amd_iommu_init_buffers(struct amd_iommu *entry,
struct jailhouse_iommu *iommu)
{
+ (void)iommu;
/* Allocate and configure command buffer */
entry->cmd_buf_base = page_alloc(&mem_pool, PAGES(CMD_BUF_SIZE));
if (!entry->cmd_buf_base)
diff --git a/hypervisor/arch/x86/ivshmem.c b/hypervisor/arch/x86/ivshmem.c
index 62c1808e..c55c4d24 100644
--- a/hypervisor/arch/x86/ivshmem.c
+++ b/hypervisor/arch/x86/ivshmem.c
@@ -60,4 +60,6 @@ int arch_ivshmem_update_msix(struct ivshmem_endpoint *ive, unsigned int vector,
void arch_ivshmem_update_intx(struct ivshmem_endpoint *ive, bool enabled)
{
+ (void)ive;
+ (void)enabled;
}
diff --git a/hypervisor/arch/x86/setup.c b/hypervisor/arch/x86/setup.c
index a3e1f9ed..98624052 100644
--- a/hypervisor/arch/x86/setup.c
+++ b/hypervisor/arch/x86/setup.c
@@ -221,6 +221,7 @@ void __attribute__((noreturn)) arch_cpu_activate_vmm(void)
void arch_cpu_restore(unsigned int cpu_id, int return_code)
{
+ (void)return_code;
static spinlock_t tss_lock;
struct per_cpu *cpu_data = per_cpu(cpu_id);
unsigned int tss_idx;
diff --git a/hypervisor/arch/x86/vmx.c b/hypervisor/arch/x86/vmx.c
index bf25d00d..e8e8b338 100644
--- a/hypervisor/arch/x86/vmx.c
+++ b/hypervisor/arch/x86/vmx.c
@@ -1028,6 +1028,7 @@ void vcpu_get_guest_paging_structs(struct guest_paging_structures *pg_structs)
pt_entry_t vcpu_pae_get_pdpte(page_table_t page_table, unsigned long virt)
{
+ (void)page_table;
return &this_cpu_data()->pdpte[(virt >> 30) & 0x3];
}
--
2.28.0
--
You received this message because you are subscribed to the Google Groups
"Jailhouse" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
To view this discussion on the web visit
https://groups.google.com/d/msgid/jailhouse-dev/20201021145404.100463-15-andrea.bastoni%40tum.de.