[RFC PATCH v1 05/28] KVM: SVM: prepare for new bit definition in nested_ctl

2016-08-22 Thread Brijesh Singh
From: Tom Lendacky 

Currently the nested_ctl variable in the vmcb_control_area structure is
used to indicate nested paging support. The nested paging support field
is actually defined as bit 0 of this field. In order to support a new
feature flag the usage of the nested_ctl and nested paging support must
be converted to operate on a single bit.

Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/svm.h |2 ++
 arch/x86/kvm/svm.c |7 ---
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 14824fc..2aca535 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -136,6 +136,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
 #define SVM_VM_CR_SVM_DIS_MASK  0x0010ULL
 
+#define SVM_NESTED_CTL_NP_ENABLE   BIT(0)
+
 struct __attribute__ ((__packed__)) vmcb_seg {
u16 selector;
u16 attrib;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 9b2de7c..9b59260 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1177,7 +1177,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 
if (npt_enabled) {
/* Setup VMCB for Nested Paging */
-   control->nested_ctl = 1;
+   control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
clr_intercept(svm, INTERCEPT_INVLPG);
clr_exception_intercept(svm, PF_VECTOR);
clr_cr_intercept(svm, INTERCEPT_CR3_READ);
@@ -2701,7 +2701,8 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
if (vmcb->control.asid == 0)
return false;
 
-   if (vmcb->control.nested_ctl && !npt_enabled)
+   if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
+   !npt_enabled)
return false;
 
return true;
@@ -2776,7 +2777,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
else
svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
 
-   if (nested_vmcb->control.nested_ctl) {
+   if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
	kvm_mmu_unload(&svm->vcpu);
svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
	nested_svm_init_mmu_context(&svm->vcpu);

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH v1 26/28] KVM: SVM: add KVM_SEV_DEBUG_DECRYPT command

2016-08-22 Thread Brijesh Singh
The command decrypts a page of guest memory for debugging purposes.

For more information see [1], section 7.1

[1] http://support.amd.com/TechDocs/55766_SEV-KM%20API_Spec.pdf

Signed-off-by: Brijesh Singh 
---
 arch/x86/kvm/svm.c |   83 
 1 file changed, 83 insertions(+)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 63e7d15..b383bc7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5606,6 +5606,84 @@ err_1:
return ret;
 }
 
+static int __sev_dbg_decrypt_page(struct kvm *kvm, unsigned long src,
+ void *dst, int *psp_ret)
+{
+   int ret, pinned;
+   struct page **inpages;
+   struct psp_data_dbg *decrypt;
+
+   decrypt = kzalloc(sizeof(*decrypt), GFP_KERNEL);
+   if (!decrypt)
+   return -ENOMEM;
+
+   ret = -ENOMEM;
+   inpages = kzalloc(1 * sizeof(struct page *), GFP_KERNEL);
+   if (!inpages)
+   goto err_1;
+
+   /* pin the user virtual address */
+   ret = -EFAULT;
+   down_read(&kvm->mm->mmap_sem);
+   pinned = get_user_pages(src, 1, 1, 0, inpages, NULL);
+   up_read(&kvm->mm->mmap_sem);
+   if (pinned < 0)
+   goto err_2;
+
+   decrypt->hdr.buffer_len = sizeof(*decrypt);
+   decrypt->handle = kvm_sev_handle();
+   decrypt->dst_addr = __pa(dst) | sme_me_mask;
+   decrypt->src_addr = __sev_page_pa(inpages[0]);
+   decrypt->length = PAGE_SIZE;
+
+   ret = psp_dbg_decrypt(decrypt, psp_ret);
+   if (ret)
+   printk(KERN_ERR "SEV: DEBUG_DECRYPT %d (%#010x)\n",
+   ret, *psp_ret);
+   release_pages(inpages, 1, 0);
+err_2:
+   kfree(inpages);
+err_1:
+   kfree(decrypt);
+   return ret;
+}
+
+static int sev_dbg_decrypt(struct kvm *kvm,
+  struct kvm_sev_dbg_decrypt __user *argp,
+  int *psp_ret)
+{
+   void *data;
+   int ret, offset, len;
+   struct kvm_sev_dbg_decrypt debug;
+
+   if (!kvm_sev_guest())
+   return -ENOTTY;
+
+   if (copy_from_user(&debug, argp, sizeof(*argp)))
+   return -EFAULT;
+
+   if (debug.length > PAGE_SIZE)
+   return -EINVAL;
+
+   data = (void *) get_zeroed_page(GFP_KERNEL);
+   if (!data)
+   return -ENOMEM;
+
+   /* decrypt one page */
+   ret = __sev_dbg_decrypt_page(kvm, debug.src_addr, data, psp_ret);
+   if (ret)
+   goto err_1;
+
+   /* we have decrypted full page but copy request length */
+   offset = debug.src_addr & (PAGE_SIZE - 1);
+   len = min_t(size_t, (PAGE_SIZE - offset), debug.length);
+   if (copy_to_user((uint8_t *)debug.dst_addr, data + offset, len))
+   ret = -EFAULT;
+err_1:
+   free_page((unsigned long)data);
+   return ret;
+}
+
 static int amd_sev_issue_cmd(struct kvm *kvm,
 struct kvm_sev_issue_cmd __user *user_data)
 {
@@ -5636,6 +5714,11 @@ static int amd_sev_issue_cmd(struct kvm *kvm,
&ret_code);
break;
}
+   case KVM_SEV_DBG_DECRYPT: {
+   r = sev_dbg_decrypt(kvm, (void *)arg.opaque,
+   &ret_code);
+   break;
+   }
default:
break;
}

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH v1 19/28] KVM: SVM: prepare to reserve asid for SEV guest

2016-08-22 Thread Brijesh Singh
In current implementation, asid allocation starts from 1, this patch
adds a min_asid variable in svm_vcpu structure to allow starting asid
from something other than 1.

Signed-off-by: Brijesh Singh 
---
 arch/x86/kvm/svm.c |4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 211be94..f010b23 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -470,6 +470,7 @@ struct svm_cpu_data {
u64 asid_generation;
u32 max_asid;
u32 next_asid;
+   u32 min_asid;
struct kvm_ldttss_desc *tss_desc;
 
struct page *save_area;
@@ -726,6 +727,7 @@ static int svm_hardware_enable(void)
sd->asid_generation = 1;
sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
sd->next_asid = sd->max_asid + 1;
+   sd->min_asid = 1;
 
	native_store_gdt(&gdt_descr);
gdt = (struct desc_struct *)gdt_descr.address;
@@ -1887,7 +1889,7 @@ static void new_asid(struct vcpu_svm *svm, struct 
svm_cpu_data *sd)
 {
if (sd->next_asid > sd->max_asid) {
++sd->asid_generation;
-   sd->next_asid = 1;
+   sd->next_asid = sd->min_asid;
svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
}
 

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH v1 10/28] x86: Change early_ioremap to early_memremap for BOOT data

2016-08-22 Thread Brijesh Singh
From: Tom Lendacky 

Signed-off-by: Tom Lendacky 
---
 arch/x86/kernel/acpi/boot.c |4 ++--
 arch/x86/kernel/mpparse.c   |   10 +-
 drivers/sfi/sfi_core.c  |6 +++---
 3 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 1ad5fe2..4622ea2 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -120,7 +120,7 @@ char *__init __acpi_map_table(unsigned long phys, unsigned 
long size)
if (!phys || !size)
return NULL;
 
-   return early_ioremap(phys, size);
+   return early_memremap(phys, size, BOOT_DATA);
 }
 
 void __init __acpi_unmap_table(char *map, unsigned long size)
@@ -128,7 +128,7 @@ void __init __acpi_unmap_table(char *map, unsigned long 
size)
if (!map || !size)
return;
 
-   early_iounmap(map, size);
+   early_memunmap(map, size);
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 0f8d204..04def9f 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -436,9 +436,9 @@ static unsigned long __init get_mpc_size(unsigned long 
physptr)
struct mpc_table *mpc;
unsigned long size;
 
-   mpc = early_ioremap(physptr, PAGE_SIZE);
+   mpc = early_memremap(physptr, PAGE_SIZE, BOOT_DATA);
size = mpc->length;
-   early_iounmap(mpc, PAGE_SIZE);
+   early_memunmap(mpc, PAGE_SIZE);
apic_printk(APIC_VERBOSE, "  mpc: %lx-%lx\n", physptr, physptr + size);
 
return size;
@@ -450,7 +450,7 @@ static int __init check_physptr(struct mpf_intel *mpf, 
unsigned int early)
unsigned long size;
 
size = get_mpc_size(mpf->physptr);
-   mpc = early_ioremap(mpf->physptr, size);
+   mpc = early_memremap(mpf->physptr, size, BOOT_DATA);
/*
 * Read the physical hardware table.  Anything here will
 * override the defaults.
@@ -461,10 +461,10 @@ static int __init check_physptr(struct mpf_intel *mpf, 
unsigned int early)
 #endif
pr_err("BIOS bug, MP table errors detected!...\n");
pr_cont("... disabling SMP support. (tell your hw vendor)\n");
-   early_iounmap(mpc, size);
+   early_memunmap(mpc, size);
return -1;
}
-   early_iounmap(mpc, size);
+   early_memunmap(mpc, size);
 
if (early)
return -1;
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c
index 296db7a..3078d35 100644
--- a/drivers/sfi/sfi_core.c
+++ b/drivers/sfi/sfi_core.c
@@ -92,7 +92,7 @@ static struct sfi_table_simple *syst_va __read_mostly;
 static u32 sfi_use_ioremap __read_mostly;
 
 /*
- * sfi_un/map_memory calls early_ioremap/iounmap which is a __init function
+ * sfi_un/map_memory calls early_memremap/memunmap which is a __init function
  * and introduces section mismatch. So use __ref to make it calm.
  */
 static void __iomem * __ref sfi_map_memory(u64 phys, u32 size)
@@ -103,7 +103,7 @@ static void __iomem * __ref sfi_map_memory(u64 phys, u32 
size)
if (sfi_use_ioremap)
return ioremap_cache(phys, size);
else
-   return early_ioremap(phys, size);
+   return early_memremap(phys, size, BOOT_DATA);
 }
 
 static void __ref sfi_unmap_memory(void __iomem *virt, u32 size)
@@ -114,7 +114,7 @@ static void __ref sfi_unmap_memory(void __iomem *virt, u32 
size)
if (sfi_use_ioremap)
iounmap(virt);
else
-   early_iounmap(virt, size);
+   early_memunmap(virt, size);
 }
 
 static void sfi_print_table_header(unsigned long long pa,

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH v1 11/28] x86: Don't decrypt trampoline area if SEV is active

2016-08-22 Thread Brijesh Singh
From: Tom Lendacky 

When Secure Encrypted Virtualization is active instruction fetches are
always interpreted as being from encrypted memory so the trampoline area
must remain encrypted when SEV is active.

Signed-off-by: Tom Lendacky 
---
 arch/x86/realmode/init.c |9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index c3edb49..f3207e5 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -138,10 +138,13 @@ static void __init set_real_mode_permissions(void)
/*
 * If memory encryption is active, the trampoline area will need to
 * be in non-encrypted memory in order to bring up other processors
-* successfully.
+* successfully. This only applies to SME, SEV requires the trampoline
+* to be encrypted.
 */
-   sme_early_mem_dec(__pa(base), size);
-   sme_set_mem_dec(base, size);
+   if (!sev_active) {
+   sme_early_mem_dec(__pa(base), size);
+   sme_set_mem_dec(base, size);
+   }
 
set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH v1 16/28] x86: Add support to determine if running with SEV enabled

2016-08-22 Thread Brijesh Singh
From: Tom Lendacky 

Early in the boot process, add a check to determine if the kernel is
running with Secure Encrypted Virtualization (SEV) enabled. If active,
the kernel will perform steps necessary to ensure the proper kernel
initialization process is performed.

Signed-off-by: Tom Lendacky 
---
 arch/x86/boot/compressed/Makefile  |2 +
 arch/x86/boot/compressed/head_64.S |   19 +
 arch/x86/boot/compressed/mem_encrypt.S |  123 
 arch/x86/include/uapi/asm/hyperv.h |4 +
 arch/x86/include/uapi/asm/kvm_para.h   |3 +
 arch/x86/kernel/mem_encrypt.S  |   36 +
 6 files changed, 187 insertions(+)
 create mode 100644 arch/x86/boot/compressed/mem_encrypt.S

diff --git a/arch/x86/boot/compressed/Makefile 
b/arch/x86/boot/compressed/Makefile
index 536ccfc..4888df9 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -73,6 +73,8 @@ vmlinux-objs-y := $(obj)/vmlinux.lds $(obj)/head_$(BITS).o 
$(obj)/misc.o \
$(obj)/string.o $(obj)/cmdline.o $(obj)/error.o \
$(obj)/piggy.o $(obj)/cpuflags.o
 
+vmlinux-objs-$(CONFIG_X86_64) += $(obj)/mem_encrypt.o
+
 vmlinux-objs-$(CONFIG_EARLY_PRINTK) += $(obj)/early_serial_console.o
 vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o
 ifdef CONFIG_X86_64
diff --git a/arch/x86/boot/compressed/head_64.S 
b/arch/x86/boot/compressed/head_64.S
index 0d80a7a..acb907a 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -131,6 +131,19 @@ ENTRY(startup_32)
  /*
   * Build early 4G boot pagetable
   */
+   /*
+* If SEV is active set the encryption mask in the page tables. This
+* will insure that when the kernel is copied and decompressed it
+* will be done so encrypted.
+*/
+   callsev_active
+   xorl%edx, %edx
+   testl   %eax, %eax
+   jz  1f
+   subl$32, %eax   /* Encryption bit is always above bit 31 */
+   bts %eax, %edx  /* Set encryption mask for page tables */
+1:
+
/* Initialize Page tables to 0 */
lealpgtable(%ebx), %edi
xorl%eax, %eax
@@ -141,12 +154,14 @@ ENTRY(startup_32)
lealpgtable + 0(%ebx), %edi
leal0x1007 (%edi), %eax
movl%eax, 0(%edi)
+   addl%edx, 4(%edi)
 
/* Build Level 3 */
lealpgtable + 0x1000(%ebx), %edi
leal0x1007(%edi), %eax
movl$4, %ecx
 1: movl%eax, 0x00(%edi)
+   addl%edx, 0x04(%edi)
addl$0x1000, %eax
addl$8, %edi
decl%ecx
@@ -157,6 +172,7 @@ ENTRY(startup_32)
movl$0x0183, %eax
movl$2048, %ecx
 1: movl%eax, 0(%edi)
+   addl%edx, 4(%edi)
addl$0x0020, %eax
addl$8, %edi
decl%ecx
@@ -344,6 +360,9 @@ preferred_addr:
subl$_end, %ebx
addq%rbp, %rbx
 
+   /* Check for SEV and adjust page tables as necessary */
+   callsev_adjust
+
/* Set up the stack */
leaqboot_stack_end(%rbx), %rsp
 
diff --git a/arch/x86/boot/compressed/mem_encrypt.S 
b/arch/x86/boot/compressed/mem_encrypt.S
new file mode 100644
index 000..56e19f6
--- /dev/null
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -0,0 +1,123 @@
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include 
+
+#include 
+#include 
+#include 
+#include 
+
+   .text
+   .code32
+ENTRY(sev_active)
+   xor %eax, %eax
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+   push%ebx
+   push%ecx
+   push%edx
+
+   /* Check if running under a hypervisor */
+   movl$0x4000, %eax
+   cpuid
+   cmpl$0x4001, %eax
+   jb  .Lno_sev
+
+   movl$0x4001, %eax
+   cpuid
+   bt  $KVM_FEATURE_SEV, %eax
+   jnc .Lno_sev
+
+   /*
+* Check for memory encryption feature:
+*   CPUID Fn8000_001F[EAX] - Bit 0
+*/
+   movl$0x801f, %eax
+   cpuid
+   bt  $0, %eax
+   jnc .Lno_sev
+
+   /*
+* Get memory encryption information:
+*   CPUID Fn8000_001F[EBX] - Bits 5:0
+* Pagetable bit position used to indicate encryption
+*/
+   movl%ebx, %eax
+   andl$0x3f, %eax
+   jmp .Lsev_exit
+
+.Lno_sev:
+   xor %eax, %eax
+
+.Lsev_exit:
+   pop %edx
+   pop %ecx
+   pop %ebx
+
+#endif /* CONFIG_AMD_MEM_ENCRYPT */
+
+   ret
+ENDPROC(sev_active)
+
+   .code64
+ENTRY(sev_adjust)
+#ifdef 

[RFC PATCH v1 20/28] KVM: SVM: prepare for SEV guest management API support

2016-08-22 Thread Brijesh Singh
The patch adds initial support required for Secure Encrypted
Virtualization (SEV) guest management API's.

ASID management:
 - Reserve asid range for SEV guest, SEV asid range is obtained
   through CPUID Fn8000_001f[ECX]. A non-SEV guest can use any
   asid outside the SEV asid range.
 - SEV guest must have asid value within asid range obtained
   through CPUID.
 - SEV guest must have the same asid for all vcpu's. A TLB flush
   is required if different vcpu for the same ASID is to be run
   on the same host CPU.

- save SEV private structure in kvm_arch.

- If SEV is available then initialize PSP firmware during hardware probe

Signed-off-by: Brijesh Singh 
---
 arch/x86/include/asm/kvm_host.h |9 ++
 arch/x86/kvm/svm.c  |  213 +++
 2 files changed, 221 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b1dd673..9b885fc 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -715,6 +715,12 @@ struct kvm_hv {
u64 hv_crash_ctl;
 };
 
+struct kvm_sev_info {
+   unsigned int asid;  /* asid for this guest */
+   unsigned int handle;/* firmware handle */
+   unsigned int ref_count; /* number of active vcpus */
+};
+
 struct kvm_arch {
unsigned int n_used_mmu_pages;
unsigned int n_requested_mmu_pages;
@@ -799,6 +805,9 @@ struct kvm_arch {
 
bool x2apic_format;
bool x2apic_broadcast_quirk_disabled;
+
+   /* struct for SEV guest */
+   struct kvm_sev_info sev_info;
 };
 
 struct kvm_vm_stat {
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f010b23..dcee635 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -34,6 +34,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -186,6 +187,9 @@ struct vcpu_svm {
struct page *avic_backing_page;
u64 *avic_physical_id_cache;
bool avic_is_running;
+
+   /* which host cpu was used for running this vcpu */
+   bool last_cpuid;
 };
 
 #define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK   (0xFF)
@@ -243,6 +247,25 @@ static int avic;
 module_param(avic, int, S_IRUGO);
 #endif
 
+/* Secure Encrypted Virtualization */
+static bool sev_enabled;
+static unsigned long max_sev_asid;
+static unsigned long *sev_asid_bitmap;
+
+#define kvm_sev_guest()(kvm->arch.sev_info.handle)
+#define kvm_sev_handle()   (kvm->arch.sev_info.handle)
+#define kvm_sev_ref()  (kvm->arch.sev_info.ref_count++)
+#define kvm_sev_unref()(kvm->arch.sev_info.ref_count--)
+#define svm_sev_handle()   (svm->vcpu.kvm->arch.sev_info.handle)
+#define svm_sev_asid() (svm->vcpu.kvm->arch.sev_info.asid)
+#define svm_sev_ref()  (svm->vcpu.kvm->arch.sev_info.ref_count++)
+#define svm_sev_unref()
(svm->vcpu.kvm->arch.sev_info.ref_count--)
+#define svm_sev_guest()(svm->vcpu.kvm->arch.sev_info.handle)
+#define svm_sev_ref_count()(svm->vcpu.kvm->arch.sev_info.ref_count)
+
+static int sev_asid_new(void);
+static void sev_asid_free(int asid);
+
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu);
 static void svm_complete_interrupts(struct vcpu_svm *svm);
@@ -474,6 +497,8 @@ struct svm_cpu_data {
struct kvm_ldttss_desc *tss_desc;
 
struct page *save_area;
+
+   void **sev_vmcb;  /* index = sev_asid, value = vmcb pointer */
 };
 
 static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
@@ -727,7 +752,10 @@ static int svm_hardware_enable(void)
sd->asid_generation = 1;
sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
sd->next_asid = sd->max_asid + 1;
-   sd->min_asid = 1;
+   sd->min_asid = max_sev_asid + 1;
+
+   if (sev_enabled)
+   memset(sd->sev_vmcb, 0, (max_sev_asid + 1) * sizeof(void *));
 
	native_store_gdt(&gdt_descr);
gdt = (struct desc_struct *)gdt_descr.address;
@@ -788,6 +816,7 @@ static void svm_cpu_uninit(int cpu)
 
per_cpu(svm_data, raw_smp_processor_id()) = NULL;
__free_page(sd->save_area);
+   kfree(sd->sev_vmcb);
kfree(sd);
 }
 
@@ -805,6 +834,14 @@ static int svm_cpu_init(int cpu)
if (!sd->save_area)
goto err_1;
 
+   if (sev_enabled) {
+   sd->sev_vmcb = kmalloc((max_sev_asid + 1) * sizeof(void *),
+   GFP_KERNEL);
+   r = -ENOMEM;
+   if (!sd->sev_vmcb)
+   goto err_1;
+   }
+
per_cpu(svm_data, cpu) = sd;
 
return 0;
@@ -931,6 +968,74 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
 }
 
+static __init void sev_hardware_setup(void)
+{
+   int ret, psp_ret;
+   struct psp_data_init *init;
+   struct psp_data_status *status;
+

[RFC PATCH v1 06/28] KVM: SVM: Add SEV feature definitions to KVM

2016-08-22 Thread Brijesh Singh
From: Tom Lendacky 

Define a new KVM cpu feature for Secure Encrypted Virtualization (SEV).
The kernel will check for the presence of this feature to determine if
it is running with SEV active.

Define the SEV enable bit for the VMCB control structure. The hypervisor
will use this bit to enable SEV in the guest.

Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/svm.h   |1 +
 arch/x86/include/uapi/asm/kvm_para.h |1 +
 2 files changed, 2 insertions(+)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 2aca535..fba2a7b 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -137,6 +137,7 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define SVM_VM_CR_SVM_DIS_MASK  0x0010ULL
 
 #define SVM_NESTED_CTL_NP_ENABLE   BIT(0)
+#define SVM_NESTED_CTL_SEV_ENABLE  BIT(1)
 
 struct __attribute__ ((__packed__)) vmcb_seg {
u16 selector;
diff --git a/arch/x86/include/uapi/asm/kvm_para.h 
b/arch/x86/include/uapi/asm/kvm_para.h
index 94dc8ca..67dd610f 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -24,6 +24,7 @@
 #define KVM_FEATURE_STEAL_TIME 5
 #define KVM_FEATURE_PV_EOI 6
 #define KVM_FEATURE_PV_UNHALT  7
+#define KVM_FEATURE_SEV8
 
 /* The last 8 bits are used to indicate how to interpret the flags field
  * in pvclock structure. If no bits are set, all flags are ignored.

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH v1 14/28] x86: Don't set the SME MSR bit when SEV is active

2016-08-22 Thread Brijesh Singh
From: Tom Lendacky 

When SEV is active the virtual machine cannot set the MSR for SME, so
don't set the trampoline flag for SME.

Signed-off-by: Tom Lendacky 
---
 arch/x86/realmode/init.c |2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index f3207e5..391d8ba 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -102,7 +102,7 @@ static void __init setup_real_mode(void)
*trampoline_cr4_features = mmu_cr4_features;
 
trampoline_header->flags = 0;
-   if (sme_me_mask)
+   if (sme_me_mask && !sev_active)
trampoline_header->flags |= TH_FLAGS_SME_ENABLE;
 
trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH v1 00/28] x86: Secure Encrypted Virtualization (AMD)

2016-08-22 Thread Brijesh Singh
This RFC series provides support for AMD's new Secure Encrypted 
Virtualization (SEV) feature. This RFC is build upon Secure Memory 
Encryption (SME) RFC.

SEV is an extension to the AMD-V architecture which supports running 
multiple VMs under the control of a hypervisor. When enabled, SEV 
hardware tags all code and data with its VM ASID which indicates which 
VM the data originated from or is intended for. This tag is kept with 
the data at all times when inside the SOC, and prevents that data from 
being used by anyone other than the owner. While the tag protects VM 
data inside the SOC, AES with 128 bit encryption protects data outside 
the SOC. When data leaves or enters the SOC, it is encrypted/decrypted 
respectively by hardware with a key based on the associated tag.

SEV guest VMs have the concept of private and shared memory.  Private memory
is encrypted with the  guest-specific key, while shared memory may be encrypted
with hypervisor key.  Certain types of memory (namely instruction pages and
guest page tables) are always treated as private memory by the hardware.
For data memory, SEV guest VMs can choose which pages they would like to
be private. The choice is done using the standard CPU page tables using
the C-bit, and is fully controlled by the guest. Due to security reasons
all the DMA operations inside the  guest must be performed on shared pages
(C-bit clear).  Note that since C-bit is only controllable by the guest OS
when it is operating in 64-bit or 32-bit PAE mode, in all other modes the
SEV hardware forces the C-bit to a 1.

SEV is designed to protect guest VMs from a benign but vulnerable
(i.e. not fully malicious) hypervisor. In particular, it reduces the attack
surface of guest VMs and can prevent certain types of VM-escape bugs
(e.g. hypervisor read-anywhere) from being used to steal guest data.

The RFC series also includes a crypto driver (psp.ko) which communicates
with SEV firmware that runs within the AMD secure processor provides a
secure key management interfaces. The hypervisor uses this interface to 
enable SEV for secure guest and perform common hypervisor activities
such as launching, running, snapshotting , migrating and debugging a 
guest. A new ioctl (KVM_SEV_ISSUE_CMD) is introduced which will enable
Qemu to send commands to the SEV firmware during guest life cycle.

The RFC series also includes patches required in guest OS to enable SEV 
feature. A guest OS can check SEV support by calling KVM_FEATURE cpuid 
instruction.

The following links provide additional details:

AMD Memory Encryption whitepaper:
 
http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2013/12/AMD_Memory_Encryption_Whitepaper_v7-Public.pdf

AMD64 Architecture Programmer's Manual:
http://support.amd.com/TechDocs/24593.pdf
SME is section 7.10
SEV is section 15.34

Secure Encrypted Virutualization Key Management:
http://support.amd.com/TechDocs/55766_SEV-KM API_Spec.pdf

---

TODO:
- send qemu/seabios RFC's on respective mailing list
- integrate the psp driver with CCP driver (they share the PCI id's)
- add SEV guest migration command support
- add SEV snapshotting command support
- determine how to do ioremap of physical memory with mem encryption enabled
  (e.g acpi tables)
- determine how to share the guest memory with hypervisor for to support
  pvclock driver

Brijesh Singh (11):
  crypto: add AMD Platform Security Processor driver
  KVM: SVM: prepare to reserve asid for SEV guest
  KVM: SVM: prepare for SEV guest management API support
  KVM: introduce KVM_SEV_ISSUE_CMD ioctl
  KVM: SVM: add SEV launch start command
  KVM: SVM: add SEV launch update command
  KVM: SVM: add SEV_LAUNCH_FINISH command
  KVM: SVM: add KVM_SEV_GUEST_STATUS command
  KVM: SVM: add KVM_SEV_DEBUG_DECRYPT command
  KVM: SVM: add KVM_SEV_DEBUG_ENCRYPT command
  KVM: SVM: add command to query SEV API version

Tom Lendacky (17):
  kvm: svm: Add support for additional SVM NPF error codes
  kvm: svm: Add kvm_fast_pio_in support
  kvm: svm: Use the hardware provided GPA instead of page walk
  x86: Secure Encrypted Virtualization (SEV) support
  KVM: SVM: prepare for new bit definition in nested_ctl
  KVM: SVM: Add SEV feature definitions to KVM
  x86: Do not encrypt memory areas if SEV is enabled
  Access BOOT related data encrypted with SEV active
  x86/efi: Access EFI data as encrypted when SEV is active
  x86: Change early_ioremap to early_memremap for BOOT data
  x86: Don't decrypt trampoline area if SEV is active
  x86: DMA support for SEV memory encryption
  iommu/amd: AMD IOMMU support for SEV
  x86: Don't set the SME MSR bit when SEV is active
  x86: Unroll string I/O when SEV is active
  x86: Add support to determine if running with SEV enabled
  KVM: SVM: Enable SEV by setting the SEV_ENABLE cpu feature


 arch/x86/boot/compressed/Makefile  |2 
 

[RFC PATCH v1 12/28] x86: DMA support for SEV memory encryption

2016-08-22 Thread Brijesh Singh
From: Tom Lendacky 

DMA access to memory mapped as encrypted while SEV is active can not be
encrypted during device write or decrypted during device read. In order
for DMA to properly work when SEV is active, the swiotlb bounce buffers
must be used.

Signed-off-by: Tom Lendacky 
---
 arch/x86/mm/mem_encrypt.c |   48 +
 1 file changed, 48 insertions(+)

diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 1154353..ce6e3ea 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -173,8 +173,52 @@ void __init sme_early_init(void)
/* Update the protection map with memory encryption mask */
for (i = 0; i < ARRAY_SIZE(protection_map); i++)
protection_map[i] = __pgprot(pgprot_val(protection_map[i]) | 
sme_me_mask);
+
+   if (sev_active)
+   swiotlb_force = 1;
 }
 
+static void *sme_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+  gfp_t gfp, unsigned long attrs)
+{
+   void *vaddr;
+
+   vaddr = x86_swiotlb_alloc_coherent(dev, size, dma_handle, gfp, attrs);
+   if (!vaddr)
+   return NULL;
+
+   /* Clear the SME encryption bit for DMA use */
+   sme_set_mem_dec(vaddr, size);
+
+   /* Remove the encryption bit from the DMA address */
+   *dma_handle &= ~sme_me_mask;
+
+   return vaddr;
+}
+
+static void sme_free(struct device *dev, size_t size, void *vaddr,
+dma_addr_t dma_handle, unsigned long attrs)
+{
+   /* Set the SME encryption bit for re-use as encrypted */
+   sme_set_mem_enc(vaddr, size);
+
+   x86_swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
+}
+
+static struct dma_map_ops sme_dma_ops = {
+   .alloc  = sme_alloc,
+   .free   = sme_free,
+   .map_page   = swiotlb_map_page,
+   .unmap_page = swiotlb_unmap_page,
+   .map_sg = swiotlb_map_sg_attrs,
+   .unmap_sg   = swiotlb_unmap_sg_attrs,
+   .sync_single_for_cpu= swiotlb_sync_single_for_cpu,
+   .sync_single_for_device = swiotlb_sync_single_for_device,
+   .sync_sg_for_cpu= swiotlb_sync_sg_for_cpu,
+   .sync_sg_for_device = swiotlb_sync_sg_for_device,
+   .mapping_error  = swiotlb_dma_mapping_error,
+};
+
 /* Architecture __weak replacement functions */
 void __init mem_encrypt_init(void)
 {
@@ -184,6 +228,10 @@ void __init mem_encrypt_init(void)
/* Make SWIOTLB use an unencrypted DMA area */
swiotlb_clear_encryption();
 
+   /* Use SEV DMA operations if SEV is active */
+   if (sev_active)
+   dma_ops = &sme_dma_ops;
+
pr_info("memory encryption active\n");
 }
 

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH v1 04/28] x86: Secure Encrypted Virtualization (SEV) support

2016-08-22 Thread Brijesh Singh
From: Tom Lendacky 

Provide support for Secure Encyrpted Virtualization (SEV). This initial
support defines the SEV active flag in order for the kernel to determine
if it is running with SEV active or not.

Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/mem_encrypt.h |3 +++
 arch/x86/kernel/mem_encrypt.S  |8 
 arch/x86/kernel/x8664_ksyms_64.c   |1 +
 3 files changed, 12 insertions(+)

diff --git a/arch/x86/include/asm/mem_encrypt.h 
b/arch/x86/include/asm/mem_encrypt.h
index e395729..9c592d1 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -20,6 +20,7 @@
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 
 extern unsigned long sme_me_mask;
+extern unsigned int sev_active;
 
 u8 sme_get_me_loss(void);
 
@@ -50,6 +51,8 @@ void swiotlb_set_mem_dec(void *vaddr, unsigned long size);
 
 #define sme_me_mask	0UL
 
+#define sev_active 0
+
 static inline u8 sme_get_me_loss(void)
 {
return 0;
diff --git a/arch/x86/kernel/mem_encrypt.S b/arch/x86/kernel/mem_encrypt.S
index bf9f6a9..6a8cd18 100644
--- a/arch/x86/kernel/mem_encrypt.S
+++ b/arch/x86/kernel/mem_encrypt.S
@@ -96,6 +96,10 @@ ENDPROC(sme_enable)
 
 ENTRY(sme_encrypt_kernel)
 #ifdef CONFIG_AMD_MEM_ENCRYPT
+   /* If SEV is active then the kernel is already encrypted */
+   cmpl$0, sev_active(%rip)
+   jnz .Lencrypt_exit
+
/* If SME is not active then no need to encrypt the kernel */
cmpq$0, sme_me_mask(%rip)
jz  .Lencrypt_exit
@@ -334,6 +338,10 @@ sme_me_loss:
.byte   0x00
.align  8
 
+ENTRY(sev_active)
+   .word   0x
+   .align  8
+
 mem_encrypt_enable_option:
.asciz "mem_encrypt=on"
.align  8
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 651c4c8..14bfc0b 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -88,4 +88,5 @@ EXPORT_SYMBOL(___preempt_schedule_notrace);
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 EXPORT_SYMBOL_GPL(sme_me_mask);
 EXPORT_SYMBOL_GPL(sme_get_me_loss);
+EXPORT_SYMBOL_GPL(sev_active);
 #endif

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH v1 22/28] KVM: SVM: add SEV launch start command

2016-08-22 Thread Brijesh Singh
The command initiates the process to launch this guest into
SEV-enabled mode.

For more information on command structure see [1], section 6.1

[1] http://support.amd.com/TechDocs/55766_SEV-KM%20API_Spec.pdf

Signed-off-by: Brijesh Singh 
---
 arch/x86/kvm/svm.c |  212 +++-
 1 file changed, 209 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index dcee635..0b6da4a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -265,6 +265,9 @@ static unsigned long *sev_asid_bitmap;
 
 static int sev_asid_new(void);
 static void sev_asid_free(int asid);
+static void sev_deactivate_handle(unsigned int handle);
+static void sev_decommission_handle(unsigned int handle);
+static int sev_activate_asid(unsigned int handle, int asid, int *psp_ret);
 
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu);
@@ -1645,9 +1648,18 @@ static void sev_uninit_vcpu(struct vcpu_svm *svm)
 
svm_sev_unref();
 
-   for_each_possible_cpu(cpu) {
-   sd = per_cpu(svm_data, cpu);
-   sd->sev_vmcb[asid] = NULL;
+   /* When the reference count reaches zero, free the SEV ASID and
+* deactivate the PSP handle.
+*/
+   if (!svm_sev_ref_count()) {
+   sev_deactivate_handle(svm_sev_handle());
+   sev_decommission_handle(svm_sev_handle());
+   sev_asid_free(svm_sev_asid());
+
+   for_each_possible_cpu(cpu) {
+   sd = per_cpu(svm_data, cpu);
+   sd->sev_vmcb[asid] = NULL;
+   }
}
 }
 
@@ -5196,6 +5208,198 @@ static void sev_asid_free(int asid)
clear_bit(asid, sev_asid_bitmap);
 }
 
+/*
+ * Tear down the PSP guest context identified by @handle via the SEV
+ * DECOMMISSION command.  Errors are logged but not propagated: the
+ * caller is on a teardown path and cannot recover anyway.
+ */
+static void sev_decommission_handle(unsigned int handle)
+{
+   int ret, psp_ret;
+   struct psp_data_decommission *decommission;
+
+   decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
+   if (!decommission)
+   return;
+
+   decommission->hdr.buffer_len = sizeof(*decommission);
+   decommission->handle = handle;
+   ret = psp_guest_decommission(decommission, &psp_ret);
+   if (ret)
+   printk(KERN_ERR "SEV: DECOMMISSION ret=%d (%#010x)\n",
+   ret, psp_ret);
+
+   kfree(decommission);
+}
+
+/*
+ * Deactivate the PSP guest context identified by @handle (SEV
+ * DEACTIVATE), then flush caches on all CPUs and issue DF_FLUSH so no
+ * encrypted cache lines for the guest's ASID remain.  Errors are
+ * logged but not propagated (teardown path).
+ */
+static void sev_deactivate_handle(unsigned int handle)
+{
+   int ret, psp_ret;
+   struct psp_data_deactivate *deactivate;
+
+   deactivate = kzalloc(sizeof(*deactivate), GFP_KERNEL);
+   if (!deactivate)
+   return;
+
+   deactivate->hdr.buffer_len = sizeof(*deactivate);
+   deactivate->handle = handle;
+   ret = psp_guest_deactivate(deactivate, &psp_ret);
+   if (ret) {
+   printk(KERN_ERR "SEV: DEACTIVATE ret=%d (%#010x)\n",
+   ret, psp_ret);
+   goto buffer_free;
+   }
+
+   /* Write back and invalidate caches before the data-fabric flush */
+   wbinvd_on_all_cpus();
+
+   ret = psp_guest_df_flush(&psp_ret);
+   if (ret)
+   printk(KERN_ERR "SEV: DF_FLUSH ret=%d (%#010x)\n",
+   ret, psp_ret);
+
+buffer_free:
+   kfree(deactivate);
+}
+
+/*
+ * Bind a SEV guest @handle to an @asid with the PSP ACTIVATE command.
+ * Caches and the data fabric are flushed first so no stale encrypted
+ * lines remain for the ASID.  Returns 0 on success or a negative
+ * error; the raw PSP status is reported through @psp_ret.
+ */
+static int sev_activate_asid(unsigned int handle, int asid, int *psp_ret)
+{
+   struct psp_data_activate *data;
+   int ret;
+
+   wbinvd_on_all_cpus();
+
+   ret = psp_guest_df_flush(psp_ret);
+   if (ret) {
+   printk(KERN_ERR "SEV: DF_FLUSH ret=%d (%#010x)\n",
+   ret, *psp_ret);
+   return ret;
+   }
+
+   data = kzalloc(sizeof(*data), GFP_KERNEL);
+   if (!data)
+   return -ENOMEM;
+
+   data->hdr.buffer_len = sizeof(*data);
+   data->handle = handle;
+   data->asid   = asid;
+
+   ret = psp_guest_activate(data, psp_ret);
+   if (ret)
+   printk(KERN_ERR "SEV: ACTIVATE ret=%d (%#010x)\n",
+   ret, *psp_ret);
+
+   kfree(data);
+   return ret;
+}
+
+/*
+ * Prepare for a LAUNCH_START.  If the VM already holds an active PSP
+ * handle, deactivate and decommission it and keep the existing ASID;
+ * otherwise allocate a fresh ASID.  Returns 0 on success, negative
+ * error otherwise; the chosen ASID is stored in *@asid.
+ */
+static int sev_pre_start(struct kvm *kvm, int *asid)
+{
+   int new_asid;
+
+   if (kvm_sev_guest()) {
+   /* Re-launch: drop the old context, reuse the asid */
+   sev_deactivate_handle(kvm_sev_handle());
+   sev_decommission_handle(kvm_sev_handle());
+   *asid = kvm->arch.sev_info.asid;
+   return 0;
+   }
+
+   /* First launch for this VM: allocate a new asid */
+   new_asid = sev_asid_new();
+   if (new_asid < 0) {
+   printk(KERN_ERR "SEV: failed to allocate asid\n");
+   return new_asid;
+   }
+
+   *asid = new_asid;
+   return 0;
+}
+
+static int sev_post_start(struct kvm *kvm, int asid, int handle, int *psp_ret)
+{
+   int ret;
+
+   /* 

[RFC PATCH v1 18/28] crypto: add AMD Platform Security Processor driver

2016-08-22 Thread Brijesh Singh
The driver to communicate with Secure Encrypted Virtualization (SEV)
firmware running within the AMD secure processor providing a secure key
management interface for SEV guests.

Signed-off-by: Tom Lendacky 
Signed-off-by: Brijesh Singh 
---
 drivers/crypto/Kconfig   |   11 +
 drivers/crypto/Makefile  |1 
 drivers/crypto/psp/Kconfig   |8 
 drivers/crypto/psp/Makefile  |3 
 drivers/crypto/psp/psp-dev.c |  220 +++
 drivers/crypto/psp/psp-dev.h |   95 +
 drivers/crypto/psp/psp-ops.c |  454 +++
 drivers/crypto/psp/psp-pci.c |  376 +++
 include/linux/ccp-psp.h  |  833 ++
 include/uapi/linux/Kbuild|1 
 include/uapi/linux/ccp-psp.h |  182 +
 11 files changed, 2184 insertions(+)
 create mode 100644 drivers/crypto/psp/Kconfig
 create mode 100644 drivers/crypto/psp/Makefile
 create mode 100644 drivers/crypto/psp/psp-dev.c
 create mode 100644 drivers/crypto/psp/psp-dev.h
 create mode 100644 drivers/crypto/psp/psp-ops.c
 create mode 100644 drivers/crypto/psp/psp-pci.c
 create mode 100644 include/linux/ccp-psp.h
 create mode 100644 include/uapi/linux/ccp-psp.h

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 1af94e2..3bdbc51 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -464,6 +464,17 @@ if CRYPTO_DEV_CCP
source "drivers/crypto/ccp/Kconfig"
 endif
 
+config CRYPTO_DEV_PSP
+   bool "Support for AMD Platform Security Processor"
+   depends on X86 && PCI
+   help
+ The AMD Platform Security Processor provides hardware key-
+ management services for VMGuard encrypted memory.
+
+if CRYPTO_DEV_PSP
+   source "drivers/crypto/psp/Kconfig"
+endif
+
 config CRYPTO_DEV_MXS_DCP
tristate "Support for Freescale MXS DCP"
depends on (ARCH_MXS || ARCH_MXC)
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 3c6432d..1ea1e08 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
 obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
 obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
 obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
+obj-$(CONFIG_CRYPTO_DEV_PSP) += psp/
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
diff --git a/drivers/crypto/psp/Kconfig b/drivers/crypto/psp/Kconfig
new file mode 100644
index 000..acd9b87
--- /dev/null
+++ b/drivers/crypto/psp/Kconfig
@@ -0,0 +1,8 @@
+config CRYPTO_DEV_PSP_DD
+   tristate "PSP Key Management device driver"
+   depends on CRYPTO_DEV_PSP
+   default m
+   help
+ Provides the interface to use the AMD PSP key management APIs
+ for use with the AMD Secure Enhanced Virtualization. If you
+ choose 'M' here, this module will be called psp.
diff --git a/drivers/crypto/psp/Makefile b/drivers/crypto/psp/Makefile
new file mode 100644
index 000..1b7d00c
--- /dev/null
+++ b/drivers/crypto/psp/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_CRYPTO_DEV_PSP_DD) += psp.o
+psp-objs := psp-dev.o psp-ops.o
+psp-$(CONFIG_PCI) += psp-pci.o
diff --git a/drivers/crypto/psp/psp-dev.c b/drivers/crypto/psp/psp-dev.c
new file mode 100644
index 000..65d5c7e
--- /dev/null
+++ b/drivers/crypto/psp/psp-dev.c
@@ -0,0 +1,220 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "psp-dev.h"
+
+MODULE_AUTHOR("Advanced Micro Devices, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.1.0");
+MODULE_DESCRIPTION("AMD VMGuard key-management driver prototype");
+
+static struct psp_device *psp_master;
+
+static LIST_HEAD(psp_devs);
+static DEFINE_SPINLOCK(psp_devs_lock);
+
+static atomic_t psp_id;
+
+/*
+ * Register a PSP device: append it to the global psp_devs list and
+ * re-elect the master device.  The list and psp_master are protected
+ * by psp_devs_lock.
+ */
+static void psp_add_device(struct psp_device *psp)
+{
+   unsigned long flags;
+
+   spin_lock_irqsave(&psp_devs_lock, flags);
+
+   list_add_tail(&psp->entry, &psp_devs);
+   psp_master = psp->get_master(&psp_devs);
+
+   spin_unlock_irqrestore(&psp_devs_lock, flags);
+}
+
+/*
+ * Unregister a PSP device: remove it from the global psp_devs list
+ * and clear psp_master if it was the master.  Protected by
+ * psp_devs_lock.
+ */
+static void psp_del_device(struct psp_device *psp)
+{
+   unsigned long flags;
+
+   spin_lock_irqsave(&psp_devs_lock, flags);
+
+   list_del(&psp->entry);
+   if (psp == psp_master)
+   psp_master = NULL;
+
+   spin_unlock_irqrestore(&psp_devs_lock, flags);
+}
+
+/*
+ * Probe for SEV support on this PSP device.  A non-zero read of the
+ * command/response register is taken to mean SEV is available.
+ * NOTE(review): the exact PSP_CMDRESP semantics are not visible here;
+ * confirm against the PSP register specification.
+ */
+static void psp_check_support(struct psp_device *psp)
+{
+   if (ioread32(psp->io_regs + PSP_CMDRESP))
+   psp->sev_enabled = 1;
+}
+
+/**
+ * psp_get_master_device - returns a pointer to the 

[RFC PATCH v1 09/28] x86/efi: Access EFI data as encrypted when SEV is active

2016-08-22 Thread Brijesh Singh
From: Tom Lendacky 

EFI data is encrypted when the kernel is run under SEV. Update the
page table references to be sure the EFI memory areas are accessed
encrypted.

Signed-off-by: Tom Lendacky 
---
 arch/x86/platform/efi/efi_64.c |   14 --
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 0871ea4..98363f3 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -213,7 +213,7 @@ void efi_sync_low_kernel_mappings(void)
 
 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 {
-   unsigned long pfn, text;
+   unsigned long pfn, text, flags;
efi_memory_desc_t *md;
struct page *page;
unsigned npages;
@@ -230,6 +230,10 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, 
unsigned num_pages)
efi_scratch.efi_pgt = (pgd_t *)__sme_pa(efi_pgd);
pgd = efi_pgd;
 
+   flags = _PAGE_NX | _PAGE_RW;
+   if (sev_active)
+   flags |= _PAGE_ENC;
+
/*
 * It can happen that the physical address of new_memmap lands in memory
 * which is not mapped in the EFI page table. Therefore we need to go
@@ -237,7 +241,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, 
unsigned num_pages)
 * phys_efi_set_virtual_address_map().
 */
pfn = pa_memmap >> PAGE_SHIFT;
-   if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX | 
_PAGE_RW)) {
+   if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, flags)) {
pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
return 1;
}
@@ -302,6 +306,9 @@ static void __init __map_region(efi_memory_desc_t *md, u64 
va)
if (!(md->attribute & EFI_MEMORY_WB))
flags |= _PAGE_PCD;
 
+   if (sev_active)
+   flags |= _PAGE_ENC;
+
pfn = md->phys_addr >> PAGE_SHIFT;
if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
@@ -426,6 +433,9 @@ void __init efi_runtime_update_mappings(void)
(md->type != EFI_RUNTIME_SERVICES_CODE))
pf |= _PAGE_RW;
 
+   if (sev_active)
+   pf |= _PAGE_ENC;
+
/* Update the 1:1 mapping */
pfn = md->phys_addr >> PAGE_SHIFT;
if (kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, 
md->num_pages, pf))

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH v1 07/28] x86: Do not encrypt memory areas if SEV is enabled

2016-08-22 Thread Brijesh Singh
From: Tom Lendacky 

When running under SEV, some memory areas that were originally not
encrypted under SME are already encrypted. In these situations do not
attempt to encrypt them.

Signed-off-by: Tom Lendacky 
---
 arch/x86/kernel/head64.c |4 ++--
 arch/x86/kernel/setup.c  |7 ---
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 358d7bc..4a15def 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -114,7 +114,7 @@ static void __init create_unencrypted_mapping(void 
*address, unsigned long size)
unsigned long physaddr = (unsigned long)address - __PAGE_OFFSET;
pmdval_t pmd_flags, pmd;
 
-   if (!sme_me_mask)
+   if (!sme_me_mask || sev_active)
return;
 
/* Clear the encryption mask from the early_pmd_flags */
@@ -165,7 +165,7 @@ static void __init __clear_mapping(unsigned long address)
 
 static void __init clear_mapping(void *address, unsigned long size)
 {
-   if (!sme_me_mask)
+   if (!sme_me_mask || sev_active)
return;
 
do {
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index cec8a63..9c10383 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -380,10 +380,11 @@ static void __init reserve_initrd(void)
 
/*
 * This memory is marked encrypted by the kernel but the ramdisk
-* was loaded in the clear by the bootloader, so make sure that
-* the ramdisk image is encrypted.
+* was loaded in the clear by the bootloader (unless SEV is active),
+* so make sure that the ramdisk image is encrypted.
 */
-   sme_early_mem_enc(ramdisk_image, ramdisk_end - ramdisk_image);
+   if (!sev_active)
+   sme_early_mem_enc(ramdisk_image, ramdisk_end - ramdisk_image);
 
initrd_start = 0;
 

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH v1 17/28] KVM: SVM: Enable SEV by setting the SEV_ENABLE cpu feature

2016-08-22 Thread Brijesh Singh
From: Tom Lendacky 

Modify the SVM cpuid update function to indicate if Secure Encrypted
Virtualization (SEV) is active by setting the SEV KVM cpu features bit
if SEV is active.  SEV is active if Secure Memory Encryption is active
in the host and the SEV_ENABLE bit of the VMCB is set.

Signed-off-by: Tom Lendacky 
---
 arch/x86/kvm/cpuid.c |4 +++-
 arch/x86/kvm/svm.c   |   18 ++
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 3235e0f..d34faea 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -583,7 +583,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 
*entry, u32 function,
entry->edx = 0;
break;
case 0x8000:
-   entry->eax = min(entry->eax, 0x801a);
+   entry->eax = min(entry->eax, 0x801f);
break;
case 0x8001:
entry->edx &= kvm_cpuid_8000_0001_edx_x86_features;
@@ -616,6 +616,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 
*entry, u32 function,
break;
case 0x801d:
break;
+   case 0x801f:
+   break;
/*Add support for Centaur's CPUID instruction*/
case 0xC000:
/*Just support up to 0xC004 now*/
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 9b59260..211be94 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -43,6 +43,7 @@
 #include 
 
 #include 
+#include 
 #include "trace.h"
 
 #define __ex(x) __kvm_handle_fault_on_reboot(x)
@@ -4677,10 +4678,27 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 {
struct vcpu_svm *svm = to_svm(vcpu);
struct kvm_cpuid_entry2 *entry;
+   struct vmcb_control_area *ca = >vmcb->control;
+   struct kvm_cpuid_entry2 *features, *sev_info;
 
/* Update nrips enabled cache */
svm->nrips_enabled = !!guest_cpuid_has_nrips(>vcpu);
 
+   /* Check for Secure Encrypted Virtualization support */
+   features = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
+   if (!features)
+   return;
+
+   sev_info = kvm_find_cpuid_entry(vcpu, 0x801f, 0);
+   if (!sev_info)
+   return;
+
+   if (ca->nested_ctl & SVM_NESTED_CTL_SEV_ENABLE) {
+   features->eax |= (1 << KVM_FEATURE_SEV);
+   cpuid(0x801f, _info->eax, _info->ebx,
+ _info->ecx, _info->edx);
+   }
+
if (!kvm_vcpu_apicv_active(vcpu))
return;
 

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH v1 01/28] kvm: svm: Add support for additional SVM NPF error codes

2016-08-22 Thread Brijesh Singh
From: Tom Lendacky 

AMD hardware adds two additional bits to aid in nested page fault handling.

Bit 32 - NPF occurred while translating the guest's final physical address
Bit 33 - NPF occurred while translating the guest page tables

The guest page tables fault indicator can be used as an aid for nested
virtualization. Using V0 for the host, V1 for the first level guest and
V2 for the second level guest, when both V1 and V2 are using nested paging
there are currently a number of unnecessary instruction emulations. When
V2 is launched shadow paging is used in V1 for the nested tables of V2. As
a result, KVM marks these pages as RO in the host nested page tables. When
V2 exits and we resume V1, these pages are still marked RO.

Every nested walk for a guest page table is treated as a user-level write
access and this causes a lot of NPFs because the V1 page tables are marked
RO in the V0 nested tables. While executing V1, when these NPFs occur KVM
sees a write to a read-only page, emulates the V1 instruction and unprotects
the page (marking it RW). This patch looks for cases where we get a NPF due
to a guest page table walk where the page was marked RO. It immediately
unprotects the page and resumes the guest, leading to far fewer instruction
emulations when nested virtualization is used.

Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/kvm_host.h |   11 ++-
 arch/x86/kvm/mmu.c  |   20 ++--
 arch/x86/kvm/svm.c  |2 +-
 3 files changed, 29 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c51c1cb..3f05d36 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -191,6 +191,8 @@ enum {
 #define PFERR_RSVD_BIT 3
 #define PFERR_FETCH_BIT 4
 #define PFERR_PK_BIT 5
+#define PFERR_GUEST_FINAL_BIT 32
+#define PFERR_GUEST_PAGE_BIT 33
 
 #define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
 #define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
@@ -198,6 +200,13 @@ enum {
 #define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
 #define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
 #define PFERR_PK_MASK (1U << PFERR_PK_BIT)
+#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
+#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)
+
+#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |   \
+PFERR_USER_MASK |  \
+PFERR_WRITE_MASK | \
+PFERR_PRESENT_MASK)
 
 /* apic attention bits */
 #define KVM_APIC_CHECK_VAPIC   0
@@ -1203,7 +1212,7 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
   void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a7040f4..3b47a5d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4512,7 +4512,7 @@ static void make_mmu_pages_available(struct kvm_vcpu 
*vcpu)
kvm_mmu_commit_zap_page(vcpu->kvm, _list);
 }
 
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
   void *insn, int insn_len)
 {
int r, emulation_type = EMULTYPE_RETRY;
@@ -4531,12 +4531,28 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t 
cr2, u32 error_code,
return r;
}
 
-   r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
+   r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
+ false);
if (r < 0)
return r;
if (!r)
return 1;
 
+   /*
+* Before emulating the instruction, check if the error code
+* was due to a RO violation while translating the guest page.
+* This can occur when using nested virtualization with nested
+* paging in both guests. If true, we simply unprotect the page
+* and resume the guest.
+*
+* Note: AMD only (since it supports the PFERR_GUEST_PAGE_MASK used
+*   in PFERR_NESTED_GUEST_PAGE)
+*/
+   if (error_code == PFERR_NESTED_GUEST_PAGE) {
+   kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
+   return 1;
+   }
+
if (mmio_info_in_cache(vcpu, cr2, direct))
emulation_type = 0;
 emulate:
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1e6b84b..d8b9c8c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1935,7 +1935,7 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned 
long 

[RFC PATCH v1 03/28] kvm: svm: Use the hardware provided GPA instead of page walk

2016-08-22 Thread Brijesh Singh
From: Tom Lendacky 

When a guest causes a NPF which requires emulation, KVM sometimes walks
the guest page tables to translate the GVA to a GPA. This is unnecessary
most of the time on AMD hardware since the hardware provides the GPA in
EXITINFO2.

The only exception cases involve string operations involving rep or
operations that use two memory locations. With rep, the GPA will only be
the value of the initial NPF and with dual memory locations we won't know
which memory address was translated into EXITINFO2.

Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/kvm_emulate.h |3 +++
 arch/x86/include/asm/kvm_host.h|3 +++
 arch/x86/kvm/svm.c |2 ++
 arch/x86/kvm/x86.c |   17 -
 4 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm_emulate.h 
b/arch/x86/include/asm/kvm_emulate.h
index e9cd7be..2d1ac09 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -344,6 +344,9 @@ struct x86_emulate_ctxt {
struct read_cache mem_read;
 };
 
+/* String operation identifier (matches the definition in emulate.c) */
+#define CTXT_STRING_OP (1 << 13)
+
 /* Repeat String Operation Prefix */
 #define REPE_PREFIX0xf3
 #define REPNE_PREFIX   0xf2
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c38f878..b1dd673 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -667,6 +667,9 @@ struct kvm_vcpu_arch {
 
int pending_ioapic_eoi;
int pending_external_vector;
+
+   /* GPA available (AMD only) */
+   bool gpa_available;
 };
 
 struct kvm_lpage_info {
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index fd5a9a8..9b2de7c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4055,6 +4055,8 @@ static int handle_exit(struct kvm_vcpu *vcpu)
 
trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
 
+   vcpu->arch.gpa_available = (exit_code == SVM_EXIT_NPF);
+
if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
vcpu->arch.cr0 = svm->vmcb->save.cr0;
if (npt_enabled)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 78295b0..d6f2f4b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4382,7 +4382,19 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, 
unsigned long gva,
return 1;
}
 
-   *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
+   /*
+* If the exit was due to a NPF we may already have a GPA.
+* If the GPA is present, use it to avoid the GVA to GPA table
+* walk. Note, this cannot be used on string operations since
+* string operation using rep will only have the initial GPA
+* from when the NPF occurred.
+*/
+   if (vcpu->arch.gpa_available &&
+   !(vcpu->arch.emulate_ctxt.d & CTXT_STRING_OP))
+   *gpa = exception->address;
+   else
+   *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access,
+  exception);
 
if (*gpa == UNMAPPED_GVA)
return -1;
@@ -5504,6 +5516,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
}
 
 restart:
+   /* Save the faulting GPA (cr2) in the address field */
+   ctxt->exception.address = cr2;
+
r = x86_emulate_insn(ctxt);
 
if (r == EMULATION_INTERCEPTED)

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH v1 23/28] KVM: SVM: add SEV launch update command

2016-08-22 Thread Brijesh Singh
The command is used for encrypting a guest memory region.

For more information see [1], section 6.2

[1] http://support.amd.com/TechDocs/55766_SEV-KM%20API_Spec.pdf

Signed-off-by: Brijesh Singh 
---
 arch/x86/kvm/svm.c |  126 
 1 file changed, 126 insertions(+)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0b6da4a..c78bdc6 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -35,6 +35,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 
 #include 
 #include 
@@ -263,6 +265,8 @@ static unsigned long *sev_asid_bitmap;
 #define svm_sev_guest()(svm->vcpu.kvm->arch.sev_info.handle)
 #define svm_sev_ref_count()(svm->vcpu.kvm->arch.sev_info.ref_count)
 
+#define __sev_page_pa(x) ((page_to_pfn(x) << PAGE_SHIFT) | sme_me_mask)
+
 static int sev_asid_new(void);
 static void sev_asid_free(int asid);
 static void sev_deactivate_handle(unsigned int handle);
@@ -5376,6 +5380,123 @@ err_1:
return ret;
 }
 
+/*
+ * Pin the @npages user pages backing @uaddr so the PSP can operate on
+ * stable physical addresses.  On success the page pointers are stored
+ * in @pages and 0 is returned; on failure any partially pinned pages
+ * are released and 1 is returned.
+ */
+static int sev_pre_update(struct page **pages, unsigned long uaddr, int npages)
+{
+   int pinned;
+
+   /* pin the user virtual address */
+   down_read(&current->mm->mmap_sem);
+   pinned = get_user_pages(uaddr, npages, 1, 0, pages, NULL);
+   up_read(&current->mm->mmap_sem);
+   if (pinned != npages) {
+   printk(KERN_ERR "SEV: failed to pin  %d pages (got %d)\n",
+   npages, pinned);
+   goto err;
+   }
+
+   return 0;
+err:
+   /* get_user_pages() may have pinned fewer pages or returned < 0 */
+   if (pinned > 0)
+   release_pages(pages, pinned, 0);
+   return 1;
+}
+
+/*
+ * Encrypt a region of guest memory with the SEV LAUNCH_UPDATE command.
+ *
+ * The user buffer described by @arg is pinned and each backing page
+ * (the first possibly at an offset, the last possibly partial) is
+ * handed to the PSP.  Returns 0 on success or a negative error; the
+ * raw PSP status is reported through @psp_ret.
+ */
+static int sev_launch_update(struct kvm *kvm,
+struct kvm_sev_launch_update __user *arg,
+int *psp_ret)
+{
+   int first, last;
+   struct page **inpages;
+   int ret, nr_pages;
+   unsigned long uaddr, ulen;
+   int i, buffer_len, len, offset;
+   struct kvm_sev_launch_update params;
+   struct psp_data_launch_update *update;
+
+   /* Get the parameters from the user */
+   if (copy_from_user(&params, arg, sizeof(*arg)))
+   return -EFAULT;
+
+   uaddr = params.address;
+   ulen = params.length;
+
+   /* Get number of pages spanned by [uaddr, uaddr + ulen) */
+   first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
+   last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
+   nr_pages = (last - first + 1);
+
+   /* allocate the buffers */
+   buffer_len = sizeof(*update);
+   update = kzalloc(buffer_len, GFP_KERNEL);
+   if (!update)
+   return -ENOMEM;
+
+   ret = -ENOMEM;
+   inpages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+   if (!inpages)
+   goto err_1;
+
+   ret = sev_pre_update(inpages, uaddr, nr_pages);
+   if (ret)
+   goto err_2;
+
+   /* the array of pages returned by get_user_pages() is a page-aligned
+* memory. Since the user buffer is probably not page-aligned, we need
+* to calculate the offset within a page for first update entry.
+*/
+   offset = uaddr & (PAGE_SIZE - 1);
+   len = min_t(size_t, (PAGE_SIZE - offset), ulen);
+   ulen -= len;
+
+   /* update first page -
+* special care need to be taken for the first page because we might
+* be dealing with offset within the page
+*/
+   update->hdr.buffer_len = buffer_len;
+   update->handle = kvm_sev_handle();
+   update->length = len;
+   update->address = __sev_page_pa(inpages[0]) + offset;
+   clflush_cache_range(page_address(inpages[0]), PAGE_SIZE);
+   ret = psp_guest_launch_update(update, 5, psp_ret);
+   if (ret) {
+   printk(KERN_ERR "SEV: LAUNCH_UPDATE addr %#llx len %d "
+   "ret=%d (%#010x)\n", update->address,
+   update->length, ret, *psp_ret);
+   goto err_3;
+   }
+
+   /* update remaining pages */
+   for (i = 1; i < nr_pages; i++) {
+
+   len = min_t(size_t, PAGE_SIZE, ulen);
+   ulen -= len;
+   update->length = len;
+   update->address = __sev_page_pa(inpages[i]);
+   clflush_cache_range(page_address(inpages[i]), PAGE_SIZE);
+
+   ret = psp_guest_launch_update(update, 5, psp_ret);
+   if (ret) {
+   printk(KERN_ERR "SEV: LAUNCH_UPDATE addr %#llx len %d "
+   "ret=%d (%#010x)\n", update->address,
+   update->length, ret, *psp_ret);
+   goto err_3;
+   }
+   }
+
+err_3:
+   /* the success path also falls through here: mark pages dirty and
+* unpin them before returning
+*/
+   for (i = 0; i < nr_pages; i++) {
+   set_page_dirty_lock(inpages[i]);
+   mark_page_accessed(inpages[i]);
+   }
+   release_pages(inpages, nr_pages, 0);
+err_2:
+   kfree(inpages);
+err_1:
+   kfree(update);
+   return ret;
+}
+ 
 

[RFC PATCH v1 28/28] KVM: SVM: add command to query SEV API version

2016-08-22 Thread Brijesh Singh
Signed-off-by: Brijesh Singh 
---
 arch/x86/kvm/svm.c |   23 +++
 1 file changed, 23 insertions(+)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 4af195d..88b8f89 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5779,6 +5779,25 @@ err_1:
return ret;
 }
 
+/*
+ * Query the SEV firmware API version via PLATFORM_STATUS.
+ *
+ * Returns (api_major << 8) | api_minor on success, or a negative
+ * error code on failure; the raw PSP status is reported through
+ * @psp_ret.
+ */
+static int sev_api_version(int *psp_ret)
+{
+   struct psp_data_status *status;
+   int ret;
+
+   status = kzalloc(sizeof(*status), GFP_KERNEL);
+   if (!status)
+   return -ENOMEM;
+
+   ret = psp_platform_status(status, psp_ret);
+   if (!ret)
+   ret = (status->api_major << 8) | status->api_minor;
+
+   kfree(status);
+   return ret;
+}
+
 static int amd_sev_issue_cmd(struct kvm *kvm,
 struct kvm_sev_issue_cmd __user *user_data)
 {
@@ -5819,6 +5838,10 @@ static int amd_sev_issue_cmd(struct kvm *kvm,
_code);
break;
}
+   case KVM_SEV_API_VERSION: {
+   r = sev_api_version(_code);
+   break;
+   }
default:
break;
}

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH v1 27/28] KVM: SVM: add KVM_SEV_DEBUG_ENCRYPT command

2016-08-22 Thread Brijesh Singh
The command encrypts a region of guest memory for debugging purposes.

For more information see [1], section 7.2

[1] http://support.amd.com/TechDocs/55766_SEV-KM%20API_Spec.pdf

Signed-off-by: Brijesh Singh 
---
 arch/x86/kvm/svm.c |  100 
 1 file changed, 100 insertions(+)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b383bc7..4af195d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5684,6 +5684,101 @@ err_1:
return ret;
 }
 
+static int sev_dbg_encrypt(struct kvm *kvm,
+  struct kvm_sev_dbg_encrypt __user *argp,
+  int *psp_ret)
+{
+   void *data;
+   int len, ret, d_off;
+   struct page **inpages;
+   struct psp_data_dbg *encrypt;
+   struct kvm_sev_dbg_encrypt debug;
+   unsigned long src_addr, dst_addr;
+
+   if (!kvm_sev_guest())
+   return -ENOTTY;
+
+   if (copy_from_user(&debug, argp, sizeof(*argp)))
+   return -EFAULT;
+
+   if (debug.length > PAGE_SIZE)
+   return -EINVAL;
+
+   len = debug.length;
+   src_addr = debug.src_addr;
+   dst_addr = debug.dst_addr;
+
+   inpages = kzalloc(1 * sizeof(struct page *), GFP_KERNEL);
+   if (!inpages)
+   return -ENOMEM;
+
+   /* pin the guest destination virtual address */
+   down_read(&kvm->mm->mmap_sem);
+   ret = get_user_pages(dst_addr, 1, 1, 0, inpages, NULL);
+   up_read(&kvm->mm->mmap_sem);
+   if (ret < 0)
+   goto err_1;
+
+   encrypt = kzalloc(sizeof(*encrypt), GFP_KERNEL);
+   if (!encrypt)
+   goto err_2;
+
+   data = (void *) get_zeroed_page(GFP_KERNEL);
+   if (!data)
+   goto err_3;
+
+   encrypt->hdr.buffer_len = sizeof(*encrypt);
+   encrypt->handle = kvm_sev_handle();
+
+   if ((len & 15) || (dst_addr & 15)) {
+   /* if destination address and length are not 16-byte
+* aligned then:
+* a) decrypt destination page into temporary buffer
+* b) copy source data into temporary buffer at correct offset
+* c) encrypt temporary buffer
+*/
+   ret = __sev_dbg_decrypt_page(kvm, dst_addr, data, psp_ret);
+   if (ret)
+   goto err_4;
+
+   d_off = dst_addr & (PAGE_SIZE - 1);
+   ret = -EFAULT;
+   if (copy_from_user(data + d_off,
+   (uint8_t *)debug.src_addr, len))
+   goto err_4;
+
+   encrypt->length = PAGE_SIZE;
+   encrypt->src_addr = __pa(data) | sme_me_mask;
+   encrypt->dst_addr =  __sev_page_pa(inpages[0]);
+   } else {
+   if (copy_from_user(data, (uint8_t *)debug.src_addr, len))
+   goto err_4;
+
+   d_off = dst_addr & (PAGE_SIZE - 1);
+   encrypt->length = len;
+   encrypt->src_addr = __pa(data) | sme_me_mask;
+   encrypt->dst_addr = __sev_page_pa(inpages[0]);
+   encrypt->dst_addr += d_off;
+   }
+
+   ret = psp_dbg_encrypt(encrypt, psp_ret);
+   if (ret)
+   printk(KERN_ERR "SEV: DEBUG_ENCRYPT: [%#lx=>%#lx+%#x] "
+   "%d (%#010x)\n",src_addr, dst_addr, len,
+   ret, *psp_ret);
+
+err_4:
+   free_page((unsigned long)data);
+err_3:
+   kfree(encrypt);
+err_2:
+   release_pages(inpages, 1, 0);
+err_1:
+   kfree(inpages);
+
+   return ret;
+}
+
 static int amd_sev_issue_cmd(struct kvm *kvm,
 struct kvm_sev_issue_cmd __user *user_data)
 {
@@ -5719,6 +5814,11 @@ static int amd_sev_issue_cmd(struct kvm *kvm,
&arg.ret_code);
break;
}
+   case KVM_SEV_DBG_ENCRYPT: {
+   r = sev_dbg_encrypt(kvm, (void *)arg.opaque,
+   &arg.ret_code);
+   break;
+   }
default:
break;
}

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH v1 25/28] KVM: SVM: add KVM_SEV_GUEST_STATUS command

2016-08-22 Thread Brijesh Singh
The command is used to query the SEV guest status.

For more information see [1], section 6.10

[1] http://support.amd.com/TechDocs/55766_SEV-KM%20API_Spec.pdf

Signed-off-by: Brijesh Singh 
---
 arch/x86/kvm/svm.c |   41 +
 1 file changed, 41 insertions(+)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 60cc0f7..63e7d15 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5570,6 +5570,42 @@ err_1:
return ret;
 }
 
+static int sev_guest_status(struct kvm *kvm,
+   struct kvm_sev_guest_status __user *argp,
+   int *psp_ret)
+{
+   int ret;
+   struct kvm_sev_guest_status params;
+   struct psp_data_guest_status *status;
+
+   if (!kvm_sev_guest())
+   return -ENOTTY;
+
+   if (copy_from_user(&params, argp, sizeof(*argp)))
+   return -EFAULT;
+
+   status = kzalloc(sizeof(*status), GFP_KERNEL);
+   if (!status)
+   return -ENOMEM;
+
+   status->hdr.buffer_len = sizeof(*status);
+   status->handle = kvm_sev_handle();
+   ret = psp_guest_status(status, psp_ret);
+   if (ret) {
+   printk(KERN_ERR "SEV: GUEST_STATUS ret=%d (%#010x)\n",
+   ret, *psp_ret);
+   goto err_1;
+   }
+   params.policy = status->policy;
+   params.state = status->state;
+
+   if (copy_to_user(argp, &params, sizeof(*argp)))
+   ret = -EFAULT;
+err_1:
+   kfree(status);
+   return ret;
+}
+
 static int amd_sev_issue_cmd(struct kvm *kvm,
 struct kvm_sev_issue_cmd __user *user_data)
 {
@@ -5595,6 +5631,11 @@ static int amd_sev_issue_cmd(struct kvm *kvm,
&arg.ret_code);
break;
}
+   case KVM_SEV_GUEST_STATUS: {
+   r = sev_guest_status(kvm, (void *)arg.opaque,
+   &arg.ret_code);
+   break;
+   }
default:
break;
}

--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH v1 21/28] KVM: introduce KVM_SEV_ISSUE_CMD ioctl

2016-08-22 Thread Brijesh Singh
The ioctl will be used by qemu to issue the Secure Encrypted
Virtualization (SEV) guest commands to transition a guest into
into SEV-enabled mode.

a typical usage:

struct kvm_sev_launch_start start;
struct kvm_sev_issue_cmd data;

data.cmd = KVM_SEV_LAUNCH_START;
data.opaque = &start;

ret = ioctl(fd, KVM_SEV_ISSUE_CMD, &data);

On SEV command failure, data.ret_code will contain the firmware error code.

Signed-off-by: Brijesh Singh 
---
 arch/x86/include/asm/kvm_host.h |3 +
 arch/x86/kvm/x86.c  |   13 
 include/uapi/linux/kvm.h|  125 +++
 3 files changed, 141 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9b885fc..a94e37d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1040,6 +1040,9 @@ struct kvm_x86_ops {
void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
 
void (*setup_mce)(struct kvm_vcpu *vcpu);
+
+   int (*sev_issue_cmd)(struct kvm *kvm,
+struct kvm_sev_issue_cmd __user *argp);
 };
 
 struct kvm_arch_async_pf {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d6f2f4b..0c0adad 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3820,6 +3820,15 @@ split_irqchip_unlock:
return r;
 }
 
+static int kvm_vm_ioctl_sev_issue_cmd(struct kvm *kvm,
+ struct kvm_sev_issue_cmd __user *argp)
+{
+   if (kvm_x86_ops->sev_issue_cmd)
+   return kvm_x86_ops->sev_issue_cmd(kvm, argp);
+
+   return -ENOTTY;
+}
+
 long kvm_arch_vm_ioctl(struct file *filp,
   unsigned int ioctl, unsigned long arg)
 {
@@ -4085,6 +4094,10 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_vm_ioctl_enable_cap(kvm, &cap);
break;
}
+   case KVM_SEV_ISSUE_CMD: {
+   r = kvm_vm_ioctl_sev_issue_cmd(kvm, argp);
+   break;
+   }
default:
r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
}
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 300ef25..72c18c3 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1274,6 +1274,131 @@ struct kvm_s390_ucas_mapping {
 /* Available with KVM_CAP_X86_SMM */
 #define KVM_SMI   _IO(KVMIO,   0xb7)
 
+/* Secure Encrypted Virtualization mode */
+enum sev_cmd {
+   KVM_SEV_LAUNCH_START = 0,
+   KVM_SEV_LAUNCH_UPDATE,
+   KVM_SEV_LAUNCH_FINISH,
+   KVM_SEV_GUEST_STATUS,
+   KVM_SEV_DBG_DECRYPT,
+   KVM_SEV_DBG_ENCRYPT,
+   KVM_SEV_RECEIVE_START,
+   KVM_SEV_RECEIVE_UPDATE,
+   KVM_SEV_RECEIVE_FINISH,
+   KVM_SEV_SEND_START,
+   KVM_SEV_SEND_UPDATE,
+   KVM_SEV_SEND_FINISH,
+   KVM_SEV_API_VERSION,
+   KVM_SEV_NR_MAX,
+};
+
+struct kvm_sev_issue_cmd {
+   __u32 cmd;
+   __u64 opaque;
+   __u32 ret_code;
+};
+
+struct kvm_sev_launch_start {
+   __u32 handle;
+   __u32 flags;
+   __u32 policy;
+   __u8 nonce[16];
+   __u8 dh_pub_qx[32];
+   __u8 dh_pub_qy[32];
+};
+
+struct kvm_sev_launch_update {
+   __u64   address;
+   __u32   length;
+};
+
+struct kvm_sev_launch_finish {
+   __u32 vcpu_count;
+   __u32 vcpu_length;
+   __u64 vcpu_mask_addr;
+   __u32 vcpu_mask_length;
+   __u8  measurement[32];
+};
+
+struct kvm_sev_guest_status {
+   __u32 policy;
+   __u32 state;
+};
+
+struct kvm_sev_dbg_decrypt {
+   __u64 src_addr;
+   __u64 dst_addr;
+   __u32 length;
+};
+
+struct kvm_sev_dbg_encrypt {
+   __u64 src_addr;
+   __u64 dst_addr;
+   __u32 length;
+};
+
+struct kvm_sev_receive_start {
+   __u32 handle;
+   __u32 flags;
+   __u32 policy;
+   __u8 policy_meas[32];
+   __u8 wrapped_tek[24];
+   __u8 wrapped_tik[24];
+   __u8 ten[16];
+   __u8 dh_pub_qx[32];
+   __u8 dh_pub_qy[32];
+   __u8 nonce[16];
+};
+
+struct kvm_sev_receive_update {
+   __u8 iv[16];
+   __u64 address;
+   __u32 length;
+};
+
+struct kvm_sev_receive_finish {
+   __u8 measurement[32];
+};
+
+struct kvm_sev_send_start {
+   __u8 nonce[16];
+   __u32 policy;
+   __u8 policy_meas[32];
+   __u8 wrapped_tek[24];
+   __u8 wrapped_tik[24];
+   __u8 ten[16];
+   __u8 iv[16];
+   __u32 flags;
+   __u8 api_major;
+   __u8 api_minor;
+   __u32 serial;
+   __u8 dh_pub_qx[32];
+   __u8 dh_pub_qy[32];
+   __u8 pek_sig_r[32];
+   __u8 pek_sig_s[32];
+   __u8 cek_sig_r[32];
+   __u8 cek_sig_s[32];
+   __u8 cek_pub_qx[32];
+   __u8 cek_pub_qy[32];
+   __u8 ask_sig_r[32];
+   __u8 ask_sig_s[32];
+   __u32 ncerts;
+   __u32 cert_length;
+   __u64 certs_addr;
+};
+
+struct kvm_sev_send_update {
+   __u32 length;
+   __u64 src_addr;
+   __u64 dst_addr;
+};
+
+struct kvm_sev_send_finish {
+   

Re: [PATCH 1/2] PCI/IOV: Add function to allow Function Dependency Link override.

2016-08-22 Thread Bjorn Helgaas
On Mon, Aug 22, 2016 at 07:49:09AM -0700, David Daney wrote:
> On 08/22/2016 07:36 AM, Bjorn Helgaas wrote:
> >Hi David & Omer,
> >
> >On Fri, Aug 19, 2016 at 03:32:12PM -0700, Omer Khaliq wrote:
> >>From: David Daney 
> >>
> >>Some hardware presents an incorrect SR-IOV Function Dependency Link,
> >>add a function to allow this to be overridden in the PF driver for
> >>such devices.
> >>
> >>Signed-off-by: David Daney 
> >>Signed-off-by: Omer Khaliq 
> >>---
> >>  drivers/pci/iov.c   | 14 ++
> >>  include/linux/pci.h |  1 +
> >>  2 files changed, 15 insertions(+)
> >>
> >>diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
> >>index 2194b44..81f0672 100644
> >>--- a/drivers/pci/iov.c
> >>+++ b/drivers/pci/iov.c
> >>@@ -640,6 +640,20 @@ int pci_enable_sriov(struct pci_dev *dev, int 
> >>nr_virtfn)
> >>  EXPORT_SYMBOL_GPL(pci_enable_sriov);
> >>
> >>  /**
> >>+ * pci_sriov_fdl_override - fix incorrect Function Dependency Link
> >>+ * @dev: the PCI device
> >>+ * @fdl: the corrected Function Dependency Link value
> >>+ *
> >>+ * For hardware presenting an incorrect Function Dependency Link in
> >>+ * the SR-IOV Extended Capability, allow a driver to override it.
> >>+ */
> >>+void pci_sriov_fdl_override(struct pci_dev *dev, u8 fdl)
> >>+{
> >>+   dev->sriov->link = fdl;
> >>+}
> >>+EXPORT_SYMBOL_GPL(pci_sriov_fdl_override);
> >
> >We usually use quirks to work around problems in config space.  That's
> >a nice mechanism because we don't have to add new PCI core interfaces
> >and it makes it clear that we're working around a hardware problem.
> >
> >Can you use a quirk here?  We allocate dev->sriov in the
> >pci_init_capabilities() path, so it looks like a pci_fixup_final quirk
> >should work.
> >
> 
> The struct pci_sriov definition is private to drivers/pci, so in
> order to use a quirk to fix this, we would have to put it in
> drivers/pci/quirks.c.  I was trying to keep this very device
> specific code in the driver, which requires an accessor to be able
> to manipulate the dev->sriov->link field.
> 
> If you prefer a quirk in drivers/pci/quirks.c, we can certainly do
> that instead.

Oh, I didn't notice that pci_sriov was declared in drivers/pci/pci.h
instead of linux/pci.h.  I do think I would prefer a quirk, and I
think it's fine to put it in drivers/pci/quirks.c.

Bjorn
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH -next] chcr: Fix non static symbol warning

2016-08-22 Thread Wei Yongjun
From: Wei Yongjun 

Fixes the following sparse warning:

drivers/crypto/chelsio/chcr_algo.c:593:5: warning:
 symbol 'cxgb4_is_crypto_q_full' was not declared. Should it be static?

Signed-off-by: Wei Yongjun 
---
 drivers/crypto/chelsio/chcr_algo.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c 
b/drivers/crypto/chelsio/chcr_algo.c
index ad8e353..e4ddb92 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -590,7 +590,7 @@ badkey_err:
return -EINVAL;
 }
 
-int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
+static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
 {
int ret = 0;
struct sge_ofld_txq *q;



--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 1/2] PCI/IOV: Add function to allow Function Dependency Link override.

2016-08-22 Thread David Daney

On 08/22/2016 07:36 AM, Bjorn Helgaas wrote:

Hi David & Omer,

On Fri, Aug 19, 2016 at 03:32:12PM -0700, Omer Khaliq wrote:

From: David Daney 

Some hardware presents an incorrect SR-IOV Function Dependency Link,
add a function to allow this to be overridden in the PF driver for
such devices.

Signed-off-by: David Daney 
Signed-off-by: Omer Khaliq 
---
  drivers/pci/iov.c   | 14 ++
  include/linux/pci.h |  1 +
  2 files changed, 15 insertions(+)

diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 2194b44..81f0672 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -640,6 +640,20 @@ int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
  EXPORT_SYMBOL_GPL(pci_enable_sriov);

  /**
+ * pci_sriov_fdl_override - fix incorrect Function Dependency Link
+ * @dev: the PCI device
+ * @fdl: the corrected Function Dependency Link value
+ *
+ * For hardware presenting an incorrect Function Dependency Link in
+ * the SR-IOV Extended Capability, allow a driver to override it.
+ */
+void pci_sriov_fdl_override(struct pci_dev *dev, u8 fdl)
+{
+   dev->sriov->link = fdl;
+}
+EXPORT_SYMBOL_GPL(pci_sriov_fdl_override);


We usually use quirks to work around problems in config space.  That's
a nice mechanism because we don't have to add new PCI core interfaces
and it makes it clear that we're working around a hardware problem.

Can you use a quirk here?  We allocate dev->sriov in the
pci_init_capabilities() path, so it looks like a pci_fixup_final quirk
should work.



The struct pci_sriov definition is private to drivers/pci, so in order 
to use a quirk to fix this, we would have to put it in 
drivers/pci/quirks.c.  I was trying to keep this very device specific 
code in the driver, which requires an accessor to be able to manipulate 
the dev->sriov->link field.


If you prefer a quirk in drivers/pci/quirks.c, we can certainly do that 
instead.


Thanks for taking the time to look at this,
David Daney





+
+/**
   * pci_disable_sriov - disable the SR-IOV capability
   * @dev: the PCI device
   */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 2599a98..da8a5b3 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1823,6 +1823,7 @@ int pci_num_vf(struct pci_dev *dev);
  int pci_vfs_assigned(struct pci_dev *dev);
  int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
  int pci_sriov_get_totalvfs(struct pci_dev *dev);
+void pci_sriov_fdl_override(struct pci_dev *dev, u8 fdl);
  resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
  #else
  static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
--
1.9.1

--
To unsubscribe from this list: send the line "unsubscribe linux-pci" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 1/2] PCI/IOV: Add function to allow Function Dependency Link override.

2016-08-22 Thread Bjorn Helgaas
Hi David & Omer,

On Fri, Aug 19, 2016 at 03:32:12PM -0700, Omer Khaliq wrote:
> From: David Daney 
> 
> Some hardware presents an incorrect SR-IOV Function Dependency Link,
> add a function to allow this to be overridden in the PF driver for
> such devices.
> 
> Signed-off-by: David Daney 
> Signed-off-by: Omer Khaliq 
> ---
>  drivers/pci/iov.c   | 14 ++
>  include/linux/pci.h |  1 +
>  2 files changed, 15 insertions(+)
> 
> diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
> index 2194b44..81f0672 100644
> --- a/drivers/pci/iov.c
> +++ b/drivers/pci/iov.c
> @@ -640,6 +640,20 @@ int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
>  EXPORT_SYMBOL_GPL(pci_enable_sriov);
>  
>  /**
> + * pci_sriov_fdl_override - fix incorrect Function Dependency Link
> + * @dev: the PCI device
> + * @fdl: the corrected Function Dependency Link value
> + *
> + * For hardware presenting an incorrect Function Dependency Link in
> + * the SR-IOV Extended Capability, allow a driver to override it.
> + */
> +void pci_sriov_fdl_override(struct pci_dev *dev, u8 fdl)
> +{
> + dev->sriov->link = fdl;
> +}
> +EXPORT_SYMBOL_GPL(pci_sriov_fdl_override);

We usually use quirks to work around problems in config space.  That's
a nice mechanism because we don't have to add new PCI core interfaces
and it makes it clear that we're working around a hardware problem.

Can you use a quirk here?  We allocate dev->sriov in the
pci_init_capabilities() path, so it looks like a pci_fixup_final quirk
should work.

> +
> +/**
>   * pci_disable_sriov - disable the SR-IOV capability
>   * @dev: the PCI device
>   */
> diff --git a/include/linux/pci.h b/include/linux/pci.h
> index 2599a98..da8a5b3 100644
> --- a/include/linux/pci.h
> +++ b/include/linux/pci.h
> @@ -1823,6 +1823,7 @@ int pci_num_vf(struct pci_dev *dev);
>  int pci_vfs_assigned(struct pci_dev *dev);
>  int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
>  int pci_sriov_get_totalvfs(struct pci_dev *dev);
> +void pci_sriov_fdl_override(struct pci_dev *dev, u8 fdl);
>  resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
>  #else
>  static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
> -- 
> 1.9.1
> 
> --
> To unsubscribe from this list: send the line "unsubscribe linux-pci" in
> the body of a message to majord...@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
--
To unsubscribe from this list: send the line "unsubscribe linux-crypto" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html