Re: [PATCH v2 7/7] arm64: KVM: Implement 4 levels of translation tables for HYP and stage2

2014-04-17 Thread Jungseok Lee
On Thursday, April 17, 2014 9:13 PM, Marc Zyngier wrote:
> On Wed, Apr 16 2014 at  5:33:31 am BST, Jungseok Lee  
> wrote:
> > This patch adds 4 levels of translation tables implementation for both
> > HYP and stage2. A combination of 4KB + 4 levels host and 4KB + 4
> > levels guest can run on ARMv8 architecture as introducing this feature.
> 
> Just to be sure: have you tested it with asymmetric configurations (4kB host, 
> 64kB guest, and the
> opposite configuration)?

Dear Marc

Yes, I've tested all asymmetric configurations using 4K+3Level, 4K+4Level
and 64K+2Level. I will add all test configurations in the commit message
from the next version.

> > Signed-off-by: Jungseok Lee 
> > Reviewed-by: Sungjinn Chung 
> > ---
> >  arch/arm/include/asm/kvm_mmu.h   |   10 +
> >  arch/arm/kvm/mmu.c   |   88 
> > +-
> >  arch/arm64/include/asm/kvm_arm.h |   20 +
> >  arch/arm64/include/asm/kvm_mmu.h |   10 +
> >  4 files changed, 117 insertions(+), 11 deletions(-)
> >
> > diff --git a/arch/arm/include/asm/kvm_mmu.h
> > b/arch/arm/include/asm/kvm_mmu.h index 5c7aa3c..6f7906e 100644
> > --- a/arch/arm/include/asm/kvm_mmu.h
> > +++ b/arch/arm/include/asm/kvm_mmu.h
> > @@ -37,6 +37,11 @@
> >   */
> >  #define TRAMPOLINE_VA  UL(CONFIG_VECTORS_BASE)
> >
> > +/*
> > + * NUM_OBJS depends on the number of page table translation levels
> > +*/
> > +#define NUM_OBJS   2
> 
> I'm afraid this is way too generic. Use something along the lines of 
> MMU_CACHE_MIN_PAGES, that makes
> it obvious what we're talking about.

Okay, I will change it.

> > +
> >  #ifndef __ASSEMBLY__
> >
> >  #include 
> > @@ -94,6 +99,11 @@ static inline void kvm_clean_pgd(pgd_t *pgd)
> > clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));  }
> >
> > +static inline void kvm_clean_pmd(pmd_t *pmd) {
> > +   clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t)); }
> > +
> >  static inline void kvm_clean_pmd_entry(pmd_t *pmd)  {
> > clean_pmd_entry(pmd);
> > diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index
> > 80bb1e6..7fc9e55 100644
> > --- a/arch/arm/kvm/mmu.c
> > +++ b/arch/arm/kvm/mmu.c
> > @@ -388,13 +388,44 @@ static int create_hyp_pmd_mappings(pud_t *pud, 
> > unsigned long start,
> > return 0;
> >  }
> >
> > +static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
> > +  unsigned long end, unsigned long pfn,
> > +  pgprot_t prot)
> > +{
> > +   pud_t *pud;
> > +   pmd_t *pmd;
> > +   unsigned long addr, next;
> > +
> > +   addr = start;
> > +   do {
> > +   pud = pud_offset(pgd, addr);
> > +
> > +   if (pud_none_or_clear_bad(pud)) {
> > +   pmd = pmd_alloc_one(NULL, addr);
> > +   if (!pmd) {
> > +   kvm_err("Cannot allocate Hyp pmd\n");
> > +   return -ENOMEM;
> > +   }
> > +   pud_populate(NULL, pud, pmd);
> > +   get_page(virt_to_page(pud));
> > +   kvm_flush_dcache_to_poc(pud, sizeof(*pud));
> > +   }
> > +
> > +   next = pud_addr_end(addr, end);
> > +
> > +   create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
> > +   pfn += (next - addr) >> PAGE_SHIFT;
> > +   } while (addr = next, addr != end);
> > +
> > +   return 0;
> > +}
> > +
> >  static int __create_hyp_mappings(pgd_t *pgdp,
> >  unsigned long start, unsigned long end,
> >  unsigned long pfn, pgprot_t prot)  {
> > pgd_t *pgd;
> > pud_t *pud;
> > -   pmd_t *pmd;
> > unsigned long addr, next;
> > int err = 0;
> >
> > @@ -403,22 +434,23 @@ static int __create_hyp_mappings(pgd_t *pgdp,
> > end = PAGE_ALIGN(end);
> > do {
> > pgd = pgdp + pgd_index(addr);
> > -   pud = pud_offset(pgd, addr);
> >
> > -   if (pud_none_or_clear_bad(pud)) {
> > -   pmd = pmd_alloc_one(NULL, addr);
> > -   if (!pmd) {
> > -   kvm_err("Cannot allocate Hyp pmd\n");
> > +   if (pgd_none(*pgd)) {
> > +   pud = pud_alloc_one(NULL, addr);
> > +   if (!pud) {
> > +   kvm_err("Cannot allocate Hyp pud\n");
> > err = -ENOMEM;
> > goto out;
> > }
> > -   pud_populate(NULL, pud, pmd);
> > -   get_page(virt_to_page(pud));
> > -   kvm_flush_dcache_to_poc(pud, sizeof(*pud));
> > +   pgd_populate(NULL, pgd, pud);
> > +   get_page(virt_to_page(pgd));
> > +   kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
> > }
> >
> > next = pgd_addr_end(addr, end);
> > -   err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
> > +
> > +   err = 

Re: [PATCH v2 7/7] arm64: KVM: Implement 4 levels of translation tables for HYP and stage2

2014-04-17 Thread Marc Zyngier
On Wed, Apr 16 2014 at  5:33:31 am BST, Jungseok Lee  
wrote:
> This patch adds 4 levels of translation tables implementation for both
> HYP and stage2. A combination of 4KB + 4 levels host and 4KB + 4 levels
> guest can run on ARMv8 architecture as introducing this feature.

Just to be sure: have you tested it with asymmetric configurations (4kB
host, 64kB guest, and the opposite configuration)?

> Signed-off-by: Jungseok Lee 
> Reviewed-by: Sungjinn Chung 
> ---
>  arch/arm/include/asm/kvm_mmu.h   |   10 +
>  arch/arm/kvm/mmu.c   |   88 
> +-
>  arch/arm64/include/asm/kvm_arm.h |   20 +
>  arch/arm64/include/asm/kvm_mmu.h |   10 +
>  4 files changed, 117 insertions(+), 11 deletions(-)
>
> diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
> index 5c7aa3c..6f7906e 100644
> --- a/arch/arm/include/asm/kvm_mmu.h
> +++ b/arch/arm/include/asm/kvm_mmu.h
> @@ -37,6 +37,11 @@
>   */
>  #define TRAMPOLINE_VAUL(CONFIG_VECTORS_BASE)
>  
> +/*
> + * NUM_OBJS depends on the number of page table translation levels
> + */
> +#define NUM_OBJS 2

I'm afraid this is way too generic. Use something along the lines of
MMU_CACHE_MIN_PAGES, that makes it obvious what we're talking about.

> +
>  #ifndef __ASSEMBLY__
>  
>  #include 
> @@ -94,6 +99,11 @@ static inline void kvm_clean_pgd(pgd_t *pgd)
>   clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
>  }
>  
> +static inline void kvm_clean_pmd(pmd_t *pmd)
> +{
> + clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
> +}
> +
>  static inline void kvm_clean_pmd_entry(pmd_t *pmd)
>  {
>   clean_pmd_entry(pmd);
> diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
> index 80bb1e6..7fc9e55 100644
> --- a/arch/arm/kvm/mmu.c
> +++ b/arch/arm/kvm/mmu.c
> @@ -388,13 +388,44 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned 
> long start,
>   return 0;
>  }
>  
> +static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
> +unsigned long end, unsigned long pfn,
> +pgprot_t prot)
> +{
> + pud_t *pud;
> + pmd_t *pmd;
> + unsigned long addr, next;
> +
> + addr = start;
> + do {
> + pud = pud_offset(pgd, addr);
> +
> + if (pud_none_or_clear_bad(pud)) {
> + pmd = pmd_alloc_one(NULL, addr);
> + if (!pmd) {
> + kvm_err("Cannot allocate Hyp pmd\n");
> + return -ENOMEM;
> + }
> + pud_populate(NULL, pud, pmd);
> + get_page(virt_to_page(pud));
> + kvm_flush_dcache_to_poc(pud, sizeof(*pud));
> + }
> +
> + next = pud_addr_end(addr, end);
> +
> + create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
> + pfn += (next - addr) >> PAGE_SHIFT;
> + } while (addr = next, addr != end);
> +
> + return 0;
> +}
> +
>  static int __create_hyp_mappings(pgd_t *pgdp,
>unsigned long start, unsigned long end,
>unsigned long pfn, pgprot_t prot)
>  {
>   pgd_t *pgd;
>   pud_t *pud;
> - pmd_t *pmd;
>   unsigned long addr, next;
>   int err = 0;
>  
> @@ -403,22 +434,23 @@ static int __create_hyp_mappings(pgd_t *pgdp,
>   end = PAGE_ALIGN(end);
>   do {
>   pgd = pgdp + pgd_index(addr);
> - pud = pud_offset(pgd, addr);
>  
> - if (pud_none_or_clear_bad(pud)) {
> - pmd = pmd_alloc_one(NULL, addr);
> - if (!pmd) {
> - kvm_err("Cannot allocate Hyp pmd\n");
> + if (pgd_none(*pgd)) {
> + pud = pud_alloc_one(NULL, addr);
> + if (!pud) {
> + kvm_err("Cannot allocate Hyp pud\n");
>   err = -ENOMEM;
>   goto out;
>   }
> - pud_populate(NULL, pud, pmd);
> - get_page(virt_to_page(pud));
> - kvm_flush_dcache_to_poc(pud, sizeof(*pud));
> + pgd_populate(NULL, pgd, pud);
> + get_page(virt_to_page(pgd));
> + kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
>   }
>  
>   next = pgd_addr_end(addr, end);
> - err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
> +
> + err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
> +
>   if (err)
>   goto out;
>   pfn += (next - addr) >> PAGE_SHIFT;
> @@ -563,6 +595,24 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
>   kvm->arch.pgd = NULL;
>  }
>  
> +static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache 
> *cache,
> +

Re: [PATCH v2 7/7] arm64: KVM: Implement 4 levels of translation tables for HYP and stage2

2014-04-17 Thread Marc Zyngier
On Wed, Apr 16 2014 at  5:33:31 am BST, Jungseok Lee jays@samsung.com 
wrote:
 This patch adds 4 levels of translation tables implementation for both
 HYP and stage2. A combination of 4KB + 4 levels host and 4KB + 4 levels
 guest can run on ARMv8 architecture as introducing this feature.

Just to be sure: have you tested it with asymmetric configurations (4kB
host, 64kB guest, and the opposite configuration)?

 Signed-off-by: Jungseok Lee jays@samsung.com
 Reviewed-by: Sungjinn Chung sungjinn.ch...@samsung.com
 ---
  arch/arm/include/asm/kvm_mmu.h   |   10 +
  arch/arm/kvm/mmu.c   |   88 
 +-
  arch/arm64/include/asm/kvm_arm.h |   20 +
  arch/arm64/include/asm/kvm_mmu.h |   10 +
  4 files changed, 117 insertions(+), 11 deletions(-)

 diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
 index 5c7aa3c..6f7906e 100644
 --- a/arch/arm/include/asm/kvm_mmu.h
 +++ b/arch/arm/include/asm/kvm_mmu.h
 @@ -37,6 +37,11 @@
   */
  #define TRAMPOLINE_VAUL(CONFIG_VECTORS_BASE)
  
 +/*
 + * NUM_OBJS depends on the number of page table translation levels
 + */
 +#define NUM_OBJS 2

I'm afraid this is way too generic. Use something along the lines of
MMU_CACHE_MIN_PAGES, that makes it obvious what we're talking about.

 +
  #ifndef __ASSEMBLY__
  
  #include <asm/cacheflush.h>
 @@ -94,6 +99,11 @@ static inline void kvm_clean_pgd(pgd_t *pgd)
   clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
  }
  
 +static inline void kvm_clean_pmd(pmd_t *pmd)
 +{
 + clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
 +}
 +
  static inline void kvm_clean_pmd_entry(pmd_t *pmd)
  {
   clean_pmd_entry(pmd);
 diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
 index 80bb1e6..7fc9e55 100644
 --- a/arch/arm/kvm/mmu.c
 +++ b/arch/arm/kvm/mmu.c
 @@ -388,13 +388,44 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned 
 long start,
   return 0;
  }
  
 +static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
 +unsigned long end, unsigned long pfn,
 +pgprot_t prot)
 +{
 + pud_t *pud;
 + pmd_t *pmd;
 + unsigned long addr, next;
 +
 + addr = start;
 + do {
 + pud = pud_offset(pgd, addr);
 +
 + if (pud_none_or_clear_bad(pud)) {
 + pmd = pmd_alloc_one(NULL, addr);
 + if (!pmd) {
 + kvm_err("Cannot allocate Hyp pmd\n");
 + return -ENOMEM;
 + }
 + pud_populate(NULL, pud, pmd);
 + get_page(virt_to_page(pud));
 + kvm_flush_dcache_to_poc(pud, sizeof(*pud));
 + }
 +
 + next = pud_addr_end(addr, end);
 +
 + create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
 + pfn += (next - addr) >> PAGE_SHIFT;
 + } while (addr = next, addr != end);
 +
 + return 0;
 +}
 +
  static int __create_hyp_mappings(pgd_t *pgdp,
unsigned long start, unsigned long end,
unsigned long pfn, pgprot_t prot)
  {
   pgd_t *pgd;
   pud_t *pud;
 - pmd_t *pmd;
   unsigned long addr, next;
   int err = 0;
  
 @@ -403,22 +434,23 @@ static int __create_hyp_mappings(pgd_t *pgdp,
   end = PAGE_ALIGN(end);
   do {
   pgd = pgdp + pgd_index(addr);
 - pud = pud_offset(pgd, addr);
  
 - if (pud_none_or_clear_bad(pud)) {
 - pmd = pmd_alloc_one(NULL, addr);
 - if (!pmd) {
 - kvm_err("Cannot allocate Hyp pmd\n");
 + if (pgd_none(*pgd)) {
 + pud = pud_alloc_one(NULL, addr);
 + if (!pud) {
 + kvm_err("Cannot allocate Hyp pud\n");
   err = -ENOMEM;
   goto out;
   }
 - pud_populate(NULL, pud, pmd);
 - get_page(virt_to_page(pud));
 - kvm_flush_dcache_to_poc(pud, sizeof(*pud));
 + pgd_populate(NULL, pgd, pud);
 + get_page(virt_to_page(pgd));
 + kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
   }
  
   next = pgd_addr_end(addr, end);
 - err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
 +
 + err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
 +
   if (err)
   goto out;
  pfn += (next - addr) >> PAGE_SHIFT;
 @@ -563,6 +595,24 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
  kvm->arch.pgd = NULL;
  }
  
 +static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache 
 *cache,
 +  phys_addr_t addr)
 +{
 + pgd_t *pgd;
 

Re: [PATCH v2 7/7] arm64: KVM: Implement 4 levels of translation tables for HYP and stage2

2014-04-17 Thread Jungseok Lee
On Thursday, April 17, 2014 9:13 PM, Marc Zyngier wrote:
 On Wed, Apr 16 2014 at  5:33:31 am BST, Jungseok Lee jays@samsung.com 
 wrote:
  This patch adds 4 levels of translation tables implementation for both
  HYP and stage2. A combination of 4KB + 4 levels host and 4KB + 4
  levels guest can run on ARMv8 architecture as introducing this feature.
 
 Just to be sure: have you tested it with asymmetric configurations (4kB host, 
 64kB guest, and the
 opposite configuration)?

Dear Marc

Yes, I've tested all asymmetric configurations using 4K+3Level, 4K+4Level
and 64K+2Level. I will add all test configurations in the commit message
from the next version.

  Signed-off-by: Jungseok Lee jays@samsung.com
  Reviewed-by: Sungjinn Chung sungjinn.ch...@samsung.com
  ---
   arch/arm/include/asm/kvm_mmu.h   |   10 +
   arch/arm/kvm/mmu.c   |   88 
  +-
   arch/arm64/include/asm/kvm_arm.h |   20 +
   arch/arm64/include/asm/kvm_mmu.h |   10 +
   4 files changed, 117 insertions(+), 11 deletions(-)
 
  diff --git a/arch/arm/include/asm/kvm_mmu.h
  b/arch/arm/include/asm/kvm_mmu.h index 5c7aa3c..6f7906e 100644
  --- a/arch/arm/include/asm/kvm_mmu.h
  +++ b/arch/arm/include/asm/kvm_mmu.h
  @@ -37,6 +37,11 @@
*/
   #define TRAMPOLINE_VA  UL(CONFIG_VECTORS_BASE)
 
  +/*
  + * NUM_OBJS depends on the number of page table translation levels
  +*/
  +#define NUM_OBJS   2
 
 I'm afraid this is way too generic. Use something along the lines of 
 MMU_CACHE_MIN_PAGES, that makes
 it obvious what we're talking about.

Okay, I will change it.

  +
   #ifndef __ASSEMBLY__
 
   #include <asm/cacheflush.h>
  @@ -94,6 +99,11 @@ static inline void kvm_clean_pgd(pgd_t *pgd)
  clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));  }
 
  +static inline void kvm_clean_pmd(pmd_t *pmd) {
  +   clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t)); }
  +
   static inline void kvm_clean_pmd_entry(pmd_t *pmd)  {
  clean_pmd_entry(pmd);
  diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index
  80bb1e6..7fc9e55 100644
  --- a/arch/arm/kvm/mmu.c
  +++ b/arch/arm/kvm/mmu.c
  @@ -388,13 +388,44 @@ static int create_hyp_pmd_mappings(pud_t *pud, 
  unsigned long start,
  return 0;
   }
 
  +static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
  +  unsigned long end, unsigned long pfn,
  +  pgprot_t prot)
  +{
  +   pud_t *pud;
  +   pmd_t *pmd;
  +   unsigned long addr, next;
  +
  +   addr = start;
  +   do {
  +   pud = pud_offset(pgd, addr);
  +
  +   if (pud_none_or_clear_bad(pud)) {
  +   pmd = pmd_alloc_one(NULL, addr);
  +   if (!pmd) {
   +   kvm_err("Cannot allocate Hyp pmd\n");
  +   return -ENOMEM;
  +   }
  +   pud_populate(NULL, pud, pmd);
  +   get_page(virt_to_page(pud));
  +   kvm_flush_dcache_to_poc(pud, sizeof(*pud));
  +   }
  +
  +   next = pud_addr_end(addr, end);
  +
  +   create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
   +   pfn += (next - addr) >> PAGE_SHIFT;
  +   } while (addr = next, addr != end);
  +
  +   return 0;
  +}
  +
   static int __create_hyp_mappings(pgd_t *pgdp,
   unsigned long start, unsigned long end,
   unsigned long pfn, pgprot_t prot)  {
  pgd_t *pgd;
  pud_t *pud;
  -   pmd_t *pmd;
  unsigned long addr, next;
  int err = 0;
 
  @@ -403,22 +434,23 @@ static int __create_hyp_mappings(pgd_t *pgdp,
  end = PAGE_ALIGN(end);
  do {
  pgd = pgdp + pgd_index(addr);
  -   pud = pud_offset(pgd, addr);
 
  -   if (pud_none_or_clear_bad(pud)) {
  -   pmd = pmd_alloc_one(NULL, addr);
  -   if (!pmd) {
   -   kvm_err("Cannot allocate Hyp pmd\n");
  +   if (pgd_none(*pgd)) {
  +   pud = pud_alloc_one(NULL, addr);
  +   if (!pud) {
   +   kvm_err("Cannot allocate Hyp pud\n");
  err = -ENOMEM;
  goto out;
  }
  -   pud_populate(NULL, pud, pmd);
  -   get_page(virt_to_page(pud));
  -   kvm_flush_dcache_to_poc(pud, sizeof(*pud));
  +   pgd_populate(NULL, pgd, pud);
  +   get_page(virt_to_page(pgd));
  +   kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
  }
 
  next = pgd_addr_end(addr, end);
  -   err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
  +
  +   err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
  +
  if (err)
  goto out;
   pfn += (next - addr) >> PAGE_SHIFT; @@ -563,6 

[PATCH v2 7/7] arm64: KVM: Implement 4 levels of translation tables for HYP and stage2

2014-04-15 Thread Jungseok Lee
This patch adds 4 levels of translation tables implementation for both
HYP and stage2. A combination of 4KB + 4 levels host and 4KB + 4 levels
guest can run on ARMv8 architecture as introducing this feature.

Signed-off-by: Jungseok Lee 
Reviewed-by: Sungjinn Chung 
---
 arch/arm/include/asm/kvm_mmu.h   |   10 +
 arch/arm/kvm/mmu.c   |   88 +-
 arch/arm64/include/asm/kvm_arm.h |   20 +
 arch/arm64/include/asm/kvm_mmu.h |   10 +
 4 files changed, 117 insertions(+), 11 deletions(-)

diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 5c7aa3c..6f7906e 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -37,6 +37,11 @@
  */
 #define TRAMPOLINE_VA  UL(CONFIG_VECTORS_BASE)
 
+/*
+ * NUM_OBJS depends on the number of page table translation levels
+ */
+#define NUM_OBJS   2
+
 #ifndef __ASSEMBLY__
 
 #include 
@@ -94,6 +99,11 @@ static inline void kvm_clean_pgd(pgd_t *pgd)
clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
 }
 
+static inline void kvm_clean_pmd(pmd_t *pmd)
+{
+   clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
+}
+
 static inline void kvm_clean_pmd_entry(pmd_t *pmd)
 {
clean_pmd_entry(pmd);
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 80bb1e6..7fc9e55 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -388,13 +388,44 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned 
long start,
return 0;
 }
 
+static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
+  unsigned long end, unsigned long pfn,
+  pgprot_t prot)
+{
+   pud_t *pud;
+   pmd_t *pmd;
+   unsigned long addr, next;
+
+   addr = start;
+   do {
+   pud = pud_offset(pgd, addr);
+
+   if (pud_none_or_clear_bad(pud)) {
+   pmd = pmd_alloc_one(NULL, addr);
+   if (!pmd) {
+   kvm_err("Cannot allocate Hyp pmd\n");
+   return -ENOMEM;
+   }
+   pud_populate(NULL, pud, pmd);
+   get_page(virt_to_page(pud));
+   kvm_flush_dcache_to_poc(pud, sizeof(*pud));
+   }
+
+   next = pud_addr_end(addr, end);
+
+   create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
+   pfn += (next - addr) >> PAGE_SHIFT;
+   } while (addr = next, addr != end);
+
+   return 0;
+}
+
 static int __create_hyp_mappings(pgd_t *pgdp,
 unsigned long start, unsigned long end,
 unsigned long pfn, pgprot_t prot)
 {
pgd_t *pgd;
pud_t *pud;
-   pmd_t *pmd;
unsigned long addr, next;
int err = 0;
 
@@ -403,22 +434,23 @@ static int __create_hyp_mappings(pgd_t *pgdp,
end = PAGE_ALIGN(end);
do {
pgd = pgdp + pgd_index(addr);
-   pud = pud_offset(pgd, addr);
 
-   if (pud_none_or_clear_bad(pud)) {
-   pmd = pmd_alloc_one(NULL, addr);
-   if (!pmd) {
-   kvm_err("Cannot allocate Hyp pmd\n");
+   if (pgd_none(*pgd)) {
+   pud = pud_alloc_one(NULL, addr);
+   if (!pud) {
+   kvm_err("Cannot allocate Hyp pud\n");
err = -ENOMEM;
goto out;
}
-   pud_populate(NULL, pud, pmd);
-   get_page(virt_to_page(pud));
-   kvm_flush_dcache_to_poc(pud, sizeof(*pud));
+   pgd_populate(NULL, pgd, pud);
+   get_page(virt_to_page(pgd));
+   kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
}
 
next = pgd_addr_end(addr, end);
-   err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
+
+   err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
+
if (err)
goto out;
pfn += (next - addr) >> PAGE_SHIFT;
@@ -563,6 +595,24 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
kvm->arch.pgd = NULL;
 }
 
+static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache 
*cache,
+phys_addr_t addr)
+{
+   pgd_t *pgd;
+   pud_t *pud;
+
+   pgd = kvm->arch.pgd + pgd_index(addr);
+   if (pgd_none(*pgd)) {
+   if (!cache)
+   return NULL;
+   pud = mmu_memory_cache_alloc(cache);
+   pgd_populate(NULL, pgd, pud);
+   get_page(virt_to_page(pgd));
+   }
+
+   return pud_offset(pgd, addr);
+}
+
 static pmd_t *stage2_get_pmd(struct kvm 

[PATCH v2 7/7] arm64: KVM: Implement 4 levels of translation tables for HYP and stage2

2014-04-15 Thread Jungseok Lee
This patch adds 4 levels of translation tables implementation for both
HYP and stage2. A combination of 4KB + 4 levels host and 4KB + 4 levels
guest can run on ARMv8 architecture as introducing this feature.

Signed-off-by: Jungseok Lee jays@samsung.com
Reviewed-by: Sungjinn Chung sungjinn.ch...@samsung.com
---
 arch/arm/include/asm/kvm_mmu.h   |   10 +
 arch/arm/kvm/mmu.c   |   88 +-
 arch/arm64/include/asm/kvm_arm.h |   20 +
 arch/arm64/include/asm/kvm_mmu.h |   10 +
 4 files changed, 117 insertions(+), 11 deletions(-)

diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 5c7aa3c..6f7906e 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -37,6 +37,11 @@
  */
 #define TRAMPOLINE_VA  UL(CONFIG_VECTORS_BASE)
 
+/*
+ * NUM_OBJS depends on the number of page table translation levels
+ */
+#define NUM_OBJS   2
+
 #ifndef __ASSEMBLY__
 
#include <asm/cacheflush.h>
@@ -94,6 +99,11 @@ static inline void kvm_clean_pgd(pgd_t *pgd)
clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
 }
 
+static inline void kvm_clean_pmd(pmd_t *pmd)
+{
+   clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
+}
+
 static inline void kvm_clean_pmd_entry(pmd_t *pmd)
 {
clean_pmd_entry(pmd);
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 80bb1e6..7fc9e55 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -388,13 +388,44 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned 
long start,
return 0;
 }
 
+static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
+  unsigned long end, unsigned long pfn,
+  pgprot_t prot)
+{
+   pud_t *pud;
+   pmd_t *pmd;
+   unsigned long addr, next;
+
+   addr = start;
+   do {
+   pud = pud_offset(pgd, addr);
+
+   if (pud_none_or_clear_bad(pud)) {
+   pmd = pmd_alloc_one(NULL, addr);
+   if (!pmd) {
+   kvm_err("Cannot allocate Hyp pmd\n");
+   return -ENOMEM;
+   }
+   pud_populate(NULL, pud, pmd);
+   get_page(virt_to_page(pud));
+   kvm_flush_dcache_to_poc(pud, sizeof(*pud));
+   }
+
+   next = pud_addr_end(addr, end);
+
+   create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
+   pfn += (next - addr) >> PAGE_SHIFT;
+   } while (addr = next, addr != end);
+
+   return 0;
+}
+
 static int __create_hyp_mappings(pgd_t *pgdp,
 unsigned long start, unsigned long end,
 unsigned long pfn, pgprot_t prot)
 {
pgd_t *pgd;
pud_t *pud;
-   pmd_t *pmd;
unsigned long addr, next;
int err = 0;
 
@@ -403,22 +434,23 @@ static int __create_hyp_mappings(pgd_t *pgdp,
end = PAGE_ALIGN(end);
do {
pgd = pgdp + pgd_index(addr);
-   pud = pud_offset(pgd, addr);
 
-   if (pud_none_or_clear_bad(pud)) {
-   pmd = pmd_alloc_one(NULL, addr);
-   if (!pmd) {
-   kvm_err("Cannot allocate Hyp pmd\n");
+   if (pgd_none(*pgd)) {
+   pud = pud_alloc_one(NULL, addr);
+   if (!pud) {
+   kvm_err("Cannot allocate Hyp pud\n");
err = -ENOMEM;
goto out;
}
-   pud_populate(NULL, pud, pmd);
-   get_page(virt_to_page(pud));
-   kvm_flush_dcache_to_poc(pud, sizeof(*pud));
+   pgd_populate(NULL, pgd, pud);
+   get_page(virt_to_page(pgd));
+   kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
}
 
next = pgd_addr_end(addr, end);
-   err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
+
+   err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
+
if (err)
goto out;
pfn += (next - addr) >> PAGE_SHIFT;
@@ -563,6 +595,24 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
kvm->arch.pgd = NULL;
 }
 
+static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache 
*cache,
+phys_addr_t addr)
+{
+   pgd_t *pgd;
+   pud_t *pud;
+
+   pgd = kvm->arch.pgd + pgd_index(addr);
+   if (pgd_none(*pgd)) {
+   if (!cache)
+   return NULL;
+   pud = mmu_memory_cache_alloc(cache);
+   pgd_populate(NULL, pgd, pud);
+   get_page(virt_to_page(pgd));
+   }
+
+   return pud_offset(pgd,