Re: Use km_alloc(9) in drm

2022-02-06 Thread Sebastien Marie
On Sat, Feb 05, 2022 at 11:44:03AM +0100, Mark Kettenis wrote:
> We want to get rid of the uvm_km_valloc() interfaces in favour of
> km_alloc().  This changes the calls in drm(4) over.  The kv_physwait
> struct is made static to prevent collision with a symbol in
> vm_machdep.c on some architectures.  The goal is to move this into
> uvm/uvm_km.c eventually.
> 
> Just to make sure I didn't screw the conversion up somehow a few tests
> on a mix of inteldrm(4) and amdgpu(4) systems would be good.

I am running it since Feb 5 on amd64 (amdgpu0: RAVEN2 3 CU rev 0x09)
and see no particular new problem.
 

> Index: dev/pci/drm/drm_linux.c
> ===
> RCS file: /cvs/src/sys/dev/pci/drm/drm_linux.c,v
> retrieving revision 1.89
> diff -u -p -r1.89 drm_linux.c
> --- dev/pci/drm/drm_linux.c   21 Jan 2022 23:49:36 -  1.89
> +++ dev/pci/drm/drm_linux.c   5 Feb 2022 10:40:22 -
> @@ -564,6 +564,11 @@ __pagevec_release(struct pagevec *pvec)
>   pagevec_reinit(pvec);
>  }
>  
> +static struct kmem_va_mode kv_physwait = {
> + .kv_map = &phys_map,
> + .kv_wait = 1,
> +};
> +
>  void *
>  kmap(struct vm_page *pg)
>  {
> @@ -572,7 +577,7 @@ kmap(struct vm_page *pg)
>  #if defined (__HAVE_PMAP_DIRECT)
>   va = pmap_map_direct(pg);
>  #else
> - va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
> + va = (vaddr_t)km_alloc(PAGE_SIZE, &kv_physwait, &kp_none, &kd_waitok);
>   pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
>   pmap_update(pmap_kernel());
>  #endif
> @@ -589,7 +594,7 @@ kunmap_va(void *addr)
>  #else
>   pmap_kremove(va, PAGE_SIZE);
>   pmap_update(pmap_kernel());
> - uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
> + km_free((void *)va, PAGE_SIZE, &kv_physwait, &kp_none);
>  #endif
>  }
>  
> @@ -624,7 +629,8 @@ vmap(struct vm_page **pages, unsigned in
>   paddr_t pa;
>   int i;
>  
> - va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
> + va = (vaddr_t)km_alloc(PAGE_SIZE * npages, &kv_any, &kp_none,
> + &kd_nowait);
>   if (va == 0)
>   return NULL;
>   for (i = 0; i < npages; i++) {
> @@ -645,7 +651,7 @@ vunmap(void *addr, size_t size)
>  
>   pmap_remove(pmap_kernel(), va, va + size);
>   pmap_update(pmap_kernel());
> - uvm_km_free(kernel_map, va, size);
> + km_free((void *)va, size, &kv_any, &kp_none);
>  }
>  
>  bool
> 

-- 
Sebastien Marie



Re: Use km_alloc(9) in drm

2022-02-06 Thread Jonathan Gray
On Sat, Feb 05, 2022 at 02:26:48PM +0100, Mark Kettenis wrote:
> > Date: Sat, 5 Feb 2022 22:34:10 +1100
> > From: Jonathan Gray 
> > 
> > On Sat, Feb 05, 2022 at 11:44:03AM +0100, Mark Kettenis wrote:
> > > We want to get rid of the uvm_km_valloc() interfaces in favour of
> > > km_alloc().  This changes the calls in drm(4) over.  The kv_physwait
> > > struct is made static to prevent collision with a symbol in
> > > vm_machdep.c on some architectures.  The goal is to move this into
> > > uvm/uvm_km.c eventually.
> > > 
> > > Just to make sure I didn't screw the conversion up somehow a few tests
> > > on a mix of inteldrm(4) and amdgpu(4) systems would be good.
> > > 
> > > ok?
> > > 
> > > 
> > > Index: dev/pci/drm/drm_linux.c
> > > ===
> > > RCS file: /cvs/src/sys/dev/pci/drm/drm_linux.c,v
> > > retrieving revision 1.89
> > > diff -u -p -r1.89 drm_linux.c
> > > --- dev/pci/drm/drm_linux.c   21 Jan 2022 23:49:36 -  1.89
> > > +++ dev/pci/drm/drm_linux.c   5 Feb 2022 10:40:22 -
> > > @@ -564,6 +564,11 @@ __pagevec_release(struct pagevec *pvec)
> > >   pagevec_reinit(pvec);
> > >  }
> > >  
> > > +static struct kmem_va_mode kv_physwait = {
> > > + .kv_map = &phys_map,
> > > + .kv_wait = 1,
> > > +};
> > > +
> > >  void *
> > >  kmap(struct vm_page *pg)
> > >  {
> > > @@ -572,7 +577,7 @@ kmap(struct vm_page *pg)
> > >  #if defined (__HAVE_PMAP_DIRECT)
> > >   va = pmap_map_direct(pg);
> > >  #else
> > > - va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
> > > + va = (vaddr_t)km_alloc(PAGE_SIZE, &kv_physwait, &kp_none, &kd_waitok);
> > >   pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
> > >   pmap_update(pmap_kernel());
> > >  #endif
> > > @@ -589,7 +594,7 @@ kunmap_va(void *addr)
> > >  #else
> > >   pmap_kremove(va, PAGE_SIZE);
> > >   pmap_update(pmap_kernel());
> > > - uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
> > > + km_free((void *)va, PAGE_SIZE, &kv_physwait, &kp_none);
> > >  #endif
> > >  }
> > >  
> > > @@ -624,7 +629,8 @@ vmap(struct vm_page **pages, unsigned in
> > >   paddr_t pa;
> > >   int i;
> > >  
> > > - va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
> > > + va = (vaddr_t)km_alloc(PAGE_SIZE * npages, &kv_any, &kp_none,
> > > + &kd_nowait);
> > 
> > this could be kd_waitok as the linux vmap() uses might_sleep()
> > but I suppose that would be a change in behaviour/different diff
> 
> Actually that doesn't make a difference.  Specifying kd_waitok will
> only make it wait for physical pages.  But we're passing kp_none so no
> physical pages are allocated.
> 
> Yes, this is confusing and continues to trip people up.

ah right

tested by starting X and chromium on

amd64
amdgpu0: VEGA10 56 CU rev 0x01
inteldrm0: msi, BROADWELL, gen 8

i386
X and crack-attack (chromium won't start due to illegal instruction)
radeondrm0: RV200
inteldrm0: apic 1 int 16, I85X, gen 2

ok jsg@

> 
> > 
> > >   if (va == 0)
> > >   return NULL;
> > >   for (i = 0; i < npages; i++) {
> > > @@ -645,7 +651,7 @@ vunmap(void *addr, size_t size)
> > >  
> > >   pmap_remove(pmap_kernel(), va, va + size);
> > >   pmap_update(pmap_kernel());
> > > - uvm_km_free(kernel_map, va, size);
> > > + km_free((void *)va, size, &kv_any, &kp_none);
> > >  }
> > >  
> > >  bool
> > > 
> > > 
> > 
> 
> 



Re: Use km_alloc(9) in drm

2022-02-05 Thread Mark Kettenis
> Date: Sat, 5 Feb 2022 22:34:10 +1100
> From: Jonathan Gray 
> 
> On Sat, Feb 05, 2022 at 11:44:03AM +0100, Mark Kettenis wrote:
> > We want to get rid of the uvm_km_valloc() interfaces in favour of
> > km_alloc().  This changes the calls in drm(4) over.  The kv_physwait
> > struct is made static to prevent collision with a symbol in
> > vm_machdep.c on some architectures.  The goal is to move this into
> > uvm/uvm_km.c eventually.
> > 
> > Just to make sure I didn't screw the conversion up somehow a few tests
> > on a mix of inteldrm(4) and amdgpu(4) systems would be good.
> > 
> > ok?
> > 
> > 
> > Index: dev/pci/drm/drm_linux.c
> > ===
> > RCS file: /cvs/src/sys/dev/pci/drm/drm_linux.c,v
> > retrieving revision 1.89
> > diff -u -p -r1.89 drm_linux.c
> > --- dev/pci/drm/drm_linux.c 21 Jan 2022 23:49:36 -  1.89
> > +++ dev/pci/drm/drm_linux.c 5 Feb 2022 10:40:22 -
> > @@ -564,6 +564,11 @@ __pagevec_release(struct pagevec *pvec)
> > pagevec_reinit(pvec);
> >  }
> >  
> > +static struct kmem_va_mode kv_physwait = {
> > +   .kv_map = &phys_map,
> > +   .kv_wait = 1,
> > +};
> > +
> >  void *
> >  kmap(struct vm_page *pg)
> >  {
> > @@ -572,7 +577,7 @@ kmap(struct vm_page *pg)
> >  #if defined (__HAVE_PMAP_DIRECT)
> > va = pmap_map_direct(pg);
> >  #else
> > -   va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
> > +   va = (vaddr_t)km_alloc(PAGE_SIZE, &kv_physwait, &kp_none, &kd_waitok);
> > pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
> > pmap_update(pmap_kernel());
> >  #endif
> > @@ -589,7 +594,7 @@ kunmap_va(void *addr)
> >  #else
> > pmap_kremove(va, PAGE_SIZE);
> > pmap_update(pmap_kernel());
> > -   uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
> > +   km_free((void *)va, PAGE_SIZE, &kv_physwait, &kp_none);
> >  #endif
> >  }
> >  
> > @@ -624,7 +629,8 @@ vmap(struct vm_page **pages, unsigned in
> > paddr_t pa;
> > int i;
> >  
> > -   va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
> > +   va = (vaddr_t)km_alloc(PAGE_SIZE * npages, &kv_any, &kp_none,
> > +   &kd_nowait);
> 
> this could be kd_waitok as the linux vmap() uses might_sleep()
> but I suppose that would be a change in behaviour/different diff

Actually that doesn't make a difference.  Specifying kd_waitok will
only make it wait for physical pages.  But we're passing kp_none so no
physical pages are allocated.

Yes, this is confusing and continues to trip people up.

> 
> > if (va == 0)
> > return NULL;
> > for (i = 0; i < npages; i++) {
> > @@ -645,7 +651,7 @@ vunmap(void *addr, size_t size)
> >  
> > pmap_remove(pmap_kernel(), va, va + size);
> > pmap_update(pmap_kernel());
> > -   uvm_km_free(kernel_map, va, size);
> > +   km_free((void *)va, size, &kv_any, &kp_none);
> >  }
> >  
> >  bool
> > 
> > 
> 



Re: Use km_alloc(9) in drm

2022-02-05 Thread Jonathan Gray
On Sat, Feb 05, 2022 at 11:44:03AM +0100, Mark Kettenis wrote:
> We want to get rid of the uvm_km_valloc() interfaces in favour of
> km_alloc().  This changes the calls in drm(4) over.  The kv_physwait
> struct is made static to prevent collision with a symbol in
> vm_machdep.c on some architectures.  The goal is to move this into
> uvm/uvm_km.c eventually.
> 
> Just to make sure I didn't screw the conversion up somehow a few tests
> on a mix of inteldrm(4) and amdgpu(4) systems would be good.
> 
> ok?
> 
> 
> Index: dev/pci/drm/drm_linux.c
> ===
> RCS file: /cvs/src/sys/dev/pci/drm/drm_linux.c,v
> retrieving revision 1.89
> diff -u -p -r1.89 drm_linux.c
> --- dev/pci/drm/drm_linux.c   21 Jan 2022 23:49:36 -  1.89
> +++ dev/pci/drm/drm_linux.c   5 Feb 2022 10:40:22 -
> @@ -564,6 +564,11 @@ __pagevec_release(struct pagevec *pvec)
>   pagevec_reinit(pvec);
>  }
>  
> +static struct kmem_va_mode kv_physwait = {
> + .kv_map = &phys_map,
> + .kv_wait = 1,
> +};
> +
>  void *
>  kmap(struct vm_page *pg)
>  {
> @@ -572,7 +577,7 @@ kmap(struct vm_page *pg)
>  #if defined (__HAVE_PMAP_DIRECT)
>   va = pmap_map_direct(pg);
>  #else
> - va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
> + va = (vaddr_t)km_alloc(PAGE_SIZE, &kv_physwait, &kp_none, &kd_waitok);
>   pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
>   pmap_update(pmap_kernel());
>  #endif
> @@ -589,7 +594,7 @@ kunmap_va(void *addr)
>  #else
>   pmap_kremove(va, PAGE_SIZE);
>   pmap_update(pmap_kernel());
> - uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
> + km_free((void *)va, PAGE_SIZE, &kv_physwait, &kp_none);
>  #endif
>  }
>  
> @@ -624,7 +629,8 @@ vmap(struct vm_page **pages, unsigned in
>   paddr_t pa;
>   int i;
>  
> - va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
> + va = (vaddr_t)km_alloc(PAGE_SIZE * npages, &kv_any, &kp_none,
> + &kd_nowait);

this could be kd_waitok as the linux vmap() uses might_sleep()
but I suppose that would be a change in behaviour/different diff

>   if (va == 0)
>   return NULL;
>   for (i = 0; i < npages; i++) {
> @@ -645,7 +651,7 @@ vunmap(void *addr, size_t size)
>  
>   pmap_remove(pmap_kernel(), va, va + size);
>   pmap_update(pmap_kernel());
> - uvm_km_free(kernel_map, va, size);
> + km_free((void *)va, size, &kv_any, &kp_none);
>  }
>  
>  bool
> 
> 



Use km_alloc(9) in drm

2022-02-05 Thread Mark Kettenis
We want to get rid of the uvm_km_valloc() interfaces in favour of
km_alloc().  This changes the calls in drm(4) over.  The kv_physwait
struct is made static to prevent collision with a symbol in
vm_machdep.c on some architectures.  The goal is to move this into
uvm/uvm_km.c eventually.

Just to make sure I didn't screw the conversion up somehow a few tests
on a mix of inteldrm(4) and amdgpu(4) systems would be good.

ok?


Index: dev/pci/drm/drm_linux.c
===
RCS file: /cvs/src/sys/dev/pci/drm/drm_linux.c,v
retrieving revision 1.89
diff -u -p -r1.89 drm_linux.c
--- dev/pci/drm/drm_linux.c 21 Jan 2022 23:49:36 -  1.89
+++ dev/pci/drm/drm_linux.c 5 Feb 2022 10:40:22 -
@@ -564,6 +564,11 @@ __pagevec_release(struct pagevec *pvec)
pagevec_reinit(pvec);
 }
 
+static struct kmem_va_mode kv_physwait = {
+   .kv_map = &phys_map,
+   .kv_wait = 1,
+};
+
 void *
 kmap(struct vm_page *pg)
 {
@@ -572,7 +577,7 @@ kmap(struct vm_page *pg)
 #if defined (__HAVE_PMAP_DIRECT)
va = pmap_map_direct(pg);
 #else
-   va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
+   va = (vaddr_t)km_alloc(PAGE_SIZE, &kv_physwait, &kp_none, &kd_waitok);
pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
 #endif
@@ -589,7 +594,7 @@ kunmap_va(void *addr)
 #else
pmap_kremove(va, PAGE_SIZE);
pmap_update(pmap_kernel());
-   uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
+   km_free((void *)va, PAGE_SIZE, &kv_physwait, &kp_none);
 #endif
 }
 
@@ -624,7 +629,8 @@ vmap(struct vm_page **pages, unsigned in
paddr_t pa;
int i;
 
-   va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
+   va = (vaddr_t)km_alloc(PAGE_SIZE * npages, &kv_any, &kp_none,
+   &kd_nowait);
if (va == 0)
return NULL;
for (i = 0; i < npages; i++) {
@@ -645,7 +651,7 @@ vunmap(void *addr, size_t size)
 
pmap_remove(pmap_kernel(), va, va + size);
pmap_update(pmap_kernel());
-   uvm_km_free(kernel_map, va, size);
+   km_free((void *)va, size, &kv_any, &kp_none);
 }
 
 bool