Re: lost connection to test machine (4)

2018-02-14 Thread Dennis Zhou
On Mon, Feb 12, 2018 at 06:00:13PM +0100, Daniel Borkmann wrote:
> 
> [ +Dennis, +Tejun ]
> 
> Looks like we're stuck in the percpu allocator with a key/value size of 4
> bytes each and a large number of entries (max_entries) in the reproducer
> in the above link.
> 
> Could we have some __GFP_NORETRY semantics and let allocations fail
> instead of triggering the OOM killer?

#syz test: git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git master

As I don't have a great way to test this, I'm going to just run it
against syzbot. Locally, simple allocation tests seem fine, though it may
require the second patch as well to enable pass-through of the flags
below.

I will send a patchset if the syzbot results look good. This changes the
balance path to use __GFP_NORETRY and __GFP_NOWARN.

Thanks,
Dennis

---
 mm/percpu-km.c |  8
 mm/percpu-vm.c | 18
 mm/percpu.c    | 45
 3 files changed, 43 insertions(+), 28 deletions(-)

diff --git a/mm/percpu-km.c b/mm/percpu-km.c
index d2a7664..0d88d7b 100644
--- a/mm/percpu-km.c
+++ b/mm/percpu-km.c
@@ -34,7 +34,7 @@
 #include <linux/log2.h>
 
 static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
-  int page_start, int page_end)
+  int page_start, int page_end, gfp_t gfp)
 {
return 0;
 }
@@ -45,18 +45,18 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
/* nada */
 }
 
-static struct pcpu_chunk *pcpu_create_chunk(void)
+static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
 {
const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
struct pcpu_chunk *chunk;
struct page *pages;
int i;
 
-   chunk = pcpu_alloc_chunk();
+   chunk = pcpu_alloc_chunk(gfp);
if (!chunk)
return NULL;
 
-   pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages));
+   pages = alloc_pages(gfp | GFP_KERNEL, order_base_2(nr_pages));
if (!pages) {
pcpu_free_chunk(chunk);
return NULL;
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 9158e5a..ea9906a 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -37,7 +37,7 @@ static struct page **pcpu_get_pages(void)
lockdep_assert_held(&pcpu_alloc_mutex);
 
if (!pages)
-   pages = pcpu_mem_zalloc(pages_size);
+   pages = pcpu_mem_zalloc(pages_size, 0);
return pages;
 }
 
@@ -73,18 +73,21 @@ static void pcpu_free_pages(struct pcpu_chunk *chunk,
  * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
  * @page_start: page index of the first page to be allocated
  * @page_end: page index of the last page to be allocated + 1
+ * @gfp: allocation flags passed to the underlying allocator
  *
  * Allocate pages [@page_start,@page_end) into @pages for all units.
  * The allocation is for @chunk.  Percpu core doesn't care about the
  * content of @pages and will pass it verbatim to pcpu_map_pages().
  */
 static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
-   struct page **pages, int page_start, int page_end)
+   struct page **pages, int page_start, int page_end,
+   gfp_t gfp)
 {
-   const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM;
unsigned int cpu, tcpu;
int i;
 
+   gfp |= GFP_KERNEL | __GFP_HIGHMEM;
+
for_each_possible_cpu(cpu) {
for (i = page_start; i < page_end; i++) {
struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
@@ -262,6 +265,7 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
  * @chunk: chunk of interest
  * @page_start: the start page
  * @page_end: the end page
+ * @gfp: allocation flags passed to the underlying memory allocator
  *
  * For each cpu, populate and map pages [@page_start,@page_end) into
  * @chunk.
@@ -270,7 +274,7 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
  * pcpu_alloc_mutex, does GFP_KERNEL allocation.
  */
 static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
-  int page_start, int page_end)
+  int page_start, int page_end, gfp_t gfp)
 {
struct page **pages;
 
@@ -278,7 +282,7 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
if (!pages)
return -ENOMEM;
 
-   if (pcpu_alloc_pages(chunk, pages, page_start, page_end))
+   if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp))
return -ENOMEM;
 
if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
@@ -325,12 +329,12 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
pcpu_free_pages(chunk, pages, page_start, page_end);
 }
 
-static struct pcpu_chunk *pcpu_create_chunk(void)
+static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
 {
struct pcpu_chunk *chunk;
struct vm_struct **vms;
 
-   chunk = pcpu_allo
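
[The archive truncates the message here; the rest of the mm/percpu-vm.c
and mm/percpu.c hunks are missing. As a hedged sketch of the balance-path
change described above (not the posted hunk itself), the flags would be
pinned once in pcpu_balance_workfn() and threaded through the helpers
changed in the hunks above:

static void pcpu_balance_workfn(struct work_struct *work)
{
        /*
         * Balance path only: fail fast and quietly instead of looping
         * into the OOM killer; the allocation path (pcpu_alloc) keeps
         * its plain GFP_KERNEL behavior.
         */
        const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;

        /* ... existing reclaim/replenish logic ... */
        pcpu_populate_chunk(chunk, rs, re, gfp);
        /* ... */
        chunk = pcpu_create_chunk(gfp);
}
]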

Re: lost connection to test machine (4)

2018-02-13 Thread Dennis Zhou
On Tue, Feb 13, 2018 at 09:49:27AM -0800, Eric Dumazet wrote:
> On Tue, 2018-02-13 at 11:34 -0600, Dennis Zhou wrote:
> > Hi Eric,
> > 
> > On Tue, Feb 13, 2018 at 05:35:26AM -0800, Eric Dumazet wrote:
> > > 
> > > Also I would consider using this fix as I had warnings of cpus being
> > > stuck there for more than 50 ms :
> > > 
> > > 
> > > diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
> > > index 
> > > 9158e5a81391ced4e268e3d5dd9879c2bc7280ce..6309b01ceb357be01e857e5f899429403836f41f
> > >  100644
> > > --- a/mm/percpu-vm.c
> > > +++ b/mm/percpu-vm.c
> > > @@ -92,6 +92,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
> > >   *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
> > >   if (!*pagep)
> > >   goto err;
> > > + cond_resched();
> > >   }
> > >   }
> > >   return 0;
> > > 
> > > 
> > 
> > This function is called from pcpu_populate_chunk while holding the
> > pcpu_alloc_mutex, in two scenarios: first, when an allocation occurs
> > over a region without backing pages, and second, when the workqueue
> > item is scheduled to replenish the number of empty pages. So I don't
> > think this is a good idea.
> > 
> 
> That _is_ a good idea; we do this already in vmalloc(), and vmalloc()
> can absolutely be called while some mutex(es) are held.
> 
> 
> > My understanding is that if we're seeing warnings here, it means we're
> > struggling to find backing pages. I believe adding __GFP_NORETRY on the
> > workqueue path, as Tejun mentioned above, would help with the warnings
> > as well, but not if they are caused by the allocation path.
> > 
> 
> That is a separate concern.
> 
> My patch simply avoids latency spikes when huge percpu allocations are
> happening on systems with, say, 1024 cpus.
> 
> 

I see. I misunderstood, thinking this was for the same concern.

Thanks,
Dennis


Re: lost connection to test machine (4)

2018-02-13 Thread Eric Dumazet
On Tue, 2018-02-13 at 11:34 -0600, Dennis Zhou wrote:
> Hi Eric,
> 
> On Tue, Feb 13, 2018 at 05:35:26AM -0800, Eric Dumazet wrote:
> > 
> > Also I would consider using this fix as I had warnings of cpus being
> > stuck there for more than 50 ms :
> > 
> > 
> > diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
> > index 
> > 9158e5a81391ced4e268e3d5dd9879c2bc7280ce..6309b01ceb357be01e857e5f899429403836f41f
> >  100644
> > --- a/mm/percpu-vm.c
> > +++ b/mm/percpu-vm.c
> > @@ -92,6 +92,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
> > *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
> > if (!*pagep)
> > goto err;
> > +   cond_resched();
> > }
> > }
> > return 0;
> > 
> > 
> 
> This function is called from pcpu_populate_chunk while holding the
> pcpu_alloc_mutex, in two scenarios: first, when an allocation occurs
> over a region without backing pages, and second, when the workqueue
> item is scheduled to replenish the number of empty pages. So I don't
> think this is a good idea.
> 

That _is_ a good idea; we do this already in vmalloc(), and vmalloc()
can absolutely be called while some mutex(es) are held.


> My understanding is that if we're seeing warnings here, it means we're
> struggling to find backing pages. I believe adding __GFP_NORETRY on the
> workqueue path, as Tejun mentioned above, would help with the warnings
> as well, but not if they are caused by the allocation path.
> 

That is a separate concern.

My patch simply avoids latency spikes when huge percpu allocations are
happening on systems with, say, 1024 cpus.
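
For a rough sense of scale, a hedged back-of-the-envelope (the figures
are assumptions for illustration, not numbers from the report):

/*
 * pcpu_alloc_pages() is a for_each_possible_cpu() loop around a
 * per-page alloc_pages_node() loop, all under pcpu_alloc_mutex:
 *
 *      1024 cpus * 16 pages/unit  = 16384 page allocations
 *      16384 * ~3 us under memory pressure ~= 49 ms
 *
 * That lands right around the ~50 ms stuck-CPU warnings, with no
 * reschedule point in between; cond_resched() in the inner loop bounds
 * the non-preemptible stretch to a single allocation.
 */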




Re: lost connection to test machine (4)

2018-02-13 Thread Dennis Zhou
Hi Eric,

On Tue, Feb 13, 2018 at 05:35:26AM -0800, Eric Dumazet wrote:
> 
> Also I would consider using this fix as I had warnings of cpus being
> stuck there for more than 50 ms :
> 
> 
> diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
> index 
> 9158e5a81391ced4e268e3d5dd9879c2bc7280ce..6309b01ceb357be01e857e5f899429403836f41f
>  100644
> --- a/mm/percpu-vm.c
> +++ b/mm/percpu-vm.c
> @@ -92,6 +92,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
>   *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
>   if (!*pagep)
>   goto err;
> + cond_resched();
>   }
>   }
>   return 0;
> 
> 

This function is called from pcpu_populate_chunk while holding the
pcpu_alloc_mutex, in two scenarios: first, when an allocation occurs
over a region without backing pages, and second, when the workqueue
item is scheduled to replenish the number of empty pages. So I don't
think this is a good idea.

My understanding is that if we're seeing warnings here, it means we're
struggling to find backing pages. I believe adding __GFP_NORETRY on the
workqueue path, as Tejun mentioned above, would help with the warnings
as well, but not if they are caused by the allocation path.

Thanks,
Dennis


Re: lost connection to test machine (4)

2018-02-13 Thread Eric Dumazet
On Mon, 2018-02-12 at 12:05 -0800, Tejun Heo wrote:
> On Mon, Feb 12, 2018 at 09:03:25AM -0800, Tejun Heo wrote:
> > Hello, Daniel.
> > 
> > On Mon, Feb 12, 2018 at 06:00:13PM +0100, Daniel Borkmann wrote:
> > > [ +Dennis, +Tejun ]
> > > 
> > > Looks like we're stuck in the percpu allocator with a key/value size
> > > of 4 bytes each and a large number of entries (max_entries) in the
> > > reproducer in the above link.
> > > 
> > > Could we have some __GFP_NORETRY semantics and let allocations fail
> > > instead of triggering the OOM killer?
> > 
> > For some part, maybe, but not generally.  The virt area allocation
> > goes down to page table allocation, which is hard-coded to use
> > GFP_KERNEL in arch mm code.
> 
> So, the following should convert the majority of allocations to use
> __GFP_NORETRY.  It doesn't catch everything but should significantly
> lower the probability of hitting this and put this on the same footing
> as vmalloc.  Can you see whether this is enough?
> 
> Note that this patch isn't upstreamable.  We definitely want to
> restrict this to the rebalance path, but it should be good enough for
> testing.
> 
> Thanks.
> 
> diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
> index 9158e5a..0b4739f 100644
> --- a/mm/percpu-vm.c
> +++ b/mm/percpu-vm.c
> @@ -81,7 +81,7 @@ static void pcpu_free_pages(struct pcpu_chunk *chunk,
>  static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
>   struct page **pages, int page_start, int page_end)
>  {
> - const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM;
> + const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_NORETRY;
>   unsigned int cpu, tcpu;
>   int i;
>  

Also I would consider using this fix as I had warnings of cpus being
stuck there for more than 50 ms :


diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 
9158e5a81391ced4e268e3d5dd9879c2bc7280ce..6309b01ceb357be01e857e5f899429403836f41f
 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -92,6 +92,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
if (!*pagep)
goto err;
+   cond_resched();
}
}
return 0;




Re: lost connection to test machine (4)

2018-02-12 Thread Tejun Heo
On Mon, Feb 12, 2018 at 09:03:25AM -0800, Tejun Heo wrote:
> Hello, Daniel.
> 
> On Mon, Feb 12, 2018 at 06:00:13PM +0100, Daniel Borkmann wrote:
> > [ +Dennis, +Tejun ]
> > 
> > Looks like we're stuck in the percpu allocator with a key/value size
> > of 4 bytes each and a large number of entries (max_entries) in the
> > reproducer in the above link.
> > 
> > Could we have some __GFP_NORETRY semantics and let allocations fail
> > instead of triggering the OOM killer?
> 
> For some part, maybe, but not generally.  The virt area allocation
> goes down to page table allocation, which is hard-coded to use
> GFP_KERNEL in arch mm code.

So, the following should convert the majority of allocations to use
__GFP_NORETRY.  It doesn't catch everything but should significantly
lower the probability of hitting this and put this on the same footing
as vmalloc.  Can you see whether this is enough?

Note that this patch isn't upstreamable.  We definitely want to
restrict this to the rebalance path, but it should be good enough for
testing.

Thanks.

diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 9158e5a..0b4739f 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -81,7 +81,7 @@ static void pcpu_free_pages(struct pcpu_chunk *chunk,
 static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
struct page **pages, int page_start, int page_end)
 {
-   const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM;
+   const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_NORETRY;
unsigned int cpu, tcpu;
int i;
 


Re: lost connection to test machine (4)

2018-02-12 Thread Tejun Heo
Hello, Daniel.

On Mon, Feb 12, 2018 at 06:00:13PM +0100, Daniel Borkmann wrote:
> [ +Dennis, +Tejun ]
> 
> Looks like we're stuck in the percpu allocator with a key/value size of 4
> bytes each and a large number of entries (max_entries) in the reproducer
> in the above link.
> 
> Could we have some __GFP_NORETRY semantics and let allocations fail
> instead of triggering the OOM killer?

For some part, maybe, but not generally.  The virt area allocation
goes down to page table allocation, which is hard-coded to use
GFP_KERNEL in arch mm code.
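
For illustration, a hedged sketch of that constraint (not the actual
arch code; the helper name here is made up): the lowest-level page-table
allocation picks its own flags, so a gfp argument threaded through the
percpu layers cannot reach it.

/* sketch: arch pte allocation chooses its flags locally */
static pte_t *pte_alloc_one_kernel_sketch(struct mm_struct *mm)
{
        /* GFP_KERNEL is fixed here regardless of the caller's gfp */
        return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}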

Thanks.

-- 
tejun


Re: lost connection to test machine (4)

2018-02-12 Thread Daniel Borkmann
On 02/12/2018 05:03 PM, Dmitry Vyukov wrote:
> On Mon, Feb 12, 2018 at 5:00 PM, syzbot
> <syzbot+adb03f3f0bb57ce3a...@syzkaller.appspotmail.com> wrote:
>> Hello,
>>
>> syzbot hit the following crash on bpf-next commit
>> 617aebe6a97efa539cc4b8a52adccd89596e6be0 (Sun Feb 4 00:25:42 2018 +0000)
>> Merge tag 'usercopy-v4.16-rc1' of
>> git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
>>
>> So far this crash happened 898 times on bpf-next, net-next, upstream.
>> C reproducer is attached.
>> syzkaller reproducer is attached.
>> Raw console output is attached.
>> compiler: gcc (GCC) 7.1.1 20170620
>> .config is attached.
> 
> The reproducer first causes several tasks to spend minutes at this stack:
> 
> [  110.762189] NMI backtrace for cpu 2
> [  110.762206] CPU: 2 PID: 3760 Comm: syz-executor Not tainted 4.15.0+ #96
> [  110.762210] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996),
> BIOS Bochs 01/01/2011
> [  110.762224] RIP: 0010:mutex_spin_on_owner+0x303/0x420
> [  110.762232] INFO: NMI handler (nmi_cpu_backtrace_handler) took too
> long to run: 1.103 msecs
> [  110.762237] RSP: 0018:88005be470e8 EFLAGS: 0246
> [  110.762268] RAX: 88006ca0 RBX:  RCX: 
> 81554165
> [  110.762275] RDX: 0001 RSI: 10d97884 RDI: 
> 
> [  110.762281] RBP: 88005be47210 R08: dc01 R09: 
> fbfff0db2b75
> [  110.762286] R10: fbfff0db2b74 R11: 86d95ba7 R12: 
> 86d95ba0
> [  110.762292] R13: ed000b7c8e25 R14: dc00 R15: 
> 880064691040
> [  110.762300] FS:  7f84ed029700() GS:88006cb0()
> knlGS:
> [  110.762305] CS:  0010 DS:  ES:  CR0: 80050033
> [  110.762311] CR2: 7fd565f7b1b0 CR3: 5bddf002 CR4: 
> 001606e0
> [  110.762316] Call Trace:
> [  110.762383]  __mutex_lock.isra.1+0x97d/0x1440
> [  110.762659]  __mutex_lock_slowpath+0xe/0x10
> [  110.762668]  mutex_lock+0x3e/0x50
> [  110.762677]  pcpu_alloc+0x846/0xfe0
> [  110.762778]  __alloc_percpu_gfp+0x27/0x30
> [  110.762801]  array_map_alloc+0x484/0x690
> [  110.762832]  SyS_bpf+0xa27/0x4770
> [  110.763190]  do_syscall_64+0x297/0x760
> [  110.763260]  entry_SYSCALL_64_after_hwframe+0x21/0x86
> 
> and later the machine dies with:
> 
> [  191.484308] Kernel panic - not syncing: Out of memory and no
> killable processes...
> [  191.484308]
> [  191.485740] CPU: 3 PID: 746 Comm: kworker/3:1 Not tainted 4.15.0+ #96
> [  191.486761] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996),
> BIOS Bochs 01/01/2011
> [  191.488071] Workqueue: events pcpu_balance_workfn
> [  191.488821] Call Trace:
> [  191.489299]  dump_stack+0x175/0x225
> [  191.490590]  panic+0x22a/0x4be
> [  191.493061]  out_of_memory.cold.31+0x20/0x21
> [  191.496380]  __alloc_pages_slowpath+0x1d98/0x28a0
> [  191.503616]  __alloc_pages_nodemask+0x89c/0xc60
> [  191.507876]  pcpu_populate_chunk+0x1fd/0x9b0
> [  191.510114]  pcpu_balance_workfn+0x1019/0x1450
> [  191.517804]  process_one_work+0x9d5/0x1460
> [  191.522714]  worker_thread+0x1cc/0x1410
> [  191.529319]  kthread+0x304/0x3c0
> 
> The original message with attachments is here:
> https://groups.google.com/d/msg/syzkaller-bugs/Km3xEZu9zzU/rO-7XuwZAgAJ

[ +Dennis, +Tejun ]

Looks like we're stuck in the percpu allocator with a key/value size of 4
bytes each and a large number of entries (max_entries) in the reproducer
in the above link.

Could we have some __GFP_NORETRY semantics and let allocations fail
instead of triggering the OOM killer?
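
For reference, a hedged sketch of the reproducer pattern described above
(not the attached syzkaller reproducer itself; max_entries is an assumed
figure): a BPF_MAP_TYPE_PERCPU_ARRAY with 4-byte key/value and a huge
max_entries turns every map create into one large percpu allocation.

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type    = BPF_MAP_TYPE_PERCPU_ARRAY;
        attr.key_size    = 4;
        attr.value_size  = 4;
        attr.max_entries = 1 << 20;     /* assumed "large" value */

        /* each create allocates value_size * max_entries per cpu;
         * repeating it drives the machine toward OOM */
        for (;;)
                syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}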


Re: lost connection to test machine (4)

2018-02-12 Thread Dmitry Vyukov
On Mon, Feb 12, 2018 at 5:00 PM, syzbot
<syzbot+adb03f3f0bb57ce3a...@syzkaller.appspotmail.com> wrote:
> Hello,
>
> syzbot hit the following crash on bpf-next commit
> 617aebe6a97efa539cc4b8a52adccd89596e6be0 (Sun Feb 4 00:25:42 2018 +0000)
> Merge tag 'usercopy-v4.16-rc1' of
> git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
>
> So far this crash happened 898 times on bpf-next, net-next, upstream.
> C reproducer is attached.
> syzkaller reproducer is attached.
> Raw console output is attached.
> compiler: gcc (GCC) 7.1.1 20170620
> .config is attached.


The reproducer first causes several tasks to spend minutes at this stack:

[  110.762189] NMI backtrace for cpu 2
[  110.762206] CPU: 2 PID: 3760 Comm: syz-executor Not tainted 4.15.0+ #96
[  110.762210] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996),
BIOS Bochs 01/01/2011
[  110.762224] RIP: 0010:mutex_spin_on_owner+0x303/0x420
[  110.762232] INFO: NMI handler (nmi_cpu_backtrace_handler) took too
long to run: 1.103 msecs
[  110.762237] RSP: 0018:88005be470e8 EFLAGS: 0246
[  110.762268] RAX: 88006ca0 RBX:  RCX: 81554165
[  110.762275] RDX: 0001 RSI: 10d97884 RDI: 
[  110.762281] RBP: 88005be47210 R08: dc01 R09: fbfff0db2b75
[  110.762286] R10: fbfff0db2b74 R11: 86d95ba7 R12: 86d95ba0
[  110.762292] R13: ed000b7c8e25 R14: dc00 R15: 880064691040
[  110.762300] FS:  7f84ed029700() GS:88006cb0()
knlGS:
[  110.762305] CS:  0010 DS:  ES:  CR0: 80050033
[  110.762311] CR2: 7fd565f7b1b0 CR3: 5bddf002 CR4: 001606e0
[  110.762316] Call Trace:
[  110.762383]  __mutex_lock.isra.1+0x97d/0x1440
[  110.762659]  __mutex_lock_slowpath+0xe/0x10
[  110.762668]  mutex_lock+0x3e/0x50
[  110.762677]  pcpu_alloc+0x846/0xfe0
[  110.762778]  __alloc_percpu_gfp+0x27/0x30
[  110.762801]  array_map_alloc+0x484/0x690
[  110.762832]  SyS_bpf+0xa27/0x4770
[  110.763190]  do_syscall_64+0x297/0x760
[  110.763260]  entry_SYSCALL_64_after_hwframe+0x21/0x86

and later the machine dies with:

[  191.484308] Kernel panic - not syncing: Out of memory and no
killable processes...
[  191.484308]
[  191.485740] CPU: 3 PID: 746 Comm: kworker/3:1 Not tainted 4.15.0+ #96
[  191.486761] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996),
BIOS Bochs 01/01/2011
[  191.488071] Workqueue: events pcpu_balance_workfn
[  191.488821] Call Trace:
[  191.489299]  dump_stack+0x175/0x225
[  191.490590]  panic+0x22a/0x4be
[  191.493061]  out_of_memory.cold.31+0x20/0x21
[  191.496380]  __alloc_pages_slowpath+0x1d98/0x28a0
[  191.503616]  __alloc_pages_nodemask+0x89c/0xc60
[  191.507876]  pcpu_populate_chunk+0x1fd/0x9b0
[  191.510114]  pcpu_balance_workfn+0x1019/0x1450
[  191.517804]  process_one_work+0x9d5/0x1460
[  191.522714]  worker_thread+0x1cc/0x1410
[  191.529319]  kthread+0x304/0x3c0


The original message with attachments is here:
https://groups.google.com/d/msg/syzkaller-bugs/Km3xEZu9zzU/rO-7XuwZAgAJ


> IMPORTANT: if you fix the bug, please add the following tag to the commit:
> Reported-by: syzbot+adb03f3f0bb57ce3a...@syzkaller.appspotmail.com
> It will help syzbot understand when the bug is fixed. See footer for
> details.
> If you forward the report, please keep this part and the footer.
>
>
>
> ---
> This bug is generated by a dumb bot. It may contain errors.
> See https://goo.gl/tpsmEJ for details.
> Direct all questions to syzkal...@googlegroups.com.
>
> syzbot will keep track of this bug report.
> If you forgot to add the Reported-by tag, once the fix for this bug is
> merged
> into any tree, please reply to this email with:
> #syz fix: exact-commit-title
> If you want to test a patch for this bug, please reply with:
> #syz test: git://repo/address.git branch
> and provide the patch inline or as an attachment.
> To mark this as a duplicate of another syzbot report, please reply with:
> #syz dup: exact-subject-of-another-report
> If it's a one-off invalid bug report, please reply with:
> #syz invalid
> Note: if the crash happens again, it will cause creation of a new bug
> report.
> Note: all commands must start from beginning of the line in the email body.
>
> --
> You received this message because you are subscribed to the Google Groups
> "syzkaller-bugs" group.
> To unsubscribe from this group and stop receiving emails from it, send an
> email to syzkaller-bugs+unsubscr...@googlegroups.com.
> To view this discussion on the web visit
> https://groups.google.com/d/msgid/syzkaller-bugs/001a113f8734783e94056505f8fd%40google.com.
> For more options, visit https://groups.google.com/d/optout.