Re: svn commit: r340097 - in head/sys: kern sys

2018-11-12 Thread Andrew Gallatin

On 11/2/18 11:43 PM, Matt Macy wrote:

Author: mmacy
Date: Sat Nov  3 03:43:32 2018
New Revision: 340097
URL: https://svnweb.freebsd.org/changeset/base/340097

Log:
   Convert epoch to read / write records per cpu
   
   In discussing D17503 "Run epoch calls sooner and more reliably" with

   sbahra@ we came to the conclusion that epoch is currently misusing the
   ck_epoch API. It isn't safe to do a "write side" operation (ck_epoch_call
   or ck_epoch_poll) in the middle of a "read side" section. Since, by 
definition,
   it's possible to be preempted during the middle of an EPOCH_PREEMPT
   epoch the GC task might call ck_epoch_poll or another thread might call
   ck_epoch_call on the same section. The right solution is ultimately to change
   the way that ck_epoch works for this use case. However, as a stopgap for
   12 we agreed to simply have separate records for each use case.
   
   Tested by: pho@
   
   MFC after:	3 days



Hi Matt,

Can you elaborate why this is needed?

I seem to recall that Samy Al Bahra made some upstream changes to CK 
that modified the CK API to legitimize our use of the API, and these 
were brought into FreeBSD in r339375. Were these insufficient?


Also, it would be great if you could get review on epoch changes. Epoch 
is totally awesome, and I'm thrilled that you brought it in.  However, 
it is very tricky, and it seems like changes here could benefit from review.


Thanks,

Drew



___
svn-src-head@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-head
To unsubscribe, send any mail to "svn-src-head-unsubscr...@freebsd.org"


Re: svn commit: r340097 - in head/sys: kern sys

2018-11-12 Thread Hans Petter Selasky

On 11/3/18 4:43 AM, Matt Macy wrote:

Author: mmacy
Date: Sat Nov  3 03:43:32 2018
New Revision: 340097
URL: https://svnweb.freebsd.org/changeset/base/340097

Log:
   Convert epoch to read / write records per cpu
   
   In discussing D17503 "Run epoch calls sooner and more reliably" with

   sbahra@ we came to the conclusion that epoch is currently misusing the
   ck_epoch API. It isn't safe to do a "write side" operation (ck_epoch_call
   or ck_epoch_poll) in the middle of a "read side" section. Since, by 
definition,
   it's possible to be preempted during the middle of an EPOCH_PREEMPT
   epoch the GC task might call ck_epoch_poll or another thread might call
   ck_epoch_call on the same section. The right solution is ultimately to change
   the way that ck_epoch works for this use case. However, as a stopgap for
   12 we agreed to simply have separate records for each use case.
   
   Tested by: pho@
   
   MFC after:	3 days


^^^ not yet MFC'ed - any reason why not?
--HPS
___
svn-src-head@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-head
To unsubscribe, send any mail to "svn-src-head-unsubscr...@freebsd.org"


svn commit: r340097 - in head/sys: kern sys

2018-11-03 Thread Matt Macy
Author: mmacy
Date: Sat Nov  3 03:43:32 2018
New Revision: 340097
URL: https://svnweb.freebsd.org/changeset/base/340097

Log:
  Convert epoch to read / write records per cpu
  
  In discussing D17503 "Run epoch calls sooner and more reliably" with
  sbahra@ we came to the conclusion that epoch is currently misusing the
  ck_epoch API. It isn't safe to do a "write side" operation (ck_epoch_call
  or ck_epoch_poll) in the middle of a "read side" section. Since, by 
definition,
  it's possible to be preempted during the middle of an EPOCH_PREEMPT
  epoch the GC task might call ck_epoch_poll or another thread might call
  ck_epoch_call on the same section. The right solution is ultimately to change
  the way that ck_epoch works for this use case. However, as a stopgap for
  12 we agreed to simply have separate records for each use case.
  
  Tested by: pho@
  
  MFC after:3 days

Modified:
  head/sys/kern/subr_epoch.c
  head/sys/sys/epoch_private.h

Modified: head/sys/kern/subr_epoch.c
==
--- head/sys/kern/subr_epoch.c  Sat Nov  3 03:10:06 2018(r340096)
+++ head/sys/kern/subr_epoch.c  Sat Nov  3 03:43:32 2018(r340097)
@@ -150,7 +150,8 @@ epoch_ctor(epoch_t epoch)
CPU_FOREACH(cpu) {
er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
bzero(er, sizeof(*er));
-   ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
+   ck_epoch_register(&epoch->e_epoch, &er->er_read_record, NULL);
+   ck_epoch_register(&epoch->e_epoch, &er->er_write_record, NULL);
TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
er->er_cpuid = cpu;
}
@@ -235,7 +236,7 @@ epoch_block_handler_preempt(struct ck_epoch *global __
int spincount, gen;
int locksheld __unused;
 
-   record = __containerof(cr, struct epoch_record, er_record);
+   record = __containerof(cr, struct epoch_record, er_read_record);
td = curthread;
locksheld = td->td_locks;
spincount = 0;
@@ -461,7 +462,7 @@ epoch_call(epoch_t epoch, epoch_context_t ctx, void (*
critical_enter();
*DPCPU_PTR(epoch_cb_count) += 1;
er = epoch_currecord(epoch);
-   ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
+   ck_epoch_call(&er->er_write_record, cb, (ck_epoch_cb_t *)callback);
critical_exit();
return;
 boottime:
@@ -485,7 +486,7 @@ epoch_call_task(void *arg __unused)
if (__predict_false((epoch = allepochs[i]) == NULL))
continue;
er = epoch_currecord(epoch);
-   record = &er->er_record;
+   record = &er->er_write_record;
if ((npending = record->n_pending) == 0)
continue;
ck_epoch_poll_deferred(record, &cb_stack);

Modified: head/sys/sys/epoch_private.h
==
--- head/sys/sys/epoch_private.hSat Nov  3 03:10:06 2018
(r340096)
+++ head/sys/sys/epoch_private.hSat Nov  3 03:43:32 2018
(r340097)
@@ -89,7 +89,8 @@ typedef struct epoch_thread {
 TAILQ_HEAD (epoch_tdlist, epoch_thread);
 
 typedef struct epoch_record {
-   ck_epoch_record_t er_record;
+   ck_epoch_record_t er_read_record;
+   ck_epoch_record_t er_write_record;
volatile struct epoch_tdlist er_tdlist;
volatile uint32_t er_gen;
uint32_t er_cpuid;
@@ -138,7 +139,7 @@ epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et)
td->td_pre_epoch_prio = td->td_priority;
er = epoch_currecord(epoch);
TAILQ_INSERT_TAIL(&er->er_tdlist, etd, et_link);
-   ck_epoch_begin(&er->er_record, (ck_epoch_section_t *)&etd->et_section);
+   ck_epoch_begin(&er->er_read_record, (ck_epoch_section_t *)&etd->et_section);
critical_exit_sa(td);
 }
 
@@ -155,7 +156,7 @@ epoch_enter(epoch_t epoch)
td->td_epochnest++;
critical_enter_sa(td);
er = epoch_currecord(epoch);
-   ck_epoch_begin(&er->er_record, NULL);
+   ck_epoch_begin(&er->er_read_record, NULL);
 }
 
 static __inline void
@@ -183,7 +184,7 @@ epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et)
etd->et_magic_post = 0;
 #endif
etd->et_td = (void*)0xDEADBEEF;
-   ck_epoch_end(&er->er_record,
+   ck_epoch_end(&er->er_read_record,
    (ck_epoch_section_t *)&etd->et_section);
TAILQ_REMOVE(&er->er_tdlist, etd, et_link);
er->er_gen++;
@@ -203,7 +204,7 @@ epoch_exit(epoch_t epoch)
MPASS(td->td_epochnest);
td->td_epochnest--;
er = epoch_currecord(epoch);
-   ck_epoch_end(&er->er_record, NULL);
+   ck_epoch_end(&er->er_read_record, NULL);
critical_exit_sa(td);
 }
 #endif /* _KERNEL */
___
svn-src-head@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-head
To unsubscribe, send any mail to "svn-src-head-unsubscr...@freebsd.org"