33#include <sys/counter.h>
34#include <sys/kernel.h>
35#include <sys/limits.h>
39#include <sys/sysctl.h>
/*
 * Seed and stride for the SMR sequence space in the default build:
 * sequences start at 1 and advance by 2.
 * NOTE(review): SMR_SEQ_INIT/SMR_SEQ_INCR are re-defined a few lines
 * below with wraparound-stressing values; the #if/#else directives
 * selecting between the two variants are not visible in this excerpt —
 * confirm against the full file.
 */
#define SMR_SEQ_INIT 1
#define SMR_SEQ_INCR 2
/*
 * Hard cap on the distance between a goal and the shared write
 * sequence (a quarter of the unsigned sequence space), and a slightly
 * backed-off advance limit derived from it.
 * NOTE(review): presumably sized so the wraparound-safe SMR_SEQ_LT/GT
 * comparisons stay well-defined — confirm against the elided comments
 * in the full file.
 */
#define SMR_SEQ_MAX_DELTA (UINT_MAX / 4)
#define SMR_SEQ_MAX_ADVANCE (SMR_SEQ_MAX_DELTA - 1024)
/*
 * Alternate tuning that starts the sequence counter close to UINT_MAX
 * and uses a very large stride, so that integer wraparound is exercised
 * quickly. NOTE(review): this re-defines SMR_SEQ_INCR/SMR_SEQ_INIT from
 * above — almost certainly the debug/INVARIANTS branch of a conditional
 * block whose #if/#else lines fall outside this excerpt; confirm before
 * editing either variant.
 */
#define SMR_SEQ_INCR (UINT_MAX / 10000)
#define SMR_SEQ_INIT (UINT_MAX - 100000)
/* Goal may run at most 32 increments ahead of the write sequence. */
#define SMR_SEQ_MAX_DELTA (SMR_SEQ_INCR * 32)
/*
 * Upper bound on how far a single advance may move the write sequence:
 * half of the maximum allowed delta.
 *
 * Parenthesize the expansion so the macro behaves as a single term
 * inside larger expressions — the unparenthesized form would make
 * `x * SMR_SEQ_MAX_ADVANCE` parse as `x * SMR_SEQ_MAX_DELTA / 2`
 * and `y / SMR_SEQ_MAX_ADVANCE` parse as `y / SMR_SEQ_MAX_DELTA / 2`.
 */
#define SMR_SEQ_MAX_ADVANCE (SMR_SEQ_MAX_DELTA / 2)
/*
 * Lazy-mode tuning: a lazy SMR allows SMR_LAZY_GRACE grace periods of
 * slack, so the write sequence leads by SMR_LAZY_INCR (two sequence
 * increments), and SMR_SEQ_ADVANCE is the step applied when advancing.
 * NOTE(review): semantics inferred from the names and arithmetic only —
 * confirm against the elided explanatory comments in the full file.
 */
#define SMR_LAZY_GRACE 2
#define SMR_LAZY_INCR (SMR_LAZY_GRACE * SMR_SEQ_INCR)
#define SMR_SEQ_ADVANCE SMR_LAZY_INCR
199static SYSCTL_NODE(_debug, OID_AUTO, smr, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
221 union s_wr s_wr, old;
224 CRITICAL_ASSERT(curthread);
230 old._pair = s_wr._pair = atomic_load_acq_64(&s->s_wr._pair);
237 if (__predict_true(d == 0))
275 smr_seq_t goal, s_rd_seq;
277 CRITICAL_ASSERT(curthread);
278 KASSERT((zpcpu_get(smr)->c_flags & SMR_LAZY) == 0,
279 (
"smr_default_advance: called with lazy smr."));
285 s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);
294 counter_u64_add(advance_wait, 1);
297 counter_u64_add(advance, 1);
310 if (++self->c_deferred < self->c_limit)
312 self->c_deferred = 0;
340 SMR_ASSERT_NOT_ENTERED(smr);
346 atomic_thread_fence_rel();
350 self = zpcpu_get(smr);
352 flags = self->c_flags;
353 goal = SMR_SEQ_INVALID;
354 if ((
flags & (SMR_LAZY | SMR_DEFERRED)) == 0)
356 else if ((
flags & SMR_LAZY) != 0)
358 else if ((
flags & SMR_DEFERRED) != 0)
374 c_seq = SMR_SEQ_INVALID;
376 c_seq = atomic_load_int(&c->c_seq);
377 if (c_seq == SMR_SEQ_INVALID)
395 if (SMR_SEQ_LT(c_seq, s_rd_seq))
402 if (SMR_SEQ_LEQ(goal, c_seq))
422 smr_seq_t s_wr_seq, smr_seq_t goal,
bool wait)
424 smr_seq_t rd_seq, c_seq;
427 CRITICAL_ASSERT(curthread);
428 counter_u64_add_protected(poll_scan, 1);
442 c_seq =
smr_poll_cpu(zpcpu_get_cpu(smr, i), s_rd_seq, goal,
449 if (c_seq != SMR_SEQ_INVALID)
450 rd_seq = SMR_SEQ_MIN(rd_seq, c_seq);
456 s_rd_seq = atomic_load_int(&s->s_rd_seq);
457 if (SMR_SEQ_GT(rd_seq, s_rd_seq)) {
458 atomic_cmpset_int(&s->s_rd_seq, s_rd_seq, rd_seq);
482 smr_seq_t s_wr_seq, s_rd_seq;
490 KASSERT(!wait || !SMR_ENTERED(smr),
491 (
"smr_poll: Blocking not allowed in a SMR section."));
492 KASSERT(!wait || (zpcpu_get(smr)->c_flags & SMR_LAZY) == 0,
493 (
"smr_poll: Blocking not allowed on lazy smrs."));
502 self = zpcpu_get(smr);
504 flags = self->c_flags;
505 counter_u64_add_protected(poll, 1);
511 if ((
flags & SMR_LAZY) != 0)
518 s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);
524 if (SMR_SEQ_LEQ(goal, s_rd_seq))
531 s_wr_seq = atomic_load_acq_int(&s->s_wr.seq);
537 delta = SMR_SEQ_DELTA(goal, s_wr_seq);
547 (
flags & (SMR_LAZY | SMR_DEFERRED)) != 0) {
569 s_rd_seq =
smr_poll_scan(smr, s, s_rd_seq, s_wr_seq, goal, wait);
570 success = SMR_SEQ_LEQ(goal, s_rd_seq);
573 counter_u64_add_protected(poll_fail, 1);
580 atomic_thread_fence_acq();
593 smr = uma_zalloc_pcpu(
smr_zone, M_WAITOK);
597 s->s_wr.ticks =
ticks;
601 c = zpcpu_get_cpu(smr, i);
602 c->c_seq = SMR_SEQ_INVALID;
608 atomic_thread_fence_seq_cst();
617 smr_synchronize(smr);
630 NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, 0);
631 smr_zone = uma_zcreate(
"SMR CPU",
sizeof(
struct smr),
632 NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU);
int atomic_cmpset_64(volatile uint64_t *p, uint64_t old, uint64_t new)
#define SMR_SEQ_MAX_DELTA
static uma_zone_t smr_zone
smr_t smr_create(const char *name, int limit, int flags)
static smr_seq_t smr_poll_cpu(smr_t c, smr_seq_t s_rd_seq, smr_seq_t goal, bool wait)
static uma_zone_t smr_shared_zone
static smr_seq_t smr_default_advance(smr_t smr, smr_shared_t s)
static smr_seq_t smr_shared_advance(smr_shared_t s)
#define SMR_SEQ_MAX_ADVANCE
static SYSCTL_NODE(_debug, OID_AUTO, smr, CTLFLAG_RW|CTLFLAG_MPSAFE, NULL, "SMR Stats")
static smr_seq_t smr_deferred_advance(smr_t smr, smr_shared_t s, smr_t self)
bool smr_poll(smr_t smr, smr_seq_t goal, bool wait)
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, advance, CTLFLAG_RW, &advance, "")
static smr_seq_t smr_lazy_advance(smr_t smr, smr_shared_t s)
smr_seq_t smr_advance(smr_t smr)
void smr_destroy(smr_t smr)
static COUNTER_U64_DEFINE_EARLY(advance)
static smr_seq_t smr_poll_scan(smr_t smr, smr_shared_t s, smr_seq_t s_rd_seq, smr_seq_t s_wr_seq, smr_seq_t goal, bool wait)