/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019,2020 Jeffrey Roberson <jeff@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/smr.h>
#include <sys/sysctl.h>

#include <vm/uma.h>

/*
 * Global Unbounded Sequences (GUS)
 *
 * This is a novel safe memory reclamation technique inspired by
 * epoch based reclamation from Samy Al Bahra's concurrency kit which
 * in turn was based on work described in:
 *   Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
 *   of Cambridge Computing Laboratory.
 * And shares some similarities with:
 *   Wang, Stamler, Parmer. 2016. Parallel Sections: Scaling System-Level
 *   Data-Structures.
 *
 * This is not an implementation of hazard pointers or related
 * techniques.  The term safe memory reclamation is used as a
 * generic descriptor for algorithms that defer frees to avoid
 * use-after-free errors with lockless data structures or as
 * a mechanism to detect quiescence for writer synchronization.
 *
 * The basic approach is to maintain a monotonic write sequence
 * number that is updated on some application defined granularity.
 * Readers record the most recent write sequence number they have
 * observed.  A shared read sequence number records the lowest
 * sequence number observed by any reader as of the last poll.  Any
 * write older than this value has been observed by all readers
 * and memory can be reclaimed.  Like Epoch we also detect idle
 * readers by storing an invalid sequence number in the per-cpu
 * state when the read section exits.  Like Parsec we establish
 * a global write clock that is used to mark memory on free.
 *
 * The write and read sequence numbers can be thought of as a two
 * handed clock with readers always advancing towards writers.  GUS
 * maintains the invariant that all readers can safely access memory
 * that was visible at the time they loaded their copy of the sequence
 * number.  Periodically the read sequence or hand is polled and
 * advanced as far towards the write sequence as active readers allow.
 * Memory which was freed between the old and new global read sequence
 * number can now be reclaimed.  When the system is idle the two hands
 * meet and no deferred memory is outstanding.  Readers never advance
 * any sequence number; they only observe them.  The shared read
 * sequence number is consequently never higher than the write sequence.
 * A stored sequence number that falls outside of this range has expired
 * and needs no scan to reclaim.
 *
 * A notable distinction between GUS and Epoch, qsbr, rcu, etc. is
 * that advancing the sequence number is decoupled from detecting its
 * observation.  That is to say, the delta between read and write
 * sequence numbers is not bounded.  This can be thought of as a more
 * generalized form of epoch which requires them to be at most one step
 * apart.  This results in a more granular assignment of sequence
 * numbers even as read latencies prohibit all or some expiration.
 * It also allows writers to advance the sequence number and save the
 * poll for expiration until a later time when it is likely to
 * complete without waiting.  The batch granularity and free-to-use
 * latency are dynamic and can be significantly smaller than in more
 * strict systems.
 *
 * This mechanism is primarily intended to be used in coordination with
 * UMA.  By integrating with the allocator we avoid all of the callout
 * queue machinery and are provided with an efficient way to batch
 * sequence advancement and waiting.  The allocator accumulates a full
 * per-cpu cache of memory before advancing the sequence.  It then
 * delays waiting for this sequence to expire until the memory is
 * selected for reuse.  In this way we only increment the sequence
 * value once for n=cache-size frees and the waits are done long
 * after the sequence has been expired so they need only be verified
 * to account for pathological conditions and to advance the read
 * sequence.  Tying the sequence number to the bucket size has the
 * nice property that as the zone gets busier the buckets get larger
 * and the sequence writes become fewer.  If the coherency of advancing
 * the write sequence number becomes too costly we can advance
 * it for every N buckets in exchange for higher free-to-use
 * latency and consequently higher memory consumption.
 *
 * If the read overhead of accessing the shared cacheline becomes
 * especially burdensome an invariant TSC could be used in place of the
 * sequence.  The algorithm would then only need to maintain the minimum
 * observed tsc.  This would trade potential cache synchronization
 * overhead for local serialization and cpu timestamp overhead.
 */
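
/*
 * A minimal usage sketch of the reader and writer sides described above
 * (illustrative only; 'my_smr', 'head', 'obj' with its 'o_goal' field,
 * and M_OBJ are hypothetical):
 *
 *	Reader - pin the current write sequence for the section:
 *		smr_enter(my_smr);
 *		obj = smr_entered_load(&head, my_smr);
 *		... dereference obj safely ...
 *		smr_exit(my_smr);
 *
 *	Writer - unlink obj, record a wait goal, and defer the free:
 *		obj->o_goal = smr_advance(my_smr);
 *		... later, before reusing the memory ...
 *		if (smr_poll(my_smr, obj->o_goal, true))
 *			free(obj, M_OBJ);
 */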

/*
 * A simplified diagram:
 *
 * 0                                                          UINT_MAX
 * | -------------------- sequence number space -------------------- |
 *              ^ rd seq                            ^ wr seq
 *              | ----- valid sequence numbers ---- |
 *                ^cpuA  ^cpuC
 * | -- free -- | --------- deferred frees -------- | ---- free ---- |
 *
 *
 * In this example cpuA has the lowest sequence number and poll can
 * advance rd seq.  cpuB is not running and is considered to observe
 * wr seq.
 *
 * Freed memory that is tagged with a sequence number between rd seq and
 * wr seq can not be safely reclaimed because cpuA may hold a reference to
 * it.  Any other memory is guaranteed to be unreferenced.
 *
 * Any writer is free to advance wr seq at any time, however it may busy
 * poll in pathological cases.
 */
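
/*
 * Sequence comparisons are done with wrap-safe modular arithmetic.  A
 * sketch of the idea behind the SMR_SEQ_LT()/SMR_SEQ_GT() macros used
 * throughout this file (see sys/smr.h for the authoritative
 * definitions):
 *
 *	smr_delta_t d = (smr_delta_t)(a - b);	signed difference
 *	a is before b iff d < 0; a is after b iff d > 0
 *
 * This is only meaningful while |a - b| stays below UINT_MAX / 2, which
 * is why SMR_SEQ_MAX_DELTA below is capped well under that bound.
 */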

static uma_zone_t smr_shared_zone;
static uma_zone_t smr_zone;

#ifndef INVARIANTS
#define	SMR_SEQ_INIT	1		/* All valid sequence numbers are odd. */
#define	SMR_SEQ_INCR	2

/*
 * SMR_SEQ_MAX_DELTA is the maximum distance allowed between rd_seq and
 * wr_seq.  For the modular arithmetic to work a value of UINT_MAX / 2
 * would be possible but it is checked after we increment the wr_seq so
 * a safety margin is left to prevent overflow.
 *
 * We will block until SMR_SEQ_MAX_ADVANCE sequence numbers have progressed
 * to prevent integer wrapping.  See smr_advance() for more details.
 */
#define	SMR_SEQ_MAX_DELTA	(UINT_MAX / 4)
#define	SMR_SEQ_MAX_ADVANCE	(SMR_SEQ_MAX_DELTA - 1024)
#else
/* We want to test the wrapping feature in invariants kernels. */
#define	SMR_SEQ_INCR	(UINT_MAX / 10000)
#define	SMR_SEQ_INIT	(UINT_MAX - 100000)
/* Force extra polls to test the integer overflow detection. */
#define	SMR_SEQ_MAX_DELTA	(SMR_SEQ_INCR * 32)
#define	SMR_SEQ_MAX_ADVANCE	(SMR_SEQ_MAX_DELTA / 2)
#endif

/*
 * The grace period for lazy (tick based) SMR.
 *
 * Hardclock is responsible for advancing ticks on a single CPU while every
 * CPU receives a regular clock interrupt.  The clock interrupts are flushing
 * the store buffers and any speculative loads that may violate our invariants.
 * Because these interrupts are not synchronized we must wait one additional
 * tick in the future to be certain that all processors have had their state
 * synchronized by an interrupt.
 *
 * This assumes that the clock interrupt will only be delayed by other causes
 * that will flush the store buffer or prevent access to the section protected
 * data.  For example, an idle processor, a system management interrupt,
 * or a vm exit.
 */
#define	SMR_LAZY_GRACE		2
#define	SMR_LAZY_INCR		(SMR_LAZY_GRACE * SMR_SEQ_INCR)
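
/*
 * Worked arithmetic for the lazy clock (non-INVARIANTS constants): the
 * sequence advances by SMR_SEQ_INCR (2) per tick, so the 32-bit
 * sequence space wraps after UINT_MAX / 2 ticks.  That is roughly 2^31
 * milliseconds, or about 25 days, at hz = 1000 and about 60 hours at
 * hz = 10000, matching the limits noted in the smr_advance() comment
 * below.
 */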

/*
 * The maximum sequence number ahead of wr_seq that may still be valid.  The
 * sequence may not be advanced on write for lazy or deferred SMRs.  In this
 * case poll needs to attempt to forward the sequence number if the goal is
 * within wr_seq + SMR_SEQ_ADVANCE.
 */
#define	SMR_SEQ_ADVANCE		SMR_LAZY_INCR

static SYSCTL_NODE(_debug, OID_AUTO, smr, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    "SMR Stats");
static COUNTER_U64_DEFINE_EARLY(advance);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, advance, CTLFLAG_RW, &advance, "");
static COUNTER_U64_DEFINE_EARLY(advance_wait);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, advance_wait, CTLFLAG_RW,
    &advance_wait, "");
static COUNTER_U64_DEFINE_EARLY(poll);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll, CTLFLAG_RW, &poll, "");
static COUNTER_U64_DEFINE_EARLY(poll_scan);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll_scan, CTLFLAG_RW, &poll_scan,
    "");
static COUNTER_U64_DEFINE_EARLY(poll_fail);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll_fail, CTLFLAG_RW, &poll_fail,
    "");

/*
 * Advance a lazy write sequence number.  These move forward at the rate of
 * ticks.  Grace is SMR_LAZY_INCR (2 ticks) in the future.
 *
 * This returns the goal write sequence number.
 */
static smr_seq_t
smr_lazy_advance(smr_t smr, smr_shared_t s)
{
	union s_wr s_wr, old;
	int t, d;

	CRITICAL_ASSERT(curthread);

	/*
	 * Load the stored ticks value before the current one.  This way the
	 * current value can only be the same or larger.
	 */
	old._pair = s_wr._pair = atomic_load_acq_64(&s->s_wr._pair);
	t = ticks;

	/*
	 * The most probable condition is that the update already took place.
	 */
	d = t - s_wr.ticks;
	if (__predict_true(d == 0))
		goto out;
	/* Cap the rate of advancement and handle long idle periods. */
	if (d > SMR_LAZY_GRACE || d < 0)
		d = SMR_LAZY_GRACE;
	s_wr.ticks = t;
	s_wr.seq += d * SMR_SEQ_INCR;

	/*
	 * This can only fail if another thread races to call advance().
	 * Strong cmpset semantics mean we are guaranteed that the update
	 * happened.
	 */
	atomic_cmpset_64(&s->s_wr._pair, old._pair, s_wr._pair);
out:
	return (s_wr.seq + SMR_LAZY_INCR);
}

/*
 * Increment the shared write sequence by 2.  Since it is initialized
 * to 1 this means the only valid values are odd and an observed value
 * of 0 in a particular CPU means it is not currently in a read section.
 */
static smr_seq_t
smr_shared_advance(smr_shared_t s)
{

	return (atomic_fetchadd_int(&s->s_wr.seq, SMR_SEQ_INCR) + SMR_SEQ_INCR);
}

/*
 * Advance the write sequence number for a normal smr section.  If the
 * write sequence is too far behind the read sequence we have to poll
 * to advance rd_seq and prevent undetectable wraps.
 */
static smr_seq_t
smr_default_advance(smr_t smr, smr_shared_t s)
{
	smr_seq_t goal, s_rd_seq;

	CRITICAL_ASSERT(curthread);
	KASSERT((zpcpu_get(smr)->c_flags & SMR_LAZY) == 0,
	    ("smr_default_advance: called with lazy smr."));

	/*
	 * Load the current read seq before incrementing the goal so
	 * we are guaranteed it is always < goal.
	 */
	s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);
	goal = smr_shared_advance(s);

	/*
	 * Force a synchronization here if the goal is getting too
	 * far ahead of the read sequence number.  This keeps the
	 * wrap detecting arithmetic working in pathological cases.
	 */
	if (SMR_SEQ_DELTA(goal, s_rd_seq) >= SMR_SEQ_MAX_DELTA) {
		counter_u64_add(advance_wait, 1);
		smr_wait(smr, goal - SMR_SEQ_MAX_ADVANCE);
	}
	counter_u64_add(advance, 1);

	return (goal);
}
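
/*
 * Worked example of the guard above (non-INVARIANTS constants): once
 * goal - s_rd_seq reaches UINT_MAX / 4 (roughly 1 billion), the writer
 * blocks in smr_wait() until rd_seq has reached
 * goal - SMR_SEQ_MAX_ADVANCE, i.e. until the outstanding delta is back
 * under SMR_SEQ_MAX_DELTA with 1024 sequence numbers of margin left for
 * the wrap-detecting arithmetic in SMR_SEQ_DELTA().
 */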

/*
 * Deferred SMRs conditionally update s_wr_seq based on a
 * cpu-local interval count.
 */
static smr_seq_t
smr_deferred_advance(smr_t smr, smr_shared_t s, smr_t self)
{

	if (++self->c_deferred < self->c_limit)
		return (smr_shared_current(s) + SMR_SEQ_INCR);
	self->c_deferred = 0;
	return (smr_default_advance(smr, s));
}

/*
 * Advance the write sequence and return the value for use as the
 * wait goal.  This guarantees that any changes made by the calling
 * thread prior to this call will be visible to all threads after
 * rd_seq meets or exceeds the return value.
 *
 * This function may busy loop if the readers are roughly 1 billion
 * sequence numbers behind the writers.
 *
 * Lazy SMRs will not busy loop and the wrap happens every 25 days
 * at 1khz and 60 hours at 10khz.  Readers can block for no longer
 * than half of this for SMR_SEQ_ macros to continue working.
 */
smr_seq_t
smr_advance(smr_t smr)
{
	smr_t self;
	smr_shared_t s;
	smr_seq_t goal;
	int flags;

	/*
	 * It is illegal to enter while in an smr section.
	 */
	SMR_ASSERT_NOT_ENTERED(smr);

	/*
	 * Modifications not done in a smr section need to be visible
	 * before advancing the seq.
	 */
	atomic_thread_fence_rel();

	critical_enter();
	/* Try to touch the line once. */
	self = zpcpu_get(smr);
	s = self->c_shared;
	flags = self->c_flags;
	goal = SMR_SEQ_INVALID;
	if ((flags & (SMR_LAZY | SMR_DEFERRED)) == 0)
		goal = smr_default_advance(smr, s);
	else if ((flags & SMR_LAZY) != 0)
		goal = smr_lazy_advance(smr, s);
	else if ((flags & SMR_DEFERRED) != 0)
		goal = smr_deferred_advance(smr, s, self);
	critical_exit();

	return (goal);
}

/*
 * Poll to determine the currently observed sequence number on a cpu
 * and spinwait if the 'wait' argument is true.
 */
static smr_seq_t
smr_poll_cpu(smr_t c, smr_seq_t s_rd_seq, smr_seq_t goal, bool wait)
{
	smr_seq_t c_seq;

	c_seq = SMR_SEQ_INVALID;
	for (;;) {
		c_seq = atomic_load_int(&c->c_seq);
		if (c_seq == SMR_SEQ_INVALID)
			break;

		/*
		 * There is a race described in smr.h:smr_enter that
		 * can lead to a stale seq value but not stale data
		 * access.  If we find a value out of range here we
		 * pin it to the current min to prevent it from
		 * advancing until that stale section has expired.
		 *
		 * The race is created when a cpu loads the s_wr_seq
		 * value in a local register and then another thread
		 * advances s_wr_seq and calls smr_poll() which will
		 * observe no value yet in c_seq and advance s_rd_seq
		 * up to s_wr_seq which is beyond the register
		 * cached value.  This is only likely to happen on
		 * a hypervisor or with a system management interrupt.
		 */
		if (SMR_SEQ_LT(c_seq, s_rd_seq))
			c_seq = s_rd_seq;

		/*
		 * If the sequence number meets the goal we are done
		 * with this cpu.
		 */
		if (SMR_SEQ_LEQ(goal, c_seq))
			break;

		if (!wait)
			break;
		cpu_spinwait();
	}

	return (c_seq);
}

/*
 * Loop until all cores have observed the goal sequence or have
 * gone inactive.  Returns the oldest sequence currently active.
 *
 * This function assumes a snapshot of sequence values has
 * been obtained and validated by smr_poll().
 */
static smr_seq_t
smr_poll_scan(smr_t smr, smr_shared_t s, smr_seq_t s_rd_seq,
    smr_seq_t s_wr_seq, smr_seq_t goal, bool wait)
{
	smr_seq_t rd_seq, c_seq;
	int i;

	CRITICAL_ASSERT(curthread);
	counter_u64_add_protected(poll_scan, 1);

	/*
	 * The read sequence can be no larger than the write sequence at
	 * the start of the poll.
	 */
	rd_seq = s_wr_seq;
	CPU_FOREACH(i) {
		/*
		 * Query the active sequence on this cpu.  If we're not
		 * waiting and we don't meet the goal we will still scan
		 * the rest of the cpus to update s_rd_seq before returning
		 * failure.
		 */
		c_seq = smr_poll_cpu(zpcpu_get_cpu(smr, i), s_rd_seq, goal,
		    wait);

		/*
		 * Limit the minimum observed rd_seq whether we met the goal
		 * or not.
		 */
		if (c_seq != SMR_SEQ_INVALID)
			rd_seq = SMR_SEQ_MIN(rd_seq, c_seq);
	}

	/*
	 * Advance the rd_seq as long as we observed a more recent value.
	 */
	s_rd_seq = atomic_load_int(&s->s_rd_seq);
	if (SMR_SEQ_GT(rd_seq, s_rd_seq)) {
		atomic_cmpset_int(&s->s_rd_seq, s_rd_seq, rd_seq);
		s_rd_seq = rd_seq;
	}

	return (s_rd_seq);
}

/*
 * Poll to determine whether all readers have observed the 'goal' write
 * sequence number.
 *
 * If wait is true this will spin until the goal is met.
 *
 * This routine will update the minimum observed read sequence number in
 * s_rd_seq if it does a scan.  It may not do a scan if another call has
 * advanced s_rd_seq beyond the caller's goal already.
 *
 * Returns true if the goal is met and false if not.
 */
bool
smr_poll(smr_t smr, smr_seq_t goal, bool wait)
{
	smr_shared_t s;
	smr_t self;
	smr_seq_t s_wr_seq, s_rd_seq;
	smr_delta_t delta;
	int flags;
	bool success;

	/*
	 * It is illegal to enter while in an smr section.
	 */
	KASSERT(!wait || !SMR_ENTERED(smr),
	    ("smr_poll: Blocking not allowed in a SMR section."));
	KASSERT(!wait || (zpcpu_get(smr)->c_flags & SMR_LAZY) == 0,
	    ("smr_poll: Blocking not allowed on lazy smrs."));

	/*
	 * Use a critical section so that we can avoid ABA races
	 * caused by long preemption sleeps.
	 */
	success = true;
	critical_enter();
	/* Attempt to load from self only once. */
	self = zpcpu_get(smr);
	s = self->c_shared;
	flags = self->c_flags;
	counter_u64_add_protected(poll, 1);

	/*
	 * Conditionally advance the lazy write clock on any writer
	 * activity.
	 */
	if ((flags & SMR_LAZY) != 0)
		smr_lazy_advance(smr, s);

	/*
	 * Acquire barrier loads s_wr_seq after s_rd_seq so that we can not
	 * observe an updated read sequence that is larger than write.
	 */
	s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);

	/*
	 * If we have already observed the sequence number we can immediately
	 * return success.  Most polls should meet this criterion.
	 */
	if (SMR_SEQ_LEQ(goal, s_rd_seq))
		goto out;

	/*
	 * wr_seq must be loaded prior to any c_seq value so that a
	 * stale c_seq can only reference time after this wr_seq.
	 */
	s_wr_seq = atomic_load_acq_int(&s->s_wr.seq);

	/*
	 * This is the distance from s_wr_seq to goal.  Positive values
	 * are in the future.
	 */
	delta = SMR_SEQ_DELTA(goal, s_wr_seq);

	/*
	 * Detect a stale wr_seq.
	 *
	 * This goal may have come from a deferred advance or a lazy
	 * smr.  If we are not blocking we can not succeed but the
	 * sequence number is valid.
	 */
	if (delta > 0 && delta <= SMR_SEQ_ADVANCE &&
	    (flags & (SMR_LAZY | SMR_DEFERRED)) != 0) {
		if (!wait) {
			success = false;
			goto out;
		}
		/* LAZY is always !wait. */
		s_wr_seq = smr_shared_advance(s);
		delta = 0;
	}

	/*
	 * Detect an invalid goal.
	 *
	 * The goal must be in the range of s_wr_seq >= goal >= s_rd_seq for
	 * it to be valid.  If it is not then the caller held on to it and
	 * the integer wrapped.  If we wrapped back within range the caller
	 * will harmlessly scan.
	 */
	if (delta > 0)
		goto out;

	/* Determine the lowest visible sequence number. */
	s_rd_seq = smr_poll_scan(smr, s, s_rd_seq, s_wr_seq, goal, wait);
	success = SMR_SEQ_LEQ(goal, s_rd_seq);
out:
	if (!success)
		counter_u64_add_protected(poll_fail, 1);
	critical_exit();

	/*
	 * Serialize with smr_advance()/smr_exit().  The caller is now free
	 * to modify memory as expected.
	 */
	atomic_thread_fence_acq();

	KASSERT(success || !wait, ("%s: blocking poll failed", __func__));
	return (success);
}
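
/*
 * A sketch of the batched reuse pattern described in the comments above
 * (illustrative only; 'my_smr', 'bucket', and its 'b_seq' field are
 * hypothetical).  The allocator stamps an entire batch of freed items
 * once and checks cheaply, without blocking, at reuse time:
 *
 *	bucket->b_seq = smr_advance(my_smr);	once per full bucket
 *	...
 *	if (!smr_poll(my_smr, bucket->b_seq, false))
 *		return (NULL);			not yet safe to reuse
 */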

smr_t
smr_create(const char *name, int limit, int flags)
{
	smr_t smr, c;
	smr_shared_t s;
	int i;

	s = uma_zalloc(smr_shared_zone, M_WAITOK);
	smr = uma_zalloc_pcpu(smr_zone, M_WAITOK);

	s->s_name = name;
	s->s_rd_seq = s->s_wr.seq = SMR_SEQ_INIT;
	s->s_wr.ticks = ticks;

	/* Initialize all CPUS, not just those running. */
	for (i = 0; i <= mp_maxid; i++) {
		c = zpcpu_get_cpu(smr, i);
		c->c_seq = SMR_SEQ_INVALID;
		c->c_shared = s;
		c->c_deferred = 0;
		c->c_limit = limit;
		c->c_flags = flags;
	}
	atomic_thread_fence_seq_cst();

	return (smr);
}

void
smr_destroy(smr_t smr)
{

	smr_synchronize(smr);
	uma_zfree(smr_shared_zone, smr->c_shared);
	uma_zfree_pcpu(smr_zone, smr);
}
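
/*
 * Typical lifecycle (an illustrative sketch; 'my_smr' and the name and
 * argument values are hypothetical): create the SMR once at subsystem
 * setup, use it for the subsystem's lifetime, then tear it down after
 * the final synchronization done by smr_destroy():
 *
 *	smr_t my_smr = smr_create("my subsystem", 0, 0);
 *	...
 *	smr_destroy(my_smr);
 */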

/*
 * Initialize the UMA slab zone.
 */
void
smr_init(void)
{

	smr_shared_zone = uma_zcreate("SMR SHARED", sizeof(struct smr_shared),
	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, 0);
	smr_zone = uma_zcreate("SMR CPU", sizeof(struct smr),
	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU);
}