FreeBSD kernel kern code
subr_unit.c
Go to the documentation of this file.
1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2004 Poul-Henning Kamp
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $FreeBSD$
29 *
30 *
31 * Unit number allocation functions.
32 *
33 * These functions implement a mixed run-length/bitmap management of unit
34 * number spaces in a very memory efficient manner.
35 *
36 * Allocation policy is always lowest free number first.
37 *
38 * A return value of -1 signals that no more unit numbers are available.
39 *
40 * There is no cost associated with the range of unitnumbers, so unless
41 * the resource really is finite, specify INT_MAX to new_unrhdr() and
42 * forget about checking the return value.
43 *
44 * If a mutex is not provided when the unit number space is created, a
45 * default global mutex is used. The advantage to passing a mutex in, is
46 * that the alloc_unrl() function can be called with the mutex already
47 * held (it will not be released by alloc_unrl()).
48 *
49 * The allocation function alloc_unr{l}() never sleeps (but it may block on
50 * the mutex of course).
51 *
52 * Freeing a unit number may require allocating memory, and can therefore
53 * sleep so the free_unr() function does not come in a pre-locked variant.
54 *
55 * A userland test program is included.
56 *
57 * Memory usage is a very complex function of the exact allocation
58 * pattern, but always very compact:
59 * * For the very typical case where a single unbroken run of unit
60 * numbers are allocated 44 bytes are used on i386.
61 * * For a unit number space of 1000 units and the random pattern
62 * in the usermode test program included, the worst case usage
63 * was 252 bytes on i386 for 500 allocated and 500 free units.
64 * * For a unit number space of 10000 units and the random pattern
65 * in the usermode test program included, the worst case usage
66 * was 798 bytes on i386 for 5000 allocated and 5000 free units.
67 * * The worst case is where every other unit number is allocated and
68 * the rest are free. In that case 44 + N/4 bytes are used where
69 * N is the number of the highest unit allocated.
70 */
71
72#include <sys/param.h>
73#include <sys/types.h>
74#include <sys/_unrhdr.h>
75
76#ifdef _KERNEL
77
78#include <sys/bitstring.h>
79#include <sys/malloc.h>
80#include <sys/kernel.h>
81#include <sys/systm.h>
82#include <sys/limits.h>
83#include <sys/lock.h>
84#include <sys/mutex.h>
85
86/*
87 * In theory it would be smarter to allocate the individual blocks
88 * with the zone allocator, but at this time the expectation is that
89 * there will typically not even be enough allocations to fill a single
90 * page, so we stick with malloc for now.
91 */
92static MALLOC_DEFINE(M_UNIT, "Unitno", "Unit number allocation");
93
94#define Malloc(foo) malloc(foo, M_UNIT, M_WAITOK | M_ZERO)
95#define Free(foo) free(foo, M_UNIT)
96
97static struct mtx unitmtx;
98
99MTX_SYSINIT(unit, &unitmtx, "unit# allocation", MTX_DEF);
100
#ifdef UNR64_LOCKED
/*
 * Allocate the next value from a 64-bit unit-number space.
 *
 * Serialized by the global unitmtx; the counter only ever increments,
 * so values are never reused (no free operation exists).
 */
uint64_t
alloc_unr64(struct unrhdr64 *unr64)
{
	uint64_t item;

	mtx_lock(&unitmtx);
	item = unr64->counter++;
	mtx_unlock(&unitmtx);
	return (item);
}
#endif
113
114#else /* ...USERLAND */
115
116#include <bitstring.h>
117#include <err.h>
118#include <errno.h>
119#include <getopt.h>
120#include <stdbool.h>
121#include <stdio.h>
122#include <stdlib.h>
123#include <string.h>
124
/* Userland stand-in for the kernel KASSERT(): print the message and abort. */
#define KASSERT(cond, arg) \
	do { \
		if (!(cond)) { \
			printf arg; \
			abort(); \
		} \
	} while (0)
132
133static int no_alloc;
134#define Malloc(foo) _Malloc(foo, __LINE__)
/*
 * Userland replacement for the kernel's M_WAITOK | M_ZERO malloc.
 * Asserts that no allocation happens while the no_alloc flag is set
 * (i.e. inside code paths that must not allocate, such as alloc_unr()).
 */
static void *
_Malloc(size_t foo, int line)
{

	KASSERT(no_alloc == 0, ("malloc in wrong place() line %d", line));
	return (calloc(foo, 1));
}
142#define Free(foo) free(foo)
143
144struct unrhdr;
145
/* Minimal userland mutex: the test driver is single-threaded, so a
 * locked/unlocked flag is enough to catch locking protocol errors. */
struct mtx {
	int state;		/* 0 = unlocked, 1 = locked */
} unitmtx;
149
/* Userland mtx_lock stub: assert not already held, then mark held. */
static void
mtx_lock(struct mtx *mp)
{
	KASSERT(mp->state == 0, ("mutex already locked"));
	mp->state = 1;
}
156
/* Userland mtx_unlock stub: assert held, then mark released. */
static void
mtx_unlock(struct mtx *mp)
{
	KASSERT(mp->state == 1, ("mutex not locked"));
	mp->state = 0;
}
163
164#define MA_OWNED 9
165
/* Userland mtx_assert stub: only the MA_OWNED flag is checked. */
static void
mtx_assert(struct mtx *mp, int flag)
{
	if (flag == MA_OWNED) {
		KASSERT(mp->state == 1, ("mtx_assert(MA_OWNED) not true"));
	}
}
173
174#define CTASSERT(foo)
175#define WITNESS_WARN(flags, lock, fmt, ...) (void)0
176
177#endif /* USERLAND */
178
179/*
180 * This is our basic building block.
181 *
182 * It can be used in three different ways depending on the value of the ptr
183 * element:
184 * If ptr is NULL, it represents a run of free items.
185 * If ptr points to the unrhdr it represents a run of allocated items.
186 * Otherwise it points to a bitstring of allocated items.
187 *
188 * For runs the len field is the length of the run.
189 * For bitmaps the len field represents the number of allocated items.
190 *
191 * The bitmap is the same size as struct unr to optimize memory management.
192 */
struct unr {
	TAILQ_ENTRY(unr) list;	/* link on the unrhdr's chunk list */
	u_int len;		/* run length; for bitmaps, # of covered units */
	void *ptr;		/* NULL: free run; == uh: allocated run;
				 * otherwise: pointer to a struct unrb bitmap */
};
198
struct unrb {
	/* Sized to exactly overlay a struct unr, so a retired unr can be
	 * recycled as bitmap storage (see optimize_unr and CTASSERT below). */
	bitstr_t map[sizeof(struct unr) / sizeof(bitstr_t)];
};
202
203CTASSERT((sizeof(struct unr) % sizeof(bitstr_t)) == 0);
204
205/* Number of bits we can store in the bitmap */
206#define NBITS (8 * sizeof(((struct unrb*)NULL)->map))
207
208/* Is the unrb empty in at least the first len bits? */
209static inline bool
210ub_empty(struct unrb *ub, int len) {
211 int first_set;
212
213 bit_ffs(ub->map, len, &first_set);
214 return (first_set == -1);
215}
216
217/* Is the unrb full? That is, is the number of set elements equal to len? */
218static inline bool
219ub_full(struct unrb *ub, int len)
220{
221 int first_clear;
222
223 bit_ffc(ub->map, len, &first_clear);
224 return (first_clear == -1);
225}
226
227#if defined(DIAGNOSTIC) || !defined(_KERNEL)
228/*
229 * Consistency check function.
230 *
231 * Checks the internal consistency as well as we can.
232 *
233 * Called at all boundaries of this API.
234 */
235static void
236check_unrhdr(struct unrhdr *uh, int line)
237{
238 struct unr *up;
239 struct unrb *ub;
240 int w;
241 u_int y, z;
242
243 y = uh->first;
244 z = 0;
245 TAILQ_FOREACH(up, &uh->head, list) {
246 z++;
247 if (up->ptr != uh && up->ptr != NULL) {
248 ub = up->ptr;
249 KASSERT (up->len <= NBITS,
250 ("UNR inconsistency: len %u max %zd (line %d)\n",
251 up->len, NBITS, line));
252 z++;
253 w = 0;
254 bit_count(ub->map, 0, up->len, &w);
255 y += w;
256 } else if (up->ptr != NULL)
257 y += up->len;
258 }
259 KASSERT (y == uh->busy,
260 ("UNR inconsistency: items %u found %u (line %d)\n",
261 uh->busy, y, line));
262 KASSERT (z == uh->alloc,
263 ("UNR inconsistency: chunks %u found %u (line %d)\n",
264 uh->alloc, z, line));
265}
266
267#else
268
/* Non-diagnostic build: consistency checking compiles to nothing. */
static __inline void
check_unrhdr(struct unrhdr *uh __unused, int line __unused)
{

}
275#endif
276
277/*
278 * Userland memory management. Just use calloc and keep track of how
279 * many elements we have allocated for check_unrhdr().
280 */
281
282static __inline void *
283new_unr(struct unrhdr *uh, void **p1, void **p2)
284{
285 void *p;
286
287 uh->alloc++;
288 KASSERT(*p1 != NULL || *p2 != NULL, ("Out of cached memory"));
289 if (*p1 != NULL) {
290 p = *p1;
291 *p1 = NULL;
292 return (p);
293 } else {
294 p = *p2;
295 *p2 = NULL;
296 return (p);
297 }
298}
299
/*
 * Retire a struct unr (or the struct unrb overlaying one).  We may be
 * called with the unit mutex held and freeing can sleep, so instead of
 * calling Free() the item is queued on ppfree for clean_unrhdrl() to
 * release later.
 */
static __inline void
delete_unr(struct unrhdr *uh, void *ptr)
{
	struct unr *up;

	uh->alloc--;
	up = ptr;
	TAILQ_INSERT_TAIL(&uh->ppfree, up, list);
}
309
/*
 * Drain the postponed-free list.  The unit mutex must be held on entry;
 * it is dropped around each Free() (which may sleep) and re-acquired,
 * so the list head is re-read on every iteration because other threads
 * may queue more items while the mutex is released.
 */
void
clean_unrhdrl(struct unrhdr *uh)
{
	struct unr *up;

	mtx_assert(uh->mtx, MA_OWNED);
	while ((up = TAILQ_FIRST(&uh->ppfree)) != NULL) {
		TAILQ_REMOVE(&uh->ppfree, up, list);
		mtx_unlock(uh->mtx);
		Free(up);
		mtx_lock(uh->mtx);
	}

}
324
325void
326clean_unrhdr(struct unrhdr *uh)
327{
328
329 mtx_lock(uh->mtx);
330 clean_unrhdrl(uh);
331 mtx_unlock(uh->mtx);
332}
333
334void
335init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex)
336{
337
338 KASSERT(low >= 0 && low <= high,
339 ("UNR: use error: new_unrhdr(%d, %d)", low, high));
340 if (mutex != NULL)
341 uh->mtx = mutex;
342 else
343 uh->mtx = &unitmtx;
344 TAILQ_INIT(&uh->head);
345 TAILQ_INIT(&uh->ppfree);
346 uh->low = low;
347 uh->high = high;
348 uh->first = 0;
349 uh->last = 1 + (high - low);
350 check_unrhdr(uh, __LINE__);
351}
352
353/*
354 * Allocate a new unrheader set.
355 *
356 * Highest and lowest valid values given as parameters.
357 */
358
359struct unrhdr *
360new_unrhdr(int low, int high, struct mtx *mutex)
361{
362 struct unrhdr *uh;
363
364 uh = Malloc(sizeof *uh);
365 init_unrhdr(uh, low, high, mutex);
366 return (uh);
367}
368
/*
 * Destroy a unit number header.  The space must be completely idle:
 * no allocated units, no live chunks and no postponed frees pending.
 */
void
delete_unrhdr(struct unrhdr *uh)
{

	check_unrhdr(uh, __LINE__);
	KASSERT(uh->busy == 0, ("unrhdr has %u allocations", uh->busy));
	KASSERT(uh->alloc == 0, ("UNR memory leak in delete_unrhdr"));
	KASSERT(TAILQ_FIRST(&uh->ppfree) == NULL,
	    ("unrhdr has postponed item for free"));
	Free(uh);
}
380
/*
 * Free every chunk (and any attached bitmap) and re-initialize the
 * header in place, returning the whole range to the free state.
 */
void
clear_unrhdr(struct unrhdr *uh)
{
	struct unr *up, *uq;

	KASSERT(TAILQ_EMPTY(&uh->ppfree),
	    ("unrhdr has postponed item for free"));
	TAILQ_FOREACH_SAFE(up, &uh->head, list, uq) {
		/* ptr != uh is either NULL (Free is a no-op) or a bitmap. */
		if (up->ptr != uh) {
			Free(up->ptr);
		}
		Free(up);
	}
	uh->busy = 0;
	uh->alloc = 0;
	init_unrhdr(uh, uh->low, uh->high, uh->mtx);

	check_unrhdr(uh, __LINE__);
}
400
401static __inline int
402is_bitmap(struct unrhdr *uh, struct unr *up)
403{
404 return (up->ptr != uh && up->ptr != NULL);
405}
406
/*
 * Look for sequence of items which can be combined into a bitmap, if
 * multiple are present, take the one which saves most memory.
 *
 * Return (1) if a sequence was found to indicate that another call
 * might be able to do more.  Return (0) if we found no suitable sequence.
 *
 * NB: called from alloc_unr(), no new memory allocation allowed.
 */
static int
optimize_unr(struct unrhdr *uh)
{
	struct unr *up, *uf, *us;
	struct unrb *ub, *ubf;
	u_int a, l, ba;

	/*
	 * Look for the run of items (if any) which when collapsed into
	 * a bitmap would save most memory.
	 */
	us = NULL;
	ba = 0;
	TAILQ_FOREACH(uf, &uh->head, list) {
		if (uf->len >= NBITS)
			continue;
		/*
		 * 'a' counts the struct unr allocations reclaimable by
		 * collapsing from 'uf' onward; an existing bitmap chunk
		 * accounts for two (the unr plus its bitmap storage).
		 */
		a = 1;
		if (is_bitmap(uh, uf))
			a++;
		l = uf->len;
		up = uf;
		while (1) {
			up = TAILQ_NEXT(up, list);
			if (up == NULL)
				break;
			if ((up->len + l) > NBITS)
				break;
			a++;
			if (is_bitmap(uh, up))
				a++;
			l += up->len;
		}
		if (a > ba) {
			ba = a;
			us = uf;
		}
	}
	/* Need to reclaim at least 3 chunks for collapsing to pay off. */
	if (ba < 3)
		return (0);

	/*
	 * If the first element is not a bitmap, make it one.
	 * Trying to do so without allocating more memory complicates things
	 * a bit
	 */
	if (!is_bitmap(uh, us)) {
		uf = TAILQ_NEXT(us, list);
		TAILQ_REMOVE(&uh->head, us, list);
		a = us->len;
		l = us->ptr == uh ? 1 : 0;	/* 1 if 'us' was an allocated run */
		/* Recycle the struct unr 'us' itself as bitmap storage. */
		ub = (void *)us;
		bit_nclear(ub->map, 0, NBITS - 1);
		if (l)
			bit_nset(ub->map, 0, a);
		if (!is_bitmap(uh, uf)) {
			/* Successor is a run: set/clear its span wholesale. */
			if (uf->ptr == NULL)
				bit_nclear(ub->map, a, a + uf->len - 1);
			else
				bit_nset(ub->map, a, a + uf->len - 1);
			uf->ptr = ub;
			uf->len += a;
			us = uf;
		} else {
			/* Successor is a bitmap: copy it bit by bit, then
			 * retire its old storage and point it at ours. */
			ubf = uf->ptr;
			for (l = 0; l < uf->len; l++, a++) {
				if (bit_test(ubf->map, l))
					bit_set(ub->map, a);
				else
					bit_clear(ub->map, a);
			}
			uf->len = a;
			delete_unr(uh, uf->ptr);
			uf->ptr = ub;
			us = uf;
		}
	}
	/* 'us' is now a bitmap chunk; fold successors in while they fit. */
	ub = us->ptr;
	while (1) {
		uf = TAILQ_NEXT(us, list);
		if (uf == NULL)
			return (1);
		if (uf->len + us->len > NBITS)
			return (1);
		if (uf->ptr == NULL) {
			/* Free run: the corresponding bits stay clear. */
			bit_nclear(ub->map, us->len, us->len + uf->len - 1);
			us->len += uf->len;
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, uf);
		} else if (uf->ptr == uh) {
			/* Allocated run: set the corresponding bits. */
			bit_nset(ub->map, us->len, us->len + uf->len - 1);
			us->len += uf->len;
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, uf);
		} else {
			/* Bitmap: copy bit by bit, retire both allocations. */
			ubf = uf->ptr;
			for (l = 0; l < uf->len; l++, us->len++) {
				if (bit_test(ubf->map, l))
					bit_set(ub->map, us->len);
				else
					bit_clear(ub->map, us->len);
			}
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, ubf);
			delete_unr(uh, uf);
		}
	}
}
523
/*
 * See if a given unr should be collapsed with a neighbor.
 *
 * Degrades uniform bitmaps to runs, removes empty chunks, merges
 * adjacent chunks of the same kind, and folds the list's edges into
 * the implicit ->first/->last runs.
 *
 * NB: called from alloc_unr(), no new memory allocation allowed.
 */
static void
collapse_unr(struct unrhdr *uh, struct unr *up)
{
	struct unr *upp;
	struct unrb *ub;

	/* If bitmap is all set or clear, change it to runlength */
	if (is_bitmap(uh, up)) {
		ub = up->ptr;
		if (ub_full(ub, up->len)) {
			delete_unr(uh, up->ptr);
			up->ptr = uh;	/* becomes an allocated run */
		} else if (ub_empty(ub, up->len)) {
			delete_unr(uh, up->ptr);
			up->ptr = NULL;	/* becomes a free run */
		}
	}

	/* If nothing left in runlength, delete it */
	if (up->len == 0) {
		/* Continue merging at a neighbor of the deleted chunk. */
		upp = TAILQ_PREV(up, unrhd, list);
		if (upp == NULL)
			upp = TAILQ_NEXT(up, list);
		TAILQ_REMOVE(&uh->head, up, list);
		delete_unr(uh, up);
		up = upp;
	}

	/* If we have "hot-spot" still, merge with neighbor if possible */
	if (up != NULL) {
		/* Same ptr value means same kind of run (free or allocated). */
		upp = TAILQ_PREV(up, unrhd, list);
		if (upp != NULL && up->ptr == upp->ptr) {
			up->len += upp->len;
			TAILQ_REMOVE(&uh->head, upp, list);
			delete_unr(uh, upp);
		}
		upp = TAILQ_NEXT(up, list);
		if (upp != NULL && up->ptr == upp->ptr) {
			up->len += upp->len;
			TAILQ_REMOVE(&uh->head, upp, list);
			delete_unr(uh, upp);
		}
	}

	/* Merge into ->first if possible */
	upp = TAILQ_FIRST(&uh->head);
	if (upp != NULL && upp->ptr == uh) {
		uh->first += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
		if (up == upp)
			up = NULL;	/* 'up' no longer valid */
	}

	/* Merge into ->last if possible */
	upp = TAILQ_LAST(&uh->head, unrhd);
	if (upp != NULL && upp->ptr == NULL) {
		uh->last += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
		if (up == upp)
			up = NULL;	/* 'up' no longer valid */
	}

	/* Try to make bitmaps */
	while (optimize_unr(uh))
		continue;
}
597
/*
 * Allocate a free unr, with the space's mutex already held.
 *
 * Returns the lowest free unit number, or -1 if the space is exhausted.
 * Never allocates memory (see the file header comment).
 */
int
alloc_unrl(struct unrhdr *uh)
{
	struct unr *up;
	struct unrb *ub;
	u_int x;
	int y;

	mtx_assert(uh->mtx, MA_OWNED);
	check_unrhdr(uh, __LINE__);
	/* Lowest-free policy: the candidate is always low + first. */
	x = uh->low + uh->first;

	up = TAILQ_FIRST(&uh->head);

	/*
	 * If we have an ideal split, just adjust the first+last
	 */
	if (up == NULL && uh->last > 0) {
		uh->first++;
		uh->last--;
		uh->busy++;
		return (x);
	}

	/*
	 * We can always allocate from the first list element, so if we have
	 * nothing on the list, we must have run out of unit numbers.
	 */
	if (up == NULL)
		return (-1);

	/* collapse_unr() guarantees the head is never an allocated run. */
	KASSERT(up->ptr != uh, ("UNR first element is allocated"));

	if (up->ptr == NULL) {	/* free run */
		uh->first++;
		up->len--;
	} else {		/* bitmap */
		ub = up->ptr;
		bit_ffc(ub->map, up->len, &y);
		KASSERT(y != -1, ("UNR corruption: No clear bit in bitmap."));
		bit_set(ub->map, y);
		x += y;
	}
	uh->busy++;
	collapse_unr(uh, up);
	return (x);
}
648
649int
650alloc_unr(struct unrhdr *uh)
651{
652 int i;
653
654 mtx_lock(uh->mtx);
655 i = alloc_unrl(uh);
656 clean_unrhdrl(uh);
657 mtx_unlock(uh->mtx);
658 return (i);
659}
660
/*
 * Allocate a specific unit number with the mutex held, consuming cached
 * struct unr buffers from *p1/*p2 as needed.  Returns item on success
 * or -1 if the item is out of range or already allocated.
 */
static int
alloc_unr_specificl(struct unrhdr *uh, u_int item, void **p1, void **p2)
{
	struct unr *up, *upn;
	struct unrb *ub;
	u_int i, last, tl;

	mtx_assert(uh->mtx, MA_OWNED);

	/* Items below low + first are already allocated by definition. */
	if (item < uh->low + uh->first || item > uh->high)
		return (-1);

	up = TAILQ_FIRST(&uh->head);
	/* Ideal split. */
	if (up == NULL && item - uh->low == uh->first) {
		uh->first++;
		uh->last--;
		uh->busy++;
		check_unrhdr(uh, __LINE__);
		return (item);
	}

	/* Offset of the item relative to the start of the chunk list. */
	i = item - uh->low - uh->first;

	if (up == NULL) {
		/* Empty list: free run up to the item, then the item itself. */
		up = new_unr(uh, p1, p2);
		up->ptr = NULL;
		up->len = i;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = 1;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		uh->last = uh->high - uh->low - i;
		uh->busy++;
		check_unrhdr(uh, __LINE__);
		return (item);
	} else {
		/* Find the item which contains the unit we want to allocate. */
		TAILQ_FOREACH(up, &uh->head, list) {
			if (up->len > i)
				break;
			i -= up->len;
		}
	}

	if (up == NULL) {
		/* Past the end of the list: item lies in the ->last region. */
		if (i > 0) {
			up = new_unr(uh, p1, p2);
			up->ptr = NULL;
			up->len = i;
			TAILQ_INSERT_TAIL(&uh->head, up, list);
		}
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = 1;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		goto done;
	}

	if (is_bitmap(uh, up)) {
		ub = up->ptr;
		if (bit_test(ub->map, i) == 0) {
			bit_set(ub->map, i);
			goto done;
		} else
			return (-1);	/* already allocated */
	} else if (up->ptr == uh)
		return (-1);		/* inside an allocated run */

	KASSERT(up->ptr == NULL,
	    ("alloc_unr_specificl: up->ptr != NULL (up=%p)", up));

	/* Split off the tail end, if any. */
	tl = up->len - (1 + i);
	if (tl > 0) {
		upn = new_unr(uh, p1, p2);
		upn->ptr = NULL;
		upn->len = tl;
		TAILQ_INSERT_AFTER(&uh->head, up, upn, list);
	}

	/* Split off head end, if any */
	if (i > 0) {
		upn = new_unr(uh, p1, p2);
		upn->len = i;
		upn->ptr = NULL;
		TAILQ_INSERT_BEFORE(up, upn, list);
	}
	up->len = 1;
	up->ptr = uh;

done:
	/* Shrink ->last if the item encroached on the trailing free space. */
	last = uh->high - uh->low - (item - uh->low);
	if (uh->last > last)
		uh->last = last;
	uh->busy++;
	collapse_unr(uh, up);
	check_unrhdr(uh, __LINE__);
	return (item);
}
762
763int
764alloc_unr_specific(struct unrhdr *uh, u_int item)
765{
766 void *p1, *p2;
767 int i;
768
769 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "alloc_unr_specific");
770
771 p1 = Malloc(sizeof(struct unr));
772 p2 = Malloc(sizeof(struct unr));
773
774 mtx_lock(uh->mtx);
775 i = alloc_unr_specificl(uh, item, &p1, &p2);
776 mtx_unlock(uh->mtx);
777
778 if (p1 != NULL)
779 Free(p1);
780 if (p2 != NULL)
781 Free(p2);
782
783 return (i);
784}
785
/*
 * Free a unr.
 *
 * If we can save unrs by using a bitmap, do so.
 *
 * Runs with the mutex held; consumes cached struct unr buffers from
 * *p1/*p2 when splitting runs.  Freeing an item that is not allocated
 * is a fatal error (KASSERT).
 */
static void
free_unrl(struct unrhdr *uh, u_int item, void **p1, void **p2)
{
	struct unr *up, *upp, *upn;
	struct unrb *ub;
	u_int pl;

	KASSERT(item >= uh->low && item <= uh->high,
	    ("UNR: free_unr(%u) out of range [%u...%u]",
	    item, uh->low, uh->high));
	check_unrhdr(uh, __LINE__);
	item -= uh->low;
	upp = TAILQ_FIRST(&uh->head);
	/*
	 * Freeing in the ideal split case
	 */
	if (item + 1 == uh->first && upp == NULL) {
		uh->last++;
		uh->first--;
		uh->busy--;
		check_unrhdr(uh, __LINE__);
		return;
	}
	/*
	 * Freeing in the ->first section.  Create a run starting at the
	 * freed item.  The code below will subdivide it.
	 */
	if (item < uh->first) {
		up = new_unr(uh, p1, p2);
		up->ptr = uh;	/* allocated run covering [item, first) */
		up->len = uh->first - item;
		TAILQ_INSERT_HEAD(&uh->head, up, list);
		uh->first -= up->len;
	}

	/* 'item' becomes an offset into the chunk list from here on. */
	item -= uh->first;

	/* Find the item which contains the unit we want to free */
	TAILQ_FOREACH(up, &uh->head, list) {
		if (up->len > item)
			break;
		item -= up->len;
	}

	/* Handle bitmap items */
	if (is_bitmap(uh, up)) {
		ub = up->ptr;

		KASSERT(bit_test(ub->map, item) != 0,
		    ("UNR: Freeing free item %d (bitmap)\n", item));
		bit_clear(ub->map, item);
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	KASSERT(up->ptr == uh, ("UNR Freeing free item %d (run))\n", item));

	/* Just this one left, reap it */
	if (up->len == 1) {
		up->ptr = NULL;	/* flip run from allocated to free */
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Check if we can shift the item into the previous 'free' run */
	upp = TAILQ_PREV(up, unrhd, list);
	if (item == 0 && upp != NULL && upp->ptr == NULL) {
		upp->len++;
		up->len--;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Check if we can shift the item to the next 'free' run */
	upn = TAILQ_NEXT(up, list);
	if (item == up->len - 1 && upn != NULL && upn->ptr == NULL) {
		upn->len++;
		up->len--;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Split off the tail end, if any. */
	pl = up->len - (1 + item);
	if (pl > 0) {
		upp = new_unr(uh, p1, p2);
		upp->ptr = uh;
		upp->len = pl;
		TAILQ_INSERT_AFTER(&uh->head, up, upp, list);
	}

	/* Split off head end, if any */
	if (item > 0) {
		upp = new_unr(uh, p1, p2);
		upp->len = item;
		upp->ptr = uh;
		TAILQ_INSERT_BEFORE(up, upp, list);
	}
	/* 'up' is now exactly the freed item. */
	up->len = 1;
	up->ptr = NULL;
	uh->busy--;
	collapse_unr(uh, up);
}
898
899void
900free_unr(struct unrhdr *uh, u_int item)
901{
902 void *p1, *p2;
903
904 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "free_unr");
905 p1 = Malloc(sizeof(struct unr));
906 p2 = Malloc(sizeof(struct unr));
907 mtx_lock(uh->mtx);
908 free_unrl(uh, item, &p1, &p2);
909 clean_unrhdrl(uh);
910 mtx_unlock(uh->mtx);
911 if (p1 != NULL)
912 Free(p1);
913 if (p2 != NULL)
914 Free(p2);
915}
916
917#ifndef _KERNEL /* USERLAND test driver */
918
919/*
920 * Simple stochastic test driver for the above functions. The code resides
921 * here so that it can access static functions and structures.
922 */
923
924static bool verbose;
925#define VPRINTF(...) {if (verbose) printf(__VA_ARGS__);}
926
/*
 * Print one chunk: its address, length, and whether it is a free run,
 * an allocated run, or a bitmap (shown as '#'/' ' per bit).
 */
static void
print_unr(struct unrhdr *uh, struct unr *up)
{
	u_int x;
	struct unrb *ub;

	printf(" %p len = %5u ", up, up->len);
	if (up->ptr == NULL)
		printf("free\n");
	else if (up->ptr == uh)
		printf("alloc\n");
	else {
		ub = up->ptr;
		printf("bitmap [");
		for (x = 0; x < up->len; x++) {
			if (bit_test(ub->map, x))
				printf("#");
			else
				printf(" ");
		}
		printf("]\n");
	}
}
950
/*
 * Print the whole header state followed by every chunk, tracking the
 * starting unit number of each chunk as we walk the list.
 */
static void
print_unrhdr(struct unrhdr *uh)
{
	struct unr *up;
	u_int x;

	printf(
	    "%p low = %u high = %u first = %u last = %u busy %u chunks = %u\n",
	    uh, uh->low, uh->high, uh->first, uh->last, uh->busy, uh->alloc);
	x = uh->low + uh->first;
	TAILQ_FOREACH(up, &uh->head, list) {
		printf("  from = %5u", x);
		print_unr(uh, up);
		if (up->ptr == NULL || up->ptr == uh)
			x += up->len;
		else
			x += NBITS;
	}
}
970
/*
 * Toggle unit i: free it if the shadow array 'a' says it is allocated,
 * otherwise allocate the lowest free unit and record it.  The no_alloc
 * flag verifies alloc_unr() performs no memory allocation.
 */
static void
test_alloc_unr(struct unrhdr *uh, u_int i, char a[])
{
	int j;

	if (a[i]) {
		VPRINTF("F %u\n", i);
		free_unr(uh, i);
		a[i] = 0;
	} else {
		no_alloc = 1;
		j = alloc_unr(uh);
		if (j != -1) {
			a[j] = 1;
			VPRINTF("A %d\n", j);
		}
		no_alloc = 0;
	}
}
990
/*
 * Try to allocate unit i specifically; if that fails (the unit was
 * already allocated) free it instead.  Keeps the shadow array in sync.
 */
static void
test_alloc_unr_specific(struct unrhdr *uh, u_int i, char a[])
{
	int j;

	j = alloc_unr_specific(uh, i);
	if (j == -1) {
		VPRINTF("F %u\n", i);
		a[i] = 0;
		free_unr(uh, i);
	} else {
		a[i] = 1;
		VPRINTF("A %d\n", j);
	}
}
1006
/* Print command-line usage for the test driver. */
static void
usage(char **argv)
{
	printf("%s [-h] [-r REPETITIONS] [-v]\n", argv[0]);
}
1012
1013int
1014main(int argc, char **argv)
1015{
1016 struct unrhdr *uh;
1017 char *a;
1018 long count = 10000; /* Number of unrs to test */
1019 long reps = 1, m;
1020 int ch;
1021 u_int i;
1022
1023 verbose = false;
1024
1025 while ((ch = getopt(argc, argv, "hr:v")) != -1) {
1026 switch (ch) {
1027 case 'r':
1028 errno = 0;
1029 reps = strtol(optarg, NULL, 0);
1030 if (errno == ERANGE || errno == EINVAL) {
1031 usage(argv);
1032 exit(2);
1033 }
1034
1035 break;
1036 case 'v':
1037 verbose = true;
1038 break;
1039 case 'h':
1040 default:
1041 usage(argv);
1042 exit(2);
1043 }
1044 }
1045
1046 setbuf(stdout, NULL);
1047 uh = new_unrhdr(0, count - 1, NULL);
1048 print_unrhdr(uh);
1049
1050 a = calloc(count, sizeof(char));
1051 if (a == NULL)
1052 err(1, "calloc failed");
1053
1054 printf("sizeof(struct unr) %zu\n", sizeof(struct unr));
1055 printf("sizeof(struct unrb) %zu\n", sizeof(struct unrb));
1056 printf("sizeof(struct unrhdr) %zu\n", sizeof(struct unrhdr));
1057 printf("NBITS %lu\n", (unsigned long)NBITS);
1058 for (m = 0; m < count * reps; m++) {
1059 i = arc4random_uniform(count);
1060#if 0
1061 if (a[i] && (j & 1))
1062 continue;
1063#endif
1064 if ((arc4random() & 1) != 0)
1065 test_alloc_unr(uh, i, a);
1066 else
1067 test_alloc_unr_specific(uh, i, a);
1068
1069 if (verbose)
1070 print_unrhdr(uh);
1071 check_unrhdr(uh, __LINE__);
1072 }
1073 for (i = 0; i < (u_int)count; i++) {
1074 if (a[i]) {
1075 if (verbose) {
1076 printf("C %u\n", i);
1077 print_unrhdr(uh);
1078 }
1079 free_unr(uh, i);
1080 }
1081 }
1082 print_unrhdr(uh);
1083 delete_unrhdr(uh);
1084 free(a);
1085 return (0);
1086}
1087#endif
int * count
Definition: cpufreq_if.m:63
void free(void *addr, struct malloc_type *mtp)
Definition: kern_malloc.c:907
Definition: subr_unit.c:193
bitstr_t map[sizeof(struct unr)/sizeof(bitstr_t)]
Definition: subr_unit.c:200
int printf(const char *fmt,...)
Definition: subr_prf.c:397
static void collapse_unr(struct unrhdr *uh, struct unr *up)
Definition: subr_unit.c:530
#define Free(foo)
Definition: subr_unit.c:95
MTX_SYSINIT(unit, &unitmtx, "unit# allocation", MTX_DEF)
void clean_unrhdrl(struct unrhdr *uh)
Definition: subr_unit.c:311
void clear_unrhdr(struct unrhdr *uh)
Definition: subr_unit.c:382
static int optimize_unr(struct unrhdr *uh)
Definition: subr_unit.c:417
static struct mtx unitmtx
Definition: subr_unit.c:97
static void free_unrl(struct unrhdr *uh, u_int item, void **p1, void **p2)
Definition: subr_unit.c:792
static int alloc_unr_specificl(struct unrhdr *uh, u_int item, void **p1, void **p2)
Definition: subr_unit.c:662
static bool ub_empty(struct unrb *ub, int len)
Definition: subr_unit.c:210
void clean_unrhdr(struct unrhdr *uh)
Definition: subr_unit.c:326
static __inline void delete_unr(struct unrhdr *uh, void *ptr)
Definition: subr_unit.c:301
#define Malloc(foo)
Definition: subr_unit.c:94
int alloc_unrl(struct unrhdr *uh)
Definition: subr_unit.c:602
#define NBITS
Definition: subr_unit.c:206
int alloc_unr(struct unrhdr *uh)
Definition: subr_unit.c:650
void init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex)
Definition: subr_unit.c:335
void delete_unrhdr(struct unrhdr *uh)
Definition: subr_unit.c:370
int alloc_unr_specific(struct unrhdr *uh, u_int item)
Definition: subr_unit.c:764
static __inline int is_bitmap(struct unrhdr *uh, struct unr *up)
Definition: subr_unit.c:402
static __inline void * new_unr(struct unrhdr *uh, void **p1, void **p2)
Definition: subr_unit.c:283
struct unrhdr * new_unrhdr(int low, int high, struct mtx *mutex)
Definition: subr_unit.c:360
void free_unr(struct unrhdr *uh, u_int item)
Definition: subr_unit.c:900
static MALLOC_DEFINE(M_UNIT, "Unitno", "Unit number allocation")
static bool ub_full(struct unrb *ub, int len)
Definition: subr_unit.c:219
CTASSERT((sizeof(struct unr) % sizeof(bitstr_t))==0)
static __inline void check_unrhdr(struct unrhdr *uh __unused, int line __unused)
Definition: subr_unit.c:270
struct mtx mtx
Definition: uipc_ktls.c:0
int flag