FreeBSD kernel kern code
subr_rman.c
/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
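
/*
 * A minimal usage sketch (illustrative only, not part of this file): a
 * bus driver typically initializes an rman, hands it the ranges it
 * owns, and then carves allocations out of those ranges.  The manager
 * name and range below are hypothetical.
 *
 *	static struct rman mem_rm;
 *
 *	mem_rm.rm_type = RMAN_ARRAY;
 *	mem_rm.rm_descr = "example memory window";
 *	if (rman_init(&mem_rm) != 0 ||
 *	    rman_manage_region(&mem_rm, 0x10000000, 0x1fffffff) != 0)
 *		panic("mem_rm setup failed");
 *
 *	struct resource *res;
 *	res = rman_reserve_resource(&mem_rm, 0x10000000, 0x1fffffff,
 *	    0x1000, RF_ACTIVE, dev);
 *	if (res == NULL)
 *		(handle allocation failure)
 *	...
 *	rman_release_resource(res);
 */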

#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	rman_res_t	r_start;	/* index of the first entry in this resource */
	rman_res_t	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	void	*r_irq_cookie;	/* interrupt cookie for this (interrupt) resource */
	device_t r_dev;		/* device which has allocated this resource */
	struct rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};

static int rman_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RWTUN,
    &rman_debug, 0, "rman debug");

#define	DPRINTF(params)	if (rman_debug) printf params
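/*
 * Note: since the sysctl above is CTLFLAG_RWTUN, the debug printfs can
 * be enabled either at runtime ("sysctl debug.rman_debug=1") or from
 * loader.conf ("debug.rman_debug=1").
 */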

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct rman_head rman_head;
static struct mtx rman_mtx;	/* mutex to protect rman_head */
static int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_start == 0 && rm->rm_end == 0)
		rm->rm_end = ~0;
	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

int
rman_manage_region(struct rman *rm, rman_res_t start, rman_res_t end)
{
	struct resource_i *r, *s, *t;
	int rv = 0;

	DPRINTF(("rman_manage_region: <%s> request: start %#jx, end %#jx\n",
	    rm->rm_descr, start, end));
	if (start < rm->rm_start || end > rm->rm_end)
		return EINVAL;
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ~0)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			rv = EBUSY;
			free(r, M_RMAN);	/* don't leak the new entry */
			goto out;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			rv = EBUSY;
			free(r, M_RMAN);	/* don't leak the new entry */
			goto out;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}
out:
	mtx_unlock(rm->rm_mtx);
	return rv;
}
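
/*
 * Illustrative note (not in the original source): adjacent unallocated
 * regions are coalesced, so e.g.
 *
 *	rman_manage_region(rm, 0x0000, 0x7fff);
 *	rman_manage_region(rm, 0x8000, 0xffff);
 *
 * leaves a single free region [0, 0xffff], while a second request that
 * overlaps an existing region fails with EBUSY.
 */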

int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

int
rman_first_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

int
rman_last_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

/* Shrink or extend one or both ends of an allocated resource. */
int
rman_adjust_resource(struct resource *rr, rman_res_t start, rman_res_t end)
{
	struct resource_i *r, *s, *t, *new;
	struct rman *rm;

	/* Not supported for shared resources. */
	r = rr->__r_i;
	if (r->r_flags & RF_SHAREABLE)
		return (EINVAL);

	/*
	 * This does not support wholesale moving of a resource.  At
	 * least part of the desired new range must overlap with the
	 * existing resource.
	 */
	if (end < r->r_start || r->r_end < start)
		return (EINVAL);

	/*
	 * Find the two resource regions immediately adjacent to the
	 * allocated resource.
	 */
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
#ifdef INVARIANTS
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s == r)
			break;
	}
	if (s == NULL)
		panic("resource not in list");
#endif
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);
	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
	    ("prev resource mismatch"));
	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
	    ("next resource mismatch"));

	/*
	 * See if the changes are permitted.  Shrinking is always allowed,
	 * but growing requires sufficient room in the adjacent region.
	 */
	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
	    s->r_start > start)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}
	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
	    t->r_end < end)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}

	/*
	 * While holding the lock, grow either end of the resource as
	 * needed and shrink either end if the shrinking does not require
	 * allocating a new resource.  We can safely drop the lock and then
	 * insert a new range to handle the shrinking case afterwards.
	 */
	if (start < r->r_start ||
	    (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
		KASSERT(s->r_flags == 0, ("prev is busy"));
		r->r_start = start;
		if (s->r_start == start) {
			TAILQ_REMOVE(&rm->rm_list, s, r_link);
			free(s, M_RMAN);
		} else
			s->r_end = start - 1;
	}
	if (end > r->r_end ||
	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
		KASSERT(t->r_flags == 0, ("next is busy"));
		r->r_end = end;
		if (t->r_end == end) {
			TAILQ_REMOVE(&rm->rm_list, t, r_link);
			free(t, M_RMAN);
		} else
			t->r_start = end + 1;
	}
	mtx_unlock(rm->rm_mtx);

	/*
	 * Handle the shrinking cases that require allocating a new
	 * resource to hold the newly-free region.  We have to recheck
	 * if we still need this new region after acquiring the lock.
	 */
	if (start > r->r_start) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = r->r_start;
		new->r_end = start - 1;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_start = start;
		s = TAILQ_PREV(r, resource_head, r_link);
		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
			s->r_end = start - 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_BEFORE(r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	if (end < r->r_end) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = end + 1;
		new->r_end = r->r_end;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_end = end;
		t = TAILQ_NEXT(r, r_link);
		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
			t->r_start = end + 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	return (0);
}
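
/*
 * Illustrative note (not in the original source): given an allocated
 * resource [0x100, 0x1ff] with free space on both sides,
 *
 *	rman_adjust_resource(res, 0x180, 0x2ff);
 *
 * shrinks the front (returning [0x100, 0x17f] to the free pool) and
 * grows the back into the adjacent free region; it fails with EBUSY
 * if [0x200, 0x2ff] is already allocated to someone else.
 */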

#define	SHARE_TYPE(f)	(f & (RF_SHAREABLE | RF_PREFETCHABLE))

struct resource *
rman_reserve_resource_bound(struct rman *rm, rman_res_t start, rman_res_t end,
    rman_res_t count, rman_res_t bound, u_int flags,
    device_t dev)
{
	u_int new_rflags;
	struct resource_i *r, *s, *rv;
	rman_res_t rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#jx, %#jx], "
	    "length %#jx, flags %x, device %s\n", rm->rm_descr, start, end,
	    count, flags,
	    dev == NULL ? "<null>" : device_get_nameunit(dev)));
	KASSERT(count != 0, ("%s: attempted to allocate an empty range",
	    __func__));
	KASSERT((flags & RF_FIRSTSHARE) == 0,
	    ("invalid flags %#x", flags));
	new_rflags = (flags & ~RF_FIRSTSHARE) | RF_ALLOCATED;

	mtx_lock(rm->rm_mtx);

	r = TAILQ_FIRST(&rm->rm_list);
	if (r == NULL) {
		DPRINTF(("NULL list head\n"));
	} else {
		DPRINTF(("rman_reserve_resource_bound: trying %#jx <%#jx,%#jx>\n",
		    r->r_end, start, count-1));
	}
	for (r = TAILQ_FIRST(&rm->rm_list);
	    r && r->r_end < start + count - 1;
	    r = TAILQ_NEXT(r, r_link)) {
		DPRINTF(("rman_reserve_resource_bound: tried %#jx <%#jx,%#jx>\n",
		    r->r_end, start, count-1));
	}

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ull << RF_ALIGNMENT(flags)) - 1;
	KASSERT(start <= RM_MAX_END - amask,
	    ("start (%#jx) + amask (%#jx) would wrap around", start, amask));

	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#jx, %#jx]\n", s->r_start, s->r_end));
		/*
		 * The resource list is sorted, so there is no point in
		 * searching further once r_start is too large.
		 */
		if (s->r_start > end - (count - 1)) {
			DPRINTF(("s->r_start (%#jx) + count - 1 > end (%#jx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_start > RM_MAX_END - amask) {
			DPRINTF(("s->r_start (%#jx) + amask (%#jx) too large\n",
			    s->r_start, amask));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ummax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied.  This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
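		/*
		 * Worked example (illustrative, not in the original
		 * source): with a 16-byte alignment request amask is
		 * 0xf, and a 0x100 boundary gives bmask = ~0xff.  For
		 * rstart = 0x1f9 and count = 0x10, the unaligned range
		 * 0x1f9-0x208 straddles the 0x200 boundary; aligning
		 * rstart up to 0x200 removes the crossing as well, so
		 * the loop below exits after a single pass.
		 */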
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ummin(s->r_end, ummax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#jx, %#jx]; size %#jx (requested %#jx)\n",
		    rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart) >= (count - 1)) {
			DPRINTF(("candidate region: [%#jx, %#jx], size %#jx\n",
			    rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags = new_rflags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 * s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				    "[%#jx, %#jx]; [%#jx, %#jx]; [%#jx, %#jx]\n",
				    s->r_start, rv->r_start - 1,
				    rv->r_start, rv->r_end,
				    rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
				    r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & RF_SHAREABLE) == 0)
		goto out;

	for (s = r; s && s->r_end <= end; s = TAILQ_NEXT(s, r_link)) {
		if (SHARE_TYPE(s->r_flags) == SHARE_TYPE(flags) &&
		    s->r_start >= start &&
		    (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
				    M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
				    r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}
	/*
	 * We couldn't find anything.
	 */

out:
	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}

struct resource *
rman_reserve_resource(struct rman *rm, rman_res_t start, rman_res_t end,
    rman_res_t count, u_int flags, device_t dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}
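
/*
 * Illustrative note (not in the original source): because sharing is
 * all-or-nothing, two drivers can share an interrupt line only by
 * requesting the identical range with a compatible sharing type, e.g.
 * (irq_rm, dev1, and dev2 are hypothetical)
 *
 *	r1 = rman_reserve_resource(irq_rm, 5, 5, 1, RF_SHAREABLE, dev1);
 *	r2 = rman_reserve_resource(irq_rm, 5, 5, 1, RF_SHAREABLE, dev2);
 *
 * Both calls succeed and reference the same underlying region; a
 * subsequent non-shareable request for IRQ 5 would fail.
 */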

int
rman_activate_resource(struct resource *re)
{
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	r->r_flags |= RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	r->__r_i->r_flags &= ~RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		r->r_flags &= ~RF_ACTIVE;

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main resource list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		r->r_dev = NULL;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int rv;
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * is set.  We're effectively computing the ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return (RF_ALIGNMENT_LOG2(i));
}
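
/*
 * Illustrative note (not in the original source): for a power of two
 * the result is exact, e.g. size 0x1000 yields RF_ALIGNMENT_LOG2(12);
 * a non-power-of-two such as 0x1800 rounds up to RF_ALIGNMENT_LOG2(13).
 * A caller would typically OR the result into the flags passed to
 * rman_reserve_resource_bound().
 */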

void
rman_set_start(struct resource *r, rman_res_t start)
{

	r->__r_i->r_start = start;
}

rman_res_t
rman_get_start(struct resource *r)
{

	return (r->__r_i->r_start);
}

void
rman_set_end(struct resource *r, rman_res_t end)
{

	r->__r_i->r_end = end;
}

rman_res_t
rman_get_end(struct resource *r)
{

	return (r->__r_i->r_end);
}

rman_res_t
rman_get_size(struct resource *r)
{

	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{

	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{

	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{

	return (r->__r_i->r_virtual);
}

void
rman_set_irq_cookie(struct resource *r, void *c)
{

	r->__r_i->r_irq_cookie = c;
}

void *
rman_get_irq_cookie(struct resource *r)
{

	return (r->__r_i->r_irq_cookie);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{

	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{

	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{

	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{

	return (r->r_bushandle);
}

void
rman_set_mapping(struct resource *r, struct resource_map *map)
{

	KASSERT(rman_get_size(r) == map->r_size,
	    ("rman_set_mapping: size mismatch"));
	rman_set_bustag(r, map->r_bustag);
	rman_set_bushandle(r, map->r_bushandle);
	rman_set_virtual(r, map->r_vaddr);
}

void
rman_get_mapping(struct resource *r, struct resource_map *map)
{

	map->r_bustag = rman_get_bustag(r);
	map->r_bushandle = rman_get_bushandle(r);
	map->r_size = rman_get_size(r);
	map->r_vaddr = rman_get_virtual(r);
}

void
rman_set_rid(struct resource *r, int rid)
{

	r->__r_i->r_rid = rid;
}

int
rman_get_rid(struct resource *r)
{

	return (r->__r_i->r_rid);
}

void
rman_set_device(struct resource *r, device_t dev)
{

	r->__r_i->r_dev = dev;
}

device_t
rman_get_device(struct resource *r)
{

	return (r->__r_i->r_dev);
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take three input parameters: the bus data generation count, the
 * index into the list of resource managers, and the resource offset
 * into that manager's list.
 */
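/*
 * Illustrative sketch (not in the original source) of how a userland
 * consumer walks this interface: resolve the hw.bus.rman node OID,
 * append {generation, rman-index, -1} to read a struct u_rman for each
 * manager, then re-query with the third index counting up from 0 to
 * read struct u_resource entries until the handler returns ENOENT.
 * The libdevinfo(3) library consumes this interface in essentially
 * this way.
 */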
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct resource_i	*sres;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		if (rm->rm_descr != NULL)
			strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res->r_sharehead != NULL) {
			LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
				if (res_idx-- == 0) {
					res = sres;
					goto found;
				}
		} else if (res_idx-- == 0)
			goto found;
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);

found:
	bzero(&ures, sizeof(ures));
	ures.r_handle = (uintptr_t)res;
	ures.r_parent = (uintptr_t)res->r_rm;
	ures.r_device = (uintptr_t)res->r_dev;
	if (res->r_dev != NULL) {
		if (device_get_name(res->r_dev) != NULL) {
			snprintf(ures.r_devname, RM_TEXTLEN,
			    "%s%d",
			    device_get_name(res->r_dev),
			    device_get_unit(res->r_dev));
		} else {
			strlcpy(ures.r_devname, "nomatch",
			    RM_TEXTLEN);
		}
	} else {
		ures.r_devname[0] = '\0';
	}
	ures.r_start = res->r_start;
	ures.r_size = res->r_end - res->r_start + 1;
	ures.r_flags = res->r_flags;

	mtx_unlock(rm->rm_mtx);
	error = SYSCTL_OUT(req, &ures, sizeof(ures));
	return (error);
}

static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD | CTLFLAG_MPSAFE,
    sysctl_rman,
    "kernel resource manager");

#ifdef DDB
static void
dump_rman_header(struct rman *rm)
{

	if (db_pager_quit)
		return;
	db_printf("rman %p: %s (0x%jx-0x%jx full range)\n",
	    rm, rm->rm_descr, (rman_res_t)rm->rm_start, (rman_res_t)rm->rm_end);
}

static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	if (db_pager_quit)
		return;
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%jx-0x%jx (RID=%d) ",
		    r->r_start, r->r_end, r->r_rid);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
		if (db_pager_quit)
			return;
	}
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr) {
		dump_rman_header((struct rman *)addr);
		dump_rman((struct rman *)addr);
	}
}

DB_SHOW_COMMAND(rmans, db_show_rmans)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
	}
}

DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
		dump_rman(rm);
	}
}
DB_SHOW_ALIAS(allrman, db_show_all_rman);
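
/*
 * Illustrative note (not in the original source): from the ddb(4)
 * prompt these commands are invoked as, e.g.,
 *
 *	db> show rmans
 *	db> show rman 0xffffffff81234567
 *	db> show all rman
 *
 * where the address given to "show rman" is a struct rman pointer as
 * printed by "show rmans" (the example address is hypothetical).
 */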
#endif