FreeBSD kernel sound device code
clone.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007 Ariff Abdullah <ariff@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_snd.h"
#endif

#if defined(SND_DIAGNOSTIC) || defined(SND_DEBUG)
#include <dev/sound/pcm/sound.h>
#endif

#include <dev/sound/clone.h>

/*
 * So here we go again, another clonedevs manager. Unlike the default
 * clonedevs, this clone manager is designed to withstand abusive behavior
 * (such as 'while : ; do ls /dev/whatever ; done'), to reuse objects once
 * they pass a certain expiration threshold, and to provide an aggressive
 * garbage collector, a transparent device allocator and concurrency
 * handling across multiple threads/procs. Due to the limited information
 * given by the dev_clone EVENTHANDLER, we don't have many clues whether the
 * caller wants a real open() or is simply making fun of us with things like
 * stat(), mtime() etc. Assuming that: 1) the time window between the
 * dev_clone EH and the real open() is small enough, and 2) mtime()/stat()
 * etc. always look like a halfway / stalled operation, we can decide
 * whether a new cdev must be created, an old (expired) cdev can be reused
 * or an existing cdev can be shared.
 *
 * Most of the operations and logic are generic enough to be applied
 * elsewhere (such as if_tap, snp, etc). Perhaps this could be rearranged to
 * complement clone_*(). However, since this is still specific to the sound
 * driver (and serves as a proof of concept for how it can be done), si_drv2
 * is used to keep a pointer to the clone list entry and avoid an expensive
 * lookup.
 */

/* clone entry */
struct snd_clone_entry {
	TAILQ_ENTRY(snd_clone_entry) link;
	struct snd_clone *parent;
	struct cdev *devt;
	struct timespec tsp;
	uint32_t flags;
	pid_t pid;
	int unit;
};

/* clone manager */
struct snd_clone {
	TAILQ_HEAD(link_head, snd_clone_entry) head;
	struct timespec tsp;
	int refcount;
	int size;
	int typemask;
	int maxunit;
	int deadline;
	uint32_t flags;
};

#ifdef SND_DIAGNOSTIC
#define SND_CLONE_ASSERT(x, y)	do {	\
	if (!(x))			\
		panic y;		\
} while (0)
#else
#define SND_CLONE_ASSERT(...)	KASSERT(__VA_ARGS__)
#endif

/*
 * snd_clone_create() : Return an opaque allocated clone manager.
 */
struct snd_clone *
snd_clone_create(int typemask, int maxunit, int deadline, uint32_t flags)
{
	struct snd_clone *c;

	SND_CLONE_ASSERT(!(typemask & ~SND_CLONE_MAXUNIT),
	    ("invalid typemask: 0x%08x", typemask));
	SND_CLONE_ASSERT(maxunit == -1 ||
	    !(maxunit & ~(~typemask & SND_CLONE_MAXUNIT)),
	    ("maxunit overflow: typemask=0x%08x maxunit=%d",
	    typemask, maxunit));
	SND_CLONE_ASSERT(!(flags & ~SND_CLONE_MASK),
	    ("invalid clone flags=0x%08x", flags));

	c = malloc(sizeof(*c), M_DEVBUF, M_WAITOK | M_ZERO);
	c->refcount = 0;
	c->size = 0;
	c->typemask = typemask;
	c->maxunit = (maxunit == -1) ? (~typemask & SND_CLONE_MAXUNIT) :
	    maxunit;
	c->deadline = deadline;
	c->flags = flags;
	getnanouptime(&c->tsp);
	TAILQ_INIT(&c->head);

	return (c);
}

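/*
 * Illustrative sketch (not part of this file): how a driver could create
 * its clone manager at attach time. The unit limit, deadline and flag
 * combination below are assumptions made for this example, not the values
 * used by the PCM driver, hence the #if 0 guard.
 */
#if 0
static struct snd_clone *
example_attach(void)
{
	/* Units 0..255, 30 second idle deadline, GC driven by unref(). */
	return (snd_clone_create(0, 255, 30000,
	    SND_CLONE_ENABLE | SND_CLONE_GC_ENABLE | SND_CLONE_GC_UNREF |
	    SND_CLONE_WAITOK));
}
#endif
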
int
snd_clone_busy(struct snd_clone *c)
{
	struct snd_clone_entry *ce;

	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	if (c->size == 0)
		return (0);

	TAILQ_FOREACH(ce, &c->head, link) {
		if ((ce->flags & SND_CLONE_BUSY) ||
		    (ce->devt != NULL && ce->devt->si_threadcount != 0))
			return (EBUSY);
	}

	return (0);
}

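/*
 * Illustrative sketch (assumed detach path, not part of this file):
 * snd_clone_busy() lets a driver refuse teardown while any cloned cdev is
 * still referenced by an open descriptor.
 */
#if 0
static int
example_detach(struct snd_clone *c)
{
	if (snd_clone_busy(c) != 0)
		return (EBUSY);		/* some cloned cdev is still open */
	snd_clone_destroy(c);
	return (0);
}
#endif
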
/*
 * snd_clone_enable()/disable() : Suspend/resume clone allocation through
 * snd_clone_alloc(). Everything else will not be affected by this.
 */
int
snd_clone_enable(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	if (c->flags & SND_CLONE_ENABLE)
		return (EINVAL);

	c->flags |= SND_CLONE_ENABLE;

	return (0);
}

int
snd_clone_disable(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	if (!(c->flags & SND_CLONE_ENABLE))
		return (EINVAL);

	c->flags &= ~SND_CLONE_ENABLE;

	return (0);
}

/*
 * Getters / Setters. Not worth explaining :)
 */
int
snd_clone_getsize(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	return (c->size);
}

int
snd_clone_getmaxunit(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	return (c->maxunit);
}

int
snd_clone_setmaxunit(struct snd_clone *c, int maxunit)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
	SND_CLONE_ASSERT(maxunit == -1 ||
	    !(maxunit & ~(~c->typemask & SND_CLONE_MAXUNIT)),
	    ("maxunit overflow: typemask=0x%08x maxunit=%d",
	    c->typemask, maxunit));

	c->maxunit = (maxunit == -1) ? (~c->typemask & SND_CLONE_MAXUNIT) :
	    maxunit;

	return (c->maxunit);
}

int
snd_clone_getdeadline(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	return (c->deadline);
}

int
snd_clone_setdeadline(struct snd_clone *c, int deadline)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	c->deadline = deadline;

	return (c->deadline);
}

uint32_t
snd_clone_getflags(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	return (c->flags);
}

uint32_t
snd_clone_setflags(struct snd_clone *c, uint32_t flags)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
	SND_CLONE_ASSERT(!(flags & ~SND_CLONE_MASK),
	    ("invalid clone flags=0x%08x", flags));

	c->flags = flags;

	return (c->flags);
}

uint32_t
snd_clone_getdevflags(struct cdev *dev)
{
	struct snd_clone_entry *ce;

	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));

	ce = dev->si_drv2;
	if (ce == NULL)
		return (0xffffffff);

	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));

	return (ce->flags);
}

uint32_t
snd_clone_setdevflags(struct cdev *dev, uint32_t flags)
{
	struct snd_clone_entry *ce;

	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
	SND_CLONE_ASSERT(!(flags & ~SND_CLONE_DEVMASK),
	    ("invalid clone dev flags=0x%08x", flags));

	ce = dev->si_drv2;
	if (ce == NULL)
		return (0xffffffff);

	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));

	ce->flags = flags;

	return (ce->flags);
}

/* Elapsed time conversion to ms */
#define SND_CLONE_ELAPSED(x, y)						\
	((((x)->tv_sec - (y)->tv_sec) * 1000) +				\
	(((y)->tv_nsec > (x)->tv_nsec) ?				\
	(((1000000000L + (x)->tv_nsec -					\
	(y)->tv_nsec) / 1000000) - 1000) :				\
	(((x)->tv_nsec - (y)->tv_nsec) / 1000000)))

#define SND_CLONE_EXPIRED(x, y, z)					\
	((x)->deadline < 1 ||						\
	((y)->tv_sec - (z)->tv_sec) > ((x)->deadline / 1000) ||	\
	SND_CLONE_ELAPSED(y, z) > (x)->deadline)

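/*
 * Worked example of the conversion above (values are illustrative): with
 * x = 5s + 100000000ns and y = 3s + 900000000ns on the uptime clock, y's
 * tv_nsec exceeds x's, so the borrow branch applies:
 *
 *   ((5 - 3) * 1000) + (((1000000000 + 100000000 - 900000000) / 1000000) - 1000)
 *   = 2000 + (200 - 1000)
 *   = 1200 ms
 *
 * which matches the actual difference of 1.2 seconds.
 */
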
/*
 * snd_clone_gc() : Garbage collector for stalled, expired objects. Refer to
 * clone.h for explanations of the GC settings.
 */
int
snd_clone_gc(struct snd_clone *c)
{
	struct snd_clone_entry *ce, *tce;
	struct timespec now;
	int pruned;

	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	if (!(c->flags & SND_CLONE_GC_ENABLE) || c->size == 0)
		return (0);

	getnanouptime(&now);

	/*
	 * Bail out if the last clone handler was invoked below the deadline
	 * threshold.
	 */
	if ((c->flags & SND_CLONE_GC_EXPIRED) &&
	    !SND_CLONE_EXPIRED(c, &now, &c->tsp))
		return (0);

	pruned = 0;

	/*
	 * Visit each object in reverse order. If the object is still being
	 * referenced by a valid open(), skip it. Look for expired objects
	 * and either revoke their clone invocation status or mercilessly
	 * throw them away.
	 */
	TAILQ_FOREACH_REVERSE_SAFE(ce, &c->head, link_head, link, tce) {
		if (!(ce->flags & SND_CLONE_BUSY) &&
		    (!(ce->flags & SND_CLONE_INVOKE) ||
		    SND_CLONE_EXPIRED(c, &now, &ce->tsp))) {
			if ((c->flags & SND_CLONE_GC_REVOKE) ||
			    ce->devt->si_threadcount != 0) {
				ce->flags &= ~SND_CLONE_INVOKE;
				ce->pid = -1;
			} else {
				TAILQ_REMOVE(&c->head, ce, link);
				destroy_dev(ce->devt);
				free(ce, M_DEVBUF);
				c->size--;
			}
			pruned++;
		}
	}

	/* return total pruned objects */
	return (pruned);
}

void
snd_clone_destroy(struct snd_clone *c)
{
	struct snd_clone_entry *ce, *tmp;

	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	ce = TAILQ_FIRST(&c->head);
	while (ce != NULL) {
		tmp = TAILQ_NEXT(ce, link);
		if (ce->devt != NULL)
			destroy_dev(ce->devt);
		free(ce, M_DEVBUF);
		ce = tmp;
	}

	free(c, M_DEVBUF);
}

/*
 * snd_clone_acquire() : The vital part of concurrency management. Must be
 * called somewhere at the beginning of the open() handler. ENODEV is not
 * really fatal since it just tells the caller that this is not cloned
 * stuff. EBUSY is *real*, don't forget that!
 */
int
snd_clone_acquire(struct cdev *dev)
{
	struct snd_clone_entry *ce;

	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));

	ce = dev->si_drv2;
	if (ce == NULL)
		return (ENODEV);

	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));

	ce->flags &= ~SND_CLONE_INVOKE;

	if (ce->flags & SND_CLONE_BUSY)
		return (EBUSY);

	ce->flags |= SND_CLONE_BUSY;

	return (0);
}

/*
 * snd_clone_release() : Release busy status. Must be called somewhere at
 * the end of the close() handler, or somewhere after a failed open().
 */
int
snd_clone_release(struct cdev *dev)
{
	struct snd_clone_entry *ce;

	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));

	ce = dev->si_drv2;
	if (ce == NULL)
		return (ENODEV);

	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));

	ce->flags &= ~SND_CLONE_INVOKE;

	if (!(ce->flags & SND_CLONE_BUSY))
		return (EBADF);

	ce->flags &= ~SND_CLONE_BUSY;
	ce->pid = -1;

	return (0);
}

/*
 * snd_clone_ref/unref() : Garbage collector reference counter. To make the
 * garbage collector run automatically, the sequence must be something like
 * this (both in the open() and close() handlers):
 *
 * open()  - 1) snd_clone_acquire()
 *           2) .... check check ... if failed, snd_clone_release()
 *           3) Success. Call snd_clone_ref()
 *
 * close() - 1) .... check check check ....
 *           2) Success. snd_clone_release()
 *           3) snd_clone_unref(). The garbage collector will run at this
 *              point if this is the last referenced object.
 */
int
snd_clone_ref(struct cdev *dev)
{
	struct snd_clone_entry *ce;
	struct snd_clone *c;

	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));

	ce = dev->si_drv2;
	if (ce == NULL)
		return (0);

	c = ce->parent;
	SND_CLONE_ASSERT(c != NULL, ("NULL parent"));
	SND_CLONE_ASSERT(c->refcount >= 0, ("refcount < 0"));

	return (++c->refcount);
}

int
snd_clone_unref(struct cdev *dev)
{
	struct snd_clone_entry *ce;
	struct snd_clone *c;

	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));

	ce = dev->si_drv2;
	if (ce == NULL)
		return (0);

	c = ce->parent;
	SND_CLONE_ASSERT(c != NULL, ("NULL parent"));
	SND_CLONE_ASSERT(c->refcount > 0, ("refcount <= 0"));

	c->refcount--;

	/*
	 * Run the automatic garbage collector, if needed.
	 */
	if ((c->flags & SND_CLONE_GC_UNREF) &&
	    (!(c->flags & SND_CLONE_GC_LASTREF) ||
	    (c->refcount == 0 && (c->flags & SND_CLONE_GC_LASTREF))))
		(void)snd_clone_gc(c);

	return (c->refcount);
}

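/*
 * Illustrative sketch of the sequence described above. The example_open()
 * and example_close() handlers, their error codes and the "driver specific"
 * checks are assumptions made for this example, not part of this file.
 */
#if 0
static int
example_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	int err;

	err = snd_clone_acquire(dev);
	if (err == EBUSY)
		return (EBUSY);		/* somebody else owns this cdev */

	if (0 /* driver specific checks failed */) {
		snd_clone_release(dev);
		return (ENXIO);
	}

	(void)snd_clone_ref(dev);	/* keep the GC reference counter up */
	return (0);
}

static int
example_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	/* ... driver specific teardown ... */
	snd_clone_release(dev);
	(void)snd_clone_unref(dev);	/* GC may run here on the last ref */
	return (0);
}
#endif
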
void
snd_clone_register(struct snd_clone_entry *ce, struct cdev *dev)
{
	SND_CLONE_ASSERT(ce != NULL, ("NULL snd_clone_entry"));
	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
	SND_CLONE_ASSERT(dev->si_drv2 == NULL, ("dev->si_drv2 not NULL"));
	SND_CLONE_ASSERT((ce->flags & SND_CLONE_ALLOC) == SND_CLONE_ALLOC,
	    ("invalid clone alloc flags=0x%08x", ce->flags));
	SND_CLONE_ASSERT(ce->devt == NULL, ("ce->devt not NULL"));
	SND_CLONE_ASSERT(ce->unit == dev2unit(dev),
	    ("invalid unit ce->unit=0x%08x dev2unit=0x%08x",
	    ce->unit, dev2unit(dev)));

	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));

	dev->si_drv2 = ce;
	ce->devt = dev;
	ce->flags &= ~SND_CLONE_ALLOC;
	ce->flags |= SND_CLONE_INVOKE;
}

struct snd_clone_entry *
snd_clone_alloc(struct snd_clone *c, struct cdev **dev, int *unit, int tmask)
{
	struct snd_clone_entry *ce, *after, *bce, *cce, *nce, *tce;
	struct timespec now;
	int cunit, allocunit;
	pid_t curpid;

	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
	SND_CLONE_ASSERT(dev != NULL, ("NULL dev pointer"));
	SND_CLONE_ASSERT((c->typemask & tmask) == tmask,
	    ("invalid tmask: typemask=0x%08x tmask=0x%08x",
	    c->typemask, tmask));
	SND_CLONE_ASSERT(unit != NULL, ("NULL unit pointer"));
	SND_CLONE_ASSERT(*unit == -1 || !(*unit & (c->typemask | tmask)),
	    ("typemask collision: typemask=0x%08x tmask=0x%08x *unit=%d",
	    c->typemask, tmask, *unit));

	if (!(c->flags & SND_CLONE_ENABLE) ||
	    (*unit != -1 && *unit > c->maxunit))
		return (NULL);

	ce = NULL;
	after = NULL;
	bce = NULL;	/* "b"usy candidate */
	cce = NULL;	/* "c"urthread/proc candidate */
	nce = NULL;	/* "n"ull, totally unbusy candidate */
	tce = NULL;	/* last "t"ry candidate */
	cunit = 0;
	allocunit = (*unit == -1) ? 0 : *unit;
	curpid = curthread->td_proc->p_pid;

	getnanouptime(&now);

	TAILQ_FOREACH(ce, &c->head, link) {
		/*
		 * Sort incrementally according to device type.
		 */
		if (tmask > (ce->unit & c->typemask)) {
			if (cunit == 0)
				after = ce;
			continue;
		} else if (tmask < (ce->unit & c->typemask))
			break;

		/*
		 * Shoot.. this is where the grumpiness begins. Just
		 * return immediately.
		 */
		if (*unit != -1 && *unit == (ce->unit & ~tmask))
			goto snd_clone_alloc_out;

		cunit++;
		/*
		 * Similar device type. Sort incrementally according
		 * to allocation unit. While here, look for a free slot
		 * and possible collisions for new / future allocations.
		 */
		if (*unit == -1 && (ce->unit & ~tmask) == allocunit)
			allocunit++;
		if ((ce->unit & ~tmask) < allocunit)
			after = ce;
		/*
		 * Clone logic:
		 * 1. Look for a non-busy entry, but keep track of the best
		 *    possible busy cdev.
		 * 2. Look for the best (oldest referenced) entry that is in
		 *    the same process / thread.
		 * 3. Look for the best (oldest referenced), absolutely free
		 *    entry.
		 * 4. Lastly, look for the best (oldest referenced) among any
		 *    entries that don't fit the criteria above.
		 */
		if (ce->flags & SND_CLONE_BUSY) {
			if (ce->devt != NULL && (bce == NULL ||
			    timespeccmp(&ce->tsp, &bce->tsp, <)))
				bce = ce;
			continue;
		}
		if (ce->pid == curpid &&
		    (cce == NULL || timespeccmp(&ce->tsp, &cce->tsp, <)))
			cce = ce;
		else if (!(ce->flags & SND_CLONE_INVOKE) &&
		    (nce == NULL || timespeccmp(&ce->tsp, &nce->tsp, <)))
			nce = ce;
		else if (tce == NULL || timespeccmp(&ce->tsp, &tce->tsp, <))
			tce = ce;
	}
	if (*unit != -1)
		goto snd_clone_alloc_new;
	else if (cce != NULL) {
		/* Same proc entry found, go for it */
		ce = cce;
		goto snd_clone_alloc_out;
	} else if (nce != NULL) {
		/*
		 * Next, try an absolutely free entry. If the calculated
		 * allocunit is smaller, create a new entry instead.
		 */
		if (allocunit < (nce->unit & ~tmask))
			goto snd_clone_alloc_new;
		ce = nce;
		goto snd_clone_alloc_out;
	} else if (allocunit > c->maxunit) {
		/*
		 * Maximum allowable unit reached. Try returning any
		 * available cdev and hope for the best. If the lookup is
		 * done for things like stat(), mtime() etc., things should
		 * be ok. Otherwise, the open() handler should do further
		 * checks and decide whether to return the correct error
		 * code or not.
		 */
		if (tce != NULL) {
			ce = tce;
			goto snd_clone_alloc_out;
		} else if (bce != NULL) {
			ce = bce;
			goto snd_clone_alloc_out;
		}
		return (NULL);
	}

snd_clone_alloc_new:
	/*
	 * No free entries found, and we still haven't reached the maximum
	 * allowable units. Allocate and set up a minimal, unique entry with
	 * busy status so nobody will monkey with this new entry. The unit
	 * magic is set right here to avoid collisions with other contesting
	 * handlers. The caller must be careful here to maintain its own
	 * synchronization, as long as it does not conflict with malloc(9)
	 * operations.
	 *
	 * That said, go figure.
	 */
	ce = malloc(sizeof(*ce), M_DEVBUF,
	    ((c->flags & SND_CLONE_WAITOK) ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (ce == NULL) {
		if (*unit != -1)
			return (NULL);
		/*
		 * We're being dense, ignorance is bliss,
		 * Super Regulatory Measure (TM).. TRY AGAIN!
		 */
		if (nce != NULL) {
			ce = nce;
			goto snd_clone_alloc_out;
		} else if (tce != NULL) {
			ce = tce;
			goto snd_clone_alloc_out;
		} else if (bce != NULL) {
			ce = bce;
			goto snd_clone_alloc_out;
		}
		return (NULL);
	}
	/* Set up the new entry */
	ce->parent = c;
	ce->unit = tmask | allocunit;
	ce->pid = curpid;
	ce->tsp = now;
	ce->flags |= SND_CLONE_ALLOC;
	if (after != NULL) {
		TAILQ_INSERT_AFTER(&c->head, after, ce, link);
	} else {
		TAILQ_INSERT_HEAD(&c->head, ce, link);
	}
	c->size++;
	c->tsp = now;
	/*
	 * Save the new allocation unit for the caller, to be used later by
	 * make_dev().
	 */
	*unit = allocunit;

	return (ce);

snd_clone_alloc_out:
	/*
	 * Set, mark and timestamp the entry if it is a truly free entry.
	 * Leave a busy entry alone.
	 */
	if (!(ce->flags & SND_CLONE_BUSY)) {
		ce->pid = curpid;
		ce->tsp = now;
		ce->flags |= SND_CLONE_INVOKE;
	}
	c->tsp = now;
	*dev = ce->devt;

	return (NULL);
}
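
/*
 * Illustrative sketch of a snd_clone_alloc() consumer (the real ones live
 * in the PCM/mixer code). The handler name, cdevsw and device name below
 * are assumptions made for this example. A NULL return with *dev filled in
 * means an existing cdev is being reused or shared; a non-NULL entry means
 * a fresh unit was reserved and must be completed with make_dev() plus
 * snd_clone_register().
 */
#if 0
static void
example_clone_handler(struct snd_clone *c, struct cdev **dev)
{
	struct snd_clone_entry *ce;
	int unit;

	unit = -1;			/* let the allocator pick a unit */
	ce = snd_clone_alloc(c, dev, &unit, 0);
	if (ce == NULL)
		return;			/* reused/shared cdev, or refused */

	/* New entry: create the cdev and tie it to the clone entry. */
	*dev = make_dev(&example_cdevsw, unit, UID_ROOT, GID_WHEEL, 0666,
	    "example%d", unit);
	snd_clone_register(ce, *dev);
}
#endif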