/* FreeBSD kernel CAM subsystem: cam_periph.c — common support routines for CAM peripheral ("type") drivers. */
1/*-
2 * Common functions for CAM "type" (peripheral) drivers.
3 *
4 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
5 *
6 * Copyright (c) 1997, 1998 Justin T. Gibbs.
7 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions, and the following disclaimer,
15 * without modification, immediately at the beginning of the file.
16 * 2. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD$");
34
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/types.h>
38#include <sys/malloc.h>
39#include <sys/kernel.h>
40#include <sys/bio.h>
41#include <sys/conf.h>
42#include <sys/devctl.h>
43#include <sys/lock.h>
44#include <sys/mutex.h>
45#include <sys/buf.h>
46#include <sys/proc.h>
47#include <sys/devicestat.h>
48#include <sys/sbuf.h>
49#include <sys/sysctl.h>
50#include <vm/vm.h>
51#include <vm/vm_extern.h>
52
53#include <cam/cam.h>
54#include <cam/cam_ccb.h>
55#include <cam/cam_queue.h>
56#include <cam/cam_xpt_periph.h>
58#include <cam/cam_periph.h>
59#include <cam/cam_debug.h>
60#include <cam/cam_sim.h>
61
62#include <cam/scsi/scsi_all.h>
64#include <cam/scsi/scsi_pass.h>
65
66static u_int camperiphnextunit(struct periph_driver *p_drv,
67 u_int newunit, bool wired,
68 path_id_t pathid, target_id_t target,
69 lun_id_t lun);
70static u_int camperiphunit(struct periph_driver *p_drv,
71 path_id_t pathid, target_id_t target,
72 lun_id_t lun,
73 const char *sn);
74static void camperiphdone(struct cam_periph *periph,
75 union ccb *done_ccb);
76static void camperiphfree(struct cam_periph *periph);
77static int camperiphscsistatuserror(union ccb *ccb,
78 union ccb **orig_ccb,
79 cam_flags camflags,
80 u_int32_t sense_flags,
81 int *openings,
82 u_int32_t *relsim_flags,
83 u_int32_t *timeout,
84 u_int32_t *action,
85 const char **action_string);
86static int camperiphscsisenseerror(union ccb *ccb,
87 union ccb **orig_ccb,
88 cam_flags camflags,
89 u_int32_t sense_flags,
90 int *openings,
91 u_int32_t *relsim_flags,
92 u_int32_t *timeout,
93 u_int32_t *action,
94 const char **action_string);
95static void cam_periph_devctl_notify(union ccb *ccb);
96
97static int nperiph_drivers;
98static int initialized = 0;
100
101static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");
102
103static int periph_selto_delay = 1000;
104TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
105static int periph_noresrc_delay = 500;
106TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
107static int periph_busy_delay = 500;
108TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
109
110static u_int periph_mapmem_thresh = 65536;
111SYSCTL_UINT(_kern_cam, OID_AUTO, mapmem_thresh, CTLFLAG_RWTUN,
112 &periph_mapmem_thresh, 0, "Threshold for user-space buffer mapping");
113
114void
116{
117 struct periph_driver *drv = (struct periph_driver *)data;
118 struct periph_driver **newdrivers, **old;
119 int ndrivers;
120
121again:
122 ndrivers = nperiph_drivers + 2;
123 newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
124 M_WAITOK);
126 if (ndrivers != nperiph_drivers + 2) {
127 /*
128 * Lost race against itself; go around.
129 */
131 free(newdrivers, M_CAMPERIPH);
132 goto again;
133 }
134 if (periph_drivers)
135 bcopy(periph_drivers, newdrivers,
136 sizeof(*newdrivers) * nperiph_drivers);
137 newdrivers[nperiph_drivers] = drv;
138 newdrivers[nperiph_drivers + 1] = NULL;
139 old = periph_drivers;
140 periph_drivers = newdrivers;
143 if (old)
144 free(old, M_CAMPERIPH);
145 /* If driver marked as early or it is late now, initialize it. */
146 if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
147 initialized > 1)
148 (*drv->init)();
149}
150
151int
153{
154 struct periph_driver *drv = (struct periph_driver *)data;
155 int error, n;
156
157 /* If driver marked as early or it is late now, deinitialize it. */
158 if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
159 initialized > 1) {
160 if (drv->deinit == NULL) {
161 printf("CAM periph driver '%s' doesn't have deinit.\n",
162 drv->driver_name);
163 return (EOPNOTSUPP);
164 }
165 error = drv->deinit();
166 if (error != 0)
167 return (error);
168 }
169
171 for (n = 0; n < nperiph_drivers && periph_drivers[n] != drv; n++)
172 ;
173 KASSERT(n < nperiph_drivers,
174 ("Periph driver '%s' was not registered", drv->driver_name));
175 for (; n + 1 < nperiph_drivers; n++)
176 periph_drivers[n] = periph_drivers[n + 1];
177 periph_drivers[n + 1] = NULL;
180 return (0);
181}
182
183void
185{
186 int i, early;
187
188 initialized = max(initialized, level);
189 for (i = 0; periph_drivers[i] != NULL; i++) {
190 early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
191 if (early == initialized)
192 (*periph_drivers[i]->init)();
193 }
194}
195
198 periph_oninv_t *periph_oninvalidate,
199 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
200 char *name, cam_periph_type type, struct cam_path *path,
201 ac_callback_t *ac_callback, ac_code code, void *arg)
202{
203 struct periph_driver **p_drv;
204 struct cam_sim *sim;
205 struct cam_periph *periph;
206 struct cam_periph *cur_periph;
207 path_id_t path_id;
208 target_id_t target_id;
209 lun_id_t lun_id;
210 cam_status status;
211 u_int init_level;
212
213 init_level = 0;
214 /*
215 * Handle Hot-Plug scenarios. If there is already a peripheral
216 * of our type assigned to this path, we are likely waiting for
217 * final close on an old, invalidated, peripheral. If this is
218 * the case, queue up a deferred call to the peripheral's async
219 * handler. If it looks like a mistaken re-allocation, complain.
220 */
221 if ((periph = cam_periph_find(path, name)) != NULL) {
222 if ((periph->flags & CAM_PERIPH_INVALID) != 0
223 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
225 periph->deferred_callback = ac_callback;
226 periph->deferred_ac = code;
227 return (CAM_REQ_INPROG);
228 } else {
229 printf("cam_periph_alloc: attempt to re-allocate "
230 "valid device %s%d rejected flags %#x "
231 "refcount %d\n", periph->periph_name,
232 periph->unit_number, periph->flags,
233 periph->refcount);
234 }
235 return (CAM_REQ_INVALID);
236 }
237
238 periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
239 M_NOWAIT|M_ZERO);
240
241 if (periph == NULL)
242 return (CAM_RESRC_UNAVAIL);
243
244 init_level++;
245
247 path_id = xpt_path_path_id(path);
248 target_id = xpt_path_target_id(path);
249 lun_id = xpt_path_lun_id(path);
250 periph->periph_start = periph_start;
251 periph->periph_dtor = periph_dtor;
252 periph->periph_oninval = periph_oninvalidate;
253 periph->type = type;
254 periph->periph_name = name;
257 periph->refcount = 1; /* Dropped by invalidation. */
258 periph->sim = sim;
259 SLIST_INIT(&periph->ccb_list);
260 status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
261 if (status != CAM_REQ_CMP)
262 goto failure;
263 periph->path = path;
264
266 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
267 if (strcmp((*p_drv)->driver_name, name) == 0)
268 break;
269 }
270 if (*p_drv == NULL) {
271 printf("cam_periph_alloc: invalid periph name '%s'\n", name);
273 xpt_free_path(periph->path);
274 free(periph, M_CAMPERIPH);
275 return (CAM_REQ_INVALID);
276 }
277 periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id,
279 cur_periph = TAILQ_FIRST(&(*p_drv)->units);
280 while (cur_periph != NULL
281 && cur_periph->unit_number < periph->unit_number)
282 cur_periph = TAILQ_NEXT(cur_periph, unit_links);
283 if (cur_periph != NULL) {
284 KASSERT(cur_periph->unit_number != periph->unit_number,
285 ("duplicate units on periph list"));
286 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
287 } else {
288 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
289 (*p_drv)->generation++;
290 }
292
293 init_level++;
294
295 status = xpt_add_periph(periph);
296 if (status != CAM_REQ_CMP)
297 goto failure;
298
299 init_level++;
300 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n"));
301
302 status = periph_ctor(periph, arg);
303
304 if (status == CAM_REQ_CMP)
305 init_level++;
306
307failure:
308 switch (init_level) {
309 case 4:
310 /* Initialized successfully */
311 break;
312 case 3:
313 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
314 xpt_remove_periph(periph);
315 /* FALLTHROUGH */
316 case 2:
318 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
320 xpt_free_path(periph->path);
321 /* FALLTHROUGH */
322 case 1:
323 free(periph, M_CAMPERIPH);
324 /* FALLTHROUGH */
325 case 0:
326 /* No cleanup to perform. */
327 break;
328 default:
329 panic("%s: Unknown init level", __func__);
330 }
331 return(status);
332}
333
334/*
335 * Find a peripheral structure with the specified path, target, lun,
336 * and (optionally) type. If the name is NULL, this function will return
337 * the first peripheral driver that matches the specified path.
338 */
339struct cam_periph *
340cam_periph_find(struct cam_path *path, char *name)
341{
342 struct periph_driver **p_drv;
343 struct cam_periph *periph;
344
346 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
347 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
348 continue;
349
350 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
351 if (xpt_path_comp(periph->path, path) == 0) {
353 cam_periph_assert(periph, MA_OWNED);
354 return(periph);
355 }
356 }
357 if (name != NULL) {
359 return(NULL);
360 }
361 }
363 return(NULL);
364}
365
366/*
367 * Find peripheral driver instances attached to the specified path.
368 */
369int
370cam_periph_list(struct cam_path *path, struct sbuf *sb)
371{
372 struct sbuf local_sb;
373 struct periph_driver **p_drv;
374 struct cam_periph *periph;
375 int count;
376 int sbuf_alloc_len;
377
378 sbuf_alloc_len = 16;
379retry:
380 sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN);
381 count = 0;
383 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
384 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
385 if (xpt_path_comp(periph->path, path) != 0)
386 continue;
387
388 if (sbuf_len(&local_sb) != 0)
389 sbuf_cat(&local_sb, ",");
390
391 sbuf_printf(&local_sb, "%s%d", periph->periph_name,
392 periph->unit_number);
393
394 if (sbuf_error(&local_sb) == ENOMEM) {
395 sbuf_alloc_len *= 2;
397 sbuf_delete(&local_sb);
398 goto retry;
399 }
400 count++;
401 }
402 }
404 sbuf_finish(&local_sb);
405 if (sbuf_len(sb) != 0)
406 sbuf_cat(sb, ",");
407 sbuf_cat(sb, sbuf_data(&local_sb));
408 sbuf_delete(&local_sb);
409 return (count);
410}
411
412int
414{
415 int status;
416
417 if (periph == NULL)
418 return (EINVAL);
419
420 status = ENOENT;
422 if ((periph->flags & CAM_PERIPH_INVALID) == 0) {
423 periph->refcount++;
424 status = 0;
425 }
427
428 return (status);
429}
430
431void
433{
434
436 KASSERT(periph->refcount >= 1,
437 ("cam_periph_doacquire() with refcount == %d", periph->refcount));
438 periph->refcount++;
440}
441
442void
444{
445
446 cam_periph_assert(periph, MA_OWNED);
447 KASSERT(periph->refcount >= 1, ("periph->refcount >= 1"));
448 if (--periph->refcount == 0)
449 camperiphfree(periph);
450}
451
452void
454{
455
456 if (periph == NULL)
457 return;
458
462}
463
464void
466{
467 struct mtx *mtx;
468
469 if (periph == NULL)
470 return;
471
472 cam_periph_assert(periph, MA_NOTOWNED);
473 mtx = cam_periph_mtx(periph);
474 mtx_lock(mtx);
476 mtx_unlock(mtx);
477}
478
479/*
480 * hold/unhold act as mutual exclusion for sections of the code that
481 * need to sleep and want to make sure that other sections that
482 * will interfere are held off. This only protects exclusive sections
483 * from each other.
484 */
485int
486cam_periph_hold(struct cam_periph *periph, int priority)
487{
488 int error;
489
490 /*
491 * Increment the reference count on the peripheral
492 * while we wait for our lock attempt to succeed
493 * to ensure the peripheral doesn't disappear out
494 * from user us while we sleep.
495 */
496
497 if (cam_periph_acquire(periph) != 0)
498 return (ENXIO);
499
500 cam_periph_assert(periph, MA_OWNED);
501 while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
502 periph->flags |= CAM_PERIPH_LOCK_WANTED;
503 if ((error = cam_periph_sleep(periph, periph, priority,
504 "caplck", 0)) != 0) {
506 return (error);
507 }
508 if (periph->flags & CAM_PERIPH_INVALID) {
510 return (ENXIO);
511 }
512 }
513
514 periph->flags |= CAM_PERIPH_LOCKED;
515 return (0);
516}
517
518void
520{
521
522 cam_periph_assert(periph, MA_OWNED);
523
524 periph->flags &= ~CAM_PERIPH_LOCKED;
525 if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
526 periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
527 wakeup(periph);
528 }
529
531}
532
533/*
534 * Look for the next unit number that is not currently in use for this
535 * peripheral type starting at "newunit". Also exclude unit numbers that
536 * are reserved by for future "hardwiring" unless we already know that this
537 * is a potential wired device. Only assume that the device is "wired" the
538 * first time through the loop since after that we'll be looking at unit
539 * numbers that did not match a wiring entry.
540 */
541static u_int
542camperiphnextunit(struct periph_driver *p_drv, u_int newunit, bool wired,
543 path_id_t pathid, target_id_t target, lun_id_t lun)
544{
545 struct cam_periph *periph;
546 char *periph_name;
547 int i, val, dunit, r;
548 const char *dname, *strval;
549
550 periph_name = p_drv->driver_name;
551 for (;;newunit++) {
552 for (periph = TAILQ_FIRST(&p_drv->units);
553 periph != NULL && periph->unit_number != newunit;
554 periph = TAILQ_NEXT(periph, unit_links))
555 ;
556
557 if (periph != NULL && periph->unit_number == newunit) {
558 if (wired) {
559 xpt_print(periph->path, "Duplicate Wired "
560 "Device entry!\n");
561 xpt_print(periph->path, "Second device (%s "
562 "device at scbus%d target %d lun %d) will "
563 "not be wired\n", periph_name, pathid,
564 target, lun);
565 wired = false;
566 }
567 continue;
568 }
569 if (wired)
570 break;
571
572 /*
573 * Don't allow the mere presence of any attributes of a device
574 * means that it is for a wired down entry. Instead, insist that
575 * one of the matching criteria from camperiphunit be present
576 * for the device.
577 */
578 i = 0;
579 dname = periph_name;
580 for (;;) {
581 r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
582 if (r != 0)
583 break;
584
585 if (newunit != dunit)
586 continue;
587 if (resource_string_value(dname, dunit, "sn", &strval) == 0 ||
588 resource_int_value(dname, dunit, "lun", &val) == 0 ||
589 resource_int_value(dname, dunit, "target", &val) == 0 ||
590 resource_string_value(dname, dunit, "at", &strval) == 0)
591 break;
592 }
593 if (r != 0)
594 break;
595 }
596 return (newunit);
597}
598
599static u_int
601 target_id_t target, lun_id_t lun, const char *sn)
602{
603 bool wired = false;
604 u_int unit;
605 int i, val, dunit;
606 const char *dname, *strval;
607 char pathbuf[32], *periph_name;
608
609 periph_name = p_drv->driver_name;
610 snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
611 unit = 0;
612 i = 0;
613 dname = periph_name;
614
615 for (wired = false; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
616 wired = false) {
617 if (resource_string_value(dname, dunit, "at", &strval) == 0) {
618 if (strcmp(strval, pathbuf) != 0)
619 continue;
620 wired = true;
621 }
622 if (resource_int_value(dname, dunit, "target", &val) == 0) {
623 if (val != target)
624 continue;
625 wired = true;
626 }
627 if (resource_int_value(dname, dunit, "lun", &val) == 0) {
628 if (val != lun)
629 continue;
630 wired = true;
631 }
632 if (resource_string_value(dname, dunit, "sn", &strval) == 0) {
633 if (sn == NULL || strcmp(strval, sn) != 0)
634 continue;
635 wired = true;
636 }
637 if (wired) {
638 unit = dunit;
639 break;
640 }
641 }
642
643 /*
644 * Either start from 0 looking for the next unit or from
645 * the unit number given in the resource config. This way,
646 * if we have wildcard matches, we don't return the same
647 * unit number twice.
648 */
649 unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);
650
651 return (unit);
652}
653
654void
656{
657
658 cam_periph_assert(periph, MA_OWNED);
659 /*
660 * We only tear down the device the first time a peripheral is
661 * invalidated.
662 */
663 if ((periph->flags & CAM_PERIPH_INVALID) != 0)
664 return;
665
666 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
667 if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting) {
668 struct sbuf sb;
669 char buffer[160];
670
671 sbuf_new(&sb, buffer, 160, SBUF_FIXEDLEN);
672 xpt_denounce_periph_sbuf(periph, &sb);
673 sbuf_finish(&sb);
674 sbuf_putbuf(&sb);
675 }
676 periph->flags |= CAM_PERIPH_INVALID;
677 periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
678 if (periph->periph_oninval != NULL)
679 periph->periph_oninval(periph);
681}
682
683static void
685{
686 struct periph_driver **p_drv;
687 struct periph_driver *drv;
688
689 cam_periph_assert(periph, MA_OWNED);
690 KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating",
691 periph->periph_name, periph->unit_number));
692 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
693 if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
694 break;
695 }
696 if (*p_drv == NULL) {
697 printf("camperiphfree: attempt to free non-existant periph\n");
698 return;
699 }
700 /*
701 * Cache a pointer to the periph_driver structure. If a
702 * periph_driver is added or removed from the array (see
703 * periphdriver_register()) while we drop the toplogy lock
704 * below, p_drv may change. This doesn't protect against this
705 * particular periph_driver going away. That will require full
706 * reference counting in the periph_driver infrastructure.
707 */
708 drv = *p_drv;
709
710 /*
711 * We need to set this flag before dropping the topology lock, to
712 * let anyone who is traversing the list that this peripheral is
713 * about to be freed, and there will be no more reference count
714 * checks.
715 */
716 periph->flags |= CAM_PERIPH_FREE;
717
718 /*
719 * The peripheral destructor semantics dictate calling with only the
720 * SIM mutex held. Since it might sleep, it should not be called
721 * with the topology lock held.
722 */
724
725 /*
726 * We need to call the peripheral destructor prior to removing the
727 * peripheral from the list. Otherwise, we risk running into a
728 * scenario where the peripheral unit number may get reused
729 * (because it has been removed from the list), but some resources
730 * used by the peripheral are still hanging around. In particular,
731 * the devfs nodes used by some peripherals like the pass(4) driver
732 * aren't fully cleaned up until the destructor is run. If the
733 * unit number is reused before the devfs instance is fully gone,
734 * devfs will panic.
735 */
736 if (periph->periph_dtor != NULL)
737 periph->periph_dtor(periph);
738
739 /*
740 * The peripheral list is protected by the topology lock. We have to
741 * remove the periph from the drv list before we call deferred_ac. The
742 * AC_FOUND_DEVICE callback won't create a new periph if it's still there.
743 */
745
746 TAILQ_REMOVE(&drv->units, periph, unit_links);
747 drv->generation++;
748
749 xpt_remove_periph(periph);
750
752 if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
753 xpt_print(periph->path, "Periph destroyed\n");
754 else
755 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
756
757 if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
758 union ccb ccb;
759 void *arg;
760
761 memset(&ccb, 0, sizeof(ccb));
762 switch (periph->deferred_ac) {
763 case AC_FOUND_DEVICE:
766 xpt_action(&ccb);
767 arg = &ccb;
768 break;
770 xpt_path_inq(&ccb.cpi, periph->path);
771 arg = &ccb;
772 break;
773 default:
774 arg = NULL;
775 break;
776 }
777 periph->deferred_callback(NULL, periph->deferred_ac,
778 periph->path, arg);
779 }
780 xpt_free_path(periph->path);
781 free(periph, M_CAMPERIPH);
783}
784
785/*
786 * Map user virtual pointers into kernel virtual address space, so we can
787 * access the memory. This is now a generic function that centralizes most
788 * of the sanity checks on the data flags, if any.
789 * This also only works for up to maxphys memory. Since we use
790 * buffers to map stuff in and out, we're limited to the buffer size.
791 */
792int
794 u_int maxmap)
795{
796 int numbufs, i;
797 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
798 u_int32_t lengths[CAM_PERIPH_MAXMAPS];
799 u_int32_t dirs[CAM_PERIPH_MAXMAPS];
800
801 bzero(mapinfo, sizeof(*mapinfo));
802 if (maxmap == 0)
803 maxmap = DFLTPHYS; /* traditional default */
804 else if (maxmap > maxphys)
805 maxmap = maxphys; /* for safety */
806 switch(ccb->ccb_h.func_code) {
807 case XPT_DEV_MATCH:
808 if (ccb->cdm.match_buf_len == 0) {
809 printf("cam_periph_mapmem: invalid match buffer "
810 "length 0\n");
811 return(EINVAL);
812 }
813 if (ccb->cdm.pattern_buf_len > 0) {
814 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
815 lengths[0] = ccb->cdm.pattern_buf_len;
816 dirs[0] = CAM_DIR_OUT;
817 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
818 lengths[1] = ccb->cdm.match_buf_len;
819 dirs[1] = CAM_DIR_IN;
820 numbufs = 2;
821 } else {
822 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
823 lengths[0] = ccb->cdm.match_buf_len;
824 dirs[0] = CAM_DIR_IN;
825 numbufs = 1;
826 }
827 /*
828 * This request will not go to the hardware, no reason
829 * to be so strict. vmapbuf() is able to map up to maxphys.
830 */
831 maxmap = maxphys;
832 break;
833 case XPT_SCSI_IO:
836 return(0);
838 return (EINVAL);
839 data_ptrs[0] = &ccb->csio.data_ptr;
840 lengths[0] = ccb->csio.dxfer_len;
841 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
842 numbufs = 1;
843 break;
844 case XPT_ATA_IO:
846 return(0);
848 return (EINVAL);
849 data_ptrs[0] = &ccb->ataio.data_ptr;
850 lengths[0] = ccb->ataio.dxfer_len;
851 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
852 numbufs = 1;
853 break;
854 case XPT_MMC_IO:
856 return(0);
857 /* Two mappings: one for cmd->data and one for cmd->data->data */
858 data_ptrs[0] = (unsigned char **)&ccb->mmcio.cmd.data;
859 lengths[0] = sizeof(struct mmc_data *);
860 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
861 data_ptrs[1] = (unsigned char **)&ccb->mmcio.cmd.data->data;
862 lengths[1] = ccb->mmcio.cmd.data->len;
863 dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
864 numbufs = 2;
865 break;
866 case XPT_SMP_IO:
867 data_ptrs[0] = &ccb->smpio.smp_request;
868 lengths[0] = ccb->smpio.smp_request_len;
869 dirs[0] = CAM_DIR_OUT;
870 data_ptrs[1] = &ccb->smpio.smp_response;
871 lengths[1] = ccb->smpio.smp_response_len;
872 dirs[1] = CAM_DIR_IN;
873 numbufs = 2;
874 break;
875 case XPT_NVME_IO:
876 case XPT_NVME_ADMIN:
878 return (0);
880 return (EINVAL);
881 data_ptrs[0] = &ccb->nvmeio.data_ptr;
882 lengths[0] = ccb->nvmeio.dxfer_len;
883 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
884 numbufs = 1;
885 break;
886 case XPT_DEV_ADVINFO:
887 if (ccb->cdai.bufsiz == 0)
888 return (0);
889
890 data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
891 lengths[0] = ccb->cdai.bufsiz;
892 dirs[0] = CAM_DIR_IN;
893 numbufs = 1;
894
895 /*
896 * This request will not go to the hardware, no reason
897 * to be so strict. vmapbuf() is able to map up to maxphys.
898 */
899 maxmap = maxphys;
900 break;
901 default:
902 return(EINVAL);
903 break; /* NOTREACHED */
904 }
905
906 /*
907 * Check the transfer length and permissions first, so we don't
908 * have to unmap any previously mapped buffers.
909 */
910 for (i = 0; i < numbufs; i++) {
911 if (lengths[i] > maxmap) {
912 printf("cam_periph_mapmem: attempt to map %lu bytes, "
913 "which is greater than %lu\n",
914 (long)(lengths[i]), (u_long)maxmap);
915 return (E2BIG);
916 }
917 }
918
919 /*
920 * This keeps the kernel stack of current thread from getting
921 * swapped. In low-memory situations where the kernel stack might
922 * otherwise get swapped out, this holds it and allows the thread
923 * to make progress and release the kernel mapped pages sooner.
924 *
925 * XXX KDM should I use P_NOSWAP instead?
926 */
927 PHOLD(curproc);
928
929 for (i = 0; i < numbufs; i++) {
930 /* Save the user's data address. */
931 mapinfo->orig[i] = *data_ptrs[i];
932
933 /*
934 * For small buffers use malloc+copyin/copyout instead of
935 * mapping to KVA to avoid expensive TLB shootdowns. For
936 * small allocations malloc is backed by UMA, and so much
937 * cheaper on SMP systems.
938 */
939 if (lengths[i] <= periph_mapmem_thresh &&
941 *data_ptrs[i] = malloc(lengths[i], M_CAMPERIPH,
942 M_WAITOK);
943 if (dirs[i] != CAM_DIR_IN) {
944 if (copyin(mapinfo->orig[i], *data_ptrs[i],
945 lengths[i]) != 0) {
946 free(*data_ptrs[i], M_CAMPERIPH);
947 *data_ptrs[i] = mapinfo->orig[i];
948 goto fail;
949 }
950 } else
951 bzero(*data_ptrs[i], lengths[i]);
952 continue;
953 }
954
955 /*
956 * Get the buffer.
957 */
958 mapinfo->bp[i] = uma_zalloc(pbuf_zone, M_WAITOK);
959
960 /* set the direction */
961 mapinfo->bp[i]->b_iocmd = (dirs[i] == CAM_DIR_OUT) ?
962 BIO_WRITE : BIO_READ;
963
964 /* Map the buffer into kernel memory. */
965 if (vmapbuf(mapinfo->bp[i], *data_ptrs[i], lengths[i], 1) < 0) {
966 uma_zfree(pbuf_zone, mapinfo->bp[i]);
967 goto fail;
968 }
969
970 /* set our pointer to the new mapped area */
971 *data_ptrs[i] = mapinfo->bp[i]->b_data;
972 }
973
974 /*
975 * Now that we've gotten this far, change ownership to the kernel
976 * of the buffers so that we don't run afoul of returning to user
977 * space with locks (on the buffer) held.
978 */
979 for (i = 0; i < numbufs; i++) {
980 if (mapinfo->bp[i])
981 BUF_KERNPROC(mapinfo->bp[i]);
982 }
983
984 mapinfo->num_bufs_used = numbufs;
985 return(0);
986
987fail:
988 for (i--; i >= 0; i--) {
989 if (mapinfo->bp[i]) {
990 vunmapbuf(mapinfo->bp[i]);
991 uma_zfree(pbuf_zone, mapinfo->bp[i]);
992 } else
993 free(*data_ptrs[i], M_CAMPERIPH);
994 *data_ptrs[i] = mapinfo->orig[i];
995 }
996 PRELE(curproc);
997 return(EACCES);
998}
999
1000/*
1001 * Unmap memory segments mapped into kernel virtual address space by
1002 * cam_periph_mapmem().
1003 */
1004void
1006{
1007 int numbufs, i;
1008 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
1009 u_int32_t lengths[CAM_PERIPH_MAXMAPS];
1010 u_int32_t dirs[CAM_PERIPH_MAXMAPS];
1011
1012 if (mapinfo->num_bufs_used <= 0) {
1013 /* nothing to free and the process wasn't held. */
1014 return;
1015 }
1016
1017 switch (ccb->ccb_h.func_code) {
1018 case XPT_DEV_MATCH:
1019 if (ccb->cdm.pattern_buf_len > 0) {
1020 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
1021 lengths[0] = ccb->cdm.pattern_buf_len;
1022 dirs[0] = CAM_DIR_OUT;
1023 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
1024 lengths[1] = ccb->cdm.match_buf_len;
1025 dirs[1] = CAM_DIR_IN;
1026 numbufs = 2;
1027 } else {
1028 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
1029 lengths[0] = ccb->cdm.match_buf_len;
1030 dirs[0] = CAM_DIR_IN;
1031 numbufs = 1;
1032 }
1033 break;
1034 case XPT_SCSI_IO:
1035 case XPT_CONT_TARGET_IO:
1036 data_ptrs[0] = &ccb->csio.data_ptr;
1037 lengths[0] = ccb->csio.dxfer_len;
1038 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1039 numbufs = 1;
1040 break;
1041 case XPT_ATA_IO:
1042 data_ptrs[0] = &ccb->ataio.data_ptr;
1043 lengths[0] = ccb->ataio.dxfer_len;
1044 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1045 numbufs = 1;
1046 break;
1047 case XPT_MMC_IO:
1048 data_ptrs[0] = (u_int8_t **)&ccb->mmcio.cmd.data;
1049 lengths[0] = sizeof(struct mmc_data *);
1050 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1051 data_ptrs[1] = (u_int8_t **)&ccb->mmcio.cmd.data->data;
1052 lengths[1] = ccb->mmcio.cmd.data->len;
1053 dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
1054 numbufs = 2;
1055 break;
1056 case XPT_SMP_IO:
1057 data_ptrs[0] = &ccb->smpio.smp_request;
1058 lengths[0] = ccb->smpio.smp_request_len;
1059 dirs[0] = CAM_DIR_OUT;
1060 data_ptrs[1] = &ccb->smpio.smp_response;
1061 lengths[1] = ccb->smpio.smp_response_len;
1062 dirs[1] = CAM_DIR_IN;
1063 numbufs = 2;
1064 break;
1065 case XPT_NVME_IO:
1066 case XPT_NVME_ADMIN:
1067 data_ptrs[0] = &ccb->nvmeio.data_ptr;
1068 lengths[0] = ccb->nvmeio.dxfer_len;
1069 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1070 numbufs = 1;
1071 break;
1072 case XPT_DEV_ADVINFO:
1073 data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
1074 lengths[0] = ccb->cdai.bufsiz;
1075 dirs[0] = CAM_DIR_IN;
1076 numbufs = 1;
1077 break;
1078 default:
1079 /* allow ourselves to be swapped once again */
1080 PRELE(curproc);
1081 return;
1082 break; /* NOTREACHED */
1083 }
1084
1085 for (i = 0; i < numbufs; i++) {
1086 if (mapinfo->bp[i]) {
1087 /* unmap the buffer */
1088 vunmapbuf(mapinfo->bp[i]);
1089
1090 /* release the buffer */
1091 uma_zfree(pbuf_zone, mapinfo->bp[i]);
1092 } else {
1093 if (dirs[i] != CAM_DIR_OUT) {
1094 copyout(*data_ptrs[i], mapinfo->orig[i],
1095 lengths[i]);
1096 }
1097 free(*data_ptrs[i], M_CAMPERIPH);
1098 }
1099
1100 /* Set the user's pointer back to the original value */
1101 *data_ptrs[i] = mapinfo->orig[i];
1102 }
1103
1104 /* allow ourselves to be swapped once again */
1105 PRELE(curproc);
1106}
1107
1108int
1109cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
1110 int (*error_routine)(union ccb *ccb,
1111 cam_flags camflags,
1112 u_int32_t sense_flags))
1113{
1114 union ccb *ccb;
1115 int error;
1116 int found;
1117
1118 error = found = 0;
1119
1120 switch(cmd){
1121 case CAMGETPASSTHRU:
1124 ccb->ccb_h.path,
1127
1128 /*
1129 * Basically, the point of this is that we go through
1130 * getting the list of devices, until we find a passthrough
1131 * device. In the current version of the CAM code, the
1132 * only way to determine what type of device we're dealing
1133 * with is by its name.
1134 */
1135 while (found == 0) {
1136 ccb->cgdl.index = 0;
1139 /* we want the next device in the list */
1140 xpt_action(ccb);
1141 if (strncmp(ccb->cgdl.periph_name,
1142 "pass", 4) == 0){
1143 found = 1;
1144 break;
1145 }
1146 }
1148 (found == 0)) {
1149 ccb->cgdl.periph_name[0] = '\0';
1150 ccb->cgdl.unit_number = 0;
1151 break;
1152 }
1153 }
1154
1155 /* copy the result back out */
1156 bcopy(ccb, addr, sizeof(union ccb));
1157
1158 /* and release the ccb */
1160
1161 break;
1162 default:
1163 error = ENOTTY;
1164 break;
1165 }
1166 return(error);
1167}
1168
/*
 * Sentinel completion callback: being called here means a CCB that was
 * already completed (per the panic message) got completed a second time,
 * which is always a driver bug worth stopping the machine for.
 */
static void
cam_periph_done_panic(struct cam_periph *periph, union ccb *done_ccb)
{
	panic("%s: already done with ccb %p", __func__, done_ccb);
}
1175
1176static void
1177cam_periph_done(struct cam_periph *periph, union ccb *done_ccb)
1178{
1179
1180 /* Caller will release the CCB */
1181 xpt_path_assert(done_ccb->ccb_h.path, MA_OWNED);
1183 wakeup(&done_ccb->ccb_h.cbfcnp);
1184}
1185
/*
 * Sleep until a queued CCB has completed.  Only function codes with
 * XPT_FC_QUEUED set are asynchronous and need to be waited for; the
 * trailing KASSERT catches a CCB that is still queued or in progress.
 */
static void
{

	if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
		     PRIBIO, "cbwait", 0);
	}
	KASSERT(ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX &&
	    ("%s: proceeding with incomplete ccb: ccb=%p, func_code=%#x, "
	    "status=%#x, index=%d", __func__, ccb, ccb->ccb_h.func_code,
}
1201
1202/*
1203 * Dispatch a CCB and wait for it to complete. If the CCB has set a
1204 * callback function (ccb->ccb_h.cbfcnp), it will be overwritten and lost.
1205 */
int
	      int (*error_routine)(union ccb *ccb,
				   cam_flags camflags,
				   u_int32_t sense_flags),
	      cam_flags camflags, u_int32_t sense_flags,
	      struct devstat *ds)
{
	struct bintime *starttime;
	struct bintime ltime;
	int error;
	bool must_poll;
	uint32_t timeout = 1;	/* nonzero default: timeout == 0 below means "can't dispatch" */

	starttime = NULL;
	xpt_path_assert(ccb->ccb_h.path, MA_OWNED);
	KASSERT((ccb->ccb_h.flags & CAM_UNLOCKED) == 0,
	    ("%s: ccb=%p, func_code=%#x, flags=%#x", __func__, ccb,

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if (ds != NULL &&
		starttime = &ltime;
		binuptime(starttime);
		devstat_start_transaction(ds, starttime);
	}

	/*
	 * We must poll the I/O while we're dumping. The scheduler is normally
	 * stopped for dumping, except when we call doadump from ddb. While the
	 * scheduler is running in this case, we still need to poll the I/O to
	 * avoid sleeping waiting for the ccb to complete.
	 *
	 * A panic triggered dump stops the scheduler, any callback from the
	 * shutdown_post_sync event will run with the scheduler stopped, but
	 * before we're officially dumping. To avoid hanging in adashutdown
	 * initiated commands (or other similar situations), we have to test for
	 * either SCHEDULER_STOPPED() here as well.
	 *
	 * To avoid locking problems, dumping/polling callers must call
	 * without a periph lock held.
	 */
	must_poll = dumping || SCHEDULER_STOPPED();

	/*
	 * If we're polling, then we need to ensure that we have ample resources
	 * in the periph. cam_periph_error can reschedule the ccb by calling
	 * xpt_action and returning ERESTART, so we have to effect the polling
	 * in the do loop below.
	 */
	if (must_poll) {
			timeout = xpt_poll_setup(ccb);
		else
			timeout = 0;
	}

	if (timeout == 0) {
		error = EBUSY;
	} else {
		xpt_action(ccb);
		do {
			if (must_poll) {
				xpt_pollwait(ccb, timeout);
				timeout = ccb->ccb_h.timeout * 10;
			} else {
			}
				error = 0;
			else if (error_routine != NULL) {
				error = (*error_routine)(ccb, camflags, sense_flags);
			} else
				error = 0;
		} while (error == ERESTART);
	}

	/* Release any device-queue freeze the completed CCB left behind. */
	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
		    /* relsim_flags */0,
		    /* openings */0,
		    /* timeout */0,
		    /* getcount_only */ FALSE);
		ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}

	/* Record transaction completion for devstat(9), when requested. */
	if (ds != NULL) {
		uint32_t bytes;
		devstat_tag_type tag;
		bool valid = true;

		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			bytes = ccb->csio.dxfer_len - ccb->csio.resid;
			tag = (devstat_tag_type)(ccb->csio.tag_action & 0x3);
		} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
			bytes = ccb->ataio.dxfer_len - ccb->ataio.resid;
			tag = (devstat_tag_type)0;
		} else if (ccb->ccb_h.func_code == XPT_NVME_IO) {
			bytes = ccb->nvmeio.dxfer_len; /* NB: resid no possible */
			tag = (devstat_tag_type)0;
		} else {
			valid = false;
		}
		if (valid)
			devstat_end_transaction(ds, bytes, tag,
			    DEVSTAT_NO_DATA : (ccb->ccb_h.flags & CAM_DIR_OUT) ?
			    DEVSTAT_WRITE : DEVSTAT_READ, NULL, starttime);
	}

	return(error);
}
1327
/*
 * Freeze the device queue for the given path by submitting a no-op CCB
 * with the CAM_DEV_QFREEZE flag set.  A matching cam_release_devq() is
 * required to unfreeze it.
 */
void
{
	struct ccb_hdr ccb_h;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n"));
	memset(&ccb_h, 0, sizeof(ccb_h));
	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
	ccb_h.func_code = XPT_NOOP;
	ccb_h.flags = CAM_DEV_QFREEZE;
	xpt_action((union ccb *)&ccb_h);
}
1340
/*
 * Release (unfreeze) the device queue for the given path via a relsim
 * CCB.  relsim_flags/openings/arg tune how the release is performed
 * (e.g. adjust openings or release after a timeout); with getcount_only
 * the queue is re-frozen so only the count is sampled.  Returns the
 * queue's freeze count as reported back in the CCB.
 */
u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
		 u_int32_t openings, u_int32_t arg,
		 int getcount_only)
{
	struct ccb_relsim crs;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n",
	    relsim_flags, openings, arg, getcount_only));
	memset(&crs, 0, sizeof(crs));
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = arg;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}
1360
1361#define saved_ccb_ptr ppriv_ptr0
/*
 * Completion handler for error-recovery CCBs (e.g. START STOP UNIT or
 * TEST UNIT READY) issued on behalf of a command that failed.  When the
 * recovery command finishes, the original CCB -- stashed in
 * saved_ccb_ptr by the error-recovery code -- is restored into the done
 * CCB and re-dispatched.
 */
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb *saved_ccb;
	cam_status status;
	struct scsi_start_stop_unit *scsi_cmd;
	int error = 0, error_code, sense_key, asc, ascq;
	u_int16_t done_flags;

	scsi_cmd = (struct scsi_start_stop_unit *)
	    &done_ccb->csio.cdb_io.cdb_bytes;
	status = done_ccb->ccb_h.status;

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if (scsi_extract_sense_ccb(done_ccb,
		    &error_code, &sense_key, &asc, &ascq)) {
			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again. This is just in
			 * case the drive in question barfs on the
			 * load eject flag. The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			    (asc == 0x24) && (ascq == 0x00)) {
				scsi_cmd->how &= ~SSS_LOEJ;
				if (status & CAM_DEV_QFRZN) {
					cam_release_devq(done_ccb->ccb_h.path,
							 0, 0, 0, 0);
					done_ccb->ccb_h.status &=
					    ~CAM_DEV_QFRZN;
				}
				xpt_action(done_ccb);
				goto out;
			}
		}
		error = cam_periph_error(done_ccb, 0,
		if (error == ERESTART)
			goto out;
		if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
			cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
			done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		}
	} else {
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information. Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 */
		if (scsi_cmd->opcode == START_STOP_UNIT)
			xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL);
	}

	/* If we tried long wait and still failed, remember that. */
	if ((periph->flags & CAM_PERIPH_RECOVERY_WAIT) &&
	    (done_ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY)) {
		periph->flags &= ~CAM_PERIPH_RECOVERY_WAIT;
		if (error != 0 && done_ccb->ccb_h.retry_count == 0)
	}

	/*
	 * After recovery action(s) completed, return to the original CCB.
	 * If the recovery CCB has failed, considering its own possible
	 * retries and recovery, assume we are back in state where we have
	 * been originally, but without recovery hopes left. In such case,
	 * after the final attempt below, we cancel any further retries,
	 * blocking by that also any new recovery attempts for this CCB,
	 * and the result will be the final one returned to the CCB owher.
	 */

	/*
	 * Copy the CCB back, preserving the alloc_flags field. Things
	 * will crash horribly if the CCBs are not of the same size.
	 */
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
	KASSERT(saved_ccb->ccb_h.func_code == XPT_SCSI_IO,
	    ("%s: saved_ccb func_code %#x != XPT_SCSI_IO",
	     __func__, saved_ccb->ccb_h.func_code));
	KASSERT(done_ccb->ccb_h.func_code == XPT_SCSI_IO,
	    ("%s: done_ccb func_code %#x != XPT_SCSI_IO",
	     __func__, done_ccb->ccb_h.func_code));
	done_flags = done_ccb->ccb_h.alloc_flags;
	bcopy(saved_ccb, done_ccb, sizeof(struct ccb_scsiio));
	done_ccb->ccb_h.alloc_flags = done_flags;
	xpt_free_ccb(saved_ccb);
	if (done_ccb->ccb_h.cbfcnp != camperiphdone)
		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
	if (error != 0)
		done_ccb->ccb_h.retry_count = 0;
	xpt_action(done_ccb);

out:
	/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
	cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
}
1464
1465/*
1466 * Generic Async Event handler. Peripheral drivers usually
1467 * filter out the events that require personal attention,
1468 * and leave the rest to this function.
1469 */
1470void
1471cam_periph_async(struct cam_periph *periph, u_int32_t code,
1472 struct cam_path *path, void *arg)
1473{
1474 switch (code) {
1475 case AC_LOST_DEVICE:
1476 cam_periph_invalidate(periph);
1477 break;
1478 default:
1479 break;
1480 }
1481}
1482
/*
 * Give the bus time to settle after a reset: fetch the device's stats
 * (which include the time of the last reset) and freeze the periph's
 * queue for bus_settle ms measured from that event.
 */
void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	memset(&cgds, 0, sizeof(cgds));
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}
1494
/*
 * If less than duration_ms has elapsed since *event_time, freeze the
 * device queue and schedule a timed release for the remaining portion
 * of the interval, giving the device time to settle after the event.
 */
void
    struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;

	/* Nothing to do if the event never happened. */
	if (!timevalisset(event_time))
		return;

	microtime(&delta);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		/* Convert the remaining settle time back to milliseconds. */
		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
				/*reduction*/0,
				/*timeout*/duration_ms,
				/*getcount_only*/0);
	}

}
1523
/*
 * Decide how to react to a SCSI-status error in a completed CCB.
 * Sense-bearing statuses are delegated to camperiphscsisenseerror();
 * QUEUE FULL shrinks the number of openings (bounded by the device's
 * minimum tag count); BUSY schedules a timed retry.  Returns 0,
 * ERESTART (CCB requeued), or an errno value, and may update *openings,
 * *relsim_flags, *timeout, *action and *action_string for the caller.
 */
static int
camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb,
    cam_flags camflags, u_int32_t sense_flags,
    int *openings, u_int32_t *relsim_flags,
    u_int32_t *timeout, u_int32_t *action, const char **action_string)
{
	struct cam_periph *periph;
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
		error = 0;
		break;
		error = camperiphscsisenseerror(ccb, orig_ccb,
						camflags,
						sense_flags,
						openings,
						relsim_flags,
						timeout,
						action,
						action_string);
		break;
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		memset(&cgds, 0, sizeof(cgds));
		xpt_setup_ccb(&cgds.ccb_h,
			      ccb->ccb_h.path,
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages. For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimisticly low value. We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			*action &= ~SSQ_PRINT_SENSE;
			break;
		}
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		periph = xpt_path_periph(ccb->ccb_h.path);
		if (periph->flags & CAM_PERIPH_INVALID) {
			error = EIO;
			*action_string = "Periph was invalidated";
		} else if ((sense_flags & SF_RETRY_BUSY) != 0 ||
		    ccb->ccb_h.retry_count > 0) {
			if ((sense_flags & SF_RETRY_BUSY) == 0)
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
			*timeout = 1000;
		} else {
			error = EIO;
			*action_string = "Retries exhausted";
		}
		break;
	default:
		error = EIO;
		break;
	}
	return (error);
}
1638
/*
 * Handle a command that failed with SCSI sense data.  Consults the
 * sense tables via scsi_error_action() and, when the chosen recovery
 * requires its own command (start unit or test unit ready), saves the
 * original CCB in saved_ccb_ptr and rewrites the CCB into the recovery
 * command, to be restored later by camperiphdone().  Returns 0,
 * ERESTART, or an errno value, and may update *openings,
 * *relsim_flags, *timeout, *action and *action_string.
 */
static int
camperiphscsisenseerror(union ccb *ccb, union ccb **orig,
    cam_flags camflags, u_int32_t sense_flags,
    int *openings, u_int32_t *relsim_flags,
    u_int32_t *timeout, u_int32_t *action, const char **action_string)
{
	struct cam_periph *periph;
	union ccb *orig_ccb = ccb;
	int error, recoveryccb;
	u_int16_t flags;

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (ccb->ccb_h.func_code == XPT_SCSI_IO && ccb->csio.bio != NULL)
		biotrack(ccb->csio.bio, __func__);
#endif

	periph = xpt_path_periph(ccb->ccb_h.path);
	recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone);
	if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) {
		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed. This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action so our current recovery efforts should also
		 * address this command. Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperitive that we don't violate this assumption.
		 */
		error = ERESTART;
		*action &= ~SSQ_PRINT_SENSE;
	} else {
		scsi_sense_action err_action;
		struct ccb_getdev cgd;

		/*
		 * Grab the inquiry data for this device.
		 */
		memset(&cgd, 0, sizeof(cgd));
		xpt_action((union ccb *)&cgd);

		err_action = scsi_error_action(&ccb->csio, &cgd.inq_data,
		    sense_flags);
		error = err_action & SS_ERRMASK;

		/*
		 * Do not autostart sequential access devices
		 * to avoid unexpected tape loading.
		 */
		if ((err_action & SS_MASK) == SS_START &&
		    SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
			*action_string = "Will not autostart a "
			    "sequential access device";
			goto sense_error_done;
		}

		/*
		 * Avoid recovery recursion if recovery action is the same.
		 */
		if ((err_action & SS_MASK) >= SS_START && recoveryccb) {
			if (((err_action & SS_MASK) == SS_START &&
			    ((err_action & SS_MASK) == SS_TUR &&
				err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
				*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				*timeout = 500;
			}
		}

		/*
		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		 */
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
			if (ccb->ccb_h.retry_count > 0 &&
			    (periph->flags & CAM_PERIPH_INVALID) == 0)
			else {
				*action_string = "Retries exhausted";
				goto sense_error_done;
			}
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Do common portions of commands that
			 * use recovery CCBs.
			 */
			orig_ccb = xpt_alloc_ccb_nowait();
			if (orig_ccb == NULL) {
				*action_string = "Can't allocate recovery CCB";
				goto sense_error_done;
			}
			/*
			 * Clear freeze flag for original request here, as
			 * this freeze will be dropped as part of ERESTART.
			 */
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;

			KASSERT(ccb->ccb_h.func_code == XPT_SCSI_IO,
			    ("%s: ccb func_code %#x != XPT_SCSI_IO",
			     __func__, ccb->ccb_h.func_code));
			flags = orig_ccb->ccb_h.alloc_flags;
			bcopy(ccb, orig_ccb, sizeof(struct ccb_scsiio));
			orig_ccb->ccb_h.alloc_flags = flags;
		}

		switch (err_action & SS_MASK) {
		case SS_NOP:
			*action_string = "No recovery action needed";
			error = 0;
			break;
		case SS_RETRY:
			*action_string = "Retrying command (per sense data)";
			error = ERESTART;
			break;
		case SS_FAIL:
			*action_string = "Unretryable error";
			break;
		case SS_START:
		{
			int le;

			/*
			 * Send a start unit command to the device, and
			 * then retry the command.
			 */
			*action_string = "Attempting to start unit";

			/*
			 * Check for removable media and set
			 * load/eject flag appropriately.
			 */
			if (SID_IS_REMOVABLE(&cgd.inq_data))
				le = TRUE;
			else
				le = FALSE;

					/*retries*/1,
					/*start*/TRUE,
					/*load/eject*/le,
					/*immediate*/FALSE,
					/*timeout*/50000);
			break;
		}
		case SS_TUR:
		{
			/*
			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second. Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			 */
			int retries;

			if ((err_action & SSQ_MANY) != 0 && (periph->flags &
				*action_string = "Polling device for readiness";
				retries = 120;
			} else {
				*action_string = "Testing device for readiness";
				retries = 1;
			}
				      retries,
				      /*timeout*/5000);

			/*
			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			 */
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
			*timeout = 500;
			break;
		}
		default:
			panic("Unhandled error action %x", err_action);
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Drop the priority, so that the recovery
			 * CCB is the first to execute. Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			 */
			ccb->ccb_h.saved_ccb_ptr = orig_ccb;
			error = ERESTART;
			*orig = orig_ccb;
		}

sense_error_done:
		*action = err_action;
	}
	return (error);
}
1857
1858/*
1859 * Generic error handler. Peripheral drivers usually filter
1860 * out the errors that they handle in a unique manner, then
1861 * call this function.
1862 */
/*
 * Returns 0 when the command completed (or its error was recovered),
 * ERESTART when the CCB has been re-queued for another attempt, or an
 * errno value when the command has definitively failed.
 */
int
    u_int32_t sense_flags)
{
	struct cam_path *newpath;
	union ccb *orig_ccb, *scan_ccb;
	struct cam_periph *periph;
	const char *action_string;
	cam_status status;
	int frozen, error, openings, devctl_err;
	u_int32_t action, relsim_flags, timeout;

	action = SSQ_PRINT_SENSE;
	periph = xpt_path_periph(ccb->ccb_h.path);
	action_string = NULL;
	status = ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	devctl_err = openings = relsim_flags = timeout = 0;
	orig_ccb = ccb;

	/* Filter the errors that should be reported via devctl */
	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
	case CAM_REQ_ABORTED:
	case CAM_REQ_CMP_ERR:
	case CAM_REQ_TERMIO:
	case CAM_DATA_RUN_ERR:
		devctl_err++;
		break;
	default:
		break;
	}

	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		action &= ~SSQ_PRINT_SENSE;
		break;
		error = camperiphscsistatuserror(ccb, &orig_ccb,
		    camflags, sense_flags, &openings, &relsim_flags,
		    &timeout, &action, &action_string);
		break;
	case CAM_AUTOSENSE_FAIL:
		error = EIO;	/* we have to kill the command */
		break;
	case CAM_UA_ABORT:
	case CAM_UA_TERMIO:
	case CAM_MSG_REJECT_REC:
		/* XXX Don't know that these are correct */
		error = EIO;
		break;
	case CAM_SEL_TIMEOUT:
		if ((camflags & CAM_RETRY_SELTO) != 0) {
			if (ccb->ccb_h.retry_count > 0 &&
			    (periph->flags & CAM_PERIPH_INVALID) == 0) {
				error = ERESTART;

				/*
				 * Wait a bit to give the device
				 * time to recover before we try again.
				 */
				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = periph_selto_delay;
				break;
			}
			action_string = "Retries exhausted";
		}
		/* FALLTHROUGH */
	case CAM_DEV_NOT_THERE:
		error = ENXIO;
		action = SSQ_LOST;
		break;
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
	case CAM_NO_HBA:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TOO_BIG:
	case CAM_LUN_INVALID:
	case CAM_TID_INVALID:
	case CAM_FUNC_NOTAVAIL:
		error = EINVAL;
		break;
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
		/*
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions, should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		 * retried.
		 */
	case CAM_REQUEUE_REQ:
		/* Unconditional requeue if device is still there */
		if (periph->flags & CAM_PERIPH_INVALID) {
			action_string = "Periph was invalidated";
			error = EIO;
		} else if (sense_flags & SF_NO_RETRY) {
			error = EIO;
			action_string = "Retry was blocked";
		} else {
			error = ERESTART;
			action &= ~SSQ_PRINT_SENSE;
		}
		break;
	case CAM_RESRC_UNAVAIL:
		/* Wait a bit for the resource shortage to abate. */
		timeout = periph_noresrc_delay;
		/* FALLTHROUGH */
	case CAM_BUSY:
		if (timeout == 0) {
			/* Wait a bit for the busy condition to abate. */
			timeout = periph_busy_delay;
		}
		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
		/* FALLTHROUGH */
	case CAM_REQ_CMP_ERR:
	case CAM_CMD_TIMEOUT:
	case CAM_UNEXP_BUSFREE:
	case CAM_UNCOR_PARITY:
	case CAM_DATA_RUN_ERR:
	default:
		if (periph->flags & CAM_PERIPH_INVALID) {
			error = EIO;
			action_string = "Periph was invalidated";
		} else if (ccb->ccb_h.retry_count == 0) {
			error = EIO;
			action_string = "Retries exhausted";
		} else if (sense_flags & SF_NO_RETRY) {
			error = EIO;
			action_string = "Retry was blocked";
		} else {
			error = ERESTART;
		}
		break;
	}

	/* Decide whether to log the error, honoring SF_PRINT_ALWAYS/SF_NO_PRINT. */
	if ((sense_flags & SF_PRINT_ALWAYS) ||
		action |= SSQ_PRINT_SENSE;
	else if (sense_flags & SF_NO_PRINT)
		action &= ~SSQ_PRINT_SENSE;
	if ((action & SSQ_PRINT_SENSE) != 0)
	if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) {
		if (error != ERESTART) {
			if (action_string == NULL)
				action_string = "Unretryable error";
			xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
			    error, action_string);
		} else if (action_string != NULL)
			xpt_print(ccb->ccb_h.path, "%s\n", action_string);
		else {
			    "Retrying command, %d more tries remain\n",
		}
	}

	if (devctl_err && (error != 0 || (action & SSQ_PRINT_SENSE) != 0))
		cam_periph_devctl_notify(orig_ccb);

	if ((action & SSQ_LOST) != 0) {
		lun_id_t lun_id;

		/*
		 * For a selection timeout, we consider all of the LUNs on
		 * the target to be gone. If the status is CAM_DEV_NOT_THERE,
		 * then we only get rid of the device(s) specified by the
		 * path in the original CCB.
		 */
		if (status == CAM_SEL_TIMEOUT)
			lun_id = CAM_LUN_WILDCARD;
		else
			lun_id = xpt_path_lun_id(ccb->ccb_h.path);

		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, periph,
				    lun_id) == CAM_REQ_CMP) {
			/*
			 * Let peripheral drivers know that this
			 * device has gone away.
			 */
			xpt_async(AC_LOST_DEVICE, newpath, NULL);
			xpt_free_path(newpath);
		}
	}

	/* Broadcast UNIT ATTENTIONs to all periphs. */
	if ((action & SSQ_UA) != 0)
		xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb);

	/* Rescan target on "Reported LUNs data has changed" */
	if ((action & SSQ_RESCAN) != 0) {
		if (xpt_create_path(&newpath, NULL,
			scan_ccb = xpt_alloc_ccb_nowait();
			if (scan_ccb != NULL) {
				scan_ccb->ccb_h.path = newpath;
				scan_ccb->ccb_h.func_code = XPT_SCAN_TGT;
				scan_ccb->crcn.flags = 0;
				xpt_rescan(scan_ccb);
			} else {
				xpt_print(newpath,
				    "Can't allocate CCB to rescan target\n");
				xpt_free_path(newpath);
			}
		}
	}

	/* Attempt a retry */
	if (error == ERESTART || error == 0) {
		if (frozen != 0)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		if (error == ERESTART)
			xpt_action(ccb);
		if (frozen != 0)
					 relsim_flags,
					 openings,
					 timeout,
					 /*getcount_only*/0);
	}

	return (error);
}
2102
2103#define CAM_PERIPH_DEVD_MSG_SIZE 256
2104
/*
 * Emit a devctl(4) "CAM"/"periph" notification describing a failed CCB:
 * device name and unit, serial number, raw CAM status and, depending on
 * the protocol, the timeout, SCSI status/sense, or ATA result and
 * command bytes.  Best effort: silently returns if the message buffer
 * cannot be allocated.
 */
static void
{
	struct cam_periph *periph;
	struct ccb_getdev *cgd;
	struct sbuf sb;
	int serr, sk, asc, ascq;
	char *sbmsg, *type;

	sbmsg = malloc(CAM_PERIPH_DEVD_MSG_SIZE, M_CAMPERIPH, M_NOWAIT);
	if (sbmsg == NULL)
		return;

	sbuf_new(&sb, sbmsg, CAM_PERIPH_DEVD_MSG_SIZE, SBUF_FIXEDLEN);

	periph = xpt_path_periph(ccb->ccb_h.path);
	sbuf_printf(&sb, "device=%s%d ", periph->periph_name,
	    periph->unit_number);

	sbuf_printf(&sb, "serial=\"");
	if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) != NULL) {
		xpt_action((union ccb *)cgd);

		if (cgd->ccb_h.status == CAM_REQ_CMP)
			sbuf_bcat(&sb, cgd->serial_num, cgd->serial_num_len);
		xpt_free_ccb((union ccb *)cgd);
	}
	sbuf_printf(&sb, "\" ");
	sbuf_printf(&sb, "cam_status=\"0x%x\" ", ccb->ccb_h.status);

	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
		sbuf_printf(&sb, "timeout=%d ", ccb->ccb_h.timeout);
		type = "timeout";
		break;
		sbuf_printf(&sb, "scsi_status=%d ", ccb->csio.scsi_status);
		if (scsi_extract_sense_ccb(ccb, &serr, &sk, &asc, &ascq))
			sbuf_printf(&sb, "scsi_sense=\"%02x %02x %02x %02x\" ",
			    serr, sk, asc, ascq);
		type = "error";
		break;
		sbuf_printf(&sb, "RES=\"");
		ata_res_sbuf(&ccb->ataio.res, &sb);
		sbuf_printf(&sb, "\" ");
		type = "error";
		break;
	default:
		type = "error";
		break;
	}

	if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
		sbuf_printf(&sb, "CDB=\"");
		sbuf_printf(&sb, "\" ");
	} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
		sbuf_printf(&sb, "ACB=\"");
		ata_cmd_sbuf(&ccb->ataio.cmd, &sb);
		sbuf_printf(&sb, "\" ");
	}

	if (sbuf_finish(&sb) == 0)
		devctl_notify("CAM", "periph", type, sbuf_data(&sb));
	sbuf_delete(&sb);
	free(sbmsg, M_CAMPERIPH);
}
2176
2177/*
2178 * Sysctl to force an invalidation of the drive right now. Can be
2179 * called with CTLFLAG_MPSAFE since we take periph lock.
2180 */
int
{
	struct cam_periph *periph;
	int error, value;

	periph = arg1;
	value = 0;
	error = sysctl_handle_int(oidp, &value, 0, req);
	/* Only act on an explicit write of the value 1. */
	if (error != 0 || req->newptr == NULL || value != 1)
		return (error);

	cam_periph_lock(periph);
	cam_periph_invalidate(periph);
	cam_periph_unlock(periph);

	return (0);
}
int ata_res_sbuf(struct ata_res *res, struct sbuf *sb)
Definition: ata_all.c:368
void ata_cmd_sbuf(struct ata_cmd *cmd, struct sbuf *sb)
Definition: ata_all.c:333
void cam_error_print(union ccb *ccb, cam_error_string_flags flags, cam_error_proto_flags proto_flags)
Definition: cam.c:510
@ CAM_EPF_ALL
Definition: cam.h:324
@ SF_PRINT_ALWAYS
Definition: cam.h:125
@ SF_RETRY_UA
Definition: cam.h:122
@ SF_RETRY_BUSY
Definition: cam.h:128
@ SF_NO_RETRY
Definition: cam.h:127
@ SF_NO_PRINT
Definition: cam.h:123
cam_flags
Definition: cam.h:115
@ CAM_RETRY_SELTO
Definition: cam.h:118
#define CAM_LUN_WILDCARD
Definition: cam.h:49
#define CAM_PRIORITY_NORMAL
Definition: cam.h:92
#define CAM_PRIORITY_NONE
Definition: cam.h:93
#define CAM_UNQUEUED_INDEX
Definition: cam.h:96
u_int path_id_t
Definition: cam.h:42
@ CAM_ESF_ALL
Definition: cam.h:317
cam_status
Definition: cam.h:132
@ CAM_ATA_STATUS_ERROR
Definition: cam.h:225
@ CAM_PATH_INVALID
Definition: cam.h:155
@ CAM_SCSI_BUS_RESET
Definition: cam.h:176
@ CAM_REQ_INVALID
Definition: cam.h:152
@ CAM_REQ_INPROG
Definition: cam.h:134
@ CAM_BUSY
Definition: cam.h:149
@ CAM_AUTOSENSE_FAIL
Definition: cam.h:182
@ CAM_PROVIDE_FAIL
Definition: cam.h:200
@ CAM_UA_TERMIO
Definition: cam.h:161
@ CAM_REQ_CMP
Definition: cam.h:137
@ CAM_REQUEUE_REQ
Definition: cam.h:222
@ CAM_NO_HBA
Definition: cam.h:185
@ CAM_DEV_NOT_THERE
Definition: cam.h:158
@ CAM_CMD_TIMEOUT
Definition: cam.h:167
@ CAM_UA_ABORT
Definition: cam.h:143
@ CAM_RESRC_UNAVAIL
Definition: cam.h:247
@ CAM_UNCOR_PARITY
Definition: cam.h:179
@ CAM_REQ_TOO_BIG
Definition: cam.h:212
@ CAM_REQ_CMP_ERR
Definition: cam.h:146
@ CAM_STATUS_MASK
Definition: cam.h:302
@ CAM_MSG_REJECT_REC
Definition: cam.h:173
@ CAM_REQ_ABORTED
Definition: cam.h:140
@ CAM_LUN_INVALID
Definition: cam.h:259
@ CAM_SMP_STATUS_ERROR
Definition: cam.h:231
@ CAM_FUNC_NOTAVAIL
Definition: cam.h:265
@ CAM_REQ_TERMIO
Definition: cam.h:206
@ CAM_UNREC_HBA_ERROR
Definition: cam.h:209
@ CAM_UNEXP_BUSFREE
Definition: cam.h:191
@ CAM_SCSI_STATUS_ERROR
Definition: cam.h:170
@ CAM_BDR_SENT
Definition: cam.h:203
@ CAM_TID_INVALID
Definition: cam.h:262
@ CAM_SEL_TIMEOUT
Definition: cam.h:164
@ CAM_DATA_RUN_ERR
Definition: cam.h:188
@ CAM_DEV_QFRZN
Definition: cam.h:287
u_int target_id_t
Definition: cam.h:43
u_int64_t lun_id_t
Definition: cam.h:44
@ CAM_GDEVLIST_MORE_DEVS
Definition: cam_ccb.h:411
@ CAM_GDEVLIST_LAST_DEVICE
Definition: cam_ccb.h:409
ac_code
Definition: cam_ccb.h:866
@ AC_FOUND_DEVICE
Definition: cam_ccb.h:874
@ AC_UNIT_ATTENTION
Definition: cam_ccb.h:867
@ AC_PATH_REGISTERED
Definition: cam_ccb.h:876
@ AC_INQ_CHANGED
Definition: cam_ccb.h:871
@ AC_LOST_DEVICE
Definition: cam_ccb.h:873
#define RELSIM_RELEASE_AFTER_TIMEOUT
Definition: cam_ccb.h:841
#define RELSIM_ADJUST_OPENINGS
Definition: cam_ccb.h:840
void ac_callback_t(void *softc, u_int32_t code, struct cam_path *path, void *args)
Definition: cam_ccb.h:883
@ CAM_DIR_IN
Definition: cam_ccb.h:79
@ CAM_DIR_NONE
Definition: cam_ccb.h:81
@ CAM_DIR_MASK
Definition: cam_ccb.h:82
@ CAM_DEV_QFREEZE
Definition: cam_ccb.h:92
@ CAM_DATA_VADDR
Definition: cam_ccb.h:83
@ CAM_UNLOCKED
Definition: cam_ccb.h:119
@ CAM_DATA_MASK
Definition: cam_ccb.h:88
@ CAM_DIR_OUT
Definition: cam_ccb.h:80
@ XPT_DEV_ADVINFO
Definition: cam_ccb.h:166
@ XPT_GDEV_STATS
Definition: cam_ccb.h:164
@ XPT_NVME_ADMIN
Definition: cam_ccb.h:227
@ XPT_FC_QUEUED
Definition: cam_ccb.h:131
@ XPT_SCSI_IO
Definition: cam_ccb.h:141
@ XPT_SCAN_TGT
Definition: cam_ccb.h:223
@ XPT_NOOP
Definition: cam_ccb.h:139
@ XPT_MMC_IO
Definition: cam_ccb.h:220
@ XPT_NVME_IO
Definition: cam_ccb.h:217
@ XPT_GDEVLIST
Definition: cam_ccb.h:145
@ XPT_REL_SIMQ
Definition: cam_ccb.h:149
@ XPT_ATA_IO
Definition: cam_ccb.h:199
@ XPT_SMP_IO
Definition: cam_ccb.h:214
@ XPT_GDEV_TYPE
Definition: cam_ccb.h:143
@ XPT_DEV_MATCH
Definition: cam_ccb.h:158
@ XPT_CONT_TARGET_IO
Definition: cam_ccb.h:243
static __inline uint8_t * scsiio_cdb_ptr(struct ccb_scsiio *ccb)
Definition: cam_ccb.h:782
#define RELSIM_RELEASE_AFTER_CMDCMPLT
Definition: cam_ccb.h:842
@ CAM_DEBUG_TRACE
Definition: cam_debug.h:41
@ CAM_DEBUG_INFO
Definition: cam_debug.h:40
#define CAM_DEBUG(path, flag, printfargs)
Definition: cam_debug.h:93
#define CAM_DEBUGGED(path, flag)
Definition: cam_debug.h:87
void cam_periph_release_locked(struct cam_periph *periph)
Definition: cam_periph.c:453
int cam_periph_list(struct cam_path *path, struct sbuf *sb)
Definition: cam_periph.c:370
static u_int periph_mapmem_thresh
Definition: cam_periph.c:110
void periphdriver_init(int level)
Definition: cam_periph.c:184
static u_int camperiphunit(struct periph_driver *p_drv, path_id_t pathid, target_id_t target, lun_id_t lun, const char *sn)
Definition: cam_periph.c:600
void cam_freeze_devq(struct cam_path *path)
Definition: cam_periph.c:1329
int periphdriver_unregister(void *data)
Definition: cam_periph.c:152
struct cam_periph * cam_periph_find(struct cam_path *path, char *name)
Definition: cam_periph.c:340
static int periph_noresrc_delay
Definition: cam_periph.c:105
u_int32_t cam_release_devq(struct cam_path *path, u_int32_t relsim_flags, u_int32_t openings, u_int32_t arg, int getcount_only)
Definition: cam_periph.c:1342
static void cam_periph_done_panic(struct cam_periph *periph, union ccb *done_ccb)
Definition: cam_periph.c:1170
static void cam_periph_ccbwait(union ccb *ccb)
Definition: cam_periph.c:1187
static void cam_periph_devctl_notify(union ccb *ccb)
Definition: cam_periph.c:2106
void periphdriver_register(void *data)
Definition: cam_periph.c:115
static int nperiph_drivers
Definition: cam_periph.c:97
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay)
void cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
Definition: cam_periph.c:1005
int cam_periph_acquire(struct cam_periph *periph)
Definition: cam_periph.c:413
SYSCTL_UINT(_kern_cam, OID_AUTO, mapmem_thresh, CTLFLAG_RWTUN, &periph_mapmem_thresh, 0, "Threshold for user-space buffer mapping")
void cam_periph_doacquire(struct cam_periph *periph)
Definition: cam_periph.c:432
static int periph_selto_delay
Definition: cam_periph.c:103
#define CAM_PERIPH_DEVD_MSG_SIZE
Definition: cam_periph.c:2103
static int periph_busy_delay
Definition: cam_periph.c:107
void cam_periph_freeze_after_event(struct cam_periph *periph, struct timeval *event_time, u_int duration_ms)
Definition: cam_periph.c:1496
void cam_periph_async(struct cam_periph *periph, u_int32_t code, struct cam_path *path, void *arg)
Definition: cam_periph.c:1471
void cam_periph_release(struct cam_periph *periph)
Definition: cam_periph.c:465
static void camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
Definition: cam_periph.c:1363
void cam_periph_unhold(struct cam_periph *periph)
Definition: cam_periph.c:519
static int camperiphscsisenseerror(union ccb *ccb, union ccb **orig_ccb, cam_flags camflags, u_int32_t sense_flags, int *openings, u_int32_t *relsim_flags, u_int32_t *timeout, u_int32_t *action, const char **action_string)
Definition: cam_periph.c:1640
__FBSDID("$FreeBSD$")
static u_int camperiphnextunit(struct periph_driver *p_drv, u_int newunit, bool wired, path_id_t pathid, target_id_t target, lun_id_t lun)
Definition: cam_periph.c:542
static void cam_periph_done(struct cam_periph *periph, union ccb *done_ccb)
Definition: cam_periph.c:1177
int cam_periph_hold(struct cam_periph *periph, int priority)
Definition: cam_periph.c:486
int cam_periph_runccb(union ccb *ccb, int(*error_routine)(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags), cam_flags camflags, u_int32_t sense_flags, struct devstat *ds)
Definition: cam_periph.c:1207
static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers")
int cam_periph_invalidate_sysctl(SYSCTL_HANDLER_ARGS)
Definition: cam_periph.c:2182
void cam_periph_invalidate(struct cam_periph *periph)
Definition: cam_periph.c:655
struct periph_driver ** periph_drivers
Definition: cam_periph.c:99
int cam_periph_error(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags)
Definition: cam_periph.c:1864
static void camperiphfree(struct cam_periph *periph)
Definition: cam_periph.c:684
int cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo, u_int maxmap)
Definition: cam_periph.c:793
void cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
Definition: cam_periph.c:1484
void cam_periph_release_locked_buses(struct cam_periph *periph)
Definition: cam_periph.c:443
int cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr, int(*error_routine)(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags))
Definition: cam_periph.c:1109
static int initialized
Definition: cam_periph.c:98
cam_status cam_periph_alloc(periph_ctor_t *periph_ctor, periph_oninv_t *periph_oninvalidate, periph_dtor_t *periph_dtor, periph_start_t *periph_start, char *name, cam_periph_type type, struct cam_path *path, ac_callback_t *ac_callback, ac_code code, void *arg)
Definition: cam_periph.c:197
static int camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb, cam_flags camflags, u_int32_t sense_flags, int *openings, u_int32_t *relsim_flags, u_int32_t *timeout, u_int32_t *action, const char **action_string)
Definition: cam_periph.c:1525
cam_status periph_ctor_t(struct cam_periph *periph, void *arg)
Definition: cam_periph.h:115
#define CAM_PERIPH_INVALID
Definition: cam_periph.h:133
void periph_oninv_t(struct cam_periph *periph)
Definition: cam_periph.h:117
#define cam_periph_assert(periph, what)
Definition: cam_periph.h:230
#define CAM_PERIPH_RECOVERY_INPROG
Definition: cam_periph.h:135
#define CAM_PERIPH_LOCK_WANTED
Definition: cam_periph.h:132
#define cam_periph_lock(periph)
Definition: cam_periph.h:224
#define CAM_PERIPH_MAXMAPS
Definition: cam_periph.h:155
#define CAM_PERIPH_DRV_EARLY
Definition: cam_periph.h:99
union ccb * cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
Definition: cam_xpt.c:4695
#define CAM_PERIPH_NEW_DEV_FOUND
Definition: cam_periph.h:134
#define cam_periph_unlock(periph)
Definition: cam_periph.h:227
static __inline struct mtx * cam_periph_mtx(struct cam_periph *periph)
Definition: cam_periph.h:213
#define CAM_PERIPH_LOCKED
Definition: cam_periph.h:131
void periph_dtor_t(struct cam_periph *periph)
Definition: cam_periph.h:118
#define CAM_PERIPH_RECOVERY_WAIT_FAILED
Definition: cam_periph.h:140
void periph_start_t(struct cam_periph *periph, union ccb *start_ccb)
Definition: cam_periph.h:113
#define CAM_PERIPH_ANNOUNCED
Definition: cam_periph.h:138
#define CAM_PERIPH_FREE
Definition: cam_periph.h:137
#define cam_periph_sleep(periph, chan, priority, wmesg, timo)
Definition: cam_periph.h:233
cam_periph_type
Definition: cam_periph.h:103
#define CAM_PERIPH_RECOVERY_WAIT
Definition: cam_periph.h:139
static __inline bool cam_sim_pollable(const struct cam_sim *sim)
Definition: cam_sim.h:139
void xpt_denounce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb)
Definition: cam_xpt.c:1203
cam_status xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
Definition: cam_xpt.c:3527
int32_t xpt_add_periph(struct cam_periph *periph)
Definition: cam_xpt.c:1014
void xpt_unlock_buses(void)
Definition: cam_xpt.c:5322
void xpt_print(struct cam_path *path, const char *fmt,...)
Definition: cam_xpt.c:3814
void xpt_pollwait(union ccb *start_ccb, uint32_t timeout)
Definition: cam_xpt.c:3214
void xpt_rescan(union ccb *ccb)
Definition: cam_xpt.c:840
int xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
Definition: cam_xpt.c:3718
path_id_t xpt_path_path_id(struct cam_path *path)
Definition: cam_xpt.c:3880
void xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
Definition: cam_xpt.c:4350
void xpt_lock_buses(void)
Definition: cam_xpt.c:5316
void xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
Definition: cam_xpt.c:3520
void xpt_action(union ccb *start_ccb)
Definition: cam_xpt.c:2601
uint32_t xpt_poll_setup(union ccb *start_ccb)
Definition: cam_xpt.c:3180
lun_id_t xpt_path_lun_id(struct cam_path *path)
Definition: cam_xpt.c:3895
struct cam_sim * xpt_path_sim(struct cam_path *path)
Definition: cam_xpt.c:3904
union ccb * xpt_alloc_ccb_nowait(void)
Definition: cam_xpt.c:4621
void xpt_free_path(struct cam_path *path)
Definition: cam_xpt.c:3672
struct cam_periph * xpt_path_periph(struct cam_path *path)
Definition: cam_xpt.c:3911
void xpt_release_ccb(union ccb *free_ccb)
Definition: cam_xpt.c:3924
target_id_t xpt_path_target_id(struct cam_path *path)
Definition: cam_xpt.c:3886
void xpt_free_ccb(union ccb *free_ccb)
Definition: cam_xpt.c:4630
void xpt_remove_periph(struct cam_periph *periph)
Definition: cam_xpt.c:1034
#define xpt_path_assert(path, what)
Definition: cam_xpt.h:130
static void xpt_path_inq(struct ccb_pathinq *cpi, struct cam_path *path)
Definition: cam_xpt.h:156
#define xpt_path_sleep(path, chan, priority, wmesg, timo)
Definition: cam_xpt.h:132
union ccb * ccb
Definition: mmc_sim_if.m:53
void scsi_cdb_sbuf(u_int8_t *cdb_ptr, struct sbuf *sb)
Definition: scsi_all.c:3514
int scsi_extract_sense_ccb(union ccb *ccb, int *error_code, int *sense_key, int *asc, int *ascq)
Definition: scsi_all.c:5214
void scsi_test_unit_ready(struct ccb_scsiio *csio, u_int32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t sense_len, u_int32_t timeout)
Definition: scsi_all.c:7547
void scsi_start_stop(struct ccb_scsiio *csio, u_int32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int start, int load_eject, int immediate, u_int8_t sense_len, u_int32_t timeout)
Definition: scsi_all.c:8901
scsi_sense_action scsi_error_action(struct ccb_scsiio *csio, struct scsi_inquiry_data *inq_data, u_int32_t sense_flags)
Definition: scsi_all.c:3394
#define SID_IS_REMOVABLE(inq_data)
Definition: scsi_all.h:2217
#define SCSI_STATUS_BUSY
Definition: scsi_all.h:3694
#define START_STOP_UNIT
Definition: scsi_all.h:2088
#define SCSI_STATUS_CMD_TERMINATED
Definition: scsi_all.h:3698
#define SCSI_STATUS_CHECK_COND
Definition: scsi_all.h:3692
#define SCSI_STATUS_INTERMED
Definition: scsi_all.h:3695
#define SSS_LOEJ
Definition: scsi_all.h:1420
@ SSQ_MANY
Definition: scsi_all.h:84
@ SSQ_UA
Definition: scsi_all.h:92
@ SSQ_DECREMENT_COUNT
Definition: scsi_all.h:83
@ SSQ_PRINT_SENSE
Definition: scsi_all.h:91
@ SSQ_LOST
Definition: scsi_all.h:94
@ SSQ_RESCAN
Definition: scsi_all.h:93
#define SCSI_STATUS_COND_MET
Definition: scsi_all.h:3693
scsi_sense_action
Definition: scsi_all.h:68
@ SS_RETRY
Definition: scsi_all.h:70
@ SS_TUR
Definition: scsi_all.h:75
@ SS_START
Definition: scsi_all.h:72
@ SS_NOP
Definition: scsi_all.h:69
@ SS_FAIL
Definition: scsi_all.h:71
@ SS_MASK
Definition: scsi_all.h:78
#define SCSI_STATUS_OK
Definition: scsi_all.h:3691
#define SID_TYPE(inq_data)
Definition: scsi_all.h:2206
#define SCSI_STATUS_RESERV_CONFLICT
Definition: scsi_all.h:3697
#define TEST_UNIT_READY
Definition: scsi_all.h:2081
#define SS_ERRMASK
Definition: scsi_all.h:99
#define T_SEQUENTIAL
Definition: scsi_all.h:2171
#define SSD_FULL_SIZE
Definition: scsi_all.h:3251
#define SCSI_STATUS_INTERMED_COND_MET
Definition: scsi_all.h:3696
#define SCSI_STATUS_QUEUE_FULL
Definition: scsi_all.h:3699
#define MSG_SIMPLE_Q_TAG
Definition: scsi_message.h:35
#define CAMGETPASSTHRU
Definition: scsi_pass.h:42
struct cam_sim * sim
u_int8_t * serial_num
struct cam_ed * device
struct cam_eb * bus
void * orig[CAM_PERIPH_MAXMAPS]
Definition: cam_periph.h:159
struct buf * bp[CAM_PERIPH_MAXMAPS]
Definition: cam_periph.h:160
periph_start_t * periph_start
Definition: cam_periph.h:120
cam_periph_type type
Definition: cam_periph.h:128
char * periph_name
Definition: cam_periph.h:123
int periph_allocating
Definition: cam_periph.h:143
u_int32_t unit_number
Definition: cam_periph.h:127
u_int32_t refcount
Definition: cam_periph.h:145
periph_dtor_t * periph_dtor
Definition: cam_periph.h:122
uint32_t scheduled_priority
Definition: cam_periph.h:141
struct cam_path * path
Definition: cam_periph.h:124
ac_callback_t * deferred_callback
Definition: cam_periph.h:149
periph_oninv_t * periph_oninval
Definition: cam_periph.h:121
uint32_t immediate_priority
Definition: cam_periph.h:142
u_int32_t flags
Definition: cam_periph.h:129
struct cam_sim * sim
Definition: cam_periph.h:126
ac_code deferred_ac
Definition: cam_periph.h:150
u_int32_t priority
Definition: cam.h:86
int index
Definition: cam.h:95
u_int8_t * data_ptr
Definition: cam_ccb.h:796
u_int32_t dxfer_len
Definition: cam_ccb.h:797
struct ata_res res
Definition: cam_ccb.h:795
struct ata_cmd cmd
Definition: cam_ccb.h:794
u_int32_t resid
Definition: cam_ccb.h:798
uint8_t * buf
Definition: cam_ccb.h:1326
struct dev_match_result * matches
Definition: cam_ccb.h:587
u_int32_t match_buf_len
Definition: cam_ccb.h:586
u_int32_t pattern_buf_len
Definition: cam_ccb.h:583
struct dev_match_pattern * patterns
Definition: cam_ccb.h:584
struct ccb_hdr ccb_h
Definition: cam_ccb.h:379
struct scsi_inquiry_data inq_data
Definition: cam_ccb.h:381
char periph_name[DEV_IDLEN]
Definition: cam_ccb.h:417
ccb_getdevlist_status_e status
Definition: cam_ccb.h:421
u_int32_t unit_number
Definition: cam_ccb.h:418
u_int32_t index
Definition: cam_ccb.h:420
struct ccb_hdr ccb_h
Definition: cam_ccb.h:391
struct timeval last_reset
Definition: cam_ccb.h:405
int dev_openings
Definition: cam_ccb.h:392
u_int32_t flags
Definition: cam_ccb.h:368
struct cam_path * path
Definition: cam_ccb.h:364
cam_pinfo pinfo
Definition: cam_ccb.h:349
u_int16_t alloc_flags
Definition: cam_ccb.h:355
xpt_opcode func_code
Definition: cam_ccb.h:362
u_int32_t timeout
Definition: cam_ccb.h:373
u_int32_t status
Definition: cam_ccb.h:363
u_int16_t retry_count
Definition: cam_ccb.h:354
void(* cbfcnp)(struct cam_periph *, union ccb *)
Definition: cam_ccb.h:360
struct mmc_command cmd
Definition: cam_ccb.h:813
uint32_t dxfer_len
Definition: cam_ccb.h:858
uint8_t * data_ptr
Definition: cam_ccb.h:857
u_int32_t qfrozen_cnt
Definition: cam_ccb.h:846
struct ccb_hdr ccb_h
Definition: cam_ccb.h:838
u_int32_t openings
Definition: cam_ccb.h:844
u_int32_t release_timeout
Definition: cam_ccb.h:845
u_int32_t release_flags
Definition: cam_ccb.h:839
cam_flags flags
Definition: cam_ccb.h:1191
cdb_t cdb_io
Definition: cam_ccb.h:763
u_int8_t * data_ptr
Definition: cam_ccb.h:753
u_int8_t tag_action
Definition: cam_ccb.h:766
u_int8_t scsi_status
Definition: cam_ccb.h:760
u_int32_t dxfer_len
Definition: cam_ccb.h:754
u_int32_t resid
Definition: cam_ccb.h:762
uint8_t * smp_request
Definition: cam_ccb.h:721
int smp_request_len
Definition: cam_ccb.h:722
uint8_t * smp_response
Definition: cam_ccb.h:724
int smp_response_len
Definition: cam_ccb.h:725
u_int generation
Definition: cam_periph.h:97
periph_deinit_t * deinit
Definition: cam_periph.h:100
periph_init_t * init
Definition: cam_periph.h:94
char * driver_name
Definition: cam_periph.h:95
Definition: cam_ccb.h:1345
struct ccb_dev_match cdm
Definition: cam_ccb.h:1356
struct ccb_getdevlist cgdl
Definition: cam_ccb.h:1349
struct ccb_mmcio mmcio
Definition: cam_ccb.h:1380
struct ccb_smpio smpio
Definition: cam_ccb.h:1373
struct ccb_dev_advinfo cdai
Definition: cam_ccb.h:1377
struct ccb_hdr ccb_h
Definition: cam_ccb.h:1346
struct ccb_scsiio csio
Definition: cam_ccb.h:1347
struct ccb_rescan crcn
Definition: cam_ccb.h:1374
struct ccb_nvmeio nvmeio
Definition: cam_ccb.h:1379
struct ccb_pathinq cpi
Definition: cam_ccb.h:1350
struct ccb_ataio ataio
Definition: cam_ccb.h:1376
u_int8_t cdb_bytes[IOCDBLEN]
Definition: cam_ccb.h:742