FreeBSD kernel CAM code
scsi_da.c
1/*-
2 * Implementation of SCSI Direct Access Peripheral driver for CAM.
3 *
4 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
5 *
6 * Copyright (c) 1997 Justin T. Gibbs.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification, immediately at the beginning of the file.
15 * 2. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD$");
33
34#include <sys/param.h>
35
36#ifdef _KERNEL
37#include "opt_da.h"
38#include <sys/systm.h>
39#include <sys/kernel.h>
40#include <sys/bio.h>
41#include <sys/sysctl.h>
42#include <sys/taskqueue.h>
43#include <sys/lock.h>
44#include <sys/mutex.h>
45#include <sys/conf.h>
46#include <sys/devicestat.h>
47#include <sys/eventhandler.h>
48#include <sys/malloc.h>
49#include <sys/cons.h>
50#include <sys/endian.h>
51#include <sys/proc.h>
52#include <sys/reboot.h>
53#include <sys/sbuf.h>
54#include <geom/geom.h>
55#include <geom/geom_disk.h>
56#include <machine/atomic.h>
57#endif /* _KERNEL */
58
59#ifndef _KERNEL
60#include <stdio.h>
61#include <string.h>
62#endif /* _KERNEL */
63
64#include <cam/cam.h>
65#include <cam/cam_ccb.h>
66#include <cam/cam_periph.h>
67#include <cam/cam_xpt_periph.h>
68#ifdef _KERNEL
70#endif /* _KERNEL */
71#include <cam/cam_sim.h>
72#include <cam/cam_iosched.h>
73
75#include <cam/scsi/scsi_da.h>
76
77#ifdef _KERNEL
78/*
79 * Note that there are probe ordering dependencies here. The order isn't
80 * controlled by this enumeration, but by explicit state transitions in
81 * dastart() and dadone(). Here are some of the dependencies:
82 *
83 * 1. RC should come first, before RC16, unless there is evidence that RC16
84 * is supported.
85 * 2. BDC needs to come before any of the ATA probes, or the ZONE probe.
86 * 3. The ATA probes should go in this order:
87 * ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE
88 */
89typedef enum {
104
105typedef enum {
114 DA_FLAG_OPEN = 0x000100,
117 DA_FLAG_PROBED = 0x000800,
118 DA_FLAG_DIRTY = 0x001000,
126 DA_FLAG_UNMAPPEDIO = 0x100000
128#define DA_FLAG_STRING \
129 "\020" \
130 "\001PACK_INVALID" \
131 "\002NEW_PACK" \
132 "\003PACK_LOCKED" \
133 "\004PACK_REMOVABLE" \
134 "\005ROTATING" \
135 "\006NEED_OTAG" \
136 "\007WAS_OTAG" \
137 "\010RETRY_UA" \
138 "\011OPEN" \
139 "\012SCTX_INIT" \
140 "\013CAN_RC16" \
141 "\014PROBED" \
142 "\015DIRTY" \
143 "\016ANNOUCNED" \
144 "\017CAN_ATA_DMA" \
145 "\020CAN_ATA_LOG" \
146 "\021CAN_ATA_IDLOG" \
147 "\022CAN_ATA_SUPACP" \
148 "\023CAN_ATA_ZONE" \
149 "\024TUR_PENDING" \
150 "\025UNMAPPEDIO"
151
152typedef enum {
153 DA_Q_NONE = 0x00,
157 DA_Q_4K = 0x08,
163 DA_Q_128KB = 0x200
165
166#define DA_Q_BIT_STRING \
167 "\020" \
168 "\001NO_SYNC_CACHE" \
169 "\002NO_6_BYTE" \
170 "\003NO_PREVENT" \
171 "\0044K" \
172 "\005NO_RC16" \
173 "\006NO_UNMAP" \
174 "\007RETRY_BUSY" \
175 "\010SMR_DM" \
176 "\011STRICT_UNMAP" \
177 "\012128KB"
178
179typedef enum {
197 DA_CCB_RETRY_UA = 0x20
199
200/*
201 * Order here is important for method choice
202 *
203 * We prefer ATA_TRIM, as tests run against a Sandforce 2281 SSD attached to an
204 * LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted in 20% quicker deletes
205 * using ATA_TRIM than the corresponding UNMAP results for a real-world mysql
206 * import taking 5 minutes.
207 *
208 */
209typedef enum {
220
221/*
222 * For SCSI, host managed drives show up as a separate device type. For
223 * ATA, host managed drives also have a different device signature.
224 * XXX KDM figure out the ATA host managed signature.
225 */
226typedef enum {
232
233/*
234 * We distinguish between these interface cases in addition to the drive type:
235 * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC
236 * o ATA drive behind a SCSI translation layer that does not know about
237 * ZBC/ZAC, and so needs to be managed via ATA passthrough. In this
238 * case, we would need to share the ATA code with the ada(4) driver.
239 * o SCSI drive.
240 */
241typedef enum {
246
247typedef enum {
266
267static struct da_zone_desc {
269 const char *desc;
270} da_zone_desc_table[] = {
271 {DA_ZONE_FLAG_RZ_SUP, "Report Zones" },
272 {DA_ZONE_FLAG_OPEN_SUP, "Open" },
273 {DA_ZONE_FLAG_CLOSE_SUP, "Close" },
274 {DA_ZONE_FLAG_FINISH_SUP, "Finish" },
275 {DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" },
277
278typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb,
279 struct bio *bp);
283
284static const void * da_delete_functions[] = {
285 NULL,
286 NULL,
292};
293
294static const char *da_delete_method_names[] =
295 { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" };
296static const char *da_delete_method_desc[] =
297 { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP",
298 "WRITE SAME(10) with UNMAP", "ZERO" };
299
300/* Offsets into our private area for storing information */
301#define ccb_state ppriv_field0
302#define ccb_bp ppriv_ptr1
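/*
 * Illustrative sketch (not part of the driver itself): these macros alias
 * fields in the CCB header's peripheral-private area, so completion
 * handlers can recover per-request state from the CCB itself, e.g.:
 *
 *	csio.ccb_h.ccb_state = DA_CCB_DUMP;	(as done in dadump() below)
 *	start_ccb->ccb_h.ccb_bp = bp;		(bio pairing assumed, not shown here)
 */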
303
304struct disk_params {
305 u_int8_t heads;
306 u_int32_t cylinders;
307 u_int8_t secs_per_track;
308 u_int32_t secsize; /* Number of bytes/sector */
309 u_int64_t sectors; /* total number sectors */
312};
313
314#define UNMAP_RANGE_MAX 0xffffffff
315#define UNMAP_HEAD_SIZE 8
316#define UNMAP_RANGE_SIZE 16
317#define UNMAP_MAX_RANGES 2048 /* Protocol Max is 4095 */
318#define UNMAP_BUF_SIZE ((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \
319 UNMAP_HEAD_SIZE)
320
321#define WS10_MAX_BLKS 0xffff
322#define WS16_MAX_BLKS 0xffffffff
323#define ATA_TRIM_MAX_RANGES ((UNMAP_BUF_SIZE / \
324 (ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE)
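/*
 * Illustrative arithmetic (not part of the driver itself), assuming
 * ATA_DSM_RANGE_SIZE == 8 and ATA_DSM_BLK_SIZE == 512 as defined in
 * <sys/ata.h>:
 *
 *	UNMAP_BUF_SIZE      = (2048 * 16) + 8             = 32776 bytes
 *	ATA_TRIM_MAX_RANGES = (32776 / (8 * 512)) * 512   = 4096
 */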
325
326#define DA_WORK_TUR (1 << 16)
327
328typedef enum {
337 DA_REF_MAX /* KEEP LAST */
339
340struct da_softc {
342 struct bio_queue_head delete_run_queue;
343 LIST_HEAD(, ccb_hdr) pending_ccbs;
344 int refcount; /* Active xpt_action() calls */
345 da_state state;
346 da_flags flags;
347 da_quirks quirks;
348 int minimum_cmd_size;
349 int mode_page;
350 int error_inject;
351 int trim_max_ranges;
352 int delete_available; /* Delete methods possibly available */
353 da_zone_mode zone_mode;
354 da_zone_interface zone_interface;
355 da_zone_flags zone_flags;
356 struct ata_gp_log_dir ata_logdir;
357 int valid_logdir_len;
358 struct ata_identify_log_pages ata_iddir;
359 int valid_iddir_len;
360 uint64_t optimal_seq_zones;
361 uint64_t optimal_nonseq_zones;
362 uint64_t max_seq_zones;
363 u_int maxio;
364 uint32_t unmap_max_ranges;
365 uint32_t unmap_max_lba; /* Max LBAs in UNMAP req */
366 uint32_t unmap_gran;
367 uint32_t unmap_gran_align;
368 uint64_t ws_max_blks;
369 uint64_t trim_count;
370 uint64_t trim_ranges;
371 uint64_t trim_lbas;
372 da_delete_methods delete_method_pref;
373 da_delete_methods delete_method;
374 da_delete_func_t *delete_func;
375 int p_type;
376 struct disk_params params;
377 struct disk *disk;
378 struct task sysctl_task;
379 struct sysctl_ctx_list sysctl_ctx;
380 struct sysctl_oid *sysctl_tree;
381 struct callout sendordered_c;
382 uint64_t wwpn;
383 uint8_t unmap_buf[UNMAP_BUF_SIZE];
384 struct scsi_read_capacity_data_long rcaplong;
385 struct callout mediapoll_c;
386 int ref_flags[DA_REF_MAX];
387#ifdef CAM_IO_STATS
388 struct sysctl_ctx_list sysctl_stats_ctx;
389 struct sysctl_oid *sysctl_stats_tree;
390 u_int errors;
391 u_int timeouts;
392 u_int invalidations;
393#endif
394#define DA_ANNOUNCETMP_SZ 160
395 char announce_temp[DA_ANNOUNCETMP_SZ];
396#define DA_ANNOUNCE_SZ 400
397 char announcebuf[DA_ANNOUNCE_SZ];
398};
399
400#define dadeleteflag(softc, delete_method, enable) \
401 if (enable) { \
402 softc->delete_available |= (1 << delete_method); \
403 } else { \
404 softc->delete_available &= ~(1 << delete_method); \
405 }
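/*
 * Illustrative sketch (not part of the driver itself): dadeleteflag() keeps
 * the delete_available bitmask in sync with what probing discovers.
 * Assuming DA_DELETE_UNMAP is one of the constants in the (elided)
 * da_delete_methods enumeration named by da_delete_method_names[]:
 *
 *	dadeleteflag(softc, DA_DELETE_UNMAP, 1);	sets bit (1 << DA_DELETE_UNMAP)
 *	dadeleteflag(softc, DA_DELETE_UNMAP, 0);	clears it again
 */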
406
407static uma_zone_t da_ccb_zone;
408
412};
413
414static const char quantum[] = "QUANTUM";
415static const char microp[] = "MICROP";
416
418{
419 /* SPI, FC devices */
420 {
421 /*
422 * Fujitsu M2513A MO drives.
423 * Tested devices: M2513A2 firmware versions 1200 & 1300.
424 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
425 * Reported by: W.Scholten <whs@xs4all.nl>
426 */
427 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
428 /*quirks*/ DA_Q_NO_SYNC_CACHE
429 },
430 {
431 /* See above. */
432 {T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
433 /*quirks*/ DA_Q_NO_SYNC_CACHE
434 },
435 {
436 /*
437 * This particular Fujitsu drive doesn't like the
438 * synchronize cache command.
439 * Reported by: Tom Jackson <toj@gorilla.net>
440 */
441 {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
442 /*quirks*/ DA_Q_NO_SYNC_CACHE
443 },
444 {
445 /*
446 * This drive doesn't like the synchronize cache command
447 * either. Reported by: Matthew Jacob <mjacob@feral.com>
448 * in NetBSD PR kern/6027, August 24, 1998.
449 */
450 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
451 /*quirks*/ DA_Q_NO_SYNC_CACHE
452 },
453 {
454 /*
455 * This drive doesn't like the synchronize cache command
456 * either. Reported by: Hellmuth Michaelis (hm@kts.org)
457 * (PR 8882).
458 */
459 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
460 /*quirks*/ DA_Q_NO_SYNC_CACHE
461 },
462 {
463 /*
464 * Doesn't like the synchronize cache command.
465 * Reported by: Blaz Zupan <blaz@gold.amis.net>
466 */
467 {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
468 /*quirks*/ DA_Q_NO_SYNC_CACHE
469 },
470 {
471 /*
472 * Doesn't like the synchronize cache command.
473 * Reported by: Blaz Zupan <blaz@gold.amis.net>
474 */
475 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
476 /*quirks*/ DA_Q_NO_SYNC_CACHE
477 },
478 {
479 /*
480 * Doesn't like the synchronize cache command.
481 */
482 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
483 /*quirks*/ DA_Q_NO_SYNC_CACHE
484 },
485 {
486 /*
487 * Doesn't like the synchronize cache command.
488 * Reported by: walter@pelissero.de
489 */
490 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"},
491 /*quirks*/ DA_Q_NO_SYNC_CACHE
492 },
493 {
494 /*
495 * Doesn't work correctly with 6 byte reads/writes.
496 * Returns illegal request, and points to byte 9 of the
497 * 6-byte CDB.
498 * Reported by: Adam McDougall <bsdx@spawnet.com>
499 */
500 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
501 /*quirks*/ DA_Q_NO_6_BYTE
502 },
503 {
504 /* See above. */
505 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
506 /*quirks*/ DA_Q_NO_6_BYTE
507 },
508 {
509 /*
510 * Doesn't like the synchronize cache command.
511 * Reported by: walter@pelissero.de
512 */
513 {T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"},
514 /*quirks*/ DA_Q_NO_SYNC_CACHE
515 },
516 {
517 /*
518 * The CISS RAID controllers do not support SYNC_CACHE
519 */
520 {T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
521 /*quirks*/ DA_Q_NO_SYNC_CACHE
522 },
523 {
524 /*
525 * The STEC SSDs sometimes hang on UNMAP.
526 */
527 {T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"},
528 /*quirks*/ DA_Q_NO_UNMAP
529 },
530 {
531 /*
532 * VMware returns BUSY status when storage has transient
533 * connectivity problems, so better wait.
534 * Also VMware returns odd errors on misaligned UNMAPs.
535 */
536 {T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"},
538 },
539 /* USB mass storage devices supported by umass(4) */
540 {
541 /*
542 * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player
543 * PR: kern/51675
544 */
545 {T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"},
546 /*quirks*/ DA_Q_NO_SYNC_CACHE
547 },
548 {
549 /*
550 * Power Quotient Int. (PQI) USB flash key
551 * PR: kern/53067
552 */
553 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*",
554 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
555 },
556 {
557 /*
558 * Creative Nomad MUVO mp3 player (USB)
559 * PR: kern/53094
560 */
561 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"},
563 },
564 {
565 /*
566 * Jungsoft NEXDISK USB flash key
567 * PR: kern/54737
568 */
569 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"},
570 /*quirks*/ DA_Q_NO_SYNC_CACHE
571 },
572 {
573 /*
574 * FreeDik USB Mini Data Drive
575 * PR: kern/54786
576 */
577 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive",
578 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
579 },
580 {
581 /*
582 * Sigmatel USB Flash MP3 Player
583 * PR: kern/57046
584 */
585 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"},
587 },
588 {
589 /*
590 * Neuros USB Digital Audio Computer
591 * PR: kern/63645
592 */
593 {T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. audio comp.",
594 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
595 },
596 {
597 /*
598 * SEAGRAND NP-900 MP3 Player
599 * PR: kern/64563
600 */
601 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"},
603 },
604 {
605 /*
606 * iRiver iFP MP3 player (with UMS Firmware)
607 * PR: kern/54881, i386/63941, kern/66124
608 */
609 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"},
610 /*quirks*/ DA_Q_NO_SYNC_CACHE
611 },
612 {
613 /*
614 * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01
615 * PR: kern/70158
616 */
617 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"},
618 /*quirks*/ DA_Q_NO_SYNC_CACHE
619 },
620 {
621 /*
622 * ZICPlay USB MP3 Player with FM
623 * PR: kern/75057
624 */
625 {T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"},
626 /*quirks*/ DA_Q_NO_SYNC_CACHE
627 },
628 {
629 /*
630 * TEAC USB floppy mechanisms
631 */
632 {T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"},
633 /*quirks*/ DA_Q_NO_SYNC_CACHE
634 },
635 {
636 /*
637 * Kingston DataTraveler II+ USB Pen-Drive.
638 * Reported by: Pawel Jakub Dawidek <pjd@FreeBSD.org>
639 */
640 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+",
641 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
642 },
643 {
644 /*
645 * USB DISK Pro PMAP
646 * Reported by: jhs
647 * PR: usb/96381
648 */
649 {T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"},
650 /*quirks*/ DA_Q_NO_SYNC_CACHE
651 },
652 {
653 /*
654 * Motorola E398 Mobile Phone (TransFlash memory card).
655 * Reported by: Wojciech A. Koszek <dunstan@FreeBSD.czest.pl>
656 * PR: usb/89889
657 */
658 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone",
659 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
660 },
661 {
662 /*
663 * Qware BeatZkey! Pro
664 * PR: usb/79164
665 */
666 {T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE",
667 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
668 },
669 {
670 /*
671 * Time DPA20B 1GB MP3 Player
672 * PR: usb/81846
673 */
674 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*",
675 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
676 },
677 {
678 /*
679 * Samsung USB key 128Mb
680 * PR: usb/90081
681 */
682 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb",
683 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
684 },
685 {
686 /*
687 * Kingston DataTraveler 2.0 USB Flash memory.
688 * PR: usb/89196
689 */
690 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0",
691 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
692 },
693 {
694 /*
695 * Creative MUVO Slim mp3 player (USB)
696 * PR: usb/86131
697 */
698 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim",
699 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
700 },
701 {
702 /*
703 * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3)
704 * PR: usb/80487
705 */
706 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK",
707 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
708 },
709 {
710 /*
711 * SanDisk Micro Cruzer 128MB
712 * PR: usb/75970
713 */
714 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer",
715 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
716 },
717 {
718 /*
719 * TOSHIBA TransMemory USB sticks
720 * PR: kern/94660
721 */
722 {T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory",
723 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
724 },
725 {
726 /*
727 * PNY USB 3.0 Flash Drives
728 */
729 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*",
730 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16
731 },
732 {
733 /*
734 * PNY USB Flash keys
735 * PR: usb/75578, usb/72344, usb/65436
736 */
737 {T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*",
738 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
739 },
740 {
741 /*
742 * Genesys GL3224
743 */
744 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
745 "120?"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_4K | DA_Q_NO_RC16
746 },
747 {
748 /*
749 * Genesys 6-in-1 Card Reader
750 * PR: usb/94647
751 */
752 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
753 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
754 },
755 {
756 /*
757 * Rekam Digital CAMERA
758 * PR: usb/98713
759 */
760 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*",
761 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
762 },
763 {
764 /*
765 * iRiver H10 MP3 player
766 * PR: usb/102547
767 */
768 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*",
769 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
770 },
771 {
772 /*
773 * iRiver U10 MP3 player
774 * PR: usb/92306
775 */
776 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*",
777 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
778 },
779 {
780 /*
781 * X-Micro Flash Disk
782 * PR: usb/96901
783 */
784 {T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk",
785 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
786 },
787 {
788 /*
789 * EasyMP3 EM732X USB 2.0 Flash MP3 Player
790 * PR: usb/96546
791 */
792 {T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*",
793 "1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
794 },
795 {
796 /*
797 * Denver MP3 player
798 * PR: usb/107101
799 */
800 {T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER",
801 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
802 },
803 {
804 /*
805 * Philips USB Key Audio KEY013
806 * PR: usb/68412
807 */
808 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"},
810 },
811 {
812 /*
813 * JNC MP3 Player
814 * PR: usb/94439
815 */
816 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*",
817 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
818 },
819 {
820 /*
821 * SAMSUNG MP0402H
822 * PR: usb/108427
823 */
824 {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"},
825 /*quirks*/ DA_Q_NO_SYNC_CACHE
826 },
827 {
828 /*
829 * I/O Magic USB flash - Giga Bank
830 * PR: usb/108810
831 */
832 {T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"},
833 /*quirks*/ DA_Q_NO_SYNC_CACHE
834 },
835 {
836 /*
837 * JoyFly 128mb USB Flash Drive
838 * PR: 96133
839 */
840 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*",
841 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
842 },
843 {
844 /*
845 * ChipsBnk usb stick
846 * PR: 103702
847 */
848 {T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*",
849 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
850 },
851 {
852 /*
853 * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A
854 * PR: 129858
855 */
856 {T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*",
857 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
858 },
859 {
860 /*
861 * Samsung YP-U3 mp3-player
862 * PR: 125398
863 */
864 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3",
865 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
866 },
867 {
868 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*",
869 "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
870 },
871 {
872 /*
873 * Sony Cyber-Shot DSC cameras
874 * PR: usb/137035
875 */
876 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"},
878 },
879 {
880 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3",
881 "1.00"}, /*quirks*/ DA_Q_NO_PREVENT
882 },
883 {
 884 /* At least several Transcend USB sticks lie about RC16. */
885 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*",
886 "*"}, /*quirks*/ DA_Q_NO_RC16
887 },
888 {
889 /*
890 * I-O Data USB Flash Disk
891 * PR: usb/211716
892 */
893 {T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*",
894 "*"}, /*quirks*/ DA_Q_NO_RC16
895 },
896 {
897 /*
898 * SLC CHIPFANCIER USB drives
899 * PR: usb/234503 (RC10 right, RC16 wrong)
900 * 16GB, 32GB and 128GB confirmed to have same issue
901 */
902 {T_DIRECT, SIP_MEDIA_REMOVABLE, "*SLC", "CHIPFANCIER",
903 "*"}, /*quirks*/ DA_Q_NO_RC16
904 },
905 /* ATA/SATA devices over SAS/USB/... */
906 {
907 /* Sandisk X400 */
908 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SanDisk SD8SB8U1*", "*" },
909 /*quirks*/DA_Q_128KB
910 },
911 {
912 /* Hitachi Advanced Format (4k) drives */
913 { T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" },
914 /*quirks*/DA_Q_4K
915 },
916 {
917 /* Micron Advanced Format (4k) drives */
918 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Micron 5100 MTFDDAK*", "*" },
919 /*quirks*/DA_Q_4K
920 },
921 {
922 /* Samsung Advanced Format (4k) drives */
923 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" },
924 /*quirks*/DA_Q_4K
925 },
926 {
927 /* Samsung Advanced Format (4k) drives */
928 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" },
929 /*quirks*/DA_Q_4K
930 },
931 {
932 /* Samsung Advanced Format (4k) drives */
933 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" },
934 /*quirks*/DA_Q_4K
935 },
936 {
937 /* Samsung Advanced Format (4k) drives */
938 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" },
939 /*quirks*/DA_Q_4K
940 },
941 {
942 /* Seagate Barracuda Green Advanced Format (4k) drives */
943 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" },
944 /*quirks*/DA_Q_4K
945 },
946 {
947 /* Seagate Barracuda Green Advanced Format (4k) drives */
948 { T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" },
949 /*quirks*/DA_Q_4K
950 },
951 {
952 /* Seagate Barracuda Green Advanced Format (4k) drives */
953 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" },
954 /*quirks*/DA_Q_4K
955 },
956 {
957 /* Seagate Barracuda Green Advanced Format (4k) drives */
958 { T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" },
959 /*quirks*/DA_Q_4K
960 },
961 {
962 /* Seagate Barracuda Green Advanced Format (4k) drives */
963 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" },
964 /*quirks*/DA_Q_4K
965 },
966 {
967 /* Seagate Barracuda Green Advanced Format (4k) drives */
968 { T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" },
969 /*quirks*/DA_Q_4K
970 },
971 {
972 /* Seagate Momentus Advanced Format (4k) drives */
973 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" },
974 /*quirks*/DA_Q_4K
975 },
976 {
977 /* Seagate Momentus Advanced Format (4k) drives */
978 { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" },
979 /*quirks*/DA_Q_4K
980 },
981 {
982 /* Seagate Momentus Advanced Format (4k) drives */
983 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" },
984 /*quirks*/DA_Q_4K
985 },
986 {
987 /* Seagate Momentus Advanced Format (4k) drives */
988 { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" },
989 /*quirks*/DA_Q_4K
990 },
991 {
992 /* Seagate Momentus Advanced Format (4k) drives */
993 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" },
994 /*quirks*/DA_Q_4K
995 },
996 {
997 /* Seagate Momentus Advanced Format (4k) drives */
998 { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" },
999 /*quirks*/DA_Q_4K
1000 },
1001 {
1002 /* Seagate Momentus Advanced Format (4k) drives */
1003 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" },
1004 /*quirks*/DA_Q_4K
1005 },
1006 {
1007 /* Seagate Momentus Advanced Format (4k) drives */
1008 { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" },
1009 /*quirks*/DA_Q_4K
1010 },
1011 {
1012 /* Seagate Momentus Advanced Format (4k) drives */
1013 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" },
1014 /*quirks*/DA_Q_4K
1015 },
1016 {
1017 /* Seagate Momentus Advanced Format (4k) drives */
1018 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" },
1019 /*quirks*/DA_Q_4K
1020 },
1021 {
1022 /* Seagate Momentus Advanced Format (4k) drives */
1023 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" },
1024 /*quirks*/DA_Q_4K
1025 },
1026 {
1027 /* Seagate Momentus Advanced Format (4k) drives */
1028 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" },
1029 /*quirks*/DA_Q_4K
1030 },
1031 {
1032 /* Seagate Momentus Advanced Format (4k) drives */
1033 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" },
1034 /*quirks*/DA_Q_4K
1035 },
1036 {
1037 /* Seagate Momentus Advanced Format (4k) drives */
1038 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" },
1039 /*quirks*/DA_Q_4K
1040 },
1041 {
1042 /* Seagate Momentus Thin Advanced Format (4k) drives */
1043 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" },
1044 /*quirks*/DA_Q_4K
1045 },
1046 {
1047 /* Seagate Momentus Thin Advanced Format (4k) drives */
1048 { T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" },
1049 /*quirks*/DA_Q_4K
1050 },
1051 {
1052 /* WDC Caviar Green Advanced Format (4k) drives */
1053 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" },
1054 /*quirks*/DA_Q_4K
1055 },
1056 {
1057 /* WDC Caviar Green Advanced Format (4k) drives */
1058 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" },
1059 /*quirks*/DA_Q_4K
1060 },
1061 {
1062 /* WDC Caviar Green Advanced Format (4k) drives */
1063 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" },
1064 /*quirks*/DA_Q_4K
1065 },
1066 {
1067 /* WDC Caviar Green Advanced Format (4k) drives */
1068 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" },
1069 /*quirks*/DA_Q_4K
1070 },
1071 {
1072 /* WDC Caviar Green Advanced Format (4k) drives */
1073 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" },
1074 /*quirks*/DA_Q_4K
1075 },
1076 {
1077 /* WDC Caviar Green Advanced Format (4k) drives */
1078 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" },
1079 /*quirks*/DA_Q_4K
1080 },
1081 {
1082 /* WDC Caviar Green Advanced Format (4k) drives */
1083 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" },
1084 /*quirks*/DA_Q_4K
1085 },
1086 {
1087 /* WDC Caviar Green Advanced Format (4k) drives */
1088 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" },
1089 /*quirks*/DA_Q_4K
1090 },
1091 {
1092 /* WDC Scorpio Black Advanced Format (4k) drives */
1093 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" },
1094 /*quirks*/DA_Q_4K
1095 },
1096 {
1097 /* WDC Scorpio Black Advanced Format (4k) drives */
1098 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" },
1099 /*quirks*/DA_Q_4K
1100 },
1101 {
1102 /* WDC Scorpio Black Advanced Format (4k) drives */
1103 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" },
1104 /*quirks*/DA_Q_4K
1105 },
1106 {
1107 /* WDC Scorpio Black Advanced Format (4k) drives */
1108 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" },
1109 /*quirks*/DA_Q_4K
1110 },
1111 {
1112 /* WDC Scorpio Blue Advanced Format (4k) drives */
1113 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" },
1114 /*quirks*/DA_Q_4K
1115 },
1116 {
1117 /* WDC Scorpio Blue Advanced Format (4k) drives */
1118 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" },
1119 /*quirks*/DA_Q_4K
1120 },
1121 {
1122 /* WDC Scorpio Blue Advanced Format (4k) drives */
1123 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" },
1124 /*quirks*/DA_Q_4K
1125 },
1126 {
1127 /* WDC Scorpio Blue Advanced Format (4k) drives */
1128 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" },
1129 /*quirks*/DA_Q_4K
1130 },
1131 {
1132 /*
1133 * Olympus digital cameras (C-3040ZOOM, C-2040ZOOM, C-1)
1134 * PR: usb/97472
1135 */
1136 { T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "C*", "*"},
1138 },
1139 {
1140 /*
1141 * Olympus digital cameras (D-370)
1142 * PR: usb/97472
1143 */
1144 { T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "D*", "*"},
1145 /*quirks*/ DA_Q_NO_6_BYTE
1146 },
1147 {
1148 /*
1149 * Olympus digital cameras (E-100RS, E-10).
1150 * PR: usb/97472
1151 */
1152 { T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "E*", "*"},
1154 },
1155 {
1156 /*
1157 * Olympus FE-210 camera
1158 */
1159 {T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*",
1160 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1161 },
1162 {
1163 /*
1164 * Pentax Digital Camera
1165 * PR: usb/93389
1166 */
1167 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PENTAX", "DIGITAL CAMERA",
1168 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1169 },
1170 {
1171 /*
1172 * LG UP3S MP3 player
1173 */
1174 {T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S",
1175 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1176 },
1177 {
1178 /*
1179 * Laser MP3-2GA13 MP3 player
1180 */
1181 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk",
1182 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1183 },
1184 {
1185 /*
1186 * LaCie external 250GB Hard drive, design by Porsche
1187 * Submitted by: Ben Stuyts <ben@altesco.nl>
1188 * PR: 121474
1189 */
1190 {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"},
1191 /*quirks*/ DA_Q_NO_SYNC_CACHE
1192 },
1193 /* SATA SSDs */
1194 {
1195 /*
1196 * Corsair Force 2 SSDs
1197 * 4k optimised & trim only works in 4k requests + 4k aligned
1198 */
1199 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" },
1200 /*quirks*/DA_Q_4K
1201 },
1202 {
1203 /*
1204 * Corsair Force 3 SSDs
1205 * 4k optimised & trim only works in 4k requests + 4k aligned
1206 */
1207 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" },
1208 /*quirks*/DA_Q_4K
1209 },
1210 {
1211 /*
1212 * Corsair Neutron GTX SSDs
1213 * 4k optimised & trim only works in 4k requests + 4k aligned
1214 */
1215 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" },
1216 /*quirks*/DA_Q_4K
1217 },
1218 {
1219 /*
1220 * Corsair Force GT & GS SSDs
1221 * 4k optimised & trim only works in 4k requests + 4k aligned
1222 */
1223 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" },
1224 /*quirks*/DA_Q_4K
1225 },
1226 {
1227 /*
1228 * Crucial M4 SSDs
1229 * 4k optimised & trim only works in 4k requests + 4k aligned
1230 */
1231 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" },
1232 /*quirks*/DA_Q_4K
1233 },
1234 {
1235 /*
1236 * Crucial RealSSD C300 SSDs
1237 * 4k optimised
1238 */
1239 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*",
1240 "*" }, /*quirks*/DA_Q_4K
1241 },
1242 {
1243 /*
1244 * Intel 320 Series SSDs
1245 * 4k optimised & trim only works in 4k requests + 4k aligned
1246 */
1247 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" },
1248 /*quirks*/DA_Q_4K
1249 },
1250 {
1251 /*
1252 * Intel 330 Series SSDs
1253 * 4k optimised & trim only works in 4k requests + 4k aligned
1254 */
1255 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" },
1256 /*quirks*/DA_Q_4K
1257 },
1258 {
1259 /*
1260 * Intel 510 Series SSDs
1261 * 4k optimised & trim only works in 4k requests + 4k aligned
1262 */
1263 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" },
1264 /*quirks*/DA_Q_4K
1265 },
1266 {
1267 /*
1268 * Intel 520 Series SSDs
1269 * 4k optimised & trim only works in 4k requests + 4k aligned
1270 */
1271 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" },
1272 /*quirks*/DA_Q_4K
1273 },
1274 {
1275 /*
1276 * Intel S3610 Series SSDs
1277 * 4k optimised & trim only works in 4k requests + 4k aligned
1278 */
1279 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BX*", "*" },
1280 /*quirks*/DA_Q_4K
1281 },
1282 {
1283 /*
1284 * Intel X25-M Series SSDs
1285 * 4k optimised & trim only works in 4k requests + 4k aligned
1286 */
1287 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" },
1288 /*quirks*/DA_Q_4K
1289 },
1290 {
1291 /*
1292 * Kingston E100 Series SSDs
1293 * 4k optimised & trim only works in 4k requests + 4k aligned
1294 */
1295 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" },
1296 /*quirks*/DA_Q_4K
1297 },
1298 {
1299 /*
1300 * Kingston HyperX 3k SSDs
1301 * 4k optimised & trim only works in 4k requests + 4k aligned
1302 */
1303 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" },
1304 /*quirks*/DA_Q_4K
1305 },
1306 {
1307 /*
1308 * Marvell SSDs (entry taken from OpenSolaris)
1309 * 4k optimised & trim only works in 4k requests + 4k aligned
1310 */
1311 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" },
1312 /*quirks*/DA_Q_4K
1313 },
1314 {
1315 /*
1316 * OCZ Agility 2 SSDs
1317 * 4k optimised & trim only works in 4k requests + 4k aligned
1318 */
1319 { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" },
1320 /*quirks*/DA_Q_4K
1321 },
1322 {
1323 /*
1324 * OCZ Agility 3 SSDs
1325 * 4k optimised & trim only works in 4k requests + 4k aligned
1326 */
1327 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" },
1328 /*quirks*/DA_Q_4K
1329 },
1330 {
1331 /*
1332 * OCZ Deneva R Series SSDs
1333 * 4k optimised & trim only works in 4k requests + 4k aligned
1334 */
1335 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" },
1336 /*quirks*/DA_Q_4K
1337 },
1338 {
1339 /*
1340 * OCZ Vertex 2 SSDs (inc pro series)
1341 * 4k optimised & trim only works in 4k requests + 4k aligned
1342 */
1343 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" },
1344 /*quirks*/DA_Q_4K
1345 },
1346 {
1347 /*
1348 * OCZ Vertex 3 SSDs
1349 * 4k optimised & trim only works in 4k requests + 4k aligned
1350 */
1351 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" },
1352 /*quirks*/DA_Q_4K
1353 },
1354 {
1355 /*
1356 * OCZ Vertex 4 SSDs
1357 * 4k optimised & trim only works in 4k requests + 4k aligned
1358 */
1359 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" },
1360 /*quirks*/DA_Q_4K
1361 },
1362 {
1363 /*
1364 * Samsung 750 Series SSDs
1365 * 4k optimised & trim only works in 4k requests + 4k aligned
1366 */
1367 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 750*", "*" },
1368 /*quirks*/DA_Q_4K
1369 },
1370 {
1371 /*
1372 * Samsung 830 Series SSDs
1373 * 4k optimised & trim only works in 4k requests + 4k aligned
1374 */
1375 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG SSD 830 Series*", "*" },
1376 /*quirks*/DA_Q_4K
1377 },
1378 {
1379 /*
1380 * Samsung 840 SSDs
1381 * 4k optimised & trim only works in 4k requests + 4k aligned
1382 */
1383 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" },
1384 /*quirks*/DA_Q_4K
1385 },
1386 {
1387 /*
1388 * Samsung 845 SSDs
1389 * 4k optimised & trim only works in 4k requests + 4k aligned
1390 */
1391 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 845*", "*" },
1392 /*quirks*/DA_Q_4K
1393 },
1394 {
1395 /*
1396 * Samsung 850 SSDs
1397 * 4k optimised & trim only works in 4k requests + 4k aligned
1398 */
1399 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" },
1400 /*quirks*/DA_Q_4K
1401 },
1402 {
1403 /*
1404 * Samsung 843T Series SSDs (MZ7WD*)
1405 * Samsung PM851 Series SSDs (MZ7TE*)
1406 * Samsung PM853T Series SSDs (MZ7GE*)
1407 * Samsung SM863 Series SSDs (MZ7KM*)
1408 * 4k optimised
1409 */
1410 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" },
1411 /*quirks*/DA_Q_4K
1412 },
1413 {
1414 /*
1415 * Same as for SAMSUNG MZ7* above, but also enable the quirks for
1416 * SSDs whose model string starts with MZ7*.
1417 */
1418 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MZ7*", "*" },
1419 /*quirks*/DA_Q_4K
1420 },
1421 {
1422 /*
1423 * Same as above, but for SAMSUNG MZ7* SSDs connected via a
1424 * SATA-to-SAS interposer, which therefore report a vendor string
1425 * that does not start with "ATA"
1426 */
1427 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MZ7*", "*" },
1428 /*quirks*/DA_Q_4K
1429 },
1430 {
1431 /*
1432 * SuperTalent TeraDrive CT SSDs
1433 * 4k optimised & trim only works in 4k requests + 4k aligned
1434 */
1435 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" },
1436 /*quirks*/DA_Q_4K
1437 },
1438 {
1439 /*
1440 * XceedIOPS SATA SSDs
1441 * 4k optimised
1442 */
1443 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" },
1444 /*quirks*/DA_Q_4K
1445 },
1446 {
1447 /*
1448 * Hama Innostor USB-Stick
1449 */
1450 { T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" },
1451 /*quirks*/DA_Q_NO_RC16
1452 },
1453 {
1454 /*
1455 * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR)
1456 * Drive Managed SATA hard drive. This drive doesn't report
1457 * in firmware that it is a drive managed SMR drive.
1458 */
1459 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS000[23]*", "*" },
1460 /*quirks*/DA_Q_SMR_DM
1461 },
1462 {
1463 /*
1464 * MX-ES USB Drive by Mach Xtreme
1465 */
1466 { T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"},
1467 /*quirks*/DA_Q_NO_RC16
1468 },
1469};
1470
1471static disk_strategy_t dastrategy;
1472static dumper_t dadump;
1474static void daasync(void *callback_arg, u_int32_t code,
1475 struct cam_path *path, void *arg);
1476static void dasysctlinit(void *context, int pending);
1477static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS);
1478static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
1479static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS);
1480static int dabitsysctl(SYSCTL_HANDLER_ARGS);
1481static int daflagssysctl(SYSCTL_HANDLER_ARGS);
1482static int dazonemodesysctl(SYSCTL_HANDLER_ARGS);
1483static int dazonesupsysctl(SYSCTL_HANDLER_ARGS);
1484static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS);
1485static void dadeletemethodset(struct da_softc *softc,
1486 da_delete_methods delete_method);
1487static off_t dadeletemaxsize(struct da_softc *softc,
1488 da_delete_methods delete_method);
1489static void dadeletemethodchoose(struct da_softc *softc,
1490 da_delete_methods default_method);
1491static void daprobedone(struct cam_periph *periph, union ccb *ccb);
1492
1497static void dazonedone(struct cam_periph *periph, union ccb *ccb);
1498static void dadone(struct cam_periph *periph,
1499 union ccb *done_ccb);
1500static void dadone_probewp(struct cam_periph *periph,
1501 union ccb *done_ccb);
1502static void dadone_proberc(struct cam_periph *periph,
1503 union ccb *done_ccb);
1504static void dadone_probelbp(struct cam_periph *periph,
1505 union ccb *done_ccb);
1506static void dadone_probeblklimits(struct cam_periph *periph,
1507 union ccb *done_ccb);
1508static void dadone_probebdc(struct cam_periph *periph,
1509 union ccb *done_ccb);
1510static void dadone_probeata(struct cam_periph *periph,
1511 union ccb *done_ccb);
1512static void dadone_probeatalogdir(struct cam_periph *periph,
1513 union ccb *done_ccb);
1514static void dadone_probeataiddir(struct cam_periph *periph,
1515 union ccb *done_ccb);
1516static void dadone_probeatasup(struct cam_periph *periph,
1517 union ccb *done_ccb);
1518static void dadone_probeatazone(struct cam_periph *periph,
1519 union ccb *done_ccb);
1520static void dadone_probezone(struct cam_periph *periph,
1521 union ccb *done_ccb);
1522static void dadone_tur(struct cam_periph *periph,
1523 union ccb *done_ccb);
1524static int daerror(union ccb *ccb, u_int32_t cam_flags,
1525 u_int32_t sense_flags);
1526static void daprevent(struct cam_periph *periph, int action);
1527static void dareprobe(struct cam_periph *periph);
1528static void dasetgeom(struct cam_periph *periph, uint32_t block_len,
1529 uint64_t maxsector,
1530 struct scsi_read_capacity_data_long *rcaplong,
1531 size_t rcap_size);
1532static callout_func_t dasendorderedtag;
1533static void dashutdown(void *arg, int howto);
1534static callout_func_t damediapoll;
1535
1536#ifndef DA_DEFAULT_POLL_PERIOD
1537#define DA_DEFAULT_POLL_PERIOD 3
1538#endif
1539
1540#ifndef DA_DEFAULT_TIMEOUT
1541#define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */
1542#endif
1543
1544#ifndef DA_DEFAULT_SOFTTIMEOUT
1545#define DA_DEFAULT_SOFTTIMEOUT 0
1546#endif
1547
1548#ifndef DA_DEFAULT_RETRY
1549#define DA_DEFAULT_RETRY 4
1550#endif
1551
1552#ifndef DA_DEFAULT_SEND_ORDERED
1553#define DA_DEFAULT_SEND_ORDERED 1
1554#endif
1555
1563static int da_enable_uma_ccbs = 1;
1564
1565static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
1566 "CAM Direct Access Disk driver");
1567SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN,
1568 &da_poll_period, 0, "Media polling period in seconds");
1569SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN,
1570 &da_retry_count, 0, "Normal I/O retry count");
1571SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
1572 &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
1573SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
1574 &da_send_ordered, 0, "Send Ordered Tags");
1575SYSCTL_INT(_kern_cam_da, OID_AUTO, disable_wp_detection, CTLFLAG_RWTUN,
1577 "Disable detection of write-protected disks");
1578SYSCTL_INT(_kern_cam_da, OID_AUTO, enable_biospeedup, CTLFLAG_RDTUN,
1579 &da_enable_biospeedup, 0, "Enable BIO_SPEEDUP processing");
1580SYSCTL_INT(_kern_cam_da, OID_AUTO, enable_uma_ccbs, CTLFLAG_RWTUN,
1581 &da_enable_uma_ccbs, 0, "Use UMA for CCBs");
1582
1583SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout,
1584 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
1586 "Soft I/O timeout (ms)");
1587TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout);
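/*
 * Illustrative usage (not part of the driver itself): the knobs above are
 * ordinary loader tunables / sysctls, so an administrator could, for
 * example, set in /boot/loader.conf:
 *
 *	kern.cam.da.default_timeout="90"
 *	kern.cam.da.retry_count="8"
 *
 * or adjust the RWTUN ones at runtime:
 *
 *	sysctl kern.cam.da.poll_period=10
 */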
1588
1589/*
1590 * DA_ORDEREDTAG_INTERVAL determines how often, relative
1591 * to the default timeout, we check to see whether an ordered
1592 * tagged transaction is appropriate to prevent simple tag
1593 * starvation. Since we'd like to ensure that there is at least
1594 * 1/2 of the timeout length left for a starved transaction to
1595 * complete after we've sent an ordered tag, we must poll at least
1596 * four times in every timeout period. This takes care of the worst
1597 * case where a starved transaction starts during an interval that
1598 * passes the "don't send an ordered tag" test, so it takes
1599 * us two intervals to determine that a tag must be sent.
1600 */
1601#ifndef DA_ORDEREDTAG_INTERVAL
1602#define DA_ORDEREDTAG_INTERVAL 4
1603#endif
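/*
 * Illustrative arithmetic (not part of the driver itself): with the
 * defaults above, DA_DEFAULT_TIMEOUT = 60 seconds and
 * DA_ORDEREDTAG_INTERVAL = 4, so the ordered-tag check runs every
 * 60 / 4 = 15 seconds, and at most two intervals (30 seconds) pass before
 * a starved transaction gets an ordered tag, leaving at least half of the
 * timeout for it to complete.
 */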
1604
1606{
1607 dainit, "da",
1608 TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
1609};
1610
1612
1613static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");
1614
1615/*
1616 * This driver takes out references / holds in well defined pairs, never
1617 * recursively. These macros / inline functions enforce those rules. They
1618 * are only enabled with DA_TRACK_REFS or INVARIANTS. If DA_TRACK_REFS is
1619 * defined to be 2 or larger, the tracking also includes debug printfs.
1620 */
1621#if defined(DA_TRACK_REFS) || defined(INVARIANTS)
1622
1623#ifndef DA_TRACK_REFS
1624#define DA_TRACK_REFS 1
1625#endif
1626
1627#if DA_TRACK_REFS > 1
1628static const char *da_ref_text[] = {
1629 "bogus",
1630 "open",
1631 "open hold",
1632 "close hold",
1633 "reprobe hold",
1634 "Test Unit Ready",
1635 "Geom",
1636 "sysctl",
1637 "reprobe",
1638 "max -- also bogus"
1639};
1640
1641#define DA_PERIPH_PRINT(periph, msg, args...) \
1642 CAM_PERIPH_PRINT(periph, msg, ##args)
1643#else
1644#define DA_PERIPH_PRINT(periph, msg, args...)
1645#endif
1646
1647static inline void
1648token_sanity(da_ref_token token)
1649{
1650 if ((unsigned)token >= DA_REF_MAX)
1651 panic("Bad token value passed in %d\n", token);
1652}
1653
1654static inline int
1655da_periph_hold(struct cam_periph *periph, int priority, da_ref_token token)
1656{
1657 int err = cam_periph_hold(periph, priority);
1658
1659 token_sanity(token);
1660 DA_PERIPH_PRINT(periph, "Holding device %s (%d): %d\n",
1661 da_ref_text[token], token, err);
1662 if (err == 0) {
1663 int cnt;
1664 struct da_softc *softc = periph->softc;
1665
1666 cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1);
1667 if (cnt != 0)
1668 panic("Re-holding for reason %d, cnt = %d", token, cnt);
1669 }
1670 return (err);
1671}
1672
1673static inline void
1674da_periph_unhold(struct cam_periph *periph, da_ref_token token)
1675{
1676 int cnt;
1677 struct da_softc *softc = periph->softc;
1678
1679 token_sanity(token);
1680 DA_PERIPH_PRINT(periph, "Unholding device %s (%d)\n",
1681 da_ref_text[token], token);
1682 cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
1683 if (cnt != 1)
1684 panic("Unholding %d with cnt = %d", token, cnt);
1685 cam_periph_unhold(periph);
1686}
1687
1688static inline int
1689da_periph_acquire(struct cam_periph *periph, da_ref_token token)
1690{
1691 int err = cam_periph_acquire(periph);
1692
1693 token_sanity(token);
1694 DA_PERIPH_PRINT(periph, "acquiring device %s (%d): %d\n",
1695 da_ref_text[token], token, err);
1696 if (err == 0) {
1697 int cnt;
1698 struct da_softc *softc = periph->softc;
1699
1700 cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1);
1701 if (cnt != 0)
1702 panic("Re-refing for reason %d, cnt = %d", token, cnt);
1703 }
1704 return (err);
1705}
1706
1707static inline void
1708da_periph_release(struct cam_periph *periph, da_ref_token token)
1709{
1710 int cnt;
1711 struct da_softc *softc = periph->softc;
1712
1713 token_sanity(token);
1714 DA_PERIPH_PRINT(periph, "releasing device %s (%d)\n",
1715 da_ref_text[token], token);
1716 cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
1717 if (cnt != 1)
1718 panic("Releasing %d with cnt = %d", token, cnt);
1719 cam_periph_release(periph);
1720}
1721
1722static inline void
1724{
1725 int cnt;
1726 struct da_softc *softc = periph->softc;
1727
1728 token_sanity(token);
1729 DA_PERIPH_PRINT(periph, "releasing device (locked) %s (%d)\n",
1730 da_ref_text[token], token);
1731 cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
1732 if (cnt != 1)
1733 panic("releasing (locked) %d with cnt = %d", token, cnt);
1735}
1736
1737#define cam_periph_hold POISON
1738#define cam_periph_unhold POISON
1739#define cam_periph_acquire POISON
1740#define cam_periph_release POISON
1741#define cam_periph_release_locked POISON
1742
1743#else
1744#define da_periph_hold(periph, prio, token) cam_periph_hold((periph), (prio))
1745#define da_periph_unhold(periph, token) cam_periph_unhold((periph))
1746#define da_periph_acquire(periph, token) cam_periph_acquire((periph))
1747#define da_periph_release(periph, token) cam_periph_release((periph))
1748#define da_periph_release_locked(periph, token) cam_periph_release_locked((periph))
1749#endif
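/*
 * Illustrative sketch (not part of the driver itself) of the pairing rule
 * the wrappers above enforce: a given token may be held/acquired at most
 * once and must be released/unheld with the same token.  For example,
 * daopen() below acquires DA_REF_OPEN, and the matching release belongs on
 * its error path or in close:
 *
 *	if (da_periph_acquire(periph, DA_REF_OPEN) != 0)
 *		return (ENXIO);
 *	...
 *	da_periph_release(periph, DA_REF_OPEN);
 *
 * Acquiring the same token twice without an intervening release panics when
 * DA_TRACK_REFS or INVARIANTS is enabled.
 */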
1750
1751static int
1752daopen(struct disk *dp)
1753{
1754 struct cam_periph *periph;
1755 struct da_softc *softc;
1756 int error;
1757
1758 periph = (struct cam_periph *)dp->d_drv1;
1759 if (da_periph_acquire(periph, DA_REF_OPEN) != 0) {
1760 return (ENXIO);
1761 }
1762
1763 cam_periph_lock(periph);
1764 if ((error = da_periph_hold(periph, PRIBIO|PCATCH, DA_REF_OPEN_HOLD)) != 0) {
1765 cam_periph_unlock(periph);
1767 return (error);
1768 }
1769
1771 ("daopen\n"));
1772
1773 softc = (struct da_softc *)periph->softc;
1774 dareprobe(periph);
1775
1776 /* Wait for the disk size update. */
1777 error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO,
1778 "dareprobe", 0);
1779 if (error != 0)
1780 xpt_print(periph->path, "unable to retrieve capacity data\n");
1781
1782 if (periph->flags & CAM_PERIPH_INVALID)
1783 error = ENXIO;
1784
1785 if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
1786 (softc->quirks & DA_Q_NO_PREVENT) == 0)
1787 daprevent(periph, PR_PREVENT);
1788
1789 if (error == 0) {
1790 softc->flags &= ~DA_FLAG_PACK_INVALID;
1791 softc->flags |= DA_FLAG_OPEN;
1792 }
1793
1795 cam_periph_unlock(periph);
1796
1797 if (error != 0)
1799
1800 return (error);
1801}
1802
1803static int
1804daclose(struct disk *dp)
1805{
1806 struct cam_periph *periph;
1807 struct da_softc *softc;
1808 union ccb *ccb;
1809
1810 periph = (struct cam_periph *)dp->d_drv1;
1811 softc = (struct da_softc *)periph->softc;
1812 cam_periph_lock(periph);
1814 ("daclose\n"));
1815
1816 if (da_periph_hold(periph, PRIBIO, DA_REF_CLOSE_HOLD) == 0) {
1817 /* Flush disk cache. */
1818 if ((softc->flags & DA_FLAG_DIRTY) != 0 &&
1819 (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 &&
1820 (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
1822 scsi_synchronize_cache(&ccb->csio, /*retries*/1,
1823 /*cbfcnp*/NULL, MSG_SIMPLE_Q_TAG,
1824 /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE,
1825 5 * 60 * 1000);
1826 cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
1827 /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR,
1828 softc->disk->d_devstat);
1829 softc->flags &= ~DA_FLAG_DIRTY;
1831 }
1832
1833 /* Allow medium removal. */
1834 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
1835 (softc->quirks & DA_Q_NO_PREVENT) == 0)
1836 daprevent(periph, PR_ALLOW);
1837
1839 }
1840
1841 /*
1842 * If we've got removable media, mark the blocksize as
1843 * unavailable, since it could change when new media is
1844 * inserted.
1845 */
1846 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)
1847 softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE;
1848
1849 softc->flags &= ~DA_FLAG_OPEN;
1850 while (softc->refcount != 0)
1851 cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1);
1852 cam_periph_unlock(periph);
1854 return (0);
1855}
1856
1857static void
1858daschedule(struct cam_periph *periph)
1859{
1860 struct da_softc *softc = (struct da_softc *)periph->softc;
1861
1862 if (softc->state != DA_STATE_NORMAL)
1863 return;
1864
1865 cam_iosched_schedule(softc->cam_iosched, periph);
1866}
1867
1868/*
1869 * Actually translate the requested transfer into one the physical driver
1870 * can understand. The transfer is described by a buf and will include
1871 * only one physical transfer.
1872 */
1873static void
1874dastrategy(struct bio *bp)
1875{
1876 struct cam_periph *periph;
1877 struct da_softc *softc;
1878
1879 periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1880 softc = (struct da_softc *)periph->softc;
1881
1882 cam_periph_lock(periph);
1883
1884 /*
1885 * If the device has been made invalid, error out
1886 */
1887 if ((softc->flags & DA_FLAG_PACK_INVALID)) {
1888 cam_periph_unlock(periph);
1889 biofinish(bp, NULL, ENXIO);
1890 return;
1891 }
1892
1893 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp));
1894
1895 /*
1896 * Zone commands must be ordered, because they can depend on the
1897 * effects of previously issued commands, and they may affect
1898 * commands after them.
1899 */
1900 if (bp->bio_cmd == BIO_ZONE)
1901 bp->bio_flags |= BIO_ORDERED;
1902
1903 /*
1904 * Place it in the queue of disk activities for this disk
1905 */
1906 cam_iosched_queue_work(softc->cam_iosched, bp);
1907
1908 /*
1909 * Schedule ourselves for performing the work.
1910 */
1911 daschedule(periph);
1912 cam_periph_unlock(periph);
1913
1914 return;
1915}
1916
1917static int
1918dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
1919{
1920 struct cam_periph *periph;
1921 struct da_softc *softc;
1922 u_int secsize;
1923 struct ccb_scsiio csio;
1924 struct disk *dp;
1925 int error = 0;
1926
1927 dp = arg;
1928 periph = dp->d_drv1;
1929 softc = (struct da_softc *)periph->softc;
1930 secsize = softc->params.secsize;
1931
1932 if ((softc->flags & DA_FLAG_PACK_INVALID) != 0)
1933 return (ENXIO);
1934
1935 memset(&csio, 0, sizeof(csio));
1936 if (length > 0) {
1938 csio.ccb_h.ccb_state = DA_CCB_DUMP;
1939 scsi_read_write(&csio,
1940 /*retries*/0,
1941 /*cbfcnp*/NULL,
1943 /*read*/SCSI_RW_WRITE,
1944 /*byte2*/0,
1945 /*minimum_cmd_size*/ softc->minimum_cmd_size,
1946 offset / secsize,
1947 length / secsize,
1948 /*data_ptr*/(u_int8_t *) virtual,
1949 /*dxfer_len*/length,
1950 /*sense_len*/SSD_FULL_SIZE,
1951 da_default_timeout * 1000);
1952 error = cam_periph_runccb((union ccb *)&csio, cam_periph_error,
1953 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
1954 if (error != 0)
1955 printf("Aborting dump due to I/O error.\n");
1956 return (error);
1957 }
1958
1959 /*
1960 * Sync the disk cache contents to the physical media.
1961 */
1962 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
1964 csio.ccb_h.ccb_state = DA_CCB_DUMP;
1966 /*retries*/0,
1967 /*cbfcnp*/NULL,
1969 /*begin_lba*/0,/* Cover the whole disk */
1970 /*lb_count*/0,
1972 5 * 1000);
1973 error = cam_periph_runccb((union ccb *)&csio, cam_periph_error,
1974 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
1975 if (error != 0)
1976 xpt_print(periph->path, "Synchronize cache failed\n");
1977 }
1978 return (error);
1979}
1980
1981static int
1982dagetattr(struct bio *bp)
1983{
1984 int ret;
1985 struct cam_periph *periph;
1986
1987 if (g_handleattr_int(bp, "GEOM::canspeedup", da_enable_biospeedup))
1988 return (EJUSTRETURN);
1989
1990 periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1991 cam_periph_lock(periph);
1992 ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
1993 periph->path);
1994 cam_periph_unlock(periph);
1995 if (ret == 0)
1996 bp->bio_completed = bp->bio_length;
1997 return ret;
1998}
1999
2000static void
2001dainit(void)
2002{
2003 cam_status status;
2004
2005 da_ccb_zone = uma_zcreate("da_ccb",
2006 sizeof(struct ccb_scsiio), NULL, NULL, NULL, NULL,
2007 UMA_ALIGN_PTR, 0);
2008
2009 /*
2010 * Install a global async callback. This callback will
2011 * receive async callbacks like "new device found".
2012 */
2013 status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);
2014
2015 if (status != CAM_REQ_CMP) {
2016 printf("da: Failed to attach master async callback "
2017 "due to status 0x%x!\n", status);
2018 } else if (da_send_ordered) {
2019 /* Register our shutdown event handler */
2020 if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
2021 NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
2022 printf("dainit: shutdown event registration failed!\n");
2023 }
2024}
2025
2026/*
2027 * Callback from GEOM, called when it has finished cleaning up its
2028 * resources.
2029 */
2030static void
2031dadiskgonecb(struct disk *dp)
2032{
2033 struct cam_periph *periph;
2034
2035 periph = (struct cam_periph *)dp->d_drv1;
2037}
2038
2039static void
2041{
2042 struct da_softc *softc;
2043
2044 cam_periph_assert(periph, MA_OWNED);
2045 softc = (struct da_softc *)periph->softc;
2046
2047 /*
2048 * De-register any async callbacks.
2049 */
2050 xpt_register_async(0, daasync, periph, periph->path);
2051
2052 softc->flags |= DA_FLAG_PACK_INVALID;
2053#ifdef CAM_IO_STATS
2054 softc->invalidations++;
2055#endif
2056
2057 /*
2058 * Return all queued I/O with ENXIO.
2059 * XXX Handle any transactions queued to the card
2060 * with XPT_ABORT_CCB.
2061 */
2062 cam_iosched_flush(softc->cam_iosched, NULL, ENXIO);
2063
2064 /*
2065 * Tell GEOM that we've gone away, we'll get a callback when it is
2066 * done cleaning up its resources.
2067 */
2068 disk_gone(softc->disk);
2069}
2070
2071static void
2072dacleanup(struct cam_periph *periph)
2073{
2074 struct da_softc *softc;
2075
2076 softc = (struct da_softc *)periph->softc;
2077
2078 cam_periph_unlock(periph);
2079
2081
2082 /*
2083 * If we can't free the sysctl tree, oh well...
2084 */
2085 if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) {
2086#ifdef CAM_IO_STATS
2087 if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0)
2088 xpt_print(periph->path,
2089 "can't remove sysctl stats context\n");
2090#endif
2091 if (sysctl_ctx_free(&softc->sysctl_ctx) != 0)
2092 xpt_print(periph->path,
2093 "can't remove sysctl context\n");
2094 }
2095
2096 callout_drain(&softc->mediapoll_c);
2097 disk_destroy(softc->disk);
2098 callout_drain(&softc->sendordered_c);
2099 free(softc, M_DEVBUF);
2100 cam_periph_lock(periph);
2101}
2102
2103static void
2104daasync(void *callback_arg, u_int32_t code,
2105 struct cam_path *path, void *arg)
2106{
2107 struct cam_periph *periph;
2108 struct da_softc *softc;
2109
2110 periph = (struct cam_periph *)callback_arg;
2111 switch (code) {
2112 case AC_FOUND_DEVICE: /* callback to create periph, no locking yet */
2113 {
2114 struct ccb_getdev *cgd;
2115 cam_status status;
2116
2117 cgd = (struct ccb_getdev *)arg;
2118 if (cgd == NULL)
2119 break;
2120
2121 if (cgd->protocol != PROTO_SCSI)
2122 break;
2124 break;
2125 if (SID_TYPE(&cgd->inq_data) != T_DIRECT
2126 && SID_TYPE(&cgd->inq_data) != T_RBC
2127 && SID_TYPE(&cgd->inq_data) != T_OPTICAL
2128 && SID_TYPE(&cgd->inq_data) != T_ZBC_HM)
2129 break;
2130
2131 /*
2132 * Allocate a peripheral instance for
2133 * this device and start the probe
2134 * process.
2135 */
2138 "da", CAM_PERIPH_BIO,
2139 path, daasync,
2140 AC_FOUND_DEVICE, cgd);
2141
2142 if (status != CAM_REQ_CMP
2143 && status != CAM_REQ_INPROG)
2144 printf("daasync: Unable to attach to new device "
2145 "due to status 0x%x\n", status);
2146 return;
2147 }
2148 case AC_ADVINFO_CHANGED: /* Doesn't touch periph */
2149 {
2150 uintptr_t buftype;
2151
2152 buftype = (uintptr_t)arg;
2153 if (buftype == CDAI_TYPE_PHYS_PATH) {
2154 struct da_softc *softc;
2155
2156 softc = periph->softc;
2157 disk_attr_changed(softc->disk, "GEOM::physpath",
2158 M_NOWAIT);
2159 }
2160 break;
2161 }
2162 case AC_UNIT_ATTENTION: /* Called for this path: periph locked */
2163 {
2164 union ccb *ccb;
2165 int error_code, sense_key, asc, ascq;
2166
2167 softc = (struct da_softc *)periph->softc;
2168 ccb = (union ccb *)arg;
2169
2170 /*
2171 * Handle all UNIT ATTENTIONs except our own, as they will be
2172 * handled by daerror().
2173 */
2174 if (xpt_path_periph(ccb->ccb_h.path) != periph &&
2175 scsi_extract_sense_ccb(ccb,
2176 &error_code, &sense_key, &asc, &ascq)) {
2177 if (asc == 0x2A && ascq == 0x09) {
2178 xpt_print(ccb->ccb_h.path,
2179 "Capacity data has changed\n");
2180 cam_periph_assert(periph, MA_OWNED);
2181 softc->flags &= ~DA_FLAG_PROBED;
2182 dareprobe(periph);
2183 } else if (asc == 0x28 && ascq == 0x00) {
2184 cam_periph_assert(periph, MA_OWNED);
2185 softc->flags &= ~DA_FLAG_PROBED;
2186 disk_media_changed(softc->disk, M_NOWAIT);
2187 } else if (asc == 0x3F && ascq == 0x03) {
2188 xpt_print(ccb->ccb_h.path,
2189 "INQUIRY data has changed\n");
2190 cam_periph_assert(periph, MA_OWNED);
2191 softc->flags &= ~DA_FLAG_PROBED;
2192 dareprobe(periph);
2193 }
2194 }
2195 break;
2196 }
2197 case AC_SCSI_AEN: /* Called for this path: periph locked */
2198 /*
2199 * This appears to be unused for SCSI devices; only ATA SIMs
2200 * generate it.
2201 */
2202 cam_periph_assert(periph, MA_OWNED);
2203 softc = (struct da_softc *)periph->softc;
2204 if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) &&
2205 (softc->flags & DA_FLAG_TUR_PENDING) == 0) {
2206 if (da_periph_acquire(periph, DA_REF_TUR) == 0) {
2207 cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
2208 daschedule(periph);
2209 }
2210 }
2211 /* FALLTHROUGH */
2212 case AC_SENT_BDR: /* Called for this path: periph locked */
2213 case AC_BUS_RESET: /* Called for this path: periph locked */
2214 {
2215 struct ccb_hdr *ccbh;
2216
2217 cam_periph_assert(periph, MA_OWNED);
2218 softc = (struct da_softc *)periph->softc;
2219 /*
2220 * Don't fail on the expected unit attention
2221 * that will occur.
2222 */
2223 softc->flags |= DA_FLAG_RETRY_UA;
2224 LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
2225 ccbh->ccb_state |= DA_CCB_RETRY_UA;
2226 break;
2227 }
2228 case AC_INQ_CHANGED: /* Called for this path: periph locked */
2229 cam_periph_assert(periph, MA_OWNED);
2230 softc = (struct da_softc *)periph->softc;
2231 softc->flags &= ~DA_FLAG_PROBED;
2232 dareprobe(periph);
2233 break;
2234 default:
2235 break;
2236 }
2237 cam_periph_async(periph, code, path, arg);
2238}
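The UNIT ATTENTION handling above keys off a few well-known ASC/ASCQ pairs. As a stand-alone illustration (not driver code), the same pairs can be decoded with a simple lookup table; the table, function and action strings below are invented for the example, the sense-code meanings follow SPC.

#include <stddef.h>
#include <stdio.h>

struct ua_entry {
	unsigned char	asc;
	unsigned char	ascq;
	const char	*desc;
	const char	*action;
};

/* The three ASC/ASCQ pairs daasync() reacts to above. */
static const struct ua_entry ua_table[] = {
	{ 0x2A, 0x09, "Capacity data has changed", "reprobe" },
	{ 0x28, 0x00, "Not ready to ready change, medium may have changed",
	    "notify GEOM of media change" },
	{ 0x3F, 0x03, "INQUIRY data has changed", "reprobe" },
};

static const struct ua_entry *
ua_lookup(unsigned char asc, unsigned char ascq)
{
	for (size_t i = 0; i < sizeof(ua_table) / sizeof(ua_table[0]); i++)
		if (ua_table[i].asc == asc && ua_table[i].ascq == ascq)
			return (&ua_table[i]);
	return (NULL);
}

int
main(void)
{
	const struct ua_entry *e = ua_lookup(0x2A, 0x09);

	if (e != NULL)
		printf("ASC/ASCQ 2A/09: %s -> %s\n", e->desc, e->action);
	return (0);
}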
2239
2240static void
2241dasysctlinit(void *context, int pending)
2242{
2243 struct cam_periph *periph;
2244 struct da_softc *softc;
2245 char tmpstr[32], tmpstr2[16];
2246 struct ccb_trans_settings cts;
2247
2248 periph = (struct cam_periph *)context;
2249 /*
2250 * periph was held for us when this task was enqueued
2251 */
2252 if (periph->flags & CAM_PERIPH_INVALID) {
2253 da_periph_release(periph, DA_REF_SYSCTL);
2254 return;
2255 }
2256
2257 softc = (struct da_softc *)periph->softc;
2258 snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number);
2259 snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
2260
2261 sysctl_ctx_init(&softc->sysctl_ctx);
2262 cam_periph_lock(periph);
2263 softc->flags |= DA_FLAG_SCTX_INIT;
2264 cam_periph_unlock(periph);
2265 softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx,
2266 SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
2267 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr, "device_index");
2268 if (softc->sysctl_tree == NULL) {
2269 printf("dasysctlinit: unable to allocate sysctl tree\n");
2270 da_periph_release(periph, DA_REF_SYSCTL);
2271 return;
2272 }
2273
2274 /*
2275 * Now register the sysctl handler, so the user can change the value on
2276 * the fly.
2277 */
2278 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2279 OID_AUTO, "delete_method",
2280 CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
2281 softc, 0, dadeletemethodsysctl, "A",
2282 "BIO_DELETE execution method");
2283 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2284 OID_AUTO, "delete_max",
2285 CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE,
2286 softc, 0, dadeletemaxsysctl, "Q",
2287 "Maximum BIO_DELETE size");
2288 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2289 OID_AUTO, "minimum_cmd_size",
2290 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
2291 &softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
2292 "Minimum CDB size");
2293 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2294 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
2295 "trim_count", CTLFLAG_RD, &softc->trim_count,
2296 "Total number of unmap/dsm commands sent");
2297 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2298 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
2299 "trim_ranges", CTLFLAG_RD, &softc->trim_ranges,
2300 "Total number of ranges in unmap/dsm commands");
2301 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2302 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
2303 "trim_lbas", CTLFLAG_RD, &softc->trim_lbas,
2304 "Total lbas in the unmap/dsm commands sent");
2305
2306 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2307 OID_AUTO, "zone_mode",
2308 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
2309 softc, 0, dazonemodesysctl, "A",
2310 "Zone Mode");
2311 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2312 OID_AUTO, "zone_support",
2313 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
2314 softc, 0, dazonesupsysctl, "A",
2315 "Zone Support");
2316 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2317 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
2318 "optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones,
2319 "Optimal Number of Open Sequential Write Preferred Zones");
2320 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2321 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
2322 "optimal_nonseq_zones", CTLFLAG_RD,
2323 &softc->optimal_nonseq_zones,
2324 "Optimal Number of Non-Sequentially Written Sequential Write "
2325 "Preferred Zones");
2326 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2327 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
2328 "max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones,
2329 "Maximum Number of Open Sequential Write Required Zones");
2330
2331 SYSCTL_ADD_INT(&softc->sysctl_ctx,
2332 SYSCTL_CHILDREN(softc->sysctl_tree),
2333 OID_AUTO,
2334 "error_inject",
2335 CTLFLAG_RW,
2336 &softc->error_inject,
2337 0,
2338 "error_inject leaf");
2339
2340 SYSCTL_ADD_INT(&softc->sysctl_ctx,
2341 SYSCTL_CHILDREN(softc->sysctl_tree),
2342 OID_AUTO,
2343 "p_type",
2344 CTLFLAG_RD,
2345 &softc->p_type,
2346 0,
2347 "DIF protection type");
2348
2349 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2350 OID_AUTO, "flags", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
2351 softc, 0, daflagssysctl, "A",
2352 "Flags for drive");
2353 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2354 OID_AUTO, "rotating", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
2355 &softc->flags, (u_int)DA_FLAG_ROTATING, dabitsysctl, "I",
2356 "Rotating media *DEPRECATED* gone in FreeBSD 14");
2357 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2358 OID_AUTO, "unmapped_io", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
2359 &softc->flags, (u_int)DA_FLAG_UNMAPPEDIO, dabitsysctl, "I",
2360 "Unmapped I/O support *DEPRECATED* gone in FreeBSD 14");
2361
2362#ifdef CAM_TEST_FAILURE
2363 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2364 OID_AUTO, "invalidate", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE,
2365 periph, 0, cam_periph_invalidate_sysctl, "I",
2366 "Write 1 to invalidate the drive immediately");
2367#endif
2368
2369 /*
2370 * Add some addressing info.
2371 */
2372 memset(&cts, 0, sizeof (cts));
2373 xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE);
2374 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
2375 cts.type = CTS_TYPE_CURRENT_SETTINGS;
2376 cam_periph_lock(periph);
2377 xpt_action((union ccb *)&cts);
2378 cam_periph_unlock(periph);
2379 if (cts.ccb_h.status != CAM_REQ_CMP) {
2380 da_periph_release(periph, DA_REF_SYSCTL);
2381 return;
2382 }
2383 if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) {
2384 struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
2385 if (fc->valid & CTS_FC_VALID_WWPN) {
2386 softc->wwpn = fc->wwpn;
2387 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2388 SYSCTL_CHILDREN(softc->sysctl_tree),
2389 OID_AUTO, "wwpn", CTLFLAG_RD,
2390 &softc->wwpn, "World Wide Port Name");
2391 }
2392 }
2393
2394#ifdef CAM_IO_STATS
2395 /*
2396 * Now add some useful stats.
2397 * XXX These should live in cam_periph and be common to all periphs
2398 */
2399 softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx,
2400 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats",
2401 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Statistics");
2402 SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
2403 SYSCTL_CHILDREN(softc->sysctl_stats_tree),
2404 OID_AUTO,
2405 "errors",
2406 CTLFLAG_RD,
2407 &softc->errors,
2408 0,
2409 "Transport errors reported by the SIM");
2410 SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
2411 SYSCTL_CHILDREN(softc->sysctl_stats_tree),
2412 OID_AUTO,
2413 "timeouts",
2414 CTLFLAG_RD,
2415 &softc->timeouts,
2416 0,
2417 "Device timeouts reported by the SIM");
2418 SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
2419 SYSCTL_CHILDREN(softc->sysctl_stats_tree),
2420 OID_AUTO,
2421 "pack_invalidations",
2422 CTLFLAG_RD,
2423 &softc->invalidations,
2424 0,
2425 "Device pack invalidations");
2426#endif
2427
2428 cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx,
2429 softc->sysctl_tree);
2430
2430
2431 da_periph_release(periph, DA_REF_SYSCTL);
2432}
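The leaves registered above live under kern.cam.da.<unit>. As an illustrative userland sketch (not part of the driver), they can be queried and changed with sysctlbyname(3); the unit number 0 and the method name "DISABLE" are assumptions for the example.

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char method[32];
	size_t len = sizeof(method);
	const char *want = "DISABLE";	/* assumed valid method name */

	/* Read the current BIO_DELETE method, e.g. "UNMAP" or "NONE". */
	if (sysctlbyname("kern.cam.da.0.delete_method", method, &len,
	    NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("da0 delete_method: %s\n", method);

	/* Request a different method; the handler validates the name. */
	if (sysctlbyname("kern.cam.da.0.delete_method", NULL, NULL,
	    want, strlen(want)) == -1)
		perror("sysctlbyname (set)");
	return (0);
}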
2433
2434static int
2435dadeletemaxsysctl(SYSCTL_HANDLER_ARGS)
2436{
2437 int error;
2438 uint64_t value;
2439 struct da_softc *softc;
2440
2441 softc = (struct da_softc *)arg1;
2442
2443 value = softc->disk->d_delmaxsize;
2444 error = sysctl_handle_64(oidp, &value, 0, req);
2445 if ((error != 0) || (req->newptr == NULL))
2446 return (error);
2447
2448 /* Only accept values no larger than the calculated maximum. */
2449 if (value > dadeletemaxsize(softc, softc->delete_method)) {
2450 return (EINVAL);
2451 }
2452 softc->disk->d_delmaxsize = value;
2453
2454 return (0);
2455}
2456
2457static int
2458dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
2459{
2460 int error, value;
2461
2462 value = *(int *)arg1;
2463
2464 error = sysctl_handle_int(oidp, &value, 0, req);
2465
2466 if ((error != 0)
2467 || (req->newptr == NULL))
2468 return (error);
2469
2470 /*
2471 * Acceptable values here are 6, 10, 12 or 16.
2472 */
2473 if (value < 6)
2474 value = 6;
2475 else if ((value > 6)
2476 && (value <= 10))
2477 value = 10;
2478 else if ((value > 10)
2479 && (value <= 12))
2480 value = 12;
2481 else if (value > 12)
2482 value = 16;
2483
2484 *(int *)arg1 = value;
2485
2486 return (0);
2487}
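dacmdsizesysctl() folds whatever the user writes onto one of the supported CDB sizes. A minimal stand-alone sketch of that rounding (the helper name is invented for the example):

#include <assert.h>

static int
round_cmd_size(int value)
{
	/* Acceptable values are 6, 10, 12 or 16; round up to the next one. */
	if (value <= 6)
		return (6);
	else if (value <= 10)
		return (10);
	else if (value <= 12)
		return (12);
	else
		return (16);
}

int
main(void)
{
	assert(round_cmd_size(0) == 6);
	assert(round_cmd_size(7) == 10);
	assert(round_cmd_size(12) == 12);
	assert(round_cmd_size(64) == 16);
	return (0);
}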
2488
2489static int
2490dasysctlsofttimeout(SYSCTL_HANDLER_ARGS)
2491{
2492 sbintime_t value;
2493 int error;
2494
2495 value = da_default_softtimeout / SBT_1MS;
2496
2497 error = sysctl_handle_int(oidp, (int *)&value, 0, req);
2498 if ((error != 0) || (req->newptr == NULL))
2499 return (error);
2500
2501 /* XXX Should clip this to a reasonable level */
2502 if (value > da_default_timeout * 1000)
2503 return (EINVAL);
2504
2505 da_default_softtimeout = value * SBT_1MS;
2506 return (0);
2507}
2508
2509static void
2510dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method)
2511{
2512
2513 softc->delete_method = delete_method;
2514 softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method);
2515 softc->delete_func = da_delete_functions[delete_method];
2516
2517 if (softc->delete_method > DA_DELETE_DISABLE)
2518 softc->disk->d_flags |= DISKFLAG_CANDELETE;
2519 else
2520 softc->disk->d_flags &= ~DISKFLAG_CANDELETE;
2521}
2522
2523static off_t
2524dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method)
2525{
2526 off_t sectors;
2527
2528 switch(delete_method) {
2529 case DA_DELETE_UNMAP:
2530 sectors = (off_t)softc->unmap_max_lba;
2531 break;
2532 case DA_DELETE_ATA_TRIM:
2533 sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges;
2534 break;
2535 case DA_DELETE_WS16:
2536 sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS);
2537 break;
2538 case DA_DELETE_ZERO:
2539 case DA_DELETE_WS10:
2540 sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS);
2541 break;
2542 default:
2543 return 0;
2544 }
2545
2546 return (off_t)softc->params.secsize *
2547 omin(sectors, softc->params.sectors);
2548}
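For the UNMAP case the cap works out to secsize * min(unmap_max_lba, total sectors). A stand-alone sketch of that arithmetic, with invented sample values:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t secsize = 512;			/* logical block size */
	uint64_t disk_sectors = 7814037168ULL;	/* ~4 TB drive (assumed) */
	uint64_t unmap_max_lba = 0x80000000ULL;	/* device limit (assumed) */

	uint64_t sectors = unmap_max_lba < disk_sectors ?
	    unmap_max_lba : disk_sectors;
	uint64_t max_bytes = secsize * sectors;

	printf("largest single BIO_DELETE: %ju bytes\n",
	    (uintmax_t)max_bytes);
	return (0);
}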
2549
2550static void
2551daprobedone(struct cam_periph *periph, union ccb *ccb)
2552{
2553 struct da_softc *softc;
2554
2555 softc = (struct da_softc *)periph->softc;
2556
2557 cam_periph_assert(periph, MA_OWNED);
2558
2559 dadeletemethodchoose(softc, DA_DELETE_NONE);
2560
2561 if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
2562 char buf[80];
2563 int i, sep;
2564
2565 snprintf(buf, sizeof(buf), "Delete methods: <");
2566 sep = 0;
2567 for (i = 0; i <= DA_DELETE_MAX; i++) {
2568 if ((softc->delete_available & (1 << i)) == 0 &&
2569 i != softc->delete_method)
2570 continue;
2571 if (sep)
2572 strlcat(buf, ",", sizeof(buf));
2573 strlcat(buf, da_delete_method_names[i],
2574 sizeof(buf));
2575 if (i == softc->delete_method)
2576 strlcat(buf, "(*)", sizeof(buf));
2577 sep = 1;
2578 }
2579 strlcat(buf, ">", sizeof(buf));
2580 printf("%s%d: %s\n", periph->periph_name,
2581 periph->unit_number, buf);
2582 }
2583 if ((softc->disk->d_flags & DISKFLAG_WRITE_PROTECT) != 0 &&
2584 (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
2585 printf("%s%d: Write Protected\n", periph->periph_name,
2586 periph->unit_number);
2587 }
2588
2589 /*
2590 * Since our peripheral may be invalidated by an error
2591 * above or an external event, we must release our CCB
2592 * before releasing the probe lock on the peripheral.
2593 * The peripheral will only go away once the last lock
2594 * is removed, and we need it around for the CCB release
2595 * operation.
2596 */
2597 xpt_release_ccb(ccb);
2598 softc->state = DA_STATE_NORMAL;
2599 softc->flags |= DA_FLAG_PROBED;
2600 daschedule(periph);
2601 wakeup(&softc->disk->d_mediasize);
2602 if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) {
2603 softc->flags |= DA_FLAG_ANNOUNCED;
2604 da_periph_unhold(periph, DA_REF_PROBE_HOLD);
2605 } else
2606 da_periph_release_locked(periph, DA_REF_REPROBE);
2607}
2608
2609static void
2610dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method)
2611{
2612 int i, methods;
2613
2614 /* If available, prefer the method requested by user. */
2615 i = softc->delete_method_pref;
2616 methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
2617 if (methods & (1 << i)) {
2618 dadeletemethodset(softc, i);
2619 return;
2620 }
2621
2622 /* Use the pre-defined order to choose the best performing delete. */
2623 for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) {
2624 if (i == DA_DELETE_ZERO)
2625 continue;
2626 if (softc->delete_available & (1 << i)) {
2627 dadeletemethodset(softc, i);
2628 return;
2629 }
2630 }
2631
2632 /* Fallback to default. */
2633 dadeletemethodset(softc, default_method);
2634}
2635
2636static int
2637dabitsysctl(SYSCTL_HANDLER_ARGS)
2638{
2639 u_int *flags = arg1;
2640 u_int test = arg2;
2641 int tmpout, error;
2642
2643 tmpout = !!(*flags & test);
2644 error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));
2645 if (error || !req->newptr)
2646 return (error);
2647
2648 return (EPERM);
2649}
2650
2651static int
2652daflagssysctl(SYSCTL_HANDLER_ARGS)
2653{
2654 struct sbuf sbuf;
2655 struct da_softc *softc = arg1;
2656 int error;
2657
2658 sbuf_new_for_sysctl(&sbuf, NULL, 0, req);
2659 if (softc->flags != 0)
2660 sbuf_printf(&sbuf, "0x%b", (unsigned)softc->flags, DA_FLAG_STRING);
2661 else
2662 sbuf_printf(&sbuf, "0");
2663 error = sbuf_finish(&sbuf);
2664 sbuf_delete(&sbuf);
2665
2666 return (error);
2667}
2668
2669static int
2670dadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
2671{
2672 char buf[16];
2673 const char *p;
2674 struct da_softc *softc;
2675 int i, error, value;
2676
2677 softc = (struct da_softc *)arg1;
2678
2679 value = softc->delete_method;
2680 if (value < 0 || value > DA_DELETE_MAX)
2681 p = "UNKNOWN";
2682 else
2683 p = da_delete_method_names[value];
2684 strncpy(buf, p, sizeof(buf));
2685 error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
2686 if (error != 0 || req->newptr == NULL)
2687 return (error);
2688 for (i = 0; i <= DA_DELETE_MAX; i++) {
2689 if (strcmp(buf, da_delete_method_names[i]) == 0)
2690 break;
2691 }
2692 if (i > DA_DELETE_MAX)
2693 return (EINVAL);
2694 softc->delete_method_pref = i;
2695 dadeletemethodchoose(softc, DA_DELETE_NONE);
2696 return (0);
2697}
2698
2699static int
2700dazonemodesysctl(SYSCTL_HANDLER_ARGS)
2701{
2702 char tmpbuf[40];
2703 struct da_softc *softc;
2704 int error;
2705
2706 softc = (struct da_softc *)arg1;
2707
2708 switch (softc->zone_mode) {
2709 case DA_ZONE_DRIVE_MANAGED:
2710 snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed");
2711 break;
2712 case DA_ZONE_HOST_AWARE:
2713 snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware");
2714 break;
2715 case DA_ZONE_HOST_MANAGED:
2716 snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed");
2717 break;
2718 case DA_ZONE_NONE:
2719 default:
2720 snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned");
2721 break;
2722 }
2723
2724 error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req);
2725
2726 return (error);
2727}
2728
2729static int
2730dazonesupsysctl(SYSCTL_HANDLER_ARGS)
2731{
2732 char tmpbuf[180];
2733 struct da_softc *softc;
2734 struct sbuf sb;
2735 int error, first;
2736 unsigned int i;
2737
2738 softc = (struct da_softc *)arg1;
2739
2740 error = 0;
2741 first = 1;
2742 sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0);
2743
2744 for (i = 0; i < sizeof(da_zone_desc_table) /
2745 sizeof(da_zone_desc_table[0]); i++) {
2746 if (softc->zone_flags & da_zone_desc_table[i].value) {
2747 if (first == 0)
2748 sbuf_printf(&sb, ", ");
2749 else
2750 first = 0;
2751 sbuf_cat(&sb, da_zone_desc_table[i].desc);
2752 }
2753 }
2754
2755 if (first == 1)
2756 sbuf_printf(&sb, "None");
2757
2758 sbuf_finish(&sb);
2759
2760 error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
2761
2762 return (error);
2763}
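The handler above joins the supported zone capabilities with ", " using sbuf. The same pattern works in userland against libsbuf (-lsbuf); this is an illustrative sketch with invented capability strings, not driver code.

#include <sys/sbuf.h>
#include <stdio.h>

int
main(void)
{
	const char *caps[] = { "Report Zones", "Open", "Close", "Finish" };
	char tmpbuf[180];
	struct sbuf sb;
	int first = 1;

	sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), SBUF_FIXEDLEN);
	for (unsigned i = 0; i < sizeof(caps) / sizeof(caps[0]); i++) {
		if (!first)
			sbuf_cat(&sb, ", ");
		first = 0;
		sbuf_cat(&sb, caps[i]);
	}
	if (first)
		sbuf_cat(&sb, "None");
	sbuf_finish(&sb);
	printf("%s\n", sbuf_data(&sb));
	sbuf_delete(&sb);
	return (0);
}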
2764
2765static cam_status
2766daregister(struct cam_periph *periph, void *arg)
2767{
2768 struct da_softc *softc;
2769 struct ccb_pathinq cpi;
2770 struct ccb_getdev *cgd;
2771 char tmpstr[80];
2772 caddr_t match;
2773 int quirks;
2774
2775 cgd = (struct ccb_getdev *)arg;
2776 if (cgd == NULL) {
2777 printf("daregister: no getdev CCB, can't register device\n");
2778 return(CAM_REQ_CMP_ERR);
2779 }
2780
2781 softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF,
2782 M_NOWAIT|M_ZERO);
2783
2784 if (softc == NULL) {
2785 printf("daregister: Unable to probe new device. "
2786 "Unable to allocate softc\n");
2787 return(CAM_REQ_CMP_ERR);
2788 }
2789
2790 if (cam_iosched_init(&softc->cam_iosched, periph) != 0) {
2791 printf("daregister: Unable to probe new device. "
2792 "Unable to allocate iosched memory\n");
2793 free(softc, M_DEVBUF);
2794 return(CAM_REQ_CMP_ERR);
2795 }
2796
2797 LIST_INIT(&softc->pending_ccbs);
2798 softc->state = DA_STATE_PROBE_WP;
2799 bioq_init(&softc->delete_run_queue);
2800 if (SID_IS_REMOVABLE(&cgd->inq_data))
2801 softc->flags |= DA_FLAG_PACK_REMOVABLE;
2802 softc->unmap_max_ranges = UNMAP_MAX_RANGES;
2803 softc->unmap_max_lba = UNMAP_RANGE_MAX;
2804 softc->unmap_gran = 0;
2805 softc->unmap_gran_align = 0;
2806 softc->ws_max_blks = WS16_MAX_BLKS;
2807 softc->trim_max_ranges = ATA_TRIM_MAX_RANGES;
2808 softc->flags |= DA_FLAG_ROTATING;
2809
2810 periph->softc = softc;
2811
2812 /*
2813 * See if this device has any quirks.
2814 */
2815 match = cam_quirkmatch((caddr_t)&cgd->inq_data,
2816 (caddr_t)da_quirk_table,
2817 nitems(da_quirk_table),
2818 sizeof(*da_quirk_table), scsi_inquiry_match);
2819
2820 if (match != NULL)
2821 softc->quirks = ((struct da_quirk_entry *)match)->quirks;
2822 else
2823 softc->quirks = DA_Q_NONE;
2824
2825 /* Check if the SIM does not want 6 byte commands */
2826 xpt_path_inq(&cpi, periph->path);
2827 if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
2828 softc->quirks |= DA_Q_NO_6_BYTE;
2829
2830 /* Override quirks if tunable is set */
2831 snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.quirks",
2832 periph->unit_number);
2833 quirks = softc->quirks;
2834 TUNABLE_INT_FETCH(tmpstr, &quirks);
2835 softc->quirks = quirks;
2836
2837 if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM)
2838 softc->zone_mode = DA_ZONE_HOST_MANAGED;
2839 else if (softc->quirks & DA_Q_SMR_DM)
2840 softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
2841 else
2842 softc->zone_mode = DA_ZONE_NONE;
2843
2844 if (softc->zone_mode != DA_ZONE_NONE) {
2845 if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
2846 if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC))
2847 softc->zone_interface = DA_ZONE_IF_ATA_SAT;
2848 else
2849 softc->zone_interface = DA_ZONE_IF_ATA_PASS;
2850 } else
2851 softc->zone_interface = DA_ZONE_IF_SCSI;
2852 }
2853
2854 TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);
2855
2856 /*
2857 * Let XPT know we can use UMA-allocated CCBs.
2858 */
2859 if (da_enable_uma_ccbs) {
2860 KASSERT(da_ccb_zone != NULL,
2861 ("%s: NULL da_ccb_zone", __func__));
2862 periph->ccb_zone = da_ccb_zone;
2863 }
2864
2865 /*
2866 * Take an exclusive section lock on the periph while dastart is called
2867 * to finish the probe. The lock will be dropped in dadone at the end
2868 * of probe. This locks out daopen and daclose from racing with the
2869 * probe.
2870 *
2871 * XXX if cam_periph_hold returns an error, we don't hold a refcount.
2872 */
2873 (void)da_periph_hold(periph, PRIBIO, DA_REF_PROBE_HOLD);
2874
2875 /*
2876 * Schedule a periodic event to occasionally send an
2877 * ordered tag to a device.
2878 */
2879 callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
2880 callout_reset_sbt(&softc->sendordered_c,
2881 SBT_1S / DA_ORDEREDTAG_INTERVAL * (DA_ORDEREDTAG_INTERVAL - 1), 0,
2882 dasendorderedtag, periph, C_PREL(1));
2883
2884 cam_periph_unlock(periph);
2885 /*
2886 * RBC devices don't have to support READ(6), only READ(10).
2887 */
2888 if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
2889 softc->minimum_cmd_size = 10;
2890 else
2891 softc->minimum_cmd_size = 6;
2892
2893 /*
2894 * Load the user's default, if any.
2895 */
2896 snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
2897 periph->unit_number);
2898 TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);
2899
2900 /*
2901 * 6, 10, 12 and 16 are the currently permissible values.
2902 */
2903 if (softc->minimum_cmd_size > 12)
2904 softc->minimum_cmd_size = 16;
2905 else if (softc->minimum_cmd_size > 10)
2906 softc->minimum_cmd_size = 12;
2907 else if (softc->minimum_cmd_size > 6)
2908 softc->minimum_cmd_size = 10;
2909 else
2910 softc->minimum_cmd_size = 6;
2911
2912 /* On the first PROBE_WP request all mode pages, then adjust. */
2913 softc->mode_page = SMS_ALL_PAGES_PAGE;
2914
2915 /* Predict whether device may support READ CAPACITY(16). */
2916 if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 &&
2917 (softc->quirks & DA_Q_NO_RC16) == 0) {
2918 softc->flags |= DA_FLAG_CAN_RC16;
2919 }
2920
2921 /*
2922 * Register this media as a disk.
2923 */
2924 softc->disk = disk_alloc();
2925 softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
2926 periph->unit_number, 0,
2927 DEVSTAT_BS_UNAVAILABLE,
2928 SID_TYPE(&cgd->inq_data) |
2929 XPORT_DEVSTAT_TYPE(cpi.transport),
2930 DEVSTAT_PRIORITY_DISK);
2931 softc->disk->d_open = daopen;
2932 softc->disk->d_close = daclose;
2933 softc->disk->d_strategy = dastrategy;
2934 if (cam_sim_pollable(periph->sim))
2935 softc->disk->d_dump = dadump;
2936 softc->disk->d_getattr = dagetattr;
2937 softc->disk->d_gone = dadiskgonecb;
2938 softc->disk->d_name = "da";
2939 softc->disk->d_drv1 = periph;
2940 if (cpi.maxio == 0)
2941 softc->maxio = DFLTPHYS; /* traditional default */
2942 else if (cpi.maxio > maxphys)
2943 softc->maxio = maxphys; /* for safety */
2944 else
2945 softc->maxio = cpi.maxio;
2946 if (softc->quirks & DA_Q_128KB)
2947 softc->maxio = min(softc->maxio, 128 * 1024);
2948 softc->disk->d_maxsize = softc->maxio;
2949 softc->disk->d_unit = periph->unit_number;
2950 softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE;
2951 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0)
2952 softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
2953 if ((cpi.hba_misc & PIM_UNMAPPED) != 0) {
2954 softc->flags |= DA_FLAG_UNMAPPEDIO;
2955 softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
2956 }
2957 cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor,
2958 sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr));
2959 strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr));
2960 cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)],
2961 cgd->inq_data.product, sizeof(cgd->inq_data.product),
2962 sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr));
2963 softc->disk->d_hba_vendor = cpi.hba_vendor;
2964 softc->disk->d_hba_device = cpi.hba_device;
2965 softc->disk->d_hba_subvendor = cpi.hba_subvendor;
2966 softc->disk->d_hba_subdevice = cpi.hba_subdevice;
2967 snprintf(softc->disk->d_attachment, sizeof(softc->disk->d_attachment),
2968 "%s%d", cpi.dev_name, cpi.unit_number);
2969
2970 /*
2971 * Acquire a reference to the periph before we register with GEOM.
2972 * We'll release this reference once GEOM calls us back (via
2973 * dadiskgonecb()) telling us that our provider has been freed.
2974 */
2975 if (da_periph_acquire(periph, DA_REF_GEOM) != 0) {
2976 xpt_print(periph->path, "%s: lost periph during "
2977 "registration!\n", __func__);
2978 cam_periph_lock(periph);
2979 return (CAM_REQ_CMP_ERR);
2980 }
2981
2982 disk_create(softc->disk, DISK_VERSION);
2983 cam_periph_lock(periph);
2984
2985 /*
2986 * Add async callbacks for events of interest.
2987 * I don't bother checking if this fails as,
2988 * in most cases, the system will function just
2989 * fine without them and the only alternative
2990 * would be to not attach the device on failure.
2991 */
2992 xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
2993 AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION |
2994 AC_INQ_CHANGED, daasync, periph, periph->path);
2995
2996 /*
2997 * Emit an attribute changed notification just in case
2998 * physical path information arrived before our async
2999 * event handler was registered, but after anyone attaching
3000 * to our disk device polled it.
3001 */
3002 disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT);
3003
3004 /*
3005 * Schedule periodic media polling events.
3006 */
3007 callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0);
3008 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) &&
3009 (cgd->inq_flags & SID_AEN) == 0 &&
3010 da_poll_period != 0) {
3011 callout_reset_sbt(&softc->mediapoll_c, da_poll_period * SBT_1S,
3012 0, damediapoll, periph, C_PREL(1));
3013 }
3014
3015 xpt_schedule(periph, CAM_PRIORITY_DEV);
3016
3017 return(CAM_REQ_CMP);
3018}
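daregister() derives d_maxsize from the SIM's reported maxio, with fallbacks and clamps. A minimal stand-alone sketch of that selection; DFLTPHYS and maxphys values below are assumptions for the example, as is the helper name.

#include <stdio.h>

#define	EX_DFLTPHYS	(64 * 1024)	/* traditional default (assumed) */
#define	EX_MAXPHYS	(1024 * 1024)	/* system limit (assumed) */

static unsigned
clamp_maxio(unsigned cpi_maxio, int has_128kb_quirk)
{
	unsigned maxio;

	if (cpi_maxio == 0)
		maxio = EX_DFLTPHYS;	/* SIM reported nothing */
	else if (cpi_maxio > EX_MAXPHYS)
		maxio = EX_MAXPHYS;	/* for safety */
	else
		maxio = cpi_maxio;
	if (has_128kb_quirk && maxio > 128 * 1024)
		maxio = 128 * 1024;	/* DA_Q_128KB devices */
	return (maxio);
}

int
main(void)
{
	printf("%u\n", clamp_maxio(0, 0));		/* 65536 */
	printf("%u\n", clamp_maxio(8u << 20, 0));	/* 1048576 */
	printf("%u\n", clamp_maxio(8u << 20, 1));	/* 131072 */
	return (0);
}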
3019
3020static int
3021da_zone_bio_to_scsi(int disk_zone_cmd)
3022{
3023 switch (disk_zone_cmd) {
3024 case DISK_ZONE_OPEN:
3025 return ZBC_OUT_SA_OPEN;
3026 case DISK_ZONE_CLOSE:
3027 return ZBC_OUT_SA_CLOSE;
3028 case DISK_ZONE_FINISH:
3029 return ZBC_OUT_SA_FINISH;
3030 case DISK_ZONE_RWP:
3031 return ZBC_OUT_SA_RWP;
3032 }
3033
3034 return -1;
3035}
3036
3037static int
3038da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp,
3039 int *queue_ccb)
3040{
3041 struct da_softc *softc;
3042 int error;
3043
3044 error = 0;
3045
3046 if (bp->bio_cmd != BIO_ZONE) {
3047 error = EINVAL;
3048 goto bailout;
3049 }
3050
3051 softc = periph->softc;
3052
3053 switch (bp->bio_zone.zone_cmd) {
3054 case DISK_ZONE_OPEN:
3055 case DISK_ZONE_CLOSE:
3056 case DISK_ZONE_FINISH:
3057 case DISK_ZONE_RWP: {
3058 int zone_flags;
3059 int zone_sa;
3060 uint64_t lba;
3061
3062 zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd);
3063 if (zone_sa == -1) {
3064 xpt_print(periph->path, "Cannot translate zone "
3065 "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd);
3066 error = EINVAL;
3067 goto bailout;
3068 }
3069
3070 zone_flags = 0;
3071 lba = bp->bio_zone.zone_params.rwp.id;
3072
3073 if (bp->bio_zone.zone_params.rwp.flags &
3074 DISK_ZONE_RWP_FLAG_ALL)
3075 zone_flags |= ZBC_OUT_ALL;
3076
3077 if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
3078 scsi_zbc_out(&ccb->csio,
3079 /*retries*/ da_retry_count,
3080 /*cbfcnp*/ dadone,
3081 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3082 /*service_action*/ zone_sa,
3083 /*zone_id*/ lba,
3084 /*zone_flags*/ zone_flags,
3085 /*data_ptr*/ NULL,
3086 /*dxfer_len*/ 0,
3087 /*sense_len*/ SSD_FULL_SIZE,
3088 /*timeout*/ da_default_timeout * 1000);
3089 } else {
3090 /*
3091 * Note that in this case, even though we can
3092 * technically use NCQ, we don't bother for several
3093 * reasons:
3094 * 1. It hasn't been tested on a SAT layer that
3095 * supports it. This is new as of SAT-4.
3096 * 2. Even when there is a SAT layer that supports
3097 * it, that SAT layer will also probably support
3098 * ZBC -> ZAC translation, since they are both
3099 * in the SAT-4 spec.
3100 * 3. Translation will likely be preferable to ATA
3101 * passthrough. LSI / Avago at least single
3102 * steps ATA passthrough commands in the HBA,
3103 * regardless of protocol, so unless that
3104 * changes, there is a performance penalty for
3105 * doing ATA passthrough no matter whether
3106 * you're using NCQ/FPDMA, DMA or PIO.
3107 * 4. It requires a 32-byte CDB, which at least at
3108 * this point in CAM requires a CDB pointer, which
3109 * would require us to allocate an additional bit
3110 * of storage separate from the CCB.
3111 */
3112 error = scsi_ata_zac_mgmt_out(&ccb->csio,
3113 /*retries*/ da_retry_count,
3114 /*cbfcnp*/ dadone,
3115 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3116 /*use_ncq*/ 0,
3117 /*zm_action*/ zone_sa,
3118 /*zone_id*/ lba,
3119 /*zone_flags*/ zone_flags,
3120 /*data_ptr*/ NULL,
3121 /*dxfer_len*/ 0,
3122 /*cdb_storage*/ NULL,
3123 /*cdb_storage_len*/ 0,
3124 /*sense_len*/ SSD_FULL_SIZE,
3125 /*timeout*/ da_default_timeout * 1000);
3126 if (error != 0) {
3127 error = EINVAL;
3128 xpt_print(periph->path,
3129 "scsi_ata_zac_mgmt_out() returned an "
3130 "error!");
3131 goto bailout;
3132 }
3133 }
3134 *queue_ccb = 1;
3135
3136 break;
3137 }
3138 case DISK_ZONE_REPORT_ZONES: {
3139 uint8_t *rz_ptr;
3140 uint32_t num_entries, alloc_size;
3141 struct disk_zone_report *rep;
3142
3143 rep = &bp->bio_zone.zone_params.report;
3144
3145 num_entries = rep->entries_allocated;
3146 if (num_entries == 0) {
3147 xpt_print(periph->path, "No entries allocated for "
3148 "Report Zones request\n");
3149 error = EINVAL;
3150 goto bailout;
3151 }
3152 alloc_size = sizeof(struct scsi_report_zones_hdr) +
3153 (sizeof(struct scsi_report_zones_desc) * num_entries);
3154 alloc_size = min(alloc_size, softc->disk->d_maxsize);
3155 rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO);
3156 if (rz_ptr == NULL) {
3157 xpt_print(periph->path, "Unable to allocate memory "
3158 "for Report Zones request\n");
3159 error = ENOMEM;
3160 goto bailout;
3161 }
3162
3163 if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
3164 scsi_zbc_in(&ccb->csio,
3165 /*retries*/ da_retry_count,
3166 /*cbcfnp*/ dadone,
3167 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3168 /*service_action*/ ZBC_IN_SA_REPORT_ZONES,
3169 /*zone_start_lba*/ rep->starting_id,
3170 /*zone_options*/ rep->rep_options,
3171 /*data_ptr*/ rz_ptr,
3172 /*dxfer_len*/ alloc_size,
3173 /*sense_len*/ SSD_FULL_SIZE,
3174 /*timeout*/ da_default_timeout * 1000);
3175 } else {
3176 /*
3177 * Note that in this case, even though we can
3178 * technically use NCQ, we don't bother for several
3179 * reasons:
3180 * 1. It hasn't been tested on a SAT layer that
3181 * supports it. This is new as of SAT-4.
3182 * 2. Even when there is a SAT layer that supports
3183 * it, that SAT layer will also probably support
3184 * ZBC -> ZAC translation, since they are both
3185 * in the SAT-4 spec.
3186 * 3. Translation will likely be preferable to ATA
3187 * passthrough. LSI / Avago at least single
3188 * steps ATA passthrough commands in the HBA,
3189 * regardless of protocol, so unless that
3190 * changes, there is a performance penalty for
3191 * doing ATA passthrough no matter whether
3192 * you're using NCQ/FPDMA, DMA or PIO.
3193 * 4. It requires a 32-byte CDB, which at least at
3194 * this point in CAM requires a CDB pointer, which
3195 * would require us to allocate an additional bit
3196 * of storage separate from the CCB.
3197 */
3198 error = scsi_ata_zac_mgmt_in(&ccb->csio,
3199 /*retries*/ da_retry_count,
3200 /*cbcfnp*/ dadone,
3201 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3202 /*use_ncq*/ 0,
3203 /*zm_action*/ ATA_ZM_REPORT_ZONES,
3204 /*zone_id*/ rep->starting_id,
3205 /*zone_flags*/ rep->rep_options,
3206 /*data_ptr*/ rz_ptr,
3207 /*dxfer_len*/ alloc_size,
3208 /*cdb_storage*/ NULL,
3209 /*cdb_storage_len*/ 0,
3210 /*sense_len*/ SSD_FULL_SIZE,
3211 /*timeout*/ da_default_timeout * 1000);
3212 if (error != 0) {
3213 error = EINVAL;
3214 xpt_print(periph->path,
3215 "scsi_ata_zac_mgmt_in() returned an "
3216 "error!");
3217 goto bailout;
3218 }
3219 }
3220
3221 /*
3222 * For BIO_ZONE, this isn't normally needed. However, it
3223 * is used by devstat_end_transaction_bio() to determine
3224 * how much data was transferred.
3225 */
3226 /*
3227 * XXX KDM we have a problem. But I'm not sure how to fix
3228 * it. devstat uses bio_bcount - bio_resid to calculate
3229 * the amount of data transferred. The GEOM disk code
3230 * uses bio_length - bio_resid to calculate the amount of
3231 * data in bio_completed. We have different structure
3232 * sizes above and below the da(4) driver. So, if we
3233 * use the sizes above, the amount transferred won't be
3234 * quite accurate for devstat. If we use different sizes
3235 * for bio_bcount and bio_length (above and below
3236 * respectively), then the residual needs to match one or
3237 * the other. Everything is calculated after the bio
3238 * leaves the driver, so changing the values around isn't
3239 * really an option. For now, just set the count to the
3240 * passed in length. This means that the calculations
3241 * above (e.g. bio_completed) will be correct, but the
3242 * amount of data reported to devstat will be slightly
3243 * under or overstated.
3244 */
3245 bp->bio_bcount = bp->bio_length;
3246
3247 *queue_ccb = 1;
3248
3249 break;
3250 }
3251 case DISK_ZONE_GET_PARAMS: {
3252 struct disk_zone_disk_params *params;
3253
3254 params = &bp->bio_zone.zone_params.disk_params;
3255 bzero(params, sizeof(*params));
3256
3257 switch (softc->zone_mode) {
3258 case DA_ZONE_DRIVE_MANAGED:
3259 params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED;
3260 break;
3261 case DA_ZONE_HOST_AWARE:
3262 params->zone_mode = DISK_ZONE_MODE_HOST_AWARE;
3263 break;
3264 case DA_ZONE_HOST_MANAGED:
3265 params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED;
3266 break;
3267 default:
3268 case DA_ZONE_NONE:
3269 params->zone_mode = DISK_ZONE_MODE_NONE;
3270 break;
3271 }
3272
3273 if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ)
3274 params->flags |= DISK_ZONE_DISK_URSWRZ;
3275
3276 if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) {
3277 params->optimal_seq_zones = softc->optimal_seq_zones;
3278 params->flags |= DISK_ZONE_OPT_SEQ_SET;
3279 }
3280
3281 if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) {
3282 params->optimal_nonseq_zones =
3283 softc->optimal_nonseq_zones;
3284 params->flags |= DISK_ZONE_OPT_NONSEQ_SET;
3285 }
3286
3287 if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) {
3288 params->max_seq_zones = softc->max_seq_zones;
3289 params->flags |= DISK_ZONE_MAX_SEQ_SET;
3290 }
3291 if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP)
3292 params->flags |= DISK_ZONE_RZ_SUP;
3293
3294 if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP)
3295 params->flags |= DISK_ZONE_OPEN_SUP;
3296
3297 if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP)
3298 params->flags |= DISK_ZONE_CLOSE_SUP;
3299
3300 if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP)
3301 params->flags |= DISK_ZONE_FINISH_SUP;
3302
3303 if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP)
3304 params->flags |= DISK_ZONE_RWP_SUP;
3305 break;
3306 }
3307 default:
3308 break;
3309 }
3310bailout:
3311 return (error);
3312}
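The BIO_ZONE requests handled by da_zone_cmd() are generated by GEOM on behalf of the disk zone ioctl. As an illustrative userland sketch (not driver code), and assuming the DIOCZONECMD ioctl and struct disk_zone_args from <sys/disk_zone.h>, fetching the zone parameters of an example device might look like:

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/disk_zone.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct disk_zone_args zargs;
	int fd;

	fd = open("/dev/da0", O_RDONLY);	/* example device */
	if (fd == -1) {
		perror("open");
		return (1);
	}
	memset(&zargs, 0, sizeof(zargs));
	zargs.zone_cmd = DISK_ZONE_GET_PARAMS;
	if (ioctl(fd, DIOCZONECMD, &zargs) == -1) {
		perror("DIOCZONECMD");
		close(fd);
		return (1);
	}
	printf("zone mode: %u, max open seq. zones: %ju\n",
	    (unsigned)zargs.zone_params.disk_params.zone_mode,
	    (uintmax_t)zargs.zone_params.disk_params.max_seq_zones);
	close(fd);
	return (0);
}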
3313
3314static void
3315dastart(struct cam_periph *periph, union ccb *start_ccb)
3316{
3317 struct da_softc *softc;
3318
3319 cam_periph_assert(periph, MA_OWNED);
3320 softc = (struct da_softc *)periph->softc;
3321
3322 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n"));
3323
3324skipstate:
3325 switch (softc->state) {
3326 case DA_STATE_NORMAL:
3327 {
3328 struct bio *bp;
3329 uint8_t tag_code;
3330
3331more:
3332 bp = cam_iosched_next_bio(softc->cam_iosched);
3333 if (bp == NULL) {
3334 if (cam_iosched_has_work_flags(softc->cam_iosched,
3335 DA_WORK_TUR)) {
3336 softc->flags |= DA_FLAG_TUR_PENDING;
3337 cam_iosched_clr_work_flags(softc->cam_iosched,
3338 DA_WORK_TUR);
3339 scsi_test_unit_ready(&start_ccb->csio,
3340 /*retries*/ da_retry_count,
3341 dadone_tur,
3342 MSG_SIMPLE_Q_TAG,
3343 SSD_FULL_SIZE,
3344 da_default_timeout * 1000);
3345 start_ccb->ccb_h.ccb_bp = NULL;
3346 start_ccb->ccb_h.ccb_state = DA_CCB_TUR;
3347 xpt_action(start_ccb);
3348 } else
3349 xpt_release_ccb(start_ccb);
3350 break;
3351 }
3352
3353 if (bp->bio_cmd == BIO_DELETE) {
3354 if (softc->delete_func != NULL) {
3355 softc->delete_func(periph, start_ccb, bp);
3356 goto out;
3357 } else {
3358 /*
3359 * Not sure this is possible, but failsafe by
3360 * lying and saying "sure, done."
3361 */
3362 biofinish(bp, NULL, 0);
3363 goto more;
3364 }
3365 }
3366
3367 if (cam_iosched_has_work_flags(softc->cam_iosched,
3368 DA_WORK_TUR)) {
3369 cam_iosched_clr_work_flags(softc->cam_iosched,
3370 DA_WORK_TUR);
3371 da_periph_release_locked(periph, DA_REF_TUR);
3372 }
3373
3374 if ((bp->bio_flags & BIO_ORDERED) != 0 ||
3375 (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
3376 softc->flags &= ~DA_FLAG_NEED_OTAG;
3377 softc->flags |= DA_FLAG_WAS_OTAG;
3378 tag_code = MSG_ORDERED_Q_TAG;
3379 } else {
3380 tag_code = MSG_SIMPLE_Q_TAG;
3381 }
3382
3383 switch (bp->bio_cmd) {
3384 case BIO_WRITE:
3385 case BIO_READ:
3386 {
3387 void *data_ptr;
3388 int rw_op;
3389
3390 biotrack(bp, __func__);
3391
3392 if (bp->bio_cmd == BIO_WRITE) {
3393 softc->flags |= DA_FLAG_DIRTY;
3394 rw_op = SCSI_RW_WRITE;
3395 } else {
3396 rw_op = SCSI_RW_READ;
3397 }
3398
3399 data_ptr = bp->bio_data;
3400 if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) {
3401 rw_op |= SCSI_RW_BIO;
3402 data_ptr = bp;
3403 }
3404
3405 scsi_read_write(&start_ccb->csio,
3406 /*retries*/da_retry_count,
3407 /*cbfcnp*/dadone,
3408 /*tag_action*/tag_code,
3409 rw_op,
3410 /*byte2*/0,
3411 softc->minimum_cmd_size,
3412 /*lba*/bp->bio_pblkno,
3413 /*block_count*/bp->bio_bcount /
3414 softc->params.secsize,
3415 data_ptr,
3416 /*dxfer_len*/ bp->bio_bcount,
3417 /*sense_len*/SSD_FULL_SIZE,
3418 da_default_timeout * 1000);
3419#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
3420 start_ccb->csio.bio = bp;
3421#endif
3422 break;
3423 }
3424 case BIO_FLUSH:
3425 /*
3426 * If we don't support sync cache, or the disk
3427 * isn't dirty, FLUSH is a no-op. Use the
3428 * allocated CCB for the next bio if one is
3429 * available.
3430 */
3431 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) != 0 ||
3432 (softc->flags & DA_FLAG_DIRTY) == 0) {
3433 biodone(bp);
3434 goto skipstate;
3435 }
3436
3437 /*
3438 * BIO_FLUSH doesn't currently communicate
3439 * range data, so we synchronize the cache
3440 * over the whole disk.
3441 */
3442 scsi_synchronize_cache(&start_ccb->csio,
3443 /*retries*/1,
3444 /*cbfcnp*/dadone,
3445 /*tag_action*/tag_code,
3446 /*begin_lba*/0,
3447 /*lb_count*/0,
3448 /*sense_len*/SSD_FULL_SIZE,
3449 da_default_timeout*1000);
3450 /*
3451 * Clear the dirty flag before sending the command.
3452 * Either this sync cache will be successful, or it
3453 * will fail after a retry. If it fails, it is
3454 * unlikely to be successful if retried later, so
3455 * we'll save ourselves time by just marking the
3456 * device clean.
3457 */
3458 softc->flags &= ~DA_FLAG_DIRTY;
3459 break;
3460 case BIO_ZONE: {
3461 int error, queue_ccb;
3462
3463 queue_ccb = 0;
3464
3465 error = da_zone_cmd(periph, start_ccb, bp, &queue_ccb);
3466 if ((error != 0)
3467 || (queue_ccb == 0)) {
3468 biofinish(bp, NULL, error);
3469 xpt_release_ccb(start_ccb);
3470 return;
3471 }
3472 break;
3473 }
3474 default:
3475 biofinish(bp, NULL, EOPNOTSUPP);
3476 xpt_release_ccb(start_ccb);
3477 return;
3478 }
3479 start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
3480 start_ccb->ccb_h.flags |= CAM_UNLOCKED;
3481 start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout);
3482
3483out:
3484 LIST_INSERT_HEAD(&softc->pending_ccbs,
3485 &start_ccb->ccb_h, periph_links.le);
3486
3487 /* We expect a unit attention from this device */
3488 if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
3489 start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
3490 softc->flags &= ~DA_FLAG_RETRY_UA;
3491 }
3492
3493 start_ccb->ccb_h.ccb_bp = bp;
3494 softc->refcount++;
3495 cam_periph_unlock(periph);
3496 xpt_action(start_ccb);
3497 cam_periph_lock(periph);
3498
3499 /* May have more work to do, so ensure we stay scheduled */
3500 daschedule(periph);
3501 break;
3502 }
3503 case DA_STATE_PROBE_WP:
3504 {
3505 void *mode_buf;
3506 int mode_buf_len;
3507
3508 if (da_disable_wp_detection || softc->mode_page < 0) {
3509 if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
3510 softc->state = DA_STATE_PROBE_RC16;
3511 else
3512 softc->state = DA_STATE_PROBE_RC;
3513 goto skipstate;
3514 }
3515 mode_buf_len = 192;
3516 mode_buf = malloc(mode_buf_len, M_SCSIDA, M_NOWAIT);
3517 if (mode_buf == NULL) {
3518 xpt_print(periph->path, "Unable to send mode sense - "
3519 "malloc failure\n");
3520 if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
3521 softc->state = DA_STATE_PROBE_RC16;
3522 else
3523 softc->state = DA_STATE_PROBE_RC;
3524 goto skipstate;
3525 }
3526 scsi_mode_sense_len(&start_ccb->csio,
3527 /*retries*/ da_retry_count,
3528 /*cbfcnp*/ dadone_probewp,
3529 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3530 /*dbd*/ FALSE,
3531 /*pc*/ SMS_PAGE_CTRL_CURRENT,
3532 /*page*/ softc->mode_page,
3533 /*param_buf*/ mode_buf,
3534 /*param_len*/ mode_buf_len,
3535 /*minimum_cmd_size*/ softc->minimum_cmd_size,
3536 /*sense_len*/ SSD_FULL_SIZE,
3537 /*timeout*/ da_default_timeout * 1000);
3538 start_ccb->ccb_h.ccb_bp = NULL;
3539 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_WP;
3540 xpt_action(start_ccb);
3541 break;
3542 }
3543 case DA_STATE_PROBE_RC:
3544 {
3545 struct scsi_read_capacity_data *rcap;
3546
3547 rcap = (struct scsi_read_capacity_data *)
3548 malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO);
3549 if (rcap == NULL) {
3550 printf("dastart: Couldn't malloc read_capacity data\n");
3551 /* da_free_periph??? */
3552 break;
3553 }
3554 scsi_read_capacity(&start_ccb->csio,
3555 /*retries*/da_retry_count,
3556 dadone_proberc,
3557 MSG_SIMPLE_Q_TAG,
3558 rcap,
3559 SSD_FULL_SIZE,
3560 /*timeout*/5000);
3561 start_ccb->ccb_h.ccb_bp = NULL;
3562 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC;
3563 xpt_action(start_ccb);
3564 break;
3565 }
3566 case DA_STATE_PROBE_RC16:
3567 {
3568 struct scsi_read_capacity_data_long *rcaplong;
3569
3570 rcaplong = (struct scsi_read_capacity_data_long *)
3571 malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO);
3572 if (rcaplong == NULL) {
3573 printf("dastart: Couldn't malloc read_capacity data\n");
3574 /* da_free_periph??? */
3575 break;
3576 }
3577 scsi_read_capacity_16(&start_ccb->csio,
3578 /*retries*/ da_retry_count,
3579 /*cbfcnp*/ dadone_proberc,
3580 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3581 /*lba*/ 0,
3582 /*reladr*/ 0,
3583 /*pmi*/ 0,
3584 /*rcap_buf*/ (uint8_t *)rcaplong,
3585 /*rcap_buf_len*/ sizeof(*rcaplong),
3586 /*sense_len*/ SSD_FULL_SIZE,
3587 /*timeout*/ da_default_timeout * 1000);
3588 start_ccb->ccb_h.ccb_bp = NULL;
3589 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16;
3590 xpt_action(start_ccb);
3591 break;
3592 }
3593 case DA_STATE_PROBE_LBP:
3594 {
3595 struct scsi_vpd_logical_block_prov *lbp;
3596
3597 if (!scsi_vpd_supported_page(periph, SVPD_LBP)) {
3598 /*
3599 * If we get here, we don't support any SBC-3 delete
3600 * methods with UNMAP, since support for the Logical
3601 * Block Provisioning VPD page is required of devices
3602 * that support it per T10/1799-D Revision 31.
3603 * Older revisions of the spec don't mandate this,
3604 * however, so we currently don't remove these methods
3605 * from the available set.
3606 */
3607 softc->state = DA_STATE_PROBE_BLK_LIMITS;
3608 goto skipstate;
3609 }
3610
3611 lbp = (struct scsi_vpd_logical_block_prov *)
3612 malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO);
3613
3614 if (lbp == NULL) {
3615 printf("dastart: Couldn't malloc lbp data\n");
3616 /* da_free_periph??? */
3617 break;
3618 }
3619
3620 scsi_inquiry(&start_ccb->csio,
3621 /*retries*/da_retry_count,
3622 /*cbfcnp*/dadone_probelbp,
3623 /*tag_action*/MSG_SIMPLE_Q_TAG,
3624 /*inq_buf*/(u_int8_t *)lbp,
3625 /*inq_len*/sizeof(*lbp),
3626 /*evpd*/TRUE,
3627 /*page_code*/SVPD_LBP,
3628 /*sense_len*/SSD_MIN_SIZE,
3629 /*timeout*/da_default_timeout * 1000);
3630 start_ccb->ccb_h.ccb_bp = NULL;
3631 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP;
3632 xpt_action(start_ccb);
3633 break;
3634 }
3635 case DA_STATE_PROBE_BLK_LIMITS:
3636 {
3637 struct scsi_vpd_block_limits *block_limits;
3638
3639 if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) {
3640 /* Not supported; skip to the next probe. */
3641 softc->state = DA_STATE_PROBE_BDC;
3642 goto skipstate;
3643 }
3644
3645 block_limits = (struct scsi_vpd_block_limits *)
3646 malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO);
3647
3648 if (block_limits == NULL) {
3649 printf("dastart: Couldn't malloc block_limits data\n");
3650 /* da_free_periph??? */
3651 break;
3652 }
3653
3654 scsi_inquiry(&start_ccb->csio,
3655 /*retries*/da_retry_count,
3656 /*cbfcnp*/dadone_probeblklimits,
3657 /*tag_action*/MSG_SIMPLE_Q_TAG,
3658 /*inq_buf*/(u_int8_t *)block_limits,
3659 /*inq_len*/sizeof(*block_limits),
3660 /*evpd*/TRUE,
3661 /*page_code*/SVPD_BLOCK_LIMITS,
3662 /*sense_len*/SSD_MIN_SIZE,
3663 /*timeout*/da_default_timeout * 1000);
3664 start_ccb->ccb_h.ccb_bp = NULL;
3665 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS;
3666 xpt_action(start_ccb);
3667 break;
3668 }
3669 case DA_STATE_PROBE_BDC:
3670 {
3671 struct scsi_vpd_block_device_characteristics *bdc;
3672
3673 if (!scsi_vpd_supported_page(periph, SVPD_BDC)) {
3674 softc->state = DA_STATE_PROBE_ATA;
3675 goto skipstate;
3676 }
3677
3678 bdc = (struct scsi_vpd_block_device_characteristics *)
3679 malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);
3680
3681 if (bdc == NULL) {
3682 printf("dastart: Couldn't malloc bdc data\n");
3683 /* da_free_periph??? */
3684 break;
3685 }
3686
3687 scsi_inquiry(&start_ccb->csio,
3688 /*retries*/da_retry_count,
3689 /*cbfcnp*/dadone_probebdc,
3690 /*tag_action*/MSG_SIMPLE_Q_TAG,
3691 /*inq_buf*/(u_int8_t *)bdc,
3692 /*inq_len*/sizeof(*bdc),
3693 /*evpd*/TRUE,
3694 /*page_code*/SVPD_BDC,
3695 /*sense_len*/SSD_MIN_SIZE,
3696 /*timeout*/da_default_timeout * 1000);
3697 start_ccb->ccb_h.ccb_bp = NULL;
3698 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC;
3699 xpt_action(start_ccb);
3700 break;
3701 }
3702 case DA_STATE_PROBE_ATA:
3703 {
3704 struct ata_params *ata_params;
3705
3707 if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
3708 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
3709 /*
3710 * Note that if the ATA VPD page isn't
3711 * supported, we aren't talking to an ATA
3712 * device anyway. Support for that VPD
3713 * page is mandatory for SCSI to ATA (SAT)
3714 * translation layers.
3715 */
3716 softc->state = DA_STATE_PROBE_ZONE;
3717 goto skipstate;
3718 }
3719 daprobedone(periph, start_ccb);
3720 break;
3721 }
3722
3723 ata_params = &periph->path->device->ident_data;
3724
3725 scsi_ata_identify(&start_ccb->csio,
3726 /*retries*/da_retry_count,
3727 /*cbfcnp*/dadone_probeata,
3728 /*tag_action*/MSG_SIMPLE_Q_TAG,
3729 /*data_ptr*/(u_int8_t *)ata_params,
3730 /*dxfer_len*/sizeof(*ata_params),
3731 /*sense_len*/SSD_FULL_SIZE,
3732 /*timeout*/da_default_timeout * 1000);
3733 start_ccb->ccb_h.ccb_bp = NULL;
3734 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA;
3735 xpt_action(start_ccb);
3736 break;
3737 }
3738 case DA_STATE_PROBE_ATA_LOGDIR:
3739 {
3740 struct ata_gp_log_dir *log_dir;
3741 int retval;
3742
3743 retval = 0;
3744
3745 if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) {
3746 /*
3747 * If we don't have log support, not much point in
3748 * trying to probe zone support.
3749 */
3750 daprobedone(periph, start_ccb);
3751 break;
3752 }
3753
3754 /*
3755 * If we have an ATA device (the SCSI ATA Information VPD
3756 * page should be present and the ATA identify should have
3757 * succeeded) and it supports logs, ask for the log directory.
3758 */
3759
3760 log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO);
3761 if (log_dir == NULL) {
3762 xpt_print(periph->path, "Couldn't malloc log_dir "
3763 "data\n");
3764 daprobedone(periph, start_ccb);
3765 break;
3766 }
3767
3768 retval = scsi_ata_read_log(&start_ccb->csio,
3769 /*retries*/ da_retry_count,
3770 /*cbfcnp*/ dadone_probeatalogdir,
3771 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3772 /*log_address*/ ATA_LOG_DIRECTORY,
3773 /*page_number*/ 0,
3774 /*block_count*/ 1,
3775 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3776 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3777 /*data_ptr*/ (uint8_t *)log_dir,
3778 /*dxfer_len*/ sizeof(*log_dir),
3779 /*sense_len*/ SSD_FULL_SIZE,
3780 /*timeout*/ da_default_timeout * 1000);
3781
3782 if (retval != 0) {
3783 xpt_print(periph->path, "scsi_ata_read_log() failed!");
3784 free(log_dir, M_SCSIDA);
3785 daprobedone(periph, start_ccb);
3786 break;
3787 }
3788 start_ccb->ccb_h.ccb_bp = NULL;
3789 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR;
3790 xpt_action(start_ccb);
3791 break;
3792 }
3793 case DA_STATE_PROBE_ATA_IDDIR:
3794 {
3795 struct ata_identify_log_pages *id_dir;
3796 int retval;
3797
3798 retval = 0;
3799
3800 /*
3801 * Check here to see whether the Identify Device log is
3802 * supported in the directory of logs. If so, continue
3803 * with requesting the log of identify device pages.
3804 */
3805 if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) {
3806 daprobedone(periph, start_ccb);
3807 break;
3808 }
3809
3810 id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO);
3811 if (id_dir == NULL) {
3812 xpt_print(periph->path, "Couldn't malloc id_dir "
3813 "data\n");
3814 daprobedone(periph, start_ccb);
3815 break;
3816 }
3817
3818 retval = scsi_ata_read_log(&start_ccb->csio,
3819 /*retries*/ da_retry_count,
3820 /*cbfcnp*/ dadone_probeataiddir,
3821 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3822 /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3823 /*page_number*/ ATA_IDL_PAGE_LIST,
3824 /*block_count*/ 1,
3825 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3826 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3827 /*data_ptr*/ (uint8_t *)id_dir,
3828 /*dxfer_len*/ sizeof(*id_dir),
3829 /*sense_len*/ SSD_FULL_SIZE,
3830 /*timeout*/ da_default_timeout * 1000);
3831
3832 if (retval != 0) {
3833 xpt_print(periph->path, "scsi_ata_read_log() failed!");
3834 free(id_dir, M_SCSIDA);
3835 daprobedone(periph, start_ccb);
3836 break;
3837 }
3838 start_ccb->ccb_h.ccb_bp = NULL;
3839 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR;
3840 xpt_action(start_ccb);
3841 break;
3842 }
3843 case DA_STATE_PROBE_ATA_SUP:
3844 {
3845 struct ata_identify_log_sup_cap *sup_cap;
3846 int retval;
3847
3848 retval = 0;
3849
3850 /*
3851 * Check here to see whether the Supported Capabilities log
3852 * is in the list of Identify Device logs.
3853 */
3854 if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) {
3855 daprobedone(periph, start_ccb);
3856 break;
3857 }
3858
3859 sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO);
3860 if (sup_cap == NULL) {
3861 xpt_print(periph->path, "Couldn't malloc sup_cap "
3862 "data\n");
3863 daprobedone(periph, start_ccb);
3864 break;
3865 }
3866
3867 retval = scsi_ata_read_log(&start_ccb->csio,
3868 /*retries*/ da_retry_count,
3869 /*cbfcnp*/ dadone_probeatasup,
3870 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3871 /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3872 /*page_number*/ ATA_IDL_SUP_CAP,
3873 /*block_count*/ 1,
3874 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3875 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3876 /*data_ptr*/ (uint8_t *)sup_cap,
3877 /*dxfer_len*/ sizeof(*sup_cap),
3878 /*sense_len*/ SSD_FULL_SIZE,
3879 /*timeout*/ da_default_timeout * 1000);
3880
3881 if (retval != 0) {
3882 xpt_print(periph->path, "scsi_ata_read_log() failed!");
3883 free(sup_cap, M_SCSIDA);
3884 daprobedone(periph, start_ccb);
3885 break;
3886 }
3887
3888 start_ccb->ccb_h.ccb_bp = NULL;
3889 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP;
3890 xpt_action(start_ccb);
3891 break;
3892 }
3893 case DA_STATE_PROBE_ATA_ZONE:
3894 {
3895 struct ata_zoned_info_log *ata_zone;
3896 int retval;
3897
3898 retval = 0;
3899
3900 /*
3901 * Check here to see whether the zoned device information
3902 * page is supported. If so, continue on to request it.
3903 * If not, skip to DA_STATE_PROBE_LOG or done.
3904 */
3905 if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) {
3906 daprobedone(periph, start_ccb);
3907 break;
3908 }
3909 ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA,
3910 M_NOWAIT|M_ZERO);
3911 if (ata_zone == NULL) {
3912 xpt_print(periph->path, "Couldn't malloc ata_zone "
3913 "data\n");
3914 daprobedone(periph, start_ccb);
3915 break;
3916 }
3917
3918 retval = scsi_ata_read_log(&start_ccb->csio,
3919 /*retries*/ da_retry_count,
3920 /*cbfcnp*/ dadone_probeatazone,
3921 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3922 /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3923 /*page_number*/ ATA_IDL_ZDI,
3924 /*block_count*/ 1,
3925 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3926 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3927 /*data_ptr*/ (uint8_t *)ata_zone,
3928 /*dxfer_len*/ sizeof(*ata_zone),
3929 /*sense_len*/ SSD_FULL_SIZE,
3930 /*timeout*/ da_default_timeout * 1000);
3931
3932 if (retval != 0) {
3933 xpt_print(periph->path, "scsi_ata_read_log() failed!");
3934 free(ata_zone, M_SCSIDA);
3935 daprobedone(periph, start_ccb);
3936 break;
3937 }
3938 start_ccb->ccb_h.ccb_bp = NULL;
3939 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE;
3940 xpt_action(start_ccb);
3941
3942 break;
3943 }
3944 case DA_STATE_PROBE_ZONE:
3945 {
3946 struct scsi_vpd_zoned_bdc *bdc;
3947
3948 /*
3949 * Note that this page will be supported for SCSI protocol
3950 * devices that support ZBC (SMR devices), as well as ATA
3951 * protocol devices that are behind a SAT (SCSI to ATA
3952 * Translation) layer that supports converting ZBC commands
3953 * to their ZAC equivalents.
3954 */
3955 if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) {
3956 daprobedone(periph, start_ccb);
3957 break;
3958 }
3959 bdc = (struct scsi_vpd_zoned_bdc *)
3960 malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);
3961
3962 if (bdc == NULL) {
3963 xpt_release_ccb(start_ccb);
3964 xpt_print(periph->path, "Couldn't malloc zone VPD "
3965 "data\n");
3966 break;
3967 }
3968 scsi_inquiry(&start_ccb->csio,
3969 /*retries*/da_retry_count,
3970 /*cbfcnp*/dadone_probezone,
3971 /*tag_action*/MSG_SIMPLE_Q_TAG,
3972 /*inq_buf*/(u_int8_t *)bdc,
3973 /*inq_len*/sizeof(*bdc),
3974 /*evpd*/TRUE,
3975 /*page_code*/SVPD_ZONED_BDC,
3976 /*sense_len*/SSD_FULL_SIZE,
3977 /*timeout*/da_default_timeout * 1000);
3978 start_ccb->ccb_h.ccb_bp = NULL;
3979 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE;
3980 xpt_action(start_ccb);
3981 break;
3982 }
3983 }
3984}
3985
3986/*
3987 * In each of the methods below, while it's the caller's
3988 * responsibility to ensure the request will fit into a
3989 * single device request, we might have changed the delete
3990 * method due to the device incorrectly advertising either
3991 * its supported methods or limits.
3992 *
3993 * To prevent this from causing further issues, we validate
3994 * the request against the method's limits and warn, which
3995 * would otherwise be unnecessary.
3996 */
3997static void
3998da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
3999{
4000 struct da_softc *softc = (struct da_softc *)periph->softc;
4001 struct bio *bp1;
4002 uint8_t *buf = softc->unmap_buf;
4003 struct scsi_unmap_desc *d = (void *)&buf[UNMAP_HEAD_SIZE];
4004 uint64_t lba, lastlba = (uint64_t)-1;
4005 uint64_t totalcount = 0;
4006 uint64_t count;
4007 uint32_t c, lastcount = 0, ranges = 0;
4008
4009 /*
4010 * Currently this doesn't take the UNMAP
4011 * Granularity and Granularity Alignment
4012 * fields into account.
4013 *
4014 * This could result in suboptimal unmap
4015 * requests as well as UNMAP calls unmapping
4016 * fewer LBAs than requested.
4017 */
4018
4019 bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
4020 bp1 = bp;
4021 do {
4022 /*
4023 * Note: ada and da are different in how they store the
4024 * pending bp's in a trim. ada stores all of them in the
4025 * trim_req.bps. da stores all but the first one in the
4026 * delete_run_queue. ada then completes all the bps in
4027 * its adadone() loop. da completes all the bps in the
4028 * delete_run_queue in dadone, and relies on the biodone
4029 * after to complete. This should be reconciled since there's
4030 * no real reason to do it differently. XXX
4031 */
4032 if (bp1 != bp)
4033 bioq_insert_tail(&softc->delete_run_queue, bp1);
4034 lba = bp1->bio_pblkno;
4035 count = bp1->bio_bcount / softc->params.secsize;
4036
4037 /* Try to extend the previous range. */
4038 if (lba == lastlba) {
4039 c = omin(count, UNMAP_RANGE_MAX - lastcount);
4040 lastlba += c;
4041 lastcount += c;
4042 scsi_ulto4b(lastcount, d[ranges - 1].length);
4043 count -= c;
4044 lba += c;
4045 totalcount += c;
4046 } else if ((softc->quirks & DA_Q_STRICT_UNMAP) &&
4047 softc->unmap_gran != 0) {
4048 /* Align length of the previous range. */
4049 if ((c = lastcount % softc->unmap_gran) != 0) {
4050 if (lastcount <= c) {
4051 totalcount -= lastcount;
4052 lastlba = (uint64_t)-1;
4053 lastcount = 0;
4054 ranges--;
4055 } else {
4056 totalcount -= c;
4057 lastlba -= c;
4058 lastcount -= c;
4059 scsi_ulto4b(lastcount,
4060 d[ranges - 1].length);
4061 }
4062 }
4063 /* Align beginning of the new range. */
4064 c = (lba - softc->unmap_gran_align) % softc->unmap_gran;
4065 if (c != 0) {
4066 c = softc->unmap_gran - c;
4067 if (count <= c) {
4068 count = 0;
4069 } else {
4070 lba += c;
4071 count -= c;
4072 }
4073 }
4074 }
4075
4076 while (count > 0) {
4077 c = omin(count, UNMAP_RANGE_MAX);
4078 if (totalcount + c > softc->unmap_max_lba ||
4079 ranges >= softc->unmap_max_ranges) {
4080 xpt_print(periph->path,
4081 "%s issuing short delete %ld > %ld "
4082 "|| %d >= %d\n",
4083 da_delete_method_desc[softc->delete_method],
4084 totalcount + c, softc->unmap_max_lba,
4085 ranges, softc->unmap_max_ranges);
4086 break;
4087 }
4088 scsi_u64to8b(lba, d[ranges].lba);
4089 scsi_ulto4b(c, d[ranges].length);
4090 lba += c;
4091 totalcount += c;
4092 ranges++;
4093 count -= c;
4094 lastlba = lba;
4095 lastcount = c;
4096 }
4097 bp1 = cam_iosched_next_trim(softc->cam_iosched);
4098 if (bp1 == NULL)
4099 break;
4100 if (ranges >= softc->unmap_max_ranges ||
4101 totalcount + bp1->bio_bcount /
4102 softc->params.secsize > softc->unmap_max_lba) {
4103 cam_iosched_put_back_trim(softc->cam_iosched, bp1);
4104 break;
4105 }
4106 } while (1);
4107
4108 /* Align length of the last range. */
4109 if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0 &&
4110 (c = lastcount % softc->unmap_gran) != 0) {
4111 if (lastcount <= c)
4112 ranges--;
4113 else
4114 scsi_ulto4b(lastcount - c, d[ranges - 1].length);
4115 }
4116
4117 scsi_ulto2b(ranges * 16 + 6, &buf[0]);
4118 scsi_ulto2b(ranges * 16, &buf[2]);
4119
4120	scsi_unmap(&ccb->csio,
4121		   /*retries*/da_retry_count,
4122 /*cbfcnp*/dadone,
4123 /*tag_action*/MSG_SIMPLE_Q_TAG,
4124 /*byte2*/0,
4125 /*data_ptr*/ buf,
4126 /*dxfer_len*/ ranges * 16 + 8,
4127 /*sense_len*/SSD_FULL_SIZE,
4128 da_default_timeout * 1000);
4129	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
4130	ccb->ccb_h.flags |= CAM_UNLOCKED;
4131	softc->trim_count++;
4132	softc->trim_ranges += ranges;
4133	softc->trim_lbas += totalcount;
4134	cam_iosched_submit_trim(softc->cam_iosched);
4135}
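/*
 * Editor's note (illustrative sketch, not part of the driver source): the
 * header stores above encode the standard SPC/SBC UNMAP parameter list that
 * da_delete_unmap() hands to the device.  For a hypothetical request that
 * collapsed into two descriptors (ranges == 2) the buffer looks like:
 *
 *   bytes 0-1    UNMAP data length             = ranges * 16 + 6 = 38
 *   bytes 2-3    UNMAP block descriptor length = ranges * 16     = 32
 *   bytes 4-7    reserved
 *   bytes 8-23   descriptor 0: 8-byte LBA, 4-byte block count, 4 reserved
 *   bytes 24-39  descriptor 1: same layout
 *
 * and dxfer_len = ranges * 16 + 8 = 40, i.e. the whole list including the
 * 8-byte header.
 */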
4136
4137static void
4138da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
4139{
4140 struct da_softc *softc = (struct da_softc *)periph->softc;
4141 struct bio *bp1;
4142 uint8_t *buf = softc->unmap_buf;
4143 uint64_t lastlba = (uint64_t)-1;
4144 uint64_t count;
4145 uint64_t lba;
4146 uint32_t lastcount = 0, c, requestcount;
4147 int ranges = 0, off, block_count;
4148
4149 bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
4150 bp1 = bp;
4151 do {
4152 if (bp1 != bp)//XXX imp XXX
4153 bioq_insert_tail(&softc->delete_run_queue, bp1);
4154 lba = bp1->bio_pblkno;
4155 count = bp1->bio_bcount / softc->params.secsize;
4156 requestcount = count;
4157
4158 /* Try to extend the previous range. */
4159 if (lba == lastlba) {
4160 c = omin(count, ATA_DSM_RANGE_MAX - lastcount);
4161 lastcount += c;
4162 off = (ranges - 1) * 8;
4163 buf[off + 6] = lastcount & 0xff;
4164 buf[off + 7] = (lastcount >> 8) & 0xff;
4165 count -= c;
4166 lba += c;
4167 }
4168
4169 while (count > 0) {
4170 c = omin(count, ATA_DSM_RANGE_MAX);
4171 off = ranges * 8;
4172
4173 buf[off + 0] = lba & 0xff;
4174 buf[off + 1] = (lba >> 8) & 0xff;
4175 buf[off + 2] = (lba >> 16) & 0xff;
4176 buf[off + 3] = (lba >> 24) & 0xff;
4177 buf[off + 4] = (lba >> 32) & 0xff;
4178 buf[off + 5] = (lba >> 40) & 0xff;
4179 buf[off + 6] = c & 0xff;
4180 buf[off + 7] = (c >> 8) & 0xff;
4181 lba += c;
4182 ranges++;
4183 count -= c;
4184 lastcount = c;
4185 if (count != 0 && ranges == softc->trim_max_ranges) {
4186 xpt_print(periph->path,
4187 "%s issuing short delete %ld > %ld\n",
4188 da_delete_method_desc[softc->delete_method],
4189 requestcount,
4190 (softc->trim_max_ranges - ranges) *
4191 ATA_DSM_RANGE_MAX);
4192 break;
4193 }
4194 }
4195 lastlba = lba;
4196 bp1 = cam_iosched_next_trim(softc->cam_iosched);
4197 if (bp1 == NULL)
4198 break;
4199 if (bp1->bio_bcount / softc->params.secsize >
4200 (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) {
4201			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
4202			break;
4203 }
4204 } while (1);
4205
4206 block_count = howmany(ranges, ATA_DSM_BLK_RANGES);
4207	scsi_ata_trim(&ccb->csio,
4208		      /*retries*/da_retry_count,
4209 /*cbfcnp*/dadone,
4210 /*tag_action*/MSG_SIMPLE_Q_TAG,
4211 block_count,
4212 /*data_ptr*/buf,
4213 /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE,
4214 /*sense_len*/SSD_FULL_SIZE,
4215 da_default_timeout * 1000);
4216	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
4217	ccb->ccb_h.flags |= CAM_UNLOCKED;
4218	cam_iosched_submit_trim(softc->cam_iosched);
4219}
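/*
 * Editor's note (illustrative sketch, not part of the driver source): the
 * byte stores in da_delete_trim() build ATA DATA SET MANAGEMENT (TRIM)
 * range entries, eight bytes each: a 48-bit LBA in bytes 0-5 (little
 * endian) and a 16-bit sector count in bytes 6-7, so one entry covers at
 * most ATA_DSM_RANGE_MAX sectors.  Entries are packed ATA_DSM_BLK_SIZE
 * (512) bytes per block; assuming the usual 64 entries per block, a
 * request that produced, say, 70 ranges would need
 * block_count = howmany(70, ATA_DSM_BLK_RANGES) = 2 and a 1024-byte
 * data transfer.
 */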
4220
4221/*
4222 * We calculate ws_max_blks here based off d_delmaxsize instead
4223 * of using softc->ws_max_blks as it is absolute max for the
4224 * device not the protocol max which may well be lower.
4225 */
4226static void
4227da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
4228{
4229 struct da_softc *softc;
4230 struct bio *bp1;
4231 uint64_t ws_max_blks;
4232 uint64_t lba;
4233 uint64_t count; /* forward compat with WS32 */
4234
4235 softc = (struct da_softc *)periph->softc;
4236 ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize;
4237 lba = bp->bio_pblkno;
4238 count = 0;
4239 bp1 = bp;
4240 do {
4241 if (bp1 != bp)//XXX imp XXX
4242 bioq_insert_tail(&softc->delete_run_queue, bp1);
4243 count += bp1->bio_bcount / softc->params.secsize;
4244 if (count > ws_max_blks) {
4245 xpt_print(periph->path,
4246 "%s issuing short delete %ld > %ld\n",
4247 da_delete_method_desc[softc->delete_method],
4248 count, ws_max_blks);
4249 count = omin(count, ws_max_blks);
4250 break;
4251 }
4252 bp1 = cam_iosched_next_trim(softc->cam_iosched);
4253 if (bp1 == NULL)
4254 break;
4255 if (lba + count != bp1->bio_pblkno ||
4256 count + bp1->bio_bcount /
4257 softc->params.secsize > ws_max_blks) {
4258			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
4259			break;
4260 }
4261 } while (1);
4262
4263	scsi_write_same(&ccb->csio,
4264			/*retries*/da_retry_count,
4265 /*cbfcnp*/dadone,
4266 /*tag_action*/MSG_SIMPLE_Q_TAG,
4267 /*byte2*/softc->delete_method ==
4268			    DA_DELETE_ZERO ? 0 : SWS_UNMAP,
4269			softc->delete_method == DA_DELETE_WS16 ? 16 : 10,
4270 /*lba*/lba,
4271 /*block_count*/count,
4272 /*data_ptr*/ __DECONST(void *, zero_region),
4273 /*dxfer_len*/ softc->params.secsize,
4274 /*sense_len*/SSD_FULL_SIZE,
4275 da_default_timeout * 1000);
4276 ccb->ccb_h.ccb_state = DA_CCB_DELETE;
4277	ccb->ccb_h.flags |= CAM_UNLOCKED;
4278	cam_iosched_submit_trim(softc->cam_iosched);
4279}
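/*
 * Editor's note (illustrative, not part of the driver source): unlike the
 * UNMAP and ATA TRIM paths, WRITE SAME transfers only a single sector of
 * zeroes (the kernel's zero_region) and lets the device replicate it
 * across block_count LBAs, which is why dxfer_len above is just
 * softc->params.secsize.  The UNMAP bit in byte2 is set for the WS10/WS16
 * delete methods and left clear for DA_DELETE_ZERO, where the intent
 * really is to write zeroes.
 */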
4280
4281static int
4282cmd6workaround(union ccb *ccb)
4283{
4284 struct scsi_rw_6 cmd6;
4285 struct scsi_rw_10 *cmd10;
4286 struct da_softc *softc;
4287 u_int8_t *cdb;
4288 struct bio *bp;
4289 int frozen;
4290
4291 cdb = ccb->csio.cdb_io.cdb_bytes;
4292 softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;
4293
4294 if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) {
4295 da_delete_methods old_method = softc->delete_method;
4296
4297 /*
4298		 * Typically there are two reasons for failure here:
4299		 * 1. The delete method was detected as supported but isn't.
4300		 * 2. The delete failed due to invalid params, e.g. too big.
4301		 *
4302		 * While we will attempt to choose an alternative delete method,
4303		 * this may result in short deletes if the existing delete
4304		 * requests from geom are too big for the new method chosen.
4305		 *
4306		 * This method assumes that the error which triggered this
4307		 * will not retry the I/O; otherwise a panic will occur.
4308 */
4309 dadeleteflag(softc, old_method, 0);
4310		dadeletemethodchoose(softc, DA_DELETE_DISABLE);
4311		if (softc->delete_method == DA_DELETE_DISABLE)
4312			xpt_print(ccb->ccb_h.path,
4313			    "%s failed, disabling BIO_DELETE\n",
4314 da_delete_method_desc[old_method]);
4315 else
4316			xpt_print(ccb->ccb_h.path,
4317			    "%s failed, switching to %s BIO_DELETE\n",
4318 da_delete_method_desc[old_method],
4319 da_delete_method_desc[softc->delete_method]);
4320
4321 while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL)
4322			cam_iosched_queue_work(softc->cam_iosched, bp);
4323		cam_iosched_queue_work(softc->cam_iosched,
4324		    (struct bio *)ccb->ccb_h.ccb_bp);
4325 ccb->ccb_h.ccb_bp = NULL;
4326 return (0);
4327 }
4328
4329 /* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */
4330 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
4331 (*cdb == PREVENT_ALLOW) &&
4332 (softc->quirks & DA_Q_NO_PREVENT) == 0) {
4333 if (bootverbose)
4334			xpt_print(ccb->ccb_h.path,
4335			    "PREVENT ALLOW MEDIUM REMOVAL not supported.\n");
4336 softc->quirks |= DA_Q_NO_PREVENT;
4337 return (0);
4338 }
4339
4340 /* Detect unsupported SYNCHRONIZE CACHE(10). */
4341 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
4342 (*cdb == SYNCHRONIZE_CACHE) &&
4343 (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
4344 if (bootverbose)
4345			xpt_print(ccb->ccb_h.path,
4346			    "SYNCHRONIZE CACHE(10) not supported.\n");
4347 softc->quirks |= DA_Q_NO_SYNC_CACHE;
4348 softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE;
4349 return (0);
4350 }
4351
4352 /* Translation only possible if CDB is an array and cmd is R/W6 */
4353 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
4354 (*cdb != READ_6 && *cdb != WRITE_6))
4355 return 0;
4356
4357 xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, "
4358 "increasing minimum_cmd_size to 10.\n");
4359 softc->minimum_cmd_size = 10;
4360
4361 bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
4362 cmd10 = (struct scsi_rw_10 *)cdb;
4363 cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10;
4364 cmd10->byte2 = 0;
4365 scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr);
4366 cmd10->reserved = 0;
4367 scsi_ulto2b(cmd6.length, cmd10->length);
4368 cmd10->control = cmd6.control;
4369 ccb->csio.cdb_len = sizeof(*cmd10);
4370
4371 /* Requeue request, unfreezing queue if necessary */
4372 frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
4373	ccb->ccb_h.status = CAM_REQUEUE_REQ;
4374	xpt_action(ccb);
4375 if (frozen) {
4376		cam_release_devq(ccb->ccb_h.path,
4377				 /*relsim_flags*/0,
4378 /*reduction*/0,
4379 /*timeout*/0,
4380 /*getcount_only*/0);
4381 }
4382 return (ERESTART);
4383}
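/*
 * Editor's note (illustrative, not part of the driver source): a worked
 * example of the READ(6) -> READ(10) rewrite above, using made-up values.
 * A READ(6) CDB for LBA 0x1234 and 8 blocks:
 *
 *   08 00 12 34 08 00
 *
 * is rewritten in place to the equivalent READ(10) CDB:
 *
 *   28 00 00 00 12 34 00 00 08 00
 *
 * i.e. the 21-bit LBA is widened to 4 bytes, the 8-bit transfer length to
 * 2 bytes, and the control byte is carried over; csio.cdb_len grows from
 * 6 to 10, the CCB is resubmitted via xpt_action(), and the caller sees
 * ERESTART.
 */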
4384
4385static void
4386dazonedone(struct cam_periph *periph, union ccb *ccb)
4387{
4388 struct da_softc *softc;
4389 struct bio *bp;
4390
4391 softc = periph->softc;
4392 bp = (struct bio *)ccb->ccb_h.ccb_bp;
4393
4394 switch (bp->bio_zone.zone_cmd) {
4395 case DISK_ZONE_OPEN:
4396 case DISK_ZONE_CLOSE:
4397 case DISK_ZONE_FINISH:
4398 case DISK_ZONE_RWP:
4399 break;
4400 case DISK_ZONE_REPORT_ZONES: {
4401 uint32_t avail_len;
4402 struct disk_zone_report *rep;
4403 struct scsi_report_zones_hdr *hdr;
4404 struct scsi_report_zones_desc *desc;
4405 struct disk_zone_rep_entry *entry;
4406 uint32_t hdr_len, num_avail;
4407 uint32_t num_to_fill, i;
4408 int ata;
4409
4410 rep = &bp->bio_zone.zone_params.report;
4411 avail_len = ccb->csio.dxfer_len - ccb->csio.resid;
4412 /*
4413 * Note that bio_resid isn't normally used for zone
4414 * commands, but it is used by devstat_end_transaction_bio()
4415 * to determine how much data was transferred. Because
4416 * the size of the SCSI/ATA data structures is different
4417 * than the size of the BIO interface structures, the
4418 * amount of data actually transferred from the drive will
4419 * be different than the amount of data transferred to
4420 * the user.
4421 */
4422 bp->bio_resid = ccb->csio.resid;
4423 hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr;
4424 if (avail_len < sizeof(*hdr)) {
4425 /*
4426 * Is there a better error than EIO here? We asked
4427 * for at least the header, and we got less than
4428 * that.
4429 */
4430 bp->bio_error = EIO;
4431 bp->bio_flags |= BIO_ERROR;
4432 bp->bio_resid = bp->bio_bcount;
4433 break;
4434 }
4435
4436 if (softc->zone_interface == DA_ZONE_IF_ATA_PASS)
4437 ata = 1;
4438 else
4439 ata = 0;
4440
4441 hdr_len = ata ? le32dec(hdr->length) :
4442 scsi_4btoul(hdr->length);
4443 if (hdr_len > 0)
4444 rep->entries_available = hdr_len / sizeof(*desc);
4445 else
4446 rep->entries_available = 0;
4447 /*
4448 * NOTE: using the same values for the BIO version of the
4449 * same field as the SCSI/ATA values. This means we could
4450 * get some additional values that aren't defined in bio.h
4451 * if more values of the same field are defined later.
4452 */
4453 rep->header.same = hdr->byte4 & SRZ_SAME_MASK;
4454		rep->header.maximum_lba = ata ?  le64dec(hdr->maximum_lba) :
4455		    scsi_8btou64(hdr->maximum_lba);
4456 /*
4457 * If the drive reports no entries that match the query,
4458 * we're done.
4459 */
4460 if (hdr_len == 0) {
4461 rep->entries_filled = 0;
4462 break;
4463 }
4464
4465 num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc),
4466 hdr_len / sizeof(*desc));
4467 /*
4468 * If the drive didn't return any data, then we're done.
4469 */
4470 if (num_avail == 0) {
4471 rep->entries_filled = 0;
4472 break;
4473 }
4474
4475 num_to_fill = min(num_avail, rep->entries_allocated);
4476 /*
4477 * If the user didn't allocate any entries for us to fill,
4478 * we're done.
4479 */
4480 if (num_to_fill == 0) {
4481 rep->entries_filled = 0;
4482 break;
4483 }
4484
4485 for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0];
4486 i < num_to_fill; i++, desc++, entry++) {
4487 /*
4488 * NOTE: we're mapping the values here directly
4489 * from the SCSI/ATA bit definitions to the bio.h
4490 * definitions. There is also a warning in
4491 * disk_zone.h, but the impact is that if
4492 * additional values are added in the SCSI/ATA
4493 * specs these will be visible to consumers of
4494 * this interface.
4495 */
4496 entry->zone_type = desc->zone_type & SRZ_TYPE_MASK;
4497 entry->zone_condition =
4498			    (desc->zone_flags & SRZ_ZONE_COND_MASK) >>
4499			    SRZ_ZONE_COND_SHIFT;
4500			entry->zone_flags |= desc->zone_flags &
4501			    (SRZ_ZONE_NON_SEQ | SRZ_ZONE_RESET);
4502			entry->zone_length =
4503			    ata ? le64dec(desc->zone_length) :
4504				  scsi_8btou64(desc->zone_length);
4505			entry->zone_start_lba =
4506			    ata ? le64dec(desc->zone_start_lba) :
4507				  scsi_8btou64(desc->zone_start_lba);
4508			entry->write_pointer_lba =
4509			    ata ? le64dec(desc->write_pointer_lba) :
4510				  scsi_8btou64(desc->write_pointer_lba);
4511		}
4512 rep->entries_filled = num_to_fill;
4513 break;
4514 }
4515 case DISK_ZONE_GET_PARAMS:
4516 default:
4517 /*
4518 * In theory we should not get a GET_PARAMS bio, since it
4519 * should be handled without queueing the command to the
4520 * drive.
4521 */
4522 panic("%s: Invalid zone command %d", __func__,
4523 bp->bio_zone.zone_cmd);
4524 break;
4525 }
4526
4527 if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)
4528 free(ccb->csio.data_ptr, M_SCSIDA);
4529}
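/*
 * Editor's note (illustrative, not part of the driver source): the REPORT
 * ZONES sizing above is plain header arithmetic.  In ZBC/ZAC both the
 * report header and each zone descriptor are 64 bytes, so for a
 * hypothetical 4160-byte transfer that completed with resid == 0 and a
 * drive-reported zone list length of 16384 bytes:
 *
 *   num_avail          = (4160 - 64) / 64 = 64 descriptors in the buffer
 *   entries_available  = 16384 / 64       = 256 on the drive
 *
 * and num_to_fill is then clamped to whatever the caller allocated in
 * rep->entries_allocated.
 */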
4530
4531static void
4532dadone(struct cam_periph *periph, union ccb *done_ccb)
4533{
4534 struct bio *bp, *bp1;
4535 struct da_softc *softc;
4536 struct ccb_scsiio *csio;
4537 da_ccb_state state;
4538
4539 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n"));
4540
4541 softc = (struct da_softc *)periph->softc;
4542 csio = &done_ccb->csio;
4543
4544#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
4545 if (csio->bio != NULL)
4546 biotrack(csio->bio, __func__);
4547#endif
4548 state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;
4549
4550 cam_periph_lock(periph);
4551 bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
4552 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
4553 int error;
4554 int sf;
4555
4556 if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
4557 sf = SF_RETRY_UA;
4558 else
4559 sf = 0;
4560
4561 error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
4562 if (error == ERESTART) {
4563 /* A retry was scheduled, so just return. */
4564 cam_periph_unlock(periph);
4565 return;
4566 }
4567 bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
4568 if (error != 0) {
4569 int queued_error;
4570
4571 /*
4572 * return all queued I/O with EIO, so that
4573 * the client can retry these I/Os in the
4574 * proper order should it attempt to recover.
4575 */
4576 queued_error = EIO;
4577
4578 if (error == ENXIO
4579 && (softc->flags & DA_FLAG_PACK_INVALID)== 0) {
4580 /*
4581 * Catastrophic error. Mark our pack as
4582 * invalid.
4583 *
4584 * XXX See if this is really a media
4585 * XXX change first?
4586 */
4587 xpt_print(periph->path, "Invalidating pack\n");
4588 softc->flags |= DA_FLAG_PACK_INVALID;
4589#ifdef CAM_IO_STATS
4590 softc->invalidations++;
4591#endif
4592 queued_error = ENXIO;
4593 }
4594 cam_iosched_flush(softc->cam_iosched, NULL,
4595 queued_error);
4596 if (bp != NULL) {
4597 bp->bio_error = error;
4598 bp->bio_resid = bp->bio_bcount;
4599 bp->bio_flags |= BIO_ERROR;
4600 }
4601 } else if (bp != NULL) {
4602 if (state == DA_CCB_DELETE)
4603 bp->bio_resid = 0;
4604 else
4605 bp->bio_resid = csio->resid;
4606 bp->bio_error = 0;
4607 if (bp->bio_resid != 0)
4608 bp->bio_flags |= BIO_ERROR;
4609 }
4610 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
4611 cam_release_devq(done_ccb->ccb_h.path,
4612 /*relsim_flags*/0,
4613 /*reduction*/0,
4614 /*timeout*/0,
4615 /*getcount_only*/0);
4616 } else if (bp != NULL) {
4617 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
4618 panic("REQ_CMP with QFRZN");
4619 if (bp->bio_cmd == BIO_ZONE)
4620 dazonedone(periph, done_ccb);
4621 else if (state == DA_CCB_DELETE)
4622 bp->bio_resid = 0;
4623 else
4624 bp->bio_resid = csio->resid;
4625 if ((csio->resid > 0) && (bp->bio_cmd != BIO_ZONE))
4626 bp->bio_flags |= BIO_ERROR;
4627 if (softc->error_inject != 0) {
4628 bp->bio_error = softc->error_inject;
4629 bp->bio_resid = bp->bio_bcount;
4630 bp->bio_flags |= BIO_ERROR;
4631 softc->error_inject = 0;
4632 }
4633 }
4634
4635 if (bp != NULL)
4636 biotrack(bp, __func__);
4637 LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
4638 if (LIST_EMPTY(&softc->pending_ccbs))
4639 softc->flags |= DA_FLAG_WAS_OTAG;
4640
4641 /*
4642 * We need to call cam_iosched before we call biodone so that we don't
4643 * measure any activity that happens in the completion routine, which in
4644 * the case of sendfile can be quite extensive. Release the periph
4645 * refcount taken in dastart() for each CCB.
4646 */
4647 cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb);
4648 xpt_release_ccb(done_ccb);
4649 KASSERT(softc->refcount >= 1, ("dadone softc %p refcount %d", softc, softc->refcount));
4650 softc->refcount--;
4651 if (state == DA_CCB_DELETE) {
4652 TAILQ_HEAD(, bio) queue;
4653
4654 TAILQ_INIT(&queue);
4655 TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue);
4656 softc->delete_run_queue.insert_point = NULL;
4657 /*
4658 * Normally, the xpt_release_ccb() above would make sure
4659 * that when we have more work to do, that work would
4660 * get kicked off. However, we specifically keep
4661 * delete_running set to 0 before the call above to
4662 * allow other I/O to progress when many BIO_DELETE
4663 * requests are pushed down. We set delete_running to 0
4664 * and call daschedule again so that we don't stall if
4665 * there are no other I/Os pending apart from BIO_DELETEs.
4666 */
4667		cam_iosched_trim_done(softc->cam_iosched);
4668		daschedule(periph);
4669 cam_periph_unlock(periph);
4670 while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
4671 TAILQ_REMOVE(&queue, bp1, bio_queue);
4672 bp1->bio_error = bp->bio_error;
4673 if (bp->bio_flags & BIO_ERROR) {
4674 bp1->bio_flags |= BIO_ERROR;
4675 bp1->bio_resid = bp1->bio_bcount;
4676 } else
4677 bp1->bio_resid = 0;
4678 biodone(bp1);
4679 }
4680 } else {
4681 daschedule(periph);
4682 cam_periph_unlock(periph);
4683 }
4684 if (bp != NULL)
4685 biodone(bp);
4686 return;
4687}
4688
4689static void
4690dadone_probewp(struct cam_periph *periph, union ccb *done_ccb)
4691{
4692 struct da_softc *softc;
4693 struct ccb_scsiio *csio;
4694 u_int32_t priority;
4695
4696 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probewp\n"));
4697
4698 softc = (struct da_softc *)periph->softc;
4699 priority = done_ccb->ccb_h.pinfo.priority;
4700 csio = &done_ccb->csio;
4701
4702 cam_periph_assert(periph, MA_OWNED);
4703
4704 KASSERT(softc->state == DA_STATE_PROBE_WP,
4705 ("State (%d) not PROBE_WP in dadone_probewp, periph %p ccb %p",
4706 softc->state, periph, done_ccb));
4707 KASSERT((csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) == DA_CCB_PROBE_WP,
4708 ("CCB State (%lu) not PROBE_WP in dadone_probewp, periph %p ccb %p",
4709 (unsigned long)csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK, periph,
4710 done_ccb));
4711
4712 if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) {
4713 int len, off;
4714 uint8_t dev_spec;
4715
4716 if (csio->cdb_len > 6) {
4717 struct scsi_mode_header_10 *mh =
4718 (struct scsi_mode_header_10 *)csio->data_ptr;
4719 len = 2 + scsi_2btoul(mh->data_length);
4720 off = sizeof(*mh) + scsi_2btoul(mh->blk_desc_len);
4721 dev_spec = mh->dev_spec;
4722 } else {
4723 struct scsi_mode_header_6 *mh =
4724 (struct scsi_mode_header_6 *)csio->data_ptr;
4725 len = 1 + mh->data_length;
4726 off = sizeof(*mh) + mh->blk_desc_len;
4727 dev_spec = mh->dev_spec;
4728 }
4729 if ((dev_spec & 0x80) != 0)
4730 softc->disk->d_flags |= DISKFLAG_WRITE_PROTECT;
4731 else
4732 softc->disk->d_flags &= ~DISKFLAG_WRITE_PROTECT;
4733
4734 /* Next time request only the first of returned mode pages. */
4735 if (off < len && off < csio->dxfer_len - csio->resid)
4736 softc->mode_page = csio->data_ptr[off] & SMPH_PC_MASK;
4737 } else {
4738 int error;
4739
4740		error = daerror(done_ccb, CAM_RETRY_SELTO,
4741		    SF_RETRY_UA|SF_NO_PRINT);
4742 if (error == ERESTART)
4743 return;
4744 else if (error != 0) {
4745 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4746 /* Don't wedge this device's queue */
4747 cam_release_devq(done_ccb->ccb_h.path,
4748 /*relsim_flags*/0,
4749 /*reduction*/0,
4750 /*timeout*/0,
4751 /*getcount_only*/0);
4752 }
4753
4754 /* We don't depend on it, so don't try again. */
4755 softc->mode_page = -1;
4756 }
4757 }
4758
4759 free(csio->data_ptr, M_SCSIDA);
4760 if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
4761 softc->state = DA_STATE_PROBE_RC16;
4762 else
4763 softc->state = DA_STATE_PROBE_RC;
4764 xpt_release_ccb(done_ccb);
4765 xpt_schedule(periph, priority);
4766 return;
4767}
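/*
 * Editor's note (illustrative, not part of the driver source): the write
 * protect probe above only looks at the WP bit (0x80) of the
 * device-specific parameter in the MODE SENSE header.  For a hypothetical
 * MODE SENSE(6) reply beginning 17 00 80 08, len works out to
 * 1 + 0x17 = 24 bytes, dev_spec is 0x80 (write protected), and
 * off = sizeof(header) + 8 = 12, which is where the first returned mode
 * page would start.
 */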
4768
4769static void
4770dadone_proberc(struct cam_periph *periph, union ccb *done_ccb)
4771{
4772 struct scsi_read_capacity_data *rdcap;
4773 struct scsi_read_capacity_data_long *rcaplong;
4774 struct da_softc *softc;
4775 struct ccb_scsiio *csio;
4776 da_ccb_state state;
4777 char *announce_buf;
4778 u_int32_t priority;
4779 int lbp, n;
4780
4781 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_proberc\n"));
4782
4783 softc = (struct da_softc *)periph->softc;
4784 priority = done_ccb->ccb_h.pinfo.priority;
4785 csio = &done_ccb->csio;
4786 state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;
4787
4788 KASSERT(softc->state == DA_STATE_PROBE_RC || softc->state == DA_STATE_PROBE_RC16,
4789 ("State (%d) not PROBE_RC* in dadone_proberc, periph %p ccb %p",
4790 softc->state, periph, done_ccb));
4791 KASSERT(state == DA_CCB_PROBE_RC || state == DA_CCB_PROBE_RC16,
4792 ("CCB State (%lu) not PROBE_RC* in dadone_probewp, periph %p ccb %p",
4793 (unsigned long)state, periph, done_ccb));
4794
4795 lbp = 0;
4796 rdcap = NULL;
4797 rcaplong = NULL;
4798 /* XXX TODO: can this be a malloc? */
4799 announce_buf = softc->announce_temp;
4800 bzero(announce_buf, DA_ANNOUNCETMP_SZ);
4801
4802 if (state == DA_CCB_PROBE_RC)
4803 rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
4804 else
4805 rcaplong = (struct scsi_read_capacity_data_long *)
4806 csio->data_ptr;
4807
4808 cam_periph_assert(periph, MA_OWNED);
4809
4810 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4811 struct disk_params *dp;
4812 uint32_t block_size;
4813 uint64_t maxsector;
4814 u_int lalba; /* Lowest aligned LBA. */
4815
4816 if (state == DA_CCB_PROBE_RC) {
4817 block_size = scsi_4btoul(rdcap->length);
4818 maxsector = scsi_4btoul(rdcap->addr);
4819 lalba = 0;
4820
4821 /*
4822 * According to SBC-2, if the standard 10
4823 * byte READ CAPACITY command returns 2^32,
4824 * we should issue the 16 byte version of
4825 * the command, since the device in question
4826 * has more sectors than can be represented
4827 * with the short version of the command.
4828 */
4829 if (maxsector == 0xffffffff) {
4830 free(rdcap, M_SCSIDA);
4831 softc->state = DA_STATE_PROBE_RC16;
4832 xpt_release_ccb(done_ccb);
4833 xpt_schedule(periph, priority);
4834 return;
4835 }
4836 } else {
4837 block_size = scsi_4btoul(rcaplong->length);
4838 maxsector = scsi_8btou64(rcaplong->addr);
4839 lalba = scsi_2btoul(rcaplong->lalba_lbp);
4840 }
4841
4842 /*
4843		 * Because the GEOM code will just panic if we
4844		 * give it an 'illegal' value, we'll avoid that
4845		 * here.
4846 */
4847 if (block_size == 0) {
4848 block_size = 512;
4849 if (maxsector == 0)
4850 maxsector = -1;
4851 }
4852 if (block_size >= maxphys) {
4853 xpt_print(periph->path,
4854 "unsupportable block size %ju\n",
4855 (uintmax_t) block_size);
4856 announce_buf = NULL;
4857 cam_periph_invalidate(periph);
4858 } else {
4859 /*
4860 * We pass rcaplong into dasetgeom(),
4861 * because it will only use it if it is
4862 * non-NULL.
4863 */
4864 dasetgeom(periph, block_size, maxsector,
4865 rcaplong, sizeof(*rcaplong));
4866 lbp = (lalba & SRC16_LBPME_A);
4867 dp = &softc->params;
4868 n = snprintf(announce_buf, DA_ANNOUNCETMP_SZ,
4869 "%juMB (%ju %u byte sectors",
4870 ((uintmax_t)dp->secsize * dp->sectors) /
4871 (1024 * 1024),
4872 (uintmax_t)dp->sectors, dp->secsize);
4873 if (softc->p_type != 0) {
4874 n += snprintf(announce_buf + n,
4875				    DA_ANNOUNCETMP_SZ - n,
4876				    ", DIF type %d", softc->p_type);
4877 }
4878 snprintf(announce_buf + n, DA_ANNOUNCETMP_SZ - n, ")");
4879 }
4880 } else {
4881 int error;
4882
4883 /*
4884 * Retry any UNIT ATTENTION type errors. They
4885 * are expected at boot.
4886 */
4887		error = daerror(done_ccb, CAM_RETRY_SELTO,
4888		    SF_RETRY_UA|SF_NO_PRINT);
4889 if (error == ERESTART) {
4890 /*
4891			 * A retry was scheduled, so
4892 * just return.
4893 */
4894 return;
4895 } else if (error != 0) {
4896 int asc, ascq;
4897 int sense_key, error_code;
4898 int have_sense;
4899 cam_status status;
4900 struct ccb_getdev cgd;
4901
4902 /* Don't wedge this device's queue */
4903 status = done_ccb->ccb_h.status;
4904 if ((status & CAM_DEV_QFRZN) != 0)
4905 cam_release_devq(done_ccb->ccb_h.path,
4906 /*relsim_flags*/0,
4907 /*reduction*/0,
4908 /*timeout*/0,
4909 /*getcount_only*/0);
4910
4911 memset(&cgd, 0, sizeof(cgd));
4912 xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
4913			    CAM_PRIORITY_NORMAL);
4914			cgd.ccb_h.func_code = XPT_GDEV_TYPE;
4915			xpt_action((union ccb *)&cgd);
4916
4917 if (scsi_extract_sense_ccb(done_ccb,
4918 &error_code, &sense_key, &asc, &ascq))
4919 have_sense = TRUE;
4920 else
4921 have_sense = FALSE;
4922
4923 /*
4924 * If we tried READ CAPACITY(16) and failed,
4925 * fallback to READ CAPACITY(10).
4926 */
4927 if ((state == DA_CCB_PROBE_RC16) &&
4928 (softc->flags & DA_FLAG_CAN_RC16) &&
4929 (((csio->ccb_h.status & CAM_STATUS_MASK) ==
4930 CAM_REQ_INVALID) ||
4931 ((have_sense) &&
4932 (error_code == SSD_CURRENT_ERROR ||
4933 error_code == SSD_DESC_CURRENT_ERROR) &&
4934 (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) {
4935 cam_periph_assert(periph, MA_OWNED);
4936 softc->flags &= ~DA_FLAG_CAN_RC16;
4937 free(rdcap, M_SCSIDA);
4938 softc->state = DA_STATE_PROBE_RC;
4939 xpt_release_ccb(done_ccb);
4940 xpt_schedule(periph, priority);
4941 return;
4942 }
4943
4944 /*
4945 * Attach to anything that claims to be a
4946 * direct access or optical disk device,
4947 * as long as it doesn't return a "Logical
4948 * unit not supported" (0x25) error.
4949 * "Internal Target Failure" (0x44) is also
4950 * special and typically means that the
4951 * device is a SATA drive behind a SATL
4952 * translation that's fallen into a
4953 * terminally fatal state.
4954 */
4955 if ((have_sense)
4956 && (asc != 0x25) && (asc != 0x44)
4957 && (error_code == SSD_CURRENT_ERROR
4958 || error_code == SSD_DESC_CURRENT_ERROR)) {
4959 const char *sense_key_desc;
4960 const char *asc_desc;
4961
4962 dasetgeom(periph, 512, -1, NULL, 0);
4963 scsi_sense_desc(sense_key, asc, ascq,
4964 &cgd.inq_data, &sense_key_desc,
4965 &asc_desc);
4966 snprintf(announce_buf, DA_ANNOUNCETMP_SZ,
4967 "Attempt to query device "
4968 "size failed: %s, %s",
4969 sense_key_desc, asc_desc);
4970 } else {
4971 if (have_sense)
4972 scsi_sense_print(&done_ccb->csio);
4973 else {
4974 xpt_print(periph->path,
4975 "got CAM status %#x\n",
4976 done_ccb->ccb_h.status);
4977 }
4978
4979 xpt_print(periph->path, "fatal error, "
4980 "failed to attach to device\n");
4981
4982 announce_buf = NULL;
4983
4984 /*
4985 * Free up resources.
4986 */
4987 cam_periph_invalidate(periph);
4988 }
4989 }
4990 }
4991 free(csio->data_ptr, M_SCSIDA);
4992 if (announce_buf != NULL &&
4993 ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) {
4994 struct sbuf sb;
4995
4996 sbuf_new(&sb, softc->announcebuf, DA_ANNOUNCE_SZ,
4997 SBUF_FIXEDLEN);
4998 xpt_announce_periph_sbuf(periph, &sb, announce_buf);
4999 xpt_announce_quirks_sbuf(periph, &sb, softc->quirks,
5000		    DA_Q_BIT_STRING);
5001		sbuf_finish(&sb);
5002 sbuf_putbuf(&sb);
5003
5004 /*
5005 * Create our sysctl variables, now that we know
5006 * we have successfully attached.
5007 */
5008 /* increase the refcount */
5009 if (da_periph_acquire(periph, DA_REF_SYSCTL) == 0) {
5010 taskqueue_enqueue(taskqueue_thread,
5011 &softc->sysctl_task);
5012 } else {
5013 /* XXX This message is useless! */
5014 xpt_print(periph->path, "fatal error, "
5015 "could not acquire reference count\n");
5016 }
5017 }
5018
5019 /* We already probed the device. */
5020 if (softc->flags & DA_FLAG_PROBED) {
5021 daprobedone(periph, done_ccb);
5022 return;
5023 }
5024
5025 /* Ensure re-probe doesn't see old delete. */
5026 softc->delete_available = 0;
5027 dadeleteflag(softc, DA_DELETE_ZERO, 1);
5028 if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) {
5029 /*
5030		 * Based on older SBC-3 spec revisions,
5031		 * any of the UNMAP methods "may" be
5032		 * available via LBP given this flag, so
5033 * we flag all of them as available and
5034 * then remove those which further
5035 * probes confirm aren't available
5036 * later.
5037 *
5038 * We could also check readcap(16) p_type
5039 * flag to exclude one or more invalid
5040 * write same (X) types here
5041 */
5042 dadeleteflag(softc, DA_DELETE_WS16, 1);
5043 dadeleteflag(softc, DA_DELETE_WS10, 1);
5044 dadeleteflag(softc, DA_DELETE_UNMAP, 1);
5045
5046 softc->state = DA_STATE_PROBE_LBP;
5047 xpt_release_ccb(done_ccb);
5048 xpt_schedule(periph, priority);
5049 return;
5050 }
5051
5052 softc->state = DA_STATE_PROBE_BDC;
5053 xpt_release_ccb(done_ccb);
5054 xpt_schedule(periph, priority);
5055 return;
5056}
5057
5058static void
5059dadone_probelbp(struct cam_periph *periph, union ccb *done_ccb)
5060{
5061 struct scsi_vpd_logical_block_prov *lbp;
5062 struct da_softc *softc;
5063 struct ccb_scsiio *csio;
5064 u_int32_t priority;
5065
5066 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probelbp\n"));
5067
5068 softc = (struct da_softc *)periph->softc;
5069 priority = done_ccb->ccb_h.pinfo.priority;
5070 csio = &done_ccb->csio;
5071 lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr;
5072
5073 cam_periph_assert(periph, MA_OWNED);
5074
5075 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5076 /*
5077 * T10/1799-D Revision 31 states at least one of these
5078 * must be supported but we don't currently enforce this.
5079 */
5080		dadeleteflag(softc, DA_DELETE_WS16,
5081		    (lbp->flags & SVPD_LBP_WS16));
5082		dadeleteflag(softc, DA_DELETE_WS10,
5083		    (lbp->flags & SVPD_LBP_WS10));
5084		dadeleteflag(softc, DA_DELETE_UNMAP,
5085		    (lbp->flags & SVPD_LBP_UNMAP));
5086 } else {
5087 int error;
5088		error = daerror(done_ccb, CAM_RETRY_SELTO,
5089		    SF_RETRY_UA|SF_NO_PRINT);
5090 if (error == ERESTART)
5091 return;
5092 else if (error != 0) {
5093 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5094 /* Don't wedge this device's queue */
5095 cam_release_devq(done_ccb->ccb_h.path,
5096 /*relsim_flags*/0,
5097 /*reduction*/0,
5098 /*timeout*/0,
5099 /*getcount_only*/0);
5100 }
5101
5102 /*
5103 * Failure indicates we don't support any SBC-3
5104 * delete methods with UNMAP
5105 */
5106 }
5107 }
5108
5109 free(lbp, M_SCSIDA);
5110 softc->state = DA_STATE_PROBE_BLK_LIMITS;
5111 xpt_release_ccb(done_ccb);
5112 xpt_schedule(periph, priority);
5113 return;
5114}
5115
5116static void
5117dadone_probeblklimits(struct cam_periph *periph, union ccb *done_ccb)
5118{
5119 struct scsi_vpd_block_limits *block_limits;
5120 struct da_softc *softc;
5121 struct ccb_scsiio *csio;
5122 u_int32_t priority;
5123
5124 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeblklimits\n"));
5125
5126 softc = (struct da_softc *)periph->softc;
5127 priority = done_ccb->ccb_h.pinfo.priority;
5128 csio = &done_ccb->csio;
5129 block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr;
5130
5131 cam_periph_assert(periph, MA_OWNED);
5132
5133 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5134 uint32_t max_txfer_len = scsi_4btoul(
5135 block_limits->max_txfer_len);
5136 uint32_t max_unmap_lba_cnt = scsi_4btoul(
5137 block_limits->max_unmap_lba_cnt);
5138 uint32_t max_unmap_blk_cnt = scsi_4btoul(
5139 block_limits->max_unmap_blk_cnt);
5140 uint32_t unmap_gran = scsi_4btoul(
5141 block_limits->opt_unmap_grain);
5142 uint32_t unmap_gran_align = scsi_4btoul(
5143 block_limits->unmap_grain_align);
5144 uint64_t ws_max_blks = scsi_8btou64(
5145 block_limits->max_write_same_length);
5146
5147 if (max_txfer_len != 0) {
5148 softc->disk->d_maxsize = MIN(softc->maxio,
5149 (off_t)max_txfer_len * softc->params.secsize);
5150 }
5151
5152 /*
5153 * We should already support UNMAP but we check lba
5154 * and block count to be sure
5155 */
5156 if (max_unmap_lba_cnt != 0x00L &&
5157 max_unmap_blk_cnt != 0x00L) {
5158 softc->unmap_max_lba = max_unmap_lba_cnt;
5159 softc->unmap_max_ranges = min(max_unmap_blk_cnt,
5160			    UNMAP_MAX_RANGES);
5161			if (unmap_gran > 1) {
5162 softc->unmap_gran = unmap_gran;
5163 if (unmap_gran_align & 0x80000000) {
5164 softc->unmap_gran_align =
5165 unmap_gran_align & 0x7fffffff;
5166 }
5167 }
5168 } else {
5169 /*
5170			 * Unexpected UNMAP limits, which means the
5171 * device doesn't actually support UNMAP
5172 */
5173 dadeleteflag(softc, DA_DELETE_UNMAP, 0);
5174 }
5175
5176 if (ws_max_blks != 0x00L)
5177 softc->ws_max_blks = ws_max_blks;
5178 } else {
5179 int error;
5180		error = daerror(done_ccb, CAM_RETRY_SELTO,
5181		    SF_RETRY_UA|SF_NO_PRINT);
5182 if (error == ERESTART)
5183 return;
5184 else if (error != 0) {
5185 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5186 /* Don't wedge this device's queue */
5187 cam_release_devq(done_ccb->ccb_h.path,
5188 /*relsim_flags*/0,
5189 /*reduction*/0,
5190 /*timeout*/0,
5191 /*getcount_only*/0);
5192 }
5193
5194 /*
5195 * Failure here doesn't mean UNMAP is not
5196 * supported as this is an optional page.
5197 */
5198 softc->unmap_max_lba = 1;
5199 softc->unmap_max_ranges = 1;
5200 }
5201 }
5202
5203 free(block_limits, M_SCSIDA);
5204 softc->state = DA_STATE_PROBE_BDC;
5205 xpt_release_ccb(done_ccb);
5206 xpt_schedule(periph, priority);
5207 return;
5208}
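/*
 * Editor's note (illustrative, not part of the driver source): the Block
 * Limits VPD values above translate directly into the delete and I/O size
 * limits used elsewhere in this file.  For a hypothetical device reporting
 * a MAXIMUM TRANSFER LENGTH of 65535 blocks with 512-byte sectors,
 * d_maxsize is clamped to min(softc->maxio, 65535 * 512), i.e. about
 * 32 MiB; the maximum unmap LBA and descriptor counts bound how much
 * da_delete_unmap() packs into a single UNMAP, and MAXIMUM WRITE SAME
 * LENGTH bounds da_delete_ws().
 */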
5209
5210static void
5211dadone_probebdc(struct cam_periph *periph, union ccb *done_ccb)
5212{
5213	struct scsi_vpd_block_device_characteristics *bdc;
5214	struct da_softc *softc;
5215 struct ccb_scsiio *csio;
5216 u_int32_t priority;
5217
5218 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probebdc\n"));
5219
5220 softc = (struct da_softc *)periph->softc;
5221 priority = done_ccb->ccb_h.pinfo.priority;
5222 csio = &done_ccb->csio;
5223 bdc = (struct scsi_vpd_block_device_characteristics *)csio->data_ptr;
5224
5225 cam_periph_assert(periph, MA_OWNED);
5226
5227 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5228 uint32_t valid_len;
5229
5230 /*
5231 * Disable queue sorting for non-rotational media
5232 * by default.
5233 */
5234 u_int16_t old_rate = softc->disk->d_rotation_rate;
5235
5236 valid_len = csio->dxfer_len - csio->resid;
5237 if (SBDC_IS_PRESENT(bdc, valid_len,
5238 medium_rotation_rate)) {
5239			softc->disk->d_rotation_rate =
5240				scsi_2btoul(bdc->medium_rotation_rate);
5241			if (softc->disk->d_rotation_rate == SVPD_NON_ROTATING) {
5242				cam_iosched_set_sort_queue(
5243				    softc->cam_iosched, 0);
5244 softc->flags &= ~DA_FLAG_ROTATING;
5245 }
5246 if (softc->disk->d_rotation_rate != old_rate) {
5247 disk_attr_changed(softc->disk,
5248 "GEOM::rotation_rate", M_NOWAIT);
5249 }
5250 }
5251 if ((SBDC_IS_PRESENT(bdc, valid_len, flags))
5252 && (softc->zone_mode == DA_ZONE_NONE)) {
5253 int ata_proto;
5254
5255 if (scsi_vpd_supported_page(periph,
5256			    SVPD_ATA_INFORMATION))
5257				ata_proto = 1;
5258 else
5259 ata_proto = 0;
5260
5261 /*
5262 * The Zoned field will only be set for
5263 * Drive Managed and Host Aware drives. If
5264 * they are Host Managed, the device type
5265 * in the standard INQUIRY data should be
5266 * set to T_ZBC_HM (0x14).
5267 */
5268 if ((bdc->flags & SVPD_ZBC_MASK) ==
5269 SVPD_HAW_ZBC) {
5270 softc->zone_mode = DA_ZONE_HOST_AWARE;
5271 softc->zone_interface = (ata_proto) ?
5272				    DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
5273			} else if ((bdc->flags & SVPD_ZBC_MASK) ==
5274 SVPD_DM_ZBC) {
5275 softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
5276 softc->zone_interface = (ata_proto) ?
5277				    DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
5278			} else if ((bdc->flags & SVPD_ZBC_MASK) !=
5279 SVPD_ZBC_NR) {
5280 xpt_print(periph->path, "Unknown zoned "
5281 "type %#x",
5282 bdc->flags & SVPD_ZBC_MASK);
5283 }
5284 }
5285 } else {
5286 int error;
5287		error = daerror(done_ccb, CAM_RETRY_SELTO,
5288		    SF_RETRY_UA|SF_NO_PRINT);
5289 if (error == ERESTART)
5290 return;
5291 else if (error != 0) {
5292 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5293 /* Don't wedge this device's queue */
5294 cam_release_devq(done_ccb->ccb_h.path,
5295 /*relsim_flags*/0,
5296 /*reduction*/0,
5297 /*timeout*/0,
5298 /*getcount_only*/0);
5299 }
5300 }
5301 }
5302
5303 free(bdc, M_SCSIDA);
5304 softc->state = DA_STATE_PROBE_ATA;
5305 xpt_release_ccb(done_ccb);
5306 xpt_schedule(periph, priority);
5307 return;
5308}
5309
5310static void
5311dadone_probeata(struct cam_periph *periph, union ccb *done_ccb)
5312{
5313 struct ata_params *ata_params;
5314 struct ccb_scsiio *csio;
5315 struct da_softc *softc;
5316 u_int32_t priority;
5317 int continue_probe;
5318 int error;
5319
5320 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeata\n"));
5321
5322 softc = (struct da_softc *)periph->softc;
5323 priority = done_ccb->ccb_h.pinfo.priority;
5324 csio = &done_ccb->csio;
5325 ata_params = (struct ata_params *)csio->data_ptr;
5326 continue_probe = 0;
5327 error = 0;
5328
5329 cam_periph_assert(periph, MA_OWNED);
5330
5331 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5332 uint16_t old_rate;
5333
5334 ata_param_fixup(ata_params);
5335 if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM &&
5336 (softc->quirks & DA_Q_NO_UNMAP) == 0) {
5337			dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1);
5338			if (ata_params->max_dsm_blocks != 0)
5339 softc->trim_max_ranges = min(
5340 softc->trim_max_ranges,
5341 ata_params->max_dsm_blocks *
5342 ATA_DSM_BLK_RANGES);
5343 }
5344 /*
5345 * Disable queue sorting for non-rotational media
5346 * by default.
5347 */
5348 old_rate = softc->disk->d_rotation_rate;
5349 softc->disk->d_rotation_rate = ata_params->media_rotation_rate;
5350 if (softc->disk->d_rotation_rate == ATA_RATE_NON_ROTATING) {
5351			cam_iosched_set_sort_queue(softc->cam_iosched, 0);
5352			softc->flags &= ~DA_FLAG_ROTATING;
5353 }
5354 if (softc->disk->d_rotation_rate != old_rate) {
5355 disk_attr_changed(softc->disk,
5356 "GEOM::rotation_rate", M_NOWAIT);
5357 }
5358
5359 cam_periph_assert(periph, MA_OWNED);
5360 if (ata_params->capabilities1 & ATA_SUPPORT_DMA)
5361 softc->flags |= DA_FLAG_CAN_ATA_DMA;
5362
5363 if (ata_params->support.extension & ATA_SUPPORT_GENLOG)
5364 softc->flags |= DA_FLAG_CAN_ATA_LOG;
5365
5366 /*
5367 * At this point, if we have a SATA host aware drive,
5368 * we communicate via ATA passthrough unless the
5369 * SAT layer supports ZBC -> ZAC translation. In
5370 * that case,
5371		 * that case, we prefer the SCSI ZBC interface.
5372 * XXX KDM figure out how to detect a host managed
5373 * SATA drive.
5374 */
5375 if (softc->zone_mode == DA_ZONE_NONE) {
5376 /*
5377 * Note that we don't override the zone
5378 * mode or interface if it has already been
5379 * set. This is because it has either been
5380 * set as a quirk, or when we probed the
5381 * SCSI Block Device Characteristics page,
5382 * the zoned field was set. The latter
5383 * means that the SAT layer supports ZBC to
5384 * ZAC translation, and we would prefer to
5385 * use that if it is available.
5386 */
5387 if ((ata_params->support3 &
5388 ATA_SUPPORT_ZONE_MASK) ==
5389 ATA_SUPPORT_ZONE_HOST_AWARE) {
5390 softc->zone_mode = DA_ZONE_HOST_AWARE;
5391 softc->zone_interface =
5392				    DA_ZONE_IF_ATA_PASS;
5393			} else if ((ata_params->support3 &
5394 ATA_SUPPORT_ZONE_MASK) ==
5395 ATA_SUPPORT_ZONE_DEV_MANAGED) {
5396 softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
5397 softc->zone_interface = DA_ZONE_IF_ATA_PASS;
5398 }
5399 }
5400
5401 } else {
5402		error = daerror(done_ccb, CAM_RETRY_SELTO,
5403		    SF_RETRY_UA|SF_NO_PRINT);
5404 if (error == ERESTART)
5405 return;
5406 else if (error != 0) {
5407 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5408 /* Don't wedge this device's queue */
5409 cam_release_devq(done_ccb->ccb_h.path,
5410 /*relsim_flags*/0,
5411 /*reduction*/0,
5412 /*timeout*/0,
5413 /*getcount_only*/0);
5414 }
5415 }
5416 }
5417
5418 if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
5419 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
5420 /*
5421 * If the ATA IDENTIFY failed, we could be talking
5422 * to a SCSI drive, although that seems unlikely,
5423 * since the drive did report that it supported the
5424 * ATA Information VPD page. If the ATA IDENTIFY
5425 * succeeded, and the SAT layer doesn't support
5426 * ZBC -> ZAC translation, continue on to get the
5427 * directory of ATA logs, and complete the rest of
5428 * the ZAC probe. If the SAT layer does support
5429 * ZBC -> ZAC translation, we want to use that,
5430 * and we'll probe the SCSI Zoned Block Device
5431 * Characteristics VPD page next.
5432 */
5433 if ((error == 0)
5434 && (softc->flags & DA_FLAG_CAN_ATA_LOG)
5435 && (softc->zone_interface == DA_ZONE_IF_ATA_PASS))
5436 softc->state = DA_STATE_PROBE_ATA_LOGDIR;
5437 else
5438 softc->state = DA_STATE_PROBE_ZONE;
5439 continue_probe = 1;
5440 }
5441 if (continue_probe != 0) {
5442 xpt_schedule(periph, priority);
5443 xpt_release_ccb(done_ccb);
5444 return;
5445 } else
5446 daprobedone(periph, done_ccb);
5447 return;
5448}
5449
5450static void
5451dadone_probeatalogdir(struct cam_periph *periph, union ccb *done_ccb)
5452{
5453 struct da_softc *softc;
5454 struct ccb_scsiio *csio;
5455 u_int32_t priority;
5456 int error;
5457
5458 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatalogdir\n"));
5459
5460 softc = (struct da_softc *)periph->softc;
5461 priority = done_ccb->ccb_h.pinfo.priority;
5462 csio = &done_ccb->csio;
5463
5464 cam_periph_assert(periph, MA_OWNED);
5465 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5466 error = 0;
5467 softc->valid_logdir_len = 0;
5468 bzero(&softc->ata_logdir, sizeof(softc->ata_logdir));
5469 softc->valid_logdir_len = csio->dxfer_len - csio->resid;
5470 if (softc->valid_logdir_len > 0)
5471 bcopy(csio->data_ptr, &softc->ata_logdir,
5472 min(softc->valid_logdir_len,
5473 sizeof(softc->ata_logdir)));
5474 /*
5475 * Figure out whether the Identify Device log is
5476 * supported. The General Purpose log directory
5477 * has a header, and lists the number of pages
5478 * available for each GP log identified by the
5479 * offset into the list.
5480 */
5481 if ((softc->valid_logdir_len >=
5482 ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t)))
5483 && (le16dec(softc->ata_logdir.header) ==
5484 ATA_GP_LOG_DIR_VERSION)
5485 && (le16dec(&softc->ata_logdir.num_pages[
5486 (ATA_IDENTIFY_DATA_LOG *
5487 sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){
5488 softc->flags |= DA_FLAG_CAN_ATA_IDLOG;
5489 } else {
5490 softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
5491 }
5492 } else {
5493		error = daerror(done_ccb, CAM_RETRY_SELTO,
5494		    SF_RETRY_UA|SF_NO_PRINT);
5495 if (error == ERESTART)
5496 return;
5497 else if (error != 0) {
5498 /*
5499 * If we can't get the ATA log directory,
5500 * then ATA logs are effectively not
5501 * supported even if the bit is set in the
5502 * identify data.
5503 */
5504 softc->flags &= ~(DA_FLAG_CAN_ATA_LOG |
5505			    DA_FLAG_CAN_ATA_IDLOG);
5506			if ((done_ccb->ccb_h.status &
5507 CAM_DEV_QFRZN) != 0) {
5508 /* Don't wedge this device's queue */
5509 cam_release_devq(done_ccb->ccb_h.path,
5510 /*relsim_flags*/0,
5511 /*reduction*/0,
5512 /*timeout*/0,
5513 /*getcount_only*/0);
5514 }
5515 }
5516 }
5517
5518 free(csio->data_ptr, M_SCSIDA);
5519
5520 if ((error == 0)
5521 && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) {
5522 softc->state = DA_STATE_PROBE_ATA_IDDIR;
5523 xpt_release_ccb(done_ccb);
5524 xpt_schedule(periph, priority);
5525 return;
5526 }
5527 daprobedone(periph, done_ccb);
5528 return;
5529}
5530
5531static void
5532dadone_probeataiddir(struct cam_periph *periph, union ccb *done_ccb)
5533{
5534 struct da_softc *softc;
5535 struct ccb_scsiio *csio;
5536 u_int32_t priority;
5537 int error;
5538
5539 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeataiddir\n"));
5540
5541 softc = (struct da_softc *)periph->softc;
5542 priority = done_ccb->ccb_h.pinfo.priority;
5543 csio = &done_ccb->csio;
5544
5545 cam_periph_assert(periph, MA_OWNED);
5546
5547 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5548 off_t entries_offset, max_entries;
5549 error = 0;
5550
5551 softc->valid_iddir_len = 0;
5552 bzero(&softc->ata_iddir, sizeof(softc->ata_iddir));
5553 softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP |
5554		    DA_FLAG_CAN_ATA_ZONE);
5555		softc->valid_iddir_len = csio->dxfer_len - csio->resid;
5556 if (softc->valid_iddir_len > 0)
5557 bcopy(csio->data_ptr, &softc->ata_iddir,
5558 min(softc->valid_iddir_len,
5559 sizeof(softc->ata_iddir)));
5560
5561 entries_offset =
5562 __offsetof(struct ata_identify_log_pages,entries);
5563 max_entries = softc->valid_iddir_len - entries_offset;
5564 if ((softc->valid_iddir_len > (entries_offset + 1))
5565 && (le64dec(softc->ata_iddir.header) == ATA_IDLOG_REVISION)
5566 && (softc->ata_iddir.entry_count > 0)) {
5567 int num_entries, i;
5568
5569 num_entries = softc->ata_iddir.entry_count;
5570 num_entries = min(num_entries,
5571 softc->valid_iddir_len - entries_offset);
5572 for (i = 0; i < num_entries && i < max_entries; i++) {
5573 if (softc->ata_iddir.entries[i] ==
5574 ATA_IDL_SUP_CAP)
5575 softc->flags |= DA_FLAG_CAN_ATA_SUPCAP;
5576 else if (softc->ata_iddir.entries[i] ==
5577 ATA_IDL_ZDI)
5578 softc->flags |= DA_FLAG_CAN_ATA_ZONE;
5579
5580 if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP)
5581 && (softc->flags & DA_FLAG_CAN_ATA_ZONE))
5582 break;
5583 }
5584 }
5585 } else {
5586		error = daerror(done_ccb, CAM_RETRY_SELTO,
5587		    SF_RETRY_UA|SF_NO_PRINT);
5588 if (error == ERESTART)
5589 return;
5590 else if (error != 0) {
5591 /*
5592 * If we can't get the ATA Identify Data log
5593 * directory, then it effectively isn't
5594 * supported even if the ATA Log directory
5595			 * has a non-zero number of pages present for
5596 * this log.
5597 */
5598 softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
5599 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5600 /* Don't wedge this device's queue */
5601 cam_release_devq(done_ccb->ccb_h.path,
5602 /*relsim_flags*/0,
5603 /*reduction*/0,
5604 /*timeout*/0,
5605 /*getcount_only*/0);
5606 }
5607 }
5608 }
5609
5610 free(csio->data_ptr, M_SCSIDA);
5611
5612 if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) {
5613 softc->state = DA_STATE_PROBE_ATA_SUP;
5614 xpt_release_ccb(done_ccb);
5615 xpt_schedule(periph, priority);
5616 return;
5617 }
5618 daprobedone(periph, done_ccb);
5619 return;
5620}
5621
5622static void
5623dadone_probeatasup(struct cam_periph *periph, union ccb *done_ccb)
5624{
5625 struct da_softc *softc;
5626 struct ccb_scsiio *csio;
5627 u_int32_t priority;
5628 int error;
5629
5630 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatasup\n"));
5631
5632 softc = (struct da_softc *)periph->softc;
5633 priority = done_ccb->ccb_h.pinfo.priority;
5634 csio = &done_ccb->csio;
5635
5636 cam_periph_assert(periph, MA_OWNED);
5637
5638 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5639 uint32_t valid_len;
5640 size_t needed_size;
5641 struct ata_identify_log_sup_cap *sup_cap;
5642 error = 0;
5643
5644 sup_cap = (struct ata_identify_log_sup_cap *)csio->data_ptr;
5645 valid_len = csio->dxfer_len - csio->resid;
5646 needed_size = __offsetof(struct ata_identify_log_sup_cap,
5647 sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap);
5648 if (valid_len >= needed_size) {
5649 uint64_t zoned, zac_cap;
5650
5651 zoned = le64dec(sup_cap->zoned_cap);
5652 if (zoned & ATA_ZONED_VALID) {
5653 /*
5654 * This should have already been
5655 * set, because this is also in the
5656 * ATA identify data.
5657 */
5658 if ((zoned & ATA_ZONED_MASK) ==
5659 ATA_SUPPORT_ZONE_HOST_AWARE)
5660 softc->zone_mode = DA_ZONE_HOST_AWARE;
5661 else if ((zoned & ATA_ZONED_MASK) ==
5662 ATA_SUPPORT_ZONE_DEV_MANAGED)
5663 softc->zone_mode =
5664					    DA_ZONE_DRIVE_MANAGED;
5665			}
5666
5667 zac_cap = le64dec(sup_cap->sup_zac_cap);
5668 if (zac_cap & ATA_SUP_ZAC_CAP_VALID) {
5669 if (zac_cap & ATA_REPORT_ZONES_SUP)
5670					softc->zone_flags |=
5671					    DA_ZONE_FLAG_RZ_SUP;
5672				if (zac_cap & ATA_ND_OPEN_ZONE_SUP)
5673					softc->zone_flags |=
5674					    DA_ZONE_FLAG_OPEN_SUP;
5675				if (zac_cap & ATA_ND_CLOSE_ZONE_SUP)
5676					softc->zone_flags |=
5677					    DA_ZONE_FLAG_CLOSE_SUP;
5678				if (zac_cap & ATA_ND_FINISH_ZONE_SUP)
5679					softc->zone_flags |=
5680					    DA_ZONE_FLAG_FINISH_SUP;
5681				if (zac_cap & ATA_ND_RWP_SUP)
5682					softc->zone_flags |=
5683					    DA_ZONE_FLAG_RWP_SUP;
5684 } else {
5685 /*
5686 * This field was introduced in
5687 * ACS-4, r08 on April 28th, 2015.
5688 * If the drive firmware was written
5689 * to an earlier spec, it won't have
5690 * the field. So, assume all
5691 * commands are supported.
5692 */
5693 softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
5694 }
5695 }
5696 } else {
5697		error = daerror(done_ccb, CAM_RETRY_SELTO,
5698		    SF_RETRY_UA|SF_NO_PRINT);
5699 if (error == ERESTART)
5700 return;
5701 else if (error != 0) {
5702 /*
5703 * If we can't get the ATA Identify Data
5704 * Supported Capabilities page, clear the
5705 * flag...
5706 */
5707 softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP;
5708 /*
5709 * And clear zone capabilities.
5710 */
5711 softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK;
5712 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5713 /* Don't wedge this device's queue */
5714 cam_release_devq(done_ccb->ccb_h.path,
5715 /*relsim_flags*/0,
5716 /*reduction*/0,
5717 /*timeout*/0,
5718 /*getcount_only*/0);
5719 }
5720 }
5721 }
5722
5723 free(csio->data_ptr, M_SCSIDA);
5724
5725 if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) {
5726 softc->state = DA_STATE_PROBE_ATA_ZONE;
5727 xpt_release_ccb(done_ccb);
5728 xpt_schedule(periph, priority);
5729 return;
5730 }
5731 daprobedone(periph, done_ccb);
5732 return;
5733}
5734
5735static void
5736dadone_probeatazone(struct cam_periph *periph, union ccb *done_ccb)
5737{
5738 struct da_softc *softc;
5739 struct ccb_scsiio *csio;
5740 int error;
5741
5742 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatazone\n"));
5743
5744 softc = (struct da_softc *)periph->softc;
5745 csio = &done_ccb->csio;
5746
5747 cam_periph_assert(periph, MA_OWNED);
5748
5749 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5750 struct ata_zoned_info_log *zi_log;
5751 uint32_t valid_len;
5752 size_t needed_size;
5753
5754 zi_log = (struct ata_zoned_info_log *)csio->data_ptr;
5755
5756 valid_len = csio->dxfer_len - csio->resid;
5757 needed_size = __offsetof(struct ata_zoned_info_log,
5758 version_info) + 1 + sizeof(zi_log->version_info);
5759 if (valid_len >= needed_size) {
5760 uint64_t tmpvar;
5761
5762 tmpvar = le64dec(zi_log->zoned_cap);
5763 if (tmpvar & ATA_ZDI_CAP_VALID) {
5764 if (tmpvar & ATA_ZDI_CAP_URSWRZ)
5765 softc->zone_flags |=
5766					    DA_ZONE_FLAG_URSWRZ;
5767				else
5768 softc->zone_flags &=
5769 ~DA_ZONE_FLAG_URSWRZ;
5770 }
5771 tmpvar = le64dec(zi_log->optimal_seq_zones);
5772 if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) {
5773 softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET;
5774 softc->optimal_seq_zones = (tmpvar &
5775 ATA_ZDI_OPT_SEQ_MASK);
5776 } else {
5777 softc->zone_flags &= ~DA_ZONE_FLAG_OPT_SEQ_SET;
5778 softc->optimal_seq_zones = 0;
5779 }
5780
5781 tmpvar =le64dec(zi_log->optimal_nonseq_zones);
5782 if (tmpvar & ATA_ZDI_OPT_NS_VALID) {
5783 softc->zone_flags |=
5784				    DA_ZONE_FLAG_OPT_NONSEQ_SET;
5785				softc->optimal_nonseq_zones =
5786 (tmpvar & ATA_ZDI_OPT_NS_MASK);
5787 } else {
5788 softc->zone_flags &=
5789 ~DA_ZONE_FLAG_OPT_NONSEQ_SET;
5790 softc->optimal_nonseq_zones = 0;
5791 }
5792
5793 tmpvar = le64dec(zi_log->max_seq_req_zones);
5794 if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) {
5795 softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET;
5796 softc->max_seq_zones =
5797 (tmpvar & ATA_ZDI_MAX_SEQ_MASK);
5798 } else {
5799 softc->zone_flags &= ~DA_ZONE_FLAG_MAX_SEQ_SET;
5800 softc->max_seq_zones = 0;
5801 }
5802 }
5803 } else {
5804		error = daerror(done_ccb, CAM_RETRY_SELTO,
5805		    SF_RETRY_UA|SF_NO_PRINT);
5806 if (error == ERESTART)
5807 return;
5808 else if (error != 0) {
5809 softc->flags &= ~DA_FLAG_CAN_ATA_ZONE;
5810 softc->flags &= ~DA_ZONE_FLAG_SET_MASK;
5811
5812 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5813 /* Don't wedge this device's queue */
5814 cam_release_devq(done_ccb->ccb_h.path,
5815 /*relsim_flags*/0,
5816 /*reduction*/0,
5817 /*timeout*/0,
5818 /*getcount_only*/0);
5819 }
5820 }
5821 }
5822
5823 free(csio->data_ptr, M_SCSIDA);
5824
5825 daprobedone(periph, done_ccb);
5826 return;
5827}
5828
5829static void
5830dadone_probezone(struct cam_periph *periph, union ccb *done_ccb)
5831{
5832 struct da_softc *softc;
5833 struct ccb_scsiio *csio;
5834 int error;
5835
5836 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probezone\n"));
5837
5838 softc = (struct da_softc *)periph->softc;
5839 csio = &done_ccb->csio;
5840
5841 cam_periph_assert(periph, MA_OWNED);
5842
5843 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5844 uint32_t valid_len;
5845 size_t needed_len;
5846 struct scsi_vpd_zoned_bdc *zoned_bdc;
5847
5848 error = 0;
5849 zoned_bdc = (struct scsi_vpd_zoned_bdc *)csio->data_ptr;
5850 valid_len = csio->dxfer_len - csio->resid;
5851 needed_len = __offsetof(struct scsi_vpd_zoned_bdc,
5852 max_seq_req_zones) + 1 +
5853 sizeof(zoned_bdc->max_seq_req_zones);
5854 if ((valid_len >= needed_len)
5855 && (scsi_2btoul(zoned_bdc->page_length) >= SVPD_ZBDC_PL)) {
5856 if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ)
5857 softc->zone_flags |= DA_ZONE_FLAG_URSWRZ;
5858 else
5859 softc->zone_flags &= ~DA_ZONE_FLAG_URSWRZ;
5860 softc->optimal_seq_zones =
5861 scsi_4btoul(zoned_bdc->optimal_seq_zones);
5862 softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET;
5863 softc->optimal_nonseq_zones = scsi_4btoul(
5864 zoned_bdc->optimal_nonseq_zones);
5865 softc->zone_flags |= DA_ZONE_FLAG_OPT_NONSEQ_SET;
5866 softc->max_seq_zones =
5867 scsi_4btoul(zoned_bdc->max_seq_req_zones);
5868 softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET;
5869 }
5870 /*
5871 * All of the zone commands are mandatory for SCSI
5872 * devices.
5873 *
5874 * XXX KDM this is valid as of September 2015.
5875 * Re-check this assumption once the SAT spec is
5876 * updated to support SCSI ZBC to ATA ZAC mapping.
5877 * Since ATA allows zone commands to be reported
5878 * as supported or not, this may not necessarily
5879 * be true for an ATA device behind a SAT (SCSI to
5880 * ATA Translation) layer.
5881 */
5882 softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
5883 } else {
5884		error = daerror(done_ccb, CAM_RETRY_SELTO,
5885		    SF_RETRY_UA|SF_NO_PRINT);
5886 if (error == ERESTART)
5887 return;
5888 else if (error != 0) {
5889 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5890 /* Don't wedge this device's queue */
5891 cam_release_devq(done_ccb->ccb_h.path,
5892 /*relsim_flags*/0,
5893 /*reduction*/0,
5894 /*timeout*/0,
5895 /*getcount_only*/0);
5896 }
5897 }
5898 }
5899
5900 free(csio->data_ptr, M_SCSIDA);
5901
5902 daprobedone(periph, done_ccb);
5903 return;
5904}
5905
5906static void
5907dadone_tur(struct cam_periph *periph, union ccb *done_ccb)
5908{
5909 struct da_softc *softc;
5910
5911 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_tur\n"));
5912
5913 softc = (struct da_softc *)periph->softc;
5914
5915 cam_periph_assert(periph, MA_OWNED);
5916
5917 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5918 if (daerror(done_ccb, CAM_RETRY_SELTO,
5919 SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == ERESTART)
5920 return; /* Will complete again, keep reference */
5921 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5922 cam_release_devq(done_ccb->ccb_h.path,
5923 /*relsim_flags*/0,
5924 /*reduction*/0,
5925 /*timeout*/0,
5926 /*getcount_only*/0);
5927 }
5928 softc->flags &= ~DA_FLAG_TUR_PENDING;
5929 xpt_release_ccb(done_ccb);
5930	da_periph_release_locked(periph, DA_REF_TUR);
5931	return;
5932}
5933
5934static void
5935dareprobe(struct cam_periph *periph)
5936{
5937 struct da_softc *softc;
5938 int status __diagused;
5939
5940 softc = (struct da_softc *)periph->softc;
5941
5942 cam_periph_assert(periph, MA_OWNED);
5943
5944 /* Probe in progress; don't interfere. */
5945 if (softc->state != DA_STATE_NORMAL)
5946 return;
5947
5948 status = da_periph_acquire(periph, DA_REF_REPROBE);
5949 KASSERT(status == 0, ("dareprobe: cam_periph_acquire failed"));
5950
5951 softc->state = DA_STATE_PROBE_WP;
5952	xpt_schedule(periph, CAM_PRIORITY_DEV);
5953}
5954
5955static int
5956daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
5957{
5958 struct da_softc *softc;
5959 struct cam_periph *periph;
5960 int error, error_code, sense_key, asc, ascq;
5961
5962#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
5963 if (ccb->csio.bio != NULL)
5964 biotrack(ccb->csio.bio, __func__);
5965#endif
5966
5967 periph = xpt_path_periph(ccb->ccb_h.path);
5968 softc = (struct da_softc *)periph->softc;
5969
5970 cam_periph_assert(periph, MA_OWNED);
5971
5972 /*
5973 * Automatically detect devices that do not support
5974 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
5975 */
5976 error = 0;
5977	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
5978		error = cmd6workaround(ccb);
5979 } else if (scsi_extract_sense_ccb(ccb,
5980 &error_code, &sense_key, &asc, &ascq)) {
5981 if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
5982 error = cmd6workaround(ccb);
5983 /*
5984 * If the target replied with CAPACITY DATA HAS CHANGED UA,
5985 * query the capacity and notify upper layers.
5986 */
5987 else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
5988 asc == 0x2A && ascq == 0x09) {
5989 xpt_print(periph->path, "Capacity data has changed\n");
5990 softc->flags &= ~DA_FLAG_PROBED;
5991 dareprobe(periph);
5992 sense_flags |= SF_NO_PRINT;
5993 } else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
5994 asc == 0x28 && ascq == 0x00) {
5995 softc->flags &= ~DA_FLAG_PROBED;
5996 disk_media_changed(softc->disk, M_NOWAIT);
5997 } else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
5998 asc == 0x3F && ascq == 0x03) {
5999 xpt_print(periph->path, "INQUIRY data has changed\n");
6000 softc->flags &= ~DA_FLAG_PROBED;
6001 dareprobe(periph);
6002 sense_flags |= SF_NO_PRINT;
6003 } else if (sense_key == SSD_KEY_NOT_READY &&
6004 asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
6005 softc->flags |= DA_FLAG_PACK_INVALID;
6006 disk_media_gone(softc->disk, M_NOWAIT);
6007 }
6008 }
6009 if (error == ERESTART)
6010 return (ERESTART);
6011
6012#ifdef CAM_IO_STATS
6013 switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
6014 case CAM_CMD_TIMEOUT:
6015 softc->timeouts++;
6016 break;
6017 case CAM_REQ_ABORTED:
6018 case CAM_REQ_CMP_ERR:
6019 case CAM_REQ_TERMIO:
6021 case CAM_DATA_RUN_ERR:
6024 softc->errors++;
6025 break;
6026 default:
6027 break;
6028 }
6029#endif
6030
6031 /*
6032 * XXX
6033 * Until we have a better way of doing pack validation,
6034 * don't treat UAs as errors.
6035 */
6036 sense_flags |= SF_RETRY_UA;
6037
6038 if (softc->quirks & DA_Q_RETRY_BUSY)
6039 sense_flags |= SF_RETRY_BUSY;
6040 return(cam_periph_error(ccb, cam_flags, sense_flags));
6041}
6042
6043static void
6044damediapoll(void *arg)
6045{
6046 struct cam_periph *periph = arg;
6047 struct da_softc *softc = periph->softc;
6048
6049	if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) &&
6050	    (softc->flags & DA_FLAG_TUR_PENDING) == 0 &&
6051 softc->state == DA_STATE_NORMAL &&
6052 LIST_EMPTY(&softc->pending_ccbs)) {
6053 if (da_periph_acquire(periph, DA_REF_TUR) == 0) {
6054			cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
6055			daschedule(periph);
6056 }
6057 }
6058
6059 /* Queue us up again */
6060 if (da_poll_period != 0) {
6061 callout_schedule_sbt(&softc->mediapoll_c,
6062 da_poll_period * SBT_1S, 0, C_PREL(1));
6063 }
6064}
6065
6066static void
6067daprevent(struct cam_periph *periph, int action)
6068{
6069 struct da_softc *softc;
6070 union ccb *ccb;
6071 int error;
6072
6073 cam_periph_assert(periph, MA_OWNED);
6074 softc = (struct da_softc *)periph->softc;
6075
6076 if (((action == PR_ALLOW)
6077 && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
6078 || ((action == PR_PREVENT)
6079 && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
6080 return;
6081 }
6082
6083 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
6084
6085 scsi_prevent(&ccb->csio,
6086 /*retries*/1,
6087 /*cbfcnp*/NULL,
6088 MSG_SIMPLE_Q_TAG,
6089 action,
6090 SSD_FULL_SIZE,
6091 5000);
6092
6093 error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO,
6094 SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat);
6095
6096 if (error == 0) {
6097 if (action == PR_ALLOW)
6098 softc->flags &= ~DA_FLAG_PACK_LOCKED;
6099 else
6100 softc->flags |= DA_FLAG_PACK_LOCKED;
6101 }
6102
6103 xpt_release_ccb(ccb);
6104}
6105
6106static void
6107dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector,
6108 struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len)
6109{
6110 struct ccb_calc_geometry ccg;
6111 struct da_softc *softc;
6112 struct disk_params *dp;
6113 u_int lbppbe, lalba;
6114 int error;
6115
6116 softc = (struct da_softc *)periph->softc;
6117
6118 dp = &softc->params;
6119 dp->secsize = block_len;
6120 dp->sectors = maxsector + 1;
6121 if (rcaplong != NULL) {
6122 lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE;
6123 lalba = scsi_2btoul(rcaplong->lalba_lbp);
6124 lalba &= SRC16_LALBA_A;
6125 if (rcaplong->prot & SRC16_PROT_EN)
6126 softc->p_type = ((rcaplong->prot & SRC16_P_TYPE) >>
6127 SRC16_P_TYPE_SHIFT) + 1;
6128 else
6129 softc->p_type = 0;
6130 } else {
6131 lbppbe = 0;
6132 lalba = 0;
6133 softc->p_type = 0;
6134 }
6135
6136 if (lbppbe > 0) {
6137 dp->stripesize = block_len << lbppbe;
6138 dp->stripeoffset = (dp->stripesize - block_len * lalba) %
6139 dp->stripesize;
6140 } else if (softc->quirks & DA_Q_4K) {
6141 dp->stripesize = 4096;
6142 dp->stripeoffset = 0;
6143 } else if (softc->unmap_gran != 0) {
6144 dp->stripesize = block_len * softc->unmap_gran;
6145 dp->stripeoffset = (dp->stripesize - block_len *
6146 softc->unmap_gran_align) % dp->stripesize;
6147 } else {
6148 dp->stripesize = 0;
6149 dp->stripeoffset = 0;
6150 }
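	/*
	 * Worked example of the stripe math above (illustration only): a
	 * drive reporting 512 byte logical blocks with LBPPBE = 3 exposes
	 * 512 << 3 = 4096 byte physical blocks, so stripesize = 4096.  If
	 * the lowest aligned LBA (lalba) is 1, then
	 * stripeoffset = (4096 - 512 * 1) % 4096 = 3584.
	 */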
6151 /*
6152 * Have the controller provide us with a geometry
6153 * for this disk. The only time the geometry
6154 * matters is when we boot and the controller
6155 * is the only one knowledgeable enough to come
6156 * up with something that will make this a bootable
6157 * device.
6158 */
6159 memset(&ccg, 0, sizeof(ccg));
6160 xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
6161 ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
6162 ccg.block_size = dp->secsize;
6163 ccg.volume_size = dp->sectors;
6164 ccg.heads = 0;
6165 ccg.secs_per_track = 0;
6166 ccg.cylinders = 0;
6167 xpt_action((union ccb*)&ccg);
6168 if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
6169 /*
6170 * We don't know what went wrong here, but just pick
6171 * a geometry so we don't have nasty things like divide
6172 * by zero.
6173 */
6174 dp->heads = 255;
6175 dp->secs_per_track = 255;
6176 dp->cylinders = dp->sectors / (255 * 255);
6177 if (dp->cylinders == 0) {
6178 dp->cylinders = 1;
6179 }
6180 } else {
6181 dp->heads = ccg.heads;
6182 dp->secs_per_track = ccg.secs_per_track;
6183 dp->cylinders = ccg.cylinders;
6184 }
6185
6186 /*
6187 * If the user supplied a read capacity buffer, and if it is
6188 * different than the previous buffer, update the data in the EDT.
6189 * If it's the same, we don't bother. This avoids sending an
6190 * update every time someone opens this device.
6191 */
6192 if ((rcaplong != NULL)
6193 && (bcmp(rcaplong, &softc->rcaplong,
6194 min(sizeof(softc->rcaplong), rcap_len)) != 0)) {
6195 struct ccb_dev_advinfo cdai;
6196
6197 memset(&cdai, 0, sizeof(cdai));
6198 xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
6199 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
6200 cdai.buftype = CDAI_TYPE_RCAPLONG;
6201 cdai.flags = CDAI_FLAG_STORE;
6202 cdai.bufsiz = rcap_len;
6203 cdai.buf = (uint8_t *)rcaplong;
6204 xpt_action((union ccb *)&cdai);
6205 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
6206 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
6207 if (cdai.ccb_h.status != CAM_REQ_CMP) {
6208 xpt_print(periph->path, "%s: failed to set read "
6209 "capacity advinfo\n", __func__);
6210 /* Use cam_error_print() to decode the status */
6211 cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS,
6212 CAM_EPF_ALL);
6213 } else {
6214 bcopy(rcaplong, &softc->rcaplong,
6215 min(sizeof(softc->rcaplong), rcap_len));
6216 }
6217 }
6218
6219 softc->disk->d_sectorsize = softc->params.secsize;
6220 softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors;
6221 softc->disk->d_stripesize = softc->params.stripesize;
6222 softc->disk->d_stripeoffset = softc->params.stripeoffset;
6223 /* XXX: these are not actually "firmware" values, so they may be wrong */
6224 softc->disk->d_fwsectors = softc->params.secs_per_track;
6225 softc->disk->d_fwheads = softc->params.heads;
6226 softc->disk->d_devstat->block_size = softc->params.secsize;
6227 softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE;
6228
6229 error = disk_resize(softc->disk, M_NOWAIT);
6230 if (error != 0)
6231 xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error);
6232}
6233
6234static void
6235dasendorderedtag(void *arg)
6236{
6237 struct cam_periph *periph = arg;
6238 struct da_softc *softc = periph->softc;
6239
6240 cam_periph_assert(periph, MA_OWNED);
6241 if (da_send_ordered) {
6242 if (!LIST_EMPTY(&softc->pending_ccbs)) {
6243 if ((softc->flags & DA_FLAG_WAS_OTAG) == 0)
6244 softc->flags |= DA_FLAG_NEED_OTAG;
6245 softc->flags &= ~DA_FLAG_WAS_OTAG;
6246 }
6247 }
6248
6249 /* Queue us up again */
6250 callout_schedule_sbt(&softc->sendordered_c,
6251 SBT_1S / DA_ORDEREDTAG_INTERVAL * 4, 0,
6252 C_PREL(1));
6253}
6254
6255/*
6256 * Step through all DA peripheral drivers, and if the device is still open,
6257 * sync the disk cache to physical media.
6258 */
6259static void
6260dashutdown(void * arg, int howto)
6261{
6262 struct cam_periph *periph;
6263 struct da_softc *softc;
6264 union ccb *ccb;
6265 int error;
6266
6267 if ((howto & RB_NOSYNC) != 0)
6268 return;
6269
6270 CAM_PERIPH_FOREACH(periph, &dadriver) {
6271 softc = (struct da_softc *)periph->softc;
6272 if (SCHEDULER_STOPPED()) {
6273 /* If we paniced with the lock held, do not recurse. */
6274 if (!cam_periph_owned(periph) &&
6275 (softc->flags & DA_FLAG_OPEN)) {
6276 dadump(softc->disk, NULL, 0, 0, 0);
6277 }
6278 continue;
6279 }
6280 cam_periph_lock(periph);
6281
6282 /*
6283 * We only sync the cache if the drive is still open, and
6284 * if the drive is capable of it.
6285 */
6286 if (((softc->flags & DA_FLAG_OPEN) == 0)
6287 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
6288 cam_periph_unlock(periph);
6289 continue;
6290 }
6291
6292 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
6293 scsi_synchronize_cache(&ccb->csio,
6294 /*retries*/0,
6295 /*cbfcnp*/NULL,
6296 MSG_SIMPLE_Q_TAG,
6297 /*begin_lba*/0, /* whole disk */
6298 /*lb_count*/0,
6299 SSD_FULL_SIZE,
6300 60 * 60 * 1000);
6301
6302 error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
6303 /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR,
6304 softc->disk->d_devstat);
6305 if (error != 0)
6306 xpt_print(periph->path, "Synchronize cache failed\n");
6307 xpt_release_ccb(ccb);
6308 cam_periph_unlock(periph);
6309 }
6310}
6311
6312#else /* !_KERNEL */
6313
6314/*
6315 * XXX These are only left out of the kernel build to silence warnings. If,
6316 * for some reason these functions are used in the kernel, the ifdefs should
6317 * be moved so they are included both in the kernel and userland.
6318 */
6319void
6320scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
6321 void (*cbfcnp)(struct cam_periph *, union ccb *),
6322 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
6323 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
6324 u_int32_t timeout)
6325{
6326 struct scsi_format_unit *scsi_cmd;
6327
6328 scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
6329 scsi_cmd->opcode = FORMAT_UNIT;
6330 scsi_cmd->byte2 = byte2;
6331 scsi_ulto2b(ileave, scsi_cmd->interleave);
6332
6333 cam_fill_csio(csio,
6334 retries,
6335 cbfcnp,
6336 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
6337 tag_action,
6338 data_ptr,
6339 dxfer_len,
6340 sense_len,
6341 sizeof(*scsi_cmd),
6342 timeout);
6343}
6344
6345void
6346scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries,
6347 void (*cbfcnp)(struct cam_periph *, union ccb *),
6348 uint8_t tag_action, uint8_t list_format,
6349 uint32_t addr_desc_index, uint8_t *data_ptr,
6350 uint32_t dxfer_len, int minimum_cmd_size,
6351 uint8_t sense_len, uint32_t timeout)
6352{
6353 uint8_t cdb_len;
6354
6355 /*
6356 * These conditions allow using the 10 byte command. Otherwise we
6357 * need to use the 12 byte command.
6358 */
6359 if ((minimum_cmd_size <= 10)
6360 && (addr_desc_index == 0)
6361 && (dxfer_len <= SRDD10_MAX_LENGTH)) {
6362 struct scsi_read_defect_data_10 *cdb10;
6363
6364 cdb10 = (struct scsi_read_defect_data_10 *)
6365 &csio->cdb_io.cdb_bytes;
6366
6367 cdb_len = sizeof(*cdb10);
6368 bzero(cdb10, cdb_len);
6369 cdb10->opcode = READ_DEFECT_DATA_10;
6370 cdb10->format = list_format;
6371 scsi_ulto2b(dxfer_len, cdb10->alloc_length);
6372 } else {
6373 struct scsi_read_defect_data_12 *cdb12;
6374
6375 cdb12 = (struct scsi_read_defect_data_12 *)
6376 &csio->cdb_io.cdb_bytes;
6377
6378 cdb_len = sizeof(*cdb12);
6379 bzero(cdb12, cdb_len);
6380 cdb12->opcode = READ_DEFECT_DATA_12;
6381 cdb12->format = list_format;
6382 scsi_ulto4b(dxfer_len, cdb12->alloc_length);
6383 scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index);
6384 }
6385
6386 cam_fill_csio(csio,
6387 retries,
6388 cbfcnp,
6389 /*flags*/ CAM_DIR_IN,
6390 tag_action,
6391 data_ptr,
6392 dxfer_len,
6393 sense_len,
6394 cdb_len,
6395 timeout);
6396}
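/*
 * Hypothetical usage sketch (illustration only; list_format, buf and buflen
 * are assumed caller-supplied): a request that keeps addr_desc_index at 0,
 * stays within SRDD10_MAX_LENGTH and passes minimum_cmd_size <= 10 is built
 * as a READ DEFECT DATA (10) CDB; anything else gets the 12 byte form:
 *
 *	scsi_read_defects(&ccb->csio, 1, NULL, MSG_SIMPLE_Q_TAG,
 *	    list_format, 0, buf, buflen,
 *	    10, SSD_FULL_SIZE, 60 * 1000);
 */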
6397
6398void
6399scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries,
6400 void (*cbfcnp)(struct cam_periph *, union ccb *),
6401 u_int8_t tag_action, u_int8_t byte2, u_int16_t control,
6402 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
6403 u_int32_t timeout)
6404{
6405 struct scsi_sanitize *scsi_cmd;
6406
6407 scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes;
6408 scsi_cmd->opcode = SANITIZE;
6409 scsi_cmd->byte2 = byte2;
6410 scsi_cmd->control = control;
6411 scsi_ulto2b(dxfer_len, scsi_cmd->length);
6412
6413 cam_fill_csio(csio,
6414 retries,
6415 cbfcnp,
6416 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
6417 tag_action,
6418 data_ptr,
6419 dxfer_len,
6420 sense_len,
6421 sizeof(*scsi_cmd),
6422 timeout);
6423}
6424
6425#endif /* _KERNEL */
6426
6427void
6428scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries,
6429 void (*cbfcnp)(struct cam_periph *, union ccb *),
6430 uint8_t tag_action, uint8_t service_action, uint64_t zone_id,
6431 uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
6432 uint8_t sense_len, uint32_t timeout)
6433{
6434 struct scsi_zbc_out *scsi_cmd;
6435
6436 scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes;
6437 scsi_cmd->opcode = ZBC_OUT;
6438 scsi_cmd->service_action = service_action;
6439 scsi_u64to8b(zone_id, scsi_cmd->zone_id);
6440 scsi_cmd->zone_flags = zone_flags;
6441
6442 cam_fill_csio(csio,
6443 retries,
6444 cbfcnp,
6445 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
6446 tag_action,
6447 data_ptr,
6448 dxfer_len,
6449 sense_len,
6450 sizeof(*scsi_cmd),
6451 timeout);
6452}
6453
6454void
6455scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries,
6456 void (*cbfcnp)(struct cam_periph *, union ccb *),
6457 uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba,
6458 uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len,
6459 uint8_t sense_len, uint32_t timeout)
6460{
6461 struct scsi_zbc_in *scsi_cmd;
6462
6463 scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes;
6464 scsi_cmd->opcode = ZBC_IN;
6465 scsi_cmd->service_action = service_action;
6466 scsi_ulto4b(dxfer_len, scsi_cmd->length);
6467 scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba);
6468 scsi_cmd->zone_options = zone_options;
6469
6470 cam_fill_csio(csio,
6471 retries,
6472 cbfcnp,
6473 /*flags*/ (dxfer_len > 0) ? CAM_DIR_IN : CAM_DIR_NONE,
6474 tag_action,
6475 data_ptr,
6476 dxfer_len,
6477 sense_len,
6478 sizeof(*scsi_cmd),
6479 timeout);
6480
6481}
6482
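/*
 * Hypothetical usage sketch (illustration only): a REPORT ZONES request
 * starting at LBA 0 with no reporting-option filter could be filled in as
 * below, where the two zeroes are zone_start_lba and zone_options, and buf
 * and buflen are assumed caller-supplied:
 *
 *	scsi_zbc_in(&ccb->csio, 1, NULL, MSG_SIMPLE_Q_TAG,
 *	    ZBC_IN_SA_REPORT_ZONES, 0, 0,
 *	    buf, buflen, SSD_FULL_SIZE, 60 * 1000);
 */
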
6483int
6484scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries,
6485 void (*cbfcnp)(struct cam_periph *, union ccb *),
6486 uint8_t tag_action, int use_ncq,
6487 uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
6488 uint8_t *data_ptr, uint32_t dxfer_len,
6489 uint8_t *cdb_storage, size_t cdb_storage_len,
6490 uint8_t sense_len, uint32_t timeout)
6491{
6492 uint8_t command_out, protocol, ata_flags;
6493 uint16_t features_out;
6494 uint32_t sectors_out, auxiliary;
6495 int retval;
6496
6497 retval = 0;
6498
6499 if (use_ncq == 0) {
6500 command_out = ATA_ZAC_MANAGEMENT_OUT;
6501 features_out = (zm_action & 0xf) | (zone_flags << 8);
6502 ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
6503 if (dxfer_len == 0) {
6504 protocol = AP_PROTO_NON_DATA;
6505 ata_flags |= AP_FLAG_TLEN_NO_DATA;
6506 sectors_out = 0;
6507 } else {
6508 protocol = AP_PROTO_DMA;
6509 ata_flags |= AP_FLAG_TLEN_SECT_CNT |
6510 AP_FLAG_TDIR_TO_DEV;
6511 sectors_out = ((dxfer_len >> 9) & 0xffff);
6512 }
6513 auxiliary = 0;
6514 } else {
6515 ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
6516 if (dxfer_len == 0) {
6517 command_out = ATA_NCQ_NON_DATA;
6518 features_out = ATA_NCQ_ZAC_MGMT_OUT;
6519 /*
6520 * We're assuming the SCSI to ATA translation layer
6521 * will set the NCQ tag number in the tag field.
6522 * That isn't clear from the SAT-4 spec (as of rev 05).
6523 */
6524 sectors_out = 0;
6525 ata_flags |= AP_FLAG_TLEN_NO_DATA;
6526 } else {
6527 command_out = ATA_SEND_FPDMA_QUEUED;
6528 /*
6529 * Note that we're defaulting to normal priority,
6530 * and assuming that the SCSI to ATA translation
6531 * layer will insert the NCQ tag number in the tag
6532 * field. That isn't clear in the SAT-4 spec (as
6533 * of rev 05).
6534 */
6535 sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8;
6536
6537 ata_flags |= AP_FLAG_TLEN_FEAT |
6538 AP_FLAG_TDIR_TO_DEV;
6539
6540 /*
6541 * For SEND FPDMA QUEUED, the transfer length is
6542 * encoded in the FEATURE register, and 0 means
6543 * that 65536 512 byte blocks are to be transferred.
6544 * In practice, it seems unlikely that we'll see
6545 * a transfer that large, and it may confuse
6546 * the SAT layer, because generally that means that
6547 * 0 bytes should be transferred.
6548 */
6549 if (dxfer_len == (65536 * 512)) {
6550 features_out = 0;
6551 } else if (dxfer_len <= (65535 * 512)) {
6552 features_out = ((dxfer_len >> 9) & 0xffff);
6553 } else {
6554 /* The transfer is too big. */
6555 retval = 1;
6556 goto bailout;
6557 }
6558 }
6559
6560 auxiliary = (zm_action & 0xf) | (zone_flags << 8);
6561 protocol = AP_PROTO_FPDMA;
6562 }
6563
6564 protocol |= AP_EXTEND;
6565
6566 retval = scsi_ata_pass(csio,
6567 retries,
6568 cbfcnp,
6569 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
6570 tag_action,
6571 /*protocol*/ protocol,
6572 /*ata_flags*/ ata_flags,
6573 /*features*/ features_out,
6574 /*sector_count*/ sectors_out,
6575 /*lba*/ zone_id,
6576 /*command*/ command_out,
6577 /*device*/ 0,
6578 /*icc*/ 0,
6579 /*auxiliary*/ auxiliary,
6580 /*control*/ 0,
6581 /*data_ptr*/ data_ptr,
6582 /*dxfer_len*/ dxfer_len,
6583 /*cdb_storage*/ cdb_storage,
6584 /*cdb_storage_len*/ cdb_storage_len,
6585 /*minimum_cmd_size*/ 0,
6586 /*sense_len*/ SSD_FULL_SIZE,
6587 /*timeout*/ timeout);
6588
6589bailout:
6590
6591 return (retval);
6592}
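/*
 * Worked example of the SEND FPDMA QUEUED length encoding above
 * (illustration only): a 128 KiB transfer is 131072 / 512 = 256 blocks,
 * so features_out = 256; the one special case, dxfer_len == 65536 * 512
 * (32 MiB), is encoded as features_out = 0.
 */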
6593
6594int
6595scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries,
6596 void (*cbfcnp)(struct cam_periph *, union ccb *),
6597 uint8_t tag_action, int use_ncq,
6598 uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
6599 uint8_t *data_ptr, uint32_t dxfer_len,
6600 uint8_t *cdb_storage, size_t cdb_storage_len,
6601 uint8_t sense_len, uint32_t timeout)
6602{
6603 uint8_t command_out, protocol;
6604 uint16_t features_out, sectors_out;
6605 uint32_t auxiliary;
6606 int ata_flags;
6607 int retval;
6608
6609 retval = 0;
6610 ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS;
6611
6612 if (use_ncq == 0) {
6613 command_out = ATA_ZAC_MANAGEMENT_IN;
6614 /* XXX KDM put a macro here */
6615 features_out = (zm_action & 0xf) | (zone_flags << 8);
6616 sectors_out = dxfer_len >> 9; /* XXX KDM macro */
6617 protocol = AP_PROTO_DMA;
6618 ata_flags |= AP_FLAG_TLEN_SECT_CNT;
6619 auxiliary = 0;
6620 } else {
6621 ata_flags |= AP_FLAG_TLEN_FEAT;
6622
6623 command_out = ATA_RECV_FPDMA_QUEUED;
6624 sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8;
6625
6626 /*
6627 * For RECEIVE FPDMA QUEUED, the transfer length is
6628 * encoded in the FEATURE register, and 0 means
6629 * that 65536 512 byte blocks are to be transferred.
6630 * In practice, it seems unlikely that we'll see
6631 * a transfer that large, and it may confuse
6632 * the SAT layer, because generally that means that
6633 * 0 bytes should be transferred.
6634 */
6635 if (dxfer_len == (65536 * 512)) {
6636 features_out = 0;
6637 } else if (dxfer_len <= (65535 * 512)) {
6638 features_out = ((dxfer_len >> 9) & 0xffff);
6639 } else {
6640 /* The transfer is too big. */
6641 retval = 1;
6642 goto bailout;
6643 }
6644 auxiliary = (zm_action & 0xf) | (zone_flags << 8);
6645 protocol = AP_PROTO_FPDMA;
6646 }
6647
6648 protocol |= AP_EXTEND;
6649
6650 retval = scsi_ata_pass(csio,
6651 retries,
6652 cbfcnp,
6653 /*flags*/ CAM_DIR_IN,
6654 tag_action,
6655 /*protocol*/ protocol,
6656 /*ata_flags*/ ata_flags,
6657 /*features*/ features_out,
6658 /*sector_count*/ sectors_out,
6659 /*lba*/ zone_id,
6660 /*command*/ command_out,
6661 /*device*/ 0,
6662 /*icc*/ 0,
6663 /*auxiliary*/ auxiliary,
6664 /*control*/ 0,
6665 /*data_ptr*/ data_ptr,
6666 /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */
6667 /*cdb_storage*/ cdb_storage,
6668 /*cdb_storage_len*/ cdb_storage_len,
6669 /*minimum_cmd_size*/ 0,
6670 /*sense_len*/ SSD_FULL_SIZE,
6671 /*timeout*/ timeout);
6672
6673bailout:
6674 return (retval);
6675}
void ata_param_fixup(struct ata_params *ident_buf)
Definition: ata_all.c:1266
#define SID_AEN
Definition: ata_all.h:41
caddr_t cam_quirkmatch(caddr_t target, caddr_t quirk_table, int num_entries, int entry_size, cam_quirkmatch_t *comp_func)
Definition: cam.c:281
void cam_strvis(u_int8_t *dst, const u_int8_t *src, int srclen, int dstlen)
Definition: cam.c:124
void cam_error_print(union ccb *ccb, cam_error_string_flags flags, cam_error_proto_flags proto_flags)
Definition: cam.c:510
@ CAM_EPF_ALL
Definition: cam.h:324
@ SF_QUIET_IR
Definition: cam.h:124
@ SF_RETRY_UA
Definition: cam.h:122
@ SF_RETRY_BUSY
Definition: cam.h:128
@ SF_NO_RETRY
Definition: cam.h:127
@ SF_NO_RECOVERY
Definition: cam.h:126
@ SF_NO_PRINT
Definition: cam.h:123
cam_flags
Definition: cam.h:115
@ CAM_RETRY_SELTO
Definition: cam.h:118
#define CAM_PRIORITY_NORMAL
Definition: cam.h:92
#define CAM_PRIORITY_NONE
Definition: cam.h:93
@ CAM_ESF_CAM_STATUS
Definition: cam.h:315
cam_status
Definition: cam.h:132
@ CAM_ATA_STATUS_ERROR
Definition: cam.h:225
@ CAM_REQ_INVALID
Definition: cam.h:152
@ CAM_REQ_INPROG
Definition: cam.h:134
@ CAM_REQ_CMP
Definition: cam.h:137
@ CAM_REQUEUE_REQ
Definition: cam.h:222
@ CAM_CMD_TIMEOUT
Definition: cam.h:167
@ CAM_REQ_CMP_ERR
Definition: cam.h:146
@ CAM_STATUS_MASK
Definition: cam.h:302
@ CAM_REQ_ABORTED
Definition: cam.h:140
@ CAM_REQ_TERMIO
Definition: cam.h:206
@ CAM_UNREC_HBA_ERROR
Definition: cam.h:209
@ CAM_SCSI_STATUS_ERROR
Definition: cam.h:170
@ CAM_DATA_RUN_ERR
Definition: cam.h:188
@ CAM_DEV_QFRZN
Definition: cam.h:287
#define CAM_PRIORITY_DEV
Definition: cam.h:90
@ AC_ADVINFO_CHANGED
Definition: cam_ccb.h:868
@ AC_FOUND_DEVICE
Definition: cam_ccb.h:874
@ AC_SCSI_AEN
Definition: cam_ccb.h:878
@ AC_BUS_RESET
Definition: cam_ccb.h:880
@ AC_UNIT_ATTENTION
Definition: cam_ccb.h:867
@ AC_INQ_CHANGED
Definition: cam_ccb.h:871
@ AC_LOST_DEVICE
Definition: cam_ccb.h:873
@ AC_SENT_BDR
Definition: cam_ccb.h:877
#define CDAI_FLAG_STORE
Definition: cam_ccb.h:1312
@ PROTO_SCSI
Definition: cam_ccb.h:279
@ CTS_TYPE_CURRENT_SETTINGS
Definition: cam_ccb.h:945
@ PIM_NO_6_BYTE
Definition: cam_ccb.h:623
@ PIM_UNMAPPED
Definition: cam_ccb.h:625
#define CDAI_TYPE_RCAPLONG
Definition: cam_ccb.h:1318
@ XPORT_FC
Definition: cam_ccb.h:292
@ CAM_DIR_IN
Definition: cam_ccb.h:79
@ CAM_DIR_NONE
Definition: cam_ccb.h:81
@ CAM_CDB_POINTER
Definition: cam_ccb.h:69
@ CAM_UNLOCKED
Definition: cam_ccb.h:119
@ CAM_DIR_OUT
Definition: cam_ccb.h:80
static __BEGIN_DECLS __inline void cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t tag_action, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int8_t cdb_len, u_int32_t timeout)
Definition: cam_ccb.h:1389
@ XPT_DEV_ADVINFO
Definition: cam_ccb.h:166
@ XPT_GET_TRAN_SETTINGS
Definition: cam_ccb.h:183
@ XPT_CALC_GEOMETRY
Definition: cam_ccb.h:193
@ XPT_GDEV_TYPE
Definition: cam_ccb.h:143
#define XPORT_DEVSTAT_TYPE(t)
Definition: cam_ccb.h:310
#define CDAI_TYPE_PHYS_PATH
Definition: cam_ccb.h:1317
static __inline cam_status cam_ccb_status(union ccb *ccb)
Definition: cam_ccb.h:1514
#define CTS_FC_VALID_WWPN
Definition: cam_ccb.h:984
@ CAM_DEBUG_TRACE
Definition: cam_debug.h:41
@ CAM_DEBUG_PERIPH
Definition: cam_debug.h:45
#define CAM_DEBUG(path, flag, printfargs)
Definition: cam_debug.h:93
struct bio * cam_iosched_next_trim(struct cam_iosched_softc *isc)
Definition: cam_iosched.c:1414
void cam_iosched_clr_work_flags(struct cam_iosched_softc *isc, uint32_t flags)
Definition: cam_iosched.c:1780
void cam_iosched_fini(struct cam_iosched_softc *isc)
Definition: cam_iosched.c:1176
void cam_iosched_set_work_flags(struct cam_iosched_softc *isc, uint32_t flags)
Definition: cam_iosched.c:1774
void cam_iosched_flush(struct cam_iosched_softc *isc, struct devstat *stp, int err)
Definition: cam_iosched.c:1313
void cam_iosched_trim_done(struct cam_iosched_softc *isc)
Definition: cam_iosched.c:1682
int cam_iosched_init(struct cam_iosched_softc **iscp, struct cam_periph *periph)
Definition: cam_iosched.c:1136
void cam_iosched_put_back_trim(struct cam_iosched_softc *isc, struct bio *bp)
Definition: cam_iosched.c:1393
int cam_iosched_bio_complete(struct cam_iosched_softc *isc, struct bio *bp, union ccb *done_ccb)
Definition: cam_iosched.c:1693
void cam_iosched_sysctl_init(struct cam_iosched_softc *isc, struct sysctl_ctx_list *ctx, struct sysctl_oid *node)
Definition: cam_iosched.c:1201
void cam_iosched_schedule(struct cam_iosched_softc *isc, struct cam_periph *periph)
Definition: cam_iosched.c:1671
void cam_iosched_set_sort_queue(struct cam_iosched_softc *isc, int val)
Definition: cam_iosched.c:1761
void cam_iosched_submit_trim(struct cam_iosched_softc *isc)
Definition: cam_iosched.c:1751
struct bio * cam_iosched_next_bio(struct cam_iosched_softc *isc)
Definition: cam_iosched.c:1505
int cam_iosched_has_work_flags(struct cam_iosched_softc *isc, uint32_t flags)
Definition: cam_iosched.c:1768
void cam_iosched_queue_work(struct cam_iosched_softc *isc, struct bio *bp)
Definition: cam_iosched.c:1574
void cam_periph_release_locked(struct cam_periph *periph)
Definition: cam_periph.c:453
u_int32_t cam_release_devq(struct cam_path *path, u_int32_t relsim_flags, u_int32_t openings, u_int32_t arg, int getcount_only)
Definition: cam_periph.c:1342
int cam_periph_acquire(struct cam_periph *periph)
Definition: cam_periph.c:413
void cam_periph_async(struct cam_periph *periph, u_int32_t code, struct cam_path *path, void *arg)
Definition: cam_periph.c:1471
void cam_periph_release(struct cam_periph *periph)
Definition: cam_periph.c:465
void cam_periph_unhold(struct cam_periph *periph)
Definition: cam_periph.c:519
int cam_periph_hold(struct cam_periph *periph, int priority)
Definition: cam_periph.c:486
int cam_periph_runccb(union ccb *ccb, int(*error_routine)(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags), cam_flags camflags, u_int32_t sense_flags, struct devstat *ds)
Definition: cam_periph.c:1207
int cam_periph_invalidate_sysctl(SYSCTL_HANDLER_ARGS)
Definition: cam_periph.c:2182
void cam_periph_invalidate(struct cam_periph *periph)
Definition: cam_periph.c:655
int cam_periph_error(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags)
Definition: cam_periph.c:1864
cam_status cam_periph_alloc(periph_ctor_t *periph_ctor, periph_oninv_t *periph_oninvalidate, periph_dtor_t *periph_dtor, periph_start_t *periph_start, char *name, cam_periph_type type, struct cam_path *path, ac_callback_t *ac_callback, ac_code code, void *arg)
Definition: cam_periph.c:197
cam_status periph_ctor_t(struct cam_periph *periph, void *arg)
Definition: cam_periph.h:115
#define CAM_PERIPH_INVALID
Definition: cam_periph.h:133
void periph_oninv_t(struct cam_periph *periph)
Definition: cam_periph.h:117
#define cam_periph_assert(periph, what)
Definition: cam_periph.h:230
#define CAM_PERIPH_FOREACH(periph, driver)
Definition: cam_periph.h:268
#define cam_periph_lock(periph)
Definition: cam_periph.h:224
union ccb * cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
Definition: cam_xpt.c:4695
#define cam_periph_owned(periph)
Definition: cam_periph.h:221
#define cam_periph_unlock(periph)
Definition: cam_periph.h:227
static __inline struct mtx * cam_periph_mtx(struct cam_periph *periph)
Definition: cam_periph.h:213
void periph_dtor_t(struct cam_periph *periph)
Definition: cam_periph.h:118
void periph_start_t(struct cam_periph *periph, union ccb *start_ccb)
Definition: cam_periph.h:113
#define cam_periph_sleep(periph, chan, priority, wmesg, timo)
Definition: cam_periph.h:233
@ CAM_PERIPH_BIO
Definition: cam_periph.h:104
void() periph_init_t(void)
Definition: cam_periph.h:85
TAILQ_HEAD(ccb_hdr_tailq, ccb_hdr)
LIST_HEAD(ccb_hdr_list, ccb_hdr)
static __inline bool cam_sim_pollable(const struct cam_sim *sim)
Definition: cam_sim.h:139
void xpt_schedule(struct cam_periph *periph, u_int32_t new_priority)
Definition: cam_xpt.c:3243
int xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
Definition: cam_xpt.c:1243
void xpt_print(struct cam_path *path, const char *fmt,...)
Definition: cam_xpt.c:3814
void xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
Definition: cam_xpt.c:3520
void xpt_action(union ccb *start_ccb)
Definition: cam_xpt.c:2601
void xpt_announce_quirks_sbuf(struct cam_periph *periph, struct sbuf *sb, int quirks, char *bit_string)
Definition: cam_xpt.c:1160
cam_status xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, struct cam_path *path)
Definition: cam_xpt.c:5213
struct cam_periph * xpt_path_periph(struct cam_path *path)
Definition: cam_xpt.c:3911
void xpt_release_ccb(union ccb *free_ccb)
Definition: cam_xpt.c:3924
void xpt_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb, char *announce_string)
Definition: cam_xpt.c:1093
static void xpt_path_inq(struct ccb_pathinq *cpi, struct cam_path *path)
Definition: cam_xpt.h:156
union ccb * ccb
Definition: mmc_sim_if.m:53
struct ccb_trans_settings_mmc * cts
Definition: mmc_sim_if.m:43
void scsi_ata_trim(struct ccb_scsiio *csio, u_int32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int16_t block_count, u_int8_t *data_ptr, u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t timeout)
Definition: scsi_all.c:8317
void scsi_sense_desc(int sense_key, int asc, int ascq, struct scsi_inquiry_data *inq_data, const char **sense_key_desc, const char **asc_desc)
Definition: scsi_all.c:3361
void scsi_unmap(struct ccb_scsiio *csio, u_int32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, u_int8_t *data_ptr, u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t timeout)
Definition: scsi_all.c:8659
void scsi_sense_print(struct ccb_scsiio *csio)
Definition: scsi_all.c:5161
int scsi_ata_pass(struct ccb_scsiio *csio, uint32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint8_t tag_action, uint8_t protocol, uint8_t ata_flags, uint16_t features, uint16_t sector_count, uint64_t lba, uint8_t command, uint8_t device, uint8_t icc, uint32_t auxiliary, uint8_t control, u_int8_t *data_ptr, uint32_t dxfer_len, uint8_t *cdb_storage, size_t cdb_storage_len, int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout)
Definition: scsi_all.c:8444
void scsi_prevent(struct ccb_scsiio *csio, u_int32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t action, u_int8_t sense_len, u_int32_t timeout)
Definition: scsi_all.c:7845
int scsi_inquiry_match(caddr_t inqbuffer, caddr_t table_entry)
Definition: scsi_all.c:9163
void scsi_read_capacity(struct ccb_scsiio *csio, u_int32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, struct scsi_read_capacity_data *rcap_buf, u_int8_t sense_len, u_int32_t timeout)
Definition: scsi_all.c:7871
void scsi_write_same(struct ccb_scsiio *csio, u_int32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, int minimum_cmd_size, u_int64_t lba, u_int32_t block_count, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout)
Definition: scsi_all.c:8215
void scsi_ata_identify(struct ccb_scsiio *csio, u_int32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t *data_ptr, u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t timeout)
Definition: scsi_all.c:8284
int scsi_extract_sense_ccb(union ccb *ccb, int *error_code, int *sense_key, int *asc, int *ascq)
Definition: scsi_all.c:5214
void scsi_inquiry(struct ccb_scsiio *csio, u_int32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t *inq_buf, u_int32_t inq_len, int evpd, u_int8_t page_code, u_int8_t sense_len, u_int32_t timeout)
Definition: scsi_all.c:7595
void scsi_mode_sense_len(struct ccb_scsiio *csio, uint32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int dbd, uint8_t pc, uint8_t page, uint8_t *param_buf, uint32_t param_len, int minimum_cmd_size, uint8_t sense_len, uint32_t timeout)
Definition: scsi_all.c:7636
int scsi_ata_read_log(struct ccb_scsiio *csio, uint32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint32_t log_address, uint32_t page_number, uint16_t block_count, uint8_t protocol, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len, uint32_t timeout)
Definition: scsi_all.c:8342
void scsi_synchronize_cache(struct ccb_scsiio *csio, u_int32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t begin_lba, u_int16_t lb_count, u_int8_t sense_len, u_int32_t timeout)
Definition: scsi_all.c:8074
int scsi_vpd_supported_page(struct cam_periph *periph, uint8_t page_id)
Definition: scsi_all.c:9277
void scsi_read_write(struct ccb_scsiio *csio, u_int32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int readop, u_int8_t byte2, int minimum_cmd_size, u_int64_t lba, u_int32_t block_count, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout)
Definition: scsi_all.c:8101
void scsi_test_unit_ready(struct ccb_scsiio *csio, u_int32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t sense_len, u_int32_t timeout)
Definition: scsi_all.c:7547
void scsi_read_capacity_16(struct ccb_scsiio *csio, uint32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint64_t lba, int reladr, int pmi, uint8_t *rcap_buf, int rcap_buf_len, uint8_t sense_len, uint32_t timeout)
Definition: scsi_all.c:7896
#define SVPD_DM_ZBC
Definition: scsi_all.h:2814
#define T_OPTICAL
Definition: scsi_all.h:2177
#define SSD_KEY_UNIT_ATTENTION
Definition: scsi_all.h:3282
#define SID_IS_REMOVABLE(inq_data)
Definition: scsi_all.h:2217
#define PR_ALLOW
Definition: scsi_all.h:946
#define SCSI_RW_BIO
Definition: scsi_all.h:4234
#define SVPD_BLOCK_LIMITS
Definition: scsi_all.h:2863
#define AP_PROTO_PIO_IN
Definition: scsi_all.h:1438
#define SVPD_LBP_WS16
Definition: scsi_all.h:2841
#define AP_FLAG_TDIR_FROM_DEV
Definition: scsi_all.h:2037
#define SSD_DESC_CURRENT_ERROR
Definition: scsi_all.h:3325
#define SSD_MIN_SIZE
Definition: scsi_all.h:3256
#define READ_6
Definition: scsi_all.h:2083
#define AP_PROTO_NON_DATA
Definition: scsi_all.h:1437
#define WRITE_6
Definition: scsi_all.h:2084
#define SVPD_LBP_WS10
Definition: scsi_all.h:2842
static __inline uint32_t scsi_3btoul(const uint8_t *bytes)
Definition: scsi_all.h:4425
#define SVPD_LBP
Definition: scsi_all.h:2835
static __inline uint64_t scsi_8btou64(const uint8_t *bytes)
Definition: scsi_all.h:4459
#define AP_PROTO_FPDMA
Definition: scsi_all.h:1446
static __inline uint32_t scsi_2btoul(const uint8_t *bytes)
Definition: scsi_all.h:4415
#define SMPH_PC_MASK
Definition: scsi_all.h:3666
#define SIP_MEDIA_REMOVABLE
Definition: scsi_all.h:3706
#define T_RBC
Definition: scsi_all.h:2184
#define T_DIRECT
Definition: scsi_all.h:2170
#define SVPD_LBP_UNMAP
Definition: scsi_all.h:2840
#define SRC16_PROT_EN
Definition: scsi_all.h:2943
#define SIP_MEDIA_FIXED
Definition: scsi_all.h:3707
#define SVPD_ZBDC_URSWRZ
Definition: scsi_all.h:2897
#define SRC16_P_TYPE_SHIFT
Definition: scsi_all.h:2945
#define SCSI_RW_READ
Definition: scsi_all.h:4231
#define SYNCHRONIZE_CACHE
Definition: scsi_all.h:2101
#define SMS_ALL_PAGES_PAGE
Definition: scsi_all.h:202
#define AP_FLAG_TLEN_FEAT
Definition: scsi_all.h:2031
static __inline void scsi_ulto2b(u_int32_t val, u_int8_t *bytes)
Definition: scsi_all.h:4374
#define SRC16_LBPME_A
Definition: scsi_all.h:2963
#define SVPD_NON_ROTATING
Definition: scsi_all.h:2804
#define AP_FLAG_TLEN_NO_DATA
Definition: scsi_all.h:2030
#define SSD_KEY_NOT_READY
Definition: scsi_all.h:3278
#define SVPD_ZBDC_PL
Definition: scsi_all.h:2895
#define SVPD_BDC
Definition: scsi_all.h:2800
#define AP_FLAG_BYT_BLOK_BLOCKS
Definition: scsi_all.h:2035
static __inline uint32_t scsi_4btoul(const uint8_t *bytes)
Definition: scsi_all.h:4447
#define SID_QUAL_LU_CONNECTED
Definition: scsi_all.h:2208
#define SMS_PAGE_CTRL_CURRENT
Definition: scsi_all.h:204
#define AP_EXTEND
Definition: scsi_all.h:2028
#define PREVENT_ALLOW
Definition: scsi_all.h:2094
#define SWS_UNMAP
Definition: scsi_all.h:1335
#define SRC16_LBPPBE
Definition: scsi_all.h:2950
#define AP_FLAG_TDIR_TO_DEV
Definition: scsi_all.h:2036
#define SVPD_ZONED_BDC
Definition: scsi_all.h:2893
static __inline void scsi_ulto4b(u_int32_t val, u_int8_t *bytes)
Definition: scsi_all.h:4391
#define AP_PROTO_DMA
Definition: scsi_all.h:1440
static __inline void scsi_u64to8b(u_int64_t val, u_int8_t *bytes)
Definition: scsi_all.h:4401
#define WRITE_10
Definition: scsi_all.h:2097
#define SID_TYPE(inq_data)
Definition: scsi_all.h:2206
#define SVPD_HAW_ZBC
Definition: scsi_all.h:2813
#define SVPD_ZBC_NR
Definition: scsi_all.h:2812
#define SCSI_RW_WRITE
Definition: scsi_all.h:4232
#define T_ZBC_HM
Definition: scsi_all.h:2188
#define SCSI_REV_SPC3
Definition: scsi_all.h:2225
#define PR_PREVENT
Definition: scsi_all.h:945
#define SID_ANSI_REV(inq_data)
Definition: scsi_all.h:2219
#define SVPD_ATA_INFORMATION
Definition: scsi_all.h:2637
#define SRC16_P_TYPE
Definition: scsi_all.h:2944
#define SBDC_IS_PRESENT(bdc, length, field)
Definition: scsi_all.h:2823
#define SID_QUAL(inq_data)
Definition: scsi_all.h:2207
#define READ_10
Definition: scsi_all.h:2096
#define SSD_KEY_ILLEGAL_REQUEST
Definition: scsi_all.h:3281
#define SSD_FULL_SIZE
Definition: scsi_all.h:3251
#define SVPD_ZBC_MASK
Definition: scsi_all.h:2815
#define READ_DEFECT_DATA_10
Definition: scsi_all.h:2102
#define AP_FLAG_TLEN_SECT_CNT
Definition: scsi_all.h:2032
#define SRC16_LALBA_A
Definition: scsi_all.h:2961
#define SSD_CURRENT_ERROR
Definition: scsi_all.h:3270
static da_delete_func_t da_delete_trim
Definition: scsi_da.c:280
static int cmd6workaround(union ccb *ccb)
Definition: scsi_da.c:4282
#define DA_DEFAULT_SEND_ORDERED
Definition: scsi_da.c:1553
static void dashutdown(void *arg, int howto)
Definition: scsi_da.c:6260
da_zone_flags
Definition: scsi_da.c:247
@ DA_ZONE_FLAG_FINISH_SUP
Definition: scsi_da.c:251
@ DA_ZONE_FLAG_RZ_SUP
Definition: scsi_da.c:248
@ DA_ZONE_FLAG_CLOSE_SUP
Definition: scsi_da.c:250
@ DA_ZONE_FLAG_RWP_SUP
Definition: scsi_da.c:252
@ DA_ZONE_FLAG_OPEN_SUP
Definition: scsi_da.c:249
@ DA_ZONE_FLAG_OPT_NONSEQ_SET
Definition: scsi_da.c:260
@ DA_ZONE_FLAG_OPT_SEQ_SET
Definition: scsi_da.c:259
@ DA_ZONE_FLAG_URSWRZ
Definition: scsi_da.c:258
@ DA_ZONE_FLAG_SUP_MASK
Definition: scsi_da.c:253
@ DA_ZONE_FLAG_MAX_SEQ_SET
Definition: scsi_da.c:261
@ DA_ZONE_FLAG_SET_MASK
Definition: scsi_da.c:262
static int da_send_ordered
Definition: scsi_da.c:1560
da_ref_token
Definition: scsi_da.c:328
@ DA_REF_TUR
Definition: scsi_da.c:333
@ DA_REF_GEOM
Definition: scsi_da.c:334
@ DA_REF_OPEN
Definition: scsi_da.c:329
@ DA_REF_REPROBE
Definition: scsi_da.c:336
@ DA_REF_PROBE_HOLD
Definition: scsi_da.c:332
@ DA_REF_MAX
Definition: scsi_da.c:337
@ DA_REF_CLOSE_HOLD
Definition: scsi_da.c:331
@ DA_REF_OPEN_HOLD
Definition: scsi_da.c:330
@ DA_REF_SYSCTL
Definition: scsi_da.c:335
#define ATA_TRIM_MAX_RANGES
Definition: scsi_da.c:323
static void dadone_probewp(struct cam_periph *periph, union ccb *done_ccb)
Definition: scsi_da.c:4690
static void dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method)
Definition: scsi_da.c:2510
static struct da_quirk_entry da_quirk_table[]
Definition: scsi_da.c:417
static sbintime_t da_default_softtimeout
Definition: scsi_da.c:1559
static periph_init_t dainit
Definition: scsi_da.c:1473
static int da_zone_bio_to_scsi(int disk_zone_cmd)
Definition: scsi_da.c:3021
static periph_oninv_t daoninvalidate
Definition: scsi_da.c:1496
da_zone_mode
Definition: scsi_da.c:226
@ DA_ZONE_HOST_AWARE
Definition: scsi_da.c:229
@ DA_ZONE_NONE
Definition: scsi_da.c:227
@ DA_ZONE_DRIVE_MANAGED
Definition: scsi_da.c:228
@ DA_ZONE_HOST_MANAGED
Definition: scsi_da.c:230
static void dadone_probeata(struct cam_periph *periph, union ccb *done_ccb)
Definition: scsi_da.c:5311
static struct da_zone_desc da_zone_desc_table[]
#define WS16_MAX_BLKS
Definition: scsi_da.c:322
void da_delete_func_t(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
Definition: scsi_da.c:278
PERIPHDRIVER_DECLARE(da, dadriver)
static void dadone_probeataiddir(struct cam_periph *periph, union ccb *done_ccb)
Definition: scsi_da.c:5532
static void daprevent(struct cam_periph *periph, int action)
Definition: scsi_da.c:6067
#define DA_ORDEREDTAG_INTERVAL
Definition: scsi_da.c:1602
da_state
Definition: scsi_da.c:89
@ DA_STATE_PROBE_ZONE
Definition: scsi_da.c:101
@ DA_STATE_NORMAL
Definition: scsi_da.c:102
@ DA_STATE_PROBE_ATA_SUP
Definition: scsi_da.c:99
@ DA_STATE_PROBE_RC16
Definition: scsi_da.c:92
@ DA_STATE_PROBE_BDC
Definition: scsi_da.c:95
@ DA_STATE_PROBE_ATA_ZONE
Definition: scsi_da.c:100
@ DA_STATE_PROBE_ATA_IDDIR
Definition: scsi_da.c:98
@ DA_STATE_PROBE_ATA
Definition: scsi_da.c:96
@ DA_STATE_PROBE_LBP
Definition: scsi_da.c:93
@ DA_STATE_PROBE_ATA_LOGDIR
Definition: scsi_da.c:97
@ DA_STATE_PROBE_BLK_LIMITS
Definition: scsi_da.c:94
@ DA_STATE_PROBE_RC
Definition: scsi_da.c:91
@ DA_STATE_PROBE_WP
Definition: scsi_da.c:90
static int da_enable_biospeedup
Definition: scsi_da.c:1562
static int da_disable_wp_detection
Definition: scsi_da.c:1561
static disk_strategy_t dastrategy
Definition: scsi_da.c:1471
da_flags
Definition: scsi_da.c:105
@ DA_FLAG_DIRTY
Definition: scsi_da.c:118
@ DA_FLAG_PACK_INVALID
Definition: scsi_da.c:106
@ DA_FLAG_SCTX_INIT
Definition: scsi_da.c:115
@ DA_FLAG_CAN_ATA_SUPCAP
Definition: scsi_da.c:123
@ DA_FLAG_TUR_PENDING
Definition: scsi_da.c:125
@ DA_FLAG_PACK_REMOVABLE
Definition: scsi_da.c:109
@ DA_FLAG_ANNOUNCED
Definition: scsi_da.c:119
@ DA_FLAG_NEW_PACK
Definition: scsi_da.c:107
@ DA_FLAG_UNMAPPEDIO
Definition: scsi_da.c:126
@ DA_FLAG_CAN_ATA_DMA
Definition: scsi_da.c:120
@ DA_FLAG_OPEN
Definition: scsi_da.c:114
@ DA_FLAG_WAS_OTAG
Definition: scsi_da.c:112
@ DA_FLAG_PROBED
Definition: scsi_da.c:117
@ DA_FLAG_CAN_RC16
Definition: scsi_da.c:116
@ DA_FLAG_CAN_ATA_ZONE
Definition: scsi_da.c:124
@ DA_FLAG_PACK_LOCKED
Definition: scsi_da.c:108
@ DA_FLAG_ROTATING
Definition: scsi_da.c:110
@ DA_FLAG_CAN_ATA_IDLOG
Definition: scsi_da.c:122
@ DA_FLAG_CAN_ATA_LOG
Definition: scsi_da.c:121
@ DA_FLAG_NEED_OTAG
Definition: scsi_da.c:111
@ DA_FLAG_RETRY_UA
Definition: scsi_da.c:113
#define DA_DEFAULT_RETRY
Definition: scsi_da.c:1549
da_ccb_state
Definition: scsi_da.c:179
@ DA_CCB_DELETE
Definition: scsi_da.c:188
@ DA_CCB_TUR
Definition: scsi_da.c:189
@ DA_CCB_PROBE_ATA
Definition: scsi_da.c:185
@ DA_CCB_PROBE_RC16
Definition: scsi_da.c:181
@ DA_CCB_RETRY_UA
Definition: scsi_da.c:197
@ DA_CCB_PROBE_BLK_LIMITS
Definition: scsi_da.c:183
@ DA_CCB_DUMP
Definition: scsi_da.c:187
@ DA_CCB_PROBE_ATA_ZONE
Definition: scsi_da.c:194
@ DA_CCB_PROBE_WP
Definition: scsi_da.c:195
@ DA_CCB_PROBE_ATA_LOGDIR
Definition: scsi_da.c:191
@ DA_CCB_PROBE_ATA_SUP
Definition: scsi_da.c:193
@ DA_CCB_PROBE_RC
Definition: scsi_da.c:180
@ DA_CCB_PROBE_LBP
Definition: scsi_da.c:182
@ DA_CCB_TYPE_MASK
Definition: scsi_da.c:196
@ DA_CCB_PROBE_ZONE
Definition: scsi_da.c:190
@ DA_CCB_PROBE_BDC
Definition: scsi_da.c:184
@ DA_CCB_BUFFER_IO
Definition: scsi_da.c:186
@ DA_CCB_PROBE_ATA_IDDIR
Definition: scsi_da.c:192
static callout_func_t dasendorderedtag
Definition: scsi_da.c:1532
#define DA_ANNOUNCETMP_SZ
static void dadone(struct cam_periph *periph, union ccb *done_ccb)
Definition: scsi_da.c:4532
static void dasysctlinit(void *context, int pending)
Definition: scsi_da.c:2241
int scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int use_ncq, uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t *cdb_storage, size_t cdb_storage_len, uint8_t sense_len, uint32_t timeout)
Definition: scsi_da.c:6484
#define UNMAP_HEAD_SIZE
Definition: scsi_da.c:315
int scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int use_ncq, uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t *cdb_storage, size_t cdb_storage_len, uint8_t sense_len, uint32_t timeout)
Definition: scsi_da.c:6595
static void daschedule(struct cam_periph *periph)
Definition: scsi_da.c:1858
static da_delete_func_t da_delete_ws
Definition: scsi_da.c:282
static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
Definition: scsi_da.c:2670
static int daclose(struct disk *dp)
Definition: scsi_da.c:1804
#define da_periph_acquire(periph, token)
Definition: scsi_da.c:1746
static int dabitsysctl(SYSCTL_HANDLER_ARGS)
Definition: scsi_da.c:2637
#define da_periph_hold(periph, prio, token)
Definition: scsi_da.c:1744
static void dadone_tur(struct cam_periph *periph, union ccb *done_ccb)
Definition: scsi_da.c:5907
#define DA_DEFAULT_SOFTTIMEOUT
Definition: scsi_da.c:1545
static int daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
Definition: scsi_da.c:5956
static void dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method)
Definition: scsi_da.c:2610
static dumper_t dadump
Definition: scsi_da.c:1472
static void dadone_probeatasup(struct cam_periph *periph, union ccb *done_ccb)
Definition: scsi_da.c:5623
static void dadone_probeblklimits(struct cam_periph *periph, union ccb *done_ccb)
Definition: scsi_da.c:5117
da_quirks
Definition: scsi_da.c:152
@ DA_Q_RETRY_BUSY
Definition: scsi_da.c:160
@ DA_Q_NO_UNMAP
Definition: scsi_da.c:159
@ DA_Q_STRICT_UNMAP
Definition: scsi_da.c:162
@ DA_Q_128KB
Definition: scsi_da.c:163
@ DA_Q_NO_RC16
Definition: scsi_da.c:158
@ DA_Q_SMR_DM
Definition: scsi_da.c:161
@ DA_Q_4K
Definition: scsi_da.c:157
@ DA_Q_NO_SYNC_CACHE
Definition: scsi_da.c:154
@ DA_Q_NONE
Definition: scsi_da.c:153
@ DA_Q_NO_6_BYTE
Definition: scsi_da.c:155
@ DA_Q_NO_PREVENT
Definition: scsi_da.c:156
static const void * da_delete_functions[]
Definition: scsi_da.c:284
#define DA_DEFAULT_TIMEOUT
Definition: scsi_da.c:1541
static int dazonesupsysctl(SYSCTL_HANDLER_ARGS)
Definition: scsi_da.c:2730
static const char microp[]
Definition: scsi_da.c:415
static void daasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
Definition: scsi_da.c:2104
void scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba, uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len, uint32_t timeout)
Definition: scsi_da.c:6455
TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout)
static void dadone_probeatazone(struct cam_periph *periph, union ccb *done_ccb)
Definition: scsi_da.c:5736
#define DA_FLAG_STRING
Definition: scsi_da.c:128
static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers")
static struct periph_driver dadriver
Definition: scsi_da.c:1605
SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN, &da_poll_period, 0, "Media polling period in seconds")
__FBSDID("$FreeBSD$")
static da_delete_func_t da_delete_unmap
Definition: scsi_da.c:281
#define UNMAP_MAX_RANGES
Definition: scsi_da.c:317
static callout_func_t damediapoll
Definition: scsi_da.c:1534
static void dadone_proberc(struct cam_periph *periph, union ccb *done_ccb)
Definition: scsi_da.c:4770
#define da_periph_release_locked(periph, token)
Definition: scsi_da.c:1748
static periph_dtor_t dacleanup
Definition: scsi_da.c:1494
#define UNMAP_RANGE_MAX
Definition: scsi_da.c:314
static void dazonedone(struct cam_periph *periph, union ccb *ccb)
Definition: scsi_da.c:4386
static void daprobedone(struct cam_periph *periph, union ccb *ccb)
Definition: scsi_da.c:2551
static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS)
Definition: scsi_da.c:2490
static int da_default_timeout
Definition: scsi_da.c:1558
void scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries, void(*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t service_action, uint64_t zone_id, uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len, uint32_t timeout)
Definition: scsi_da.c:6428
static uma_zone_t da_ccb_zone
Definition: scsi_da.c:407
static int da_poll_period
Definition: scsi_da.c:1556
static int daflagssysctl(SYSCTL_HANDLER_ARGS)
Definition: scsi_da.c:2652
static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD|CTLFLAG_MPSAFE, 0, "CAM Direct Access Disk driver")
#define DA_WORK_TUR
Definition: scsi_da.c:326
SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout, CTLTYPE_UINT|CTLFLAG_RW|CTLFLAG_MPSAFE, NULL, 0, dasysctlsofttimeout, "I", "Soft I/O timeout (ms)")
static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS)
Definition: scsi_da.c:2435
static int da_enable_uma_ccbs
Definition: scsi_da.c:1563
static void dadone_probezone(struct cam_periph *periph, union ccb *done_ccb)
Definition: scsi_da.c:5830
#define dadeleteflag(softc, delete_method, enable)
Definition: scsi_da.c:400
da_zone_interface
Definition: scsi_da.c:241
@ DA_ZONE_IF_SCSI
Definition: scsi_da.c:242
@ DA_ZONE_IF_ATA_SAT
Definition: scsi_da.c:244
@ DA_ZONE_IF_ATA_PASS
Definition: scsi_da.c:243
static void dadiskgonecb(struct disk *dp)
Definition: scsi_da.c:2031
static const char * da_delete_method_desc[]
Definition: scsi_da.c:296
static void dadone_probelbp(struct cam_periph *periph, union ccb *done_ccb)
Definition: scsi_da.c:5059
da_delete_methods
Definition: scsi_da.c:209
@ DA_DELETE_WS16
Definition: scsi_da.c:214
@ DA_DELETE_NONE
Definition: scsi_da.c:210
@ DA_DELETE_DISABLE
Definition: scsi_da.c:211
@ DA_DELETE_ZERO
Definition: scsi_da.c:216
@ DA_DELETE_UNMAP
Definition: scsi_da.c:213
@ DA_DELETE_MAX
Definition: scsi_da.c:218
@ DA_DELETE_WS10
Definition: scsi_da.c:215
@ DA_DELETE_ATA_TRIM
Definition: scsi_da.c:212
@ DA_DELETE_MIN
Definition: scsi_da.c:217
static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
Definition: scsi_da.c:2458
static int daopen(struct disk *dp)
Definition: scsi_da.c:1752
static periph_start_t dastart
Definition: scsi_da.c:1495
static void dadone_probeatalogdir(struct cam_periph *periph, union ccb *done_ccb)
Definition: scsi_da.c:5451
static const char * da_delete_method_names[]
Definition: scsi_da.c:294
#define DA_Q_BIT_STRING
Definition: scsi_da.c:166
static int da_retry_count
Definition: scsi_da.c:1557
#define UNMAP_BUF_SIZE
Definition: scsi_da.c:318
static void dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector, struct scsi_read_capacity_data_long *rcaplong, size_t rcap_size)
Definition: scsi_da.c:6107
#define WS10_MAX_BLKS
Definition: scsi_da.c:321
#define da_periph_release(periph, token)
Definition: scsi_da.c:1747
static off_t dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method)
Definition: scsi_da.c:2524
#define DA_DEFAULT_POLL_PERIOD
Definition: scsi_da.c:1537
static void dadone_probebdc(struct cam_periph *periph, union ccb *done_ccb)
Definition: scsi_da.c:5211
static const char quantum[]
Definition: scsi_da.c:414
static int da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp, int *queue_ccb)
Definition: scsi_da.c:3038
static void dareprobe(struct cam_periph *periph)
Definition: scsi_da.c:5935
static int dagetattr(struct bio *bp)
Definition: scsi_da.c:1982
static int dazonemodesysctl(SYSCTL_HANDLER_ARGS)
Definition: scsi_da.c:2700
#define da_periph_unhold(periph, token)
Definition: scsi_da.c:1745
#define DA_ANNOUNCE_SZ
static periph_ctor_t daregister
Definition: scsi_da.c:1493
#define ZBC_IN_SA_REPORT_ZONES
Definition: scsi_da.h:175
#define SRZ_SAME_MASK
Definition: scsi_da.h:228
#define SRZ_ZONE_COND_SHIFT
Definition: scsi_da.h:202
#define ZBC_OUT_SA_OPEN
Definition: scsi_da.h:162
#define SRZ_ZONE_RESET
Definition: scsi_da.h:213
#define SANITIZE
Definition: scsi_da.h:247
#define ZBC_OUT
Definition: scsi_da.h:248
#define ZBC_IN
Definition: scsi_da.h:249
#define SRZ_ZONE_COND_MASK
Definition: scsi_da.h:203
#define ZBC_OUT_ALL
Definition: scsi_da.h:167
#define FORMAT_UNIT
Definition: scsi_da.h:239
#define ZBC_OUT_SA_RWP
Definition: scsi_da.h:163
#define SRZ_ZONE_NON_SEQ
Definition: scsi_da.h:212
#define SRZ_TYPE_MASK
Definition: scsi_da.h:200
#define ZBC_OUT_SA_FINISH
Definition: scsi_da.h:161
#define READ_DEFECT_DATA_12
Definition: scsi_da.h:250
#define ZBC_OUT_SA_CLOSE
Definition: scsi_da.h:160
#define SRDD10_MAX_LENGTH
Definition: scsi_da.h:110
#define MSG_ORDERED_Q_TAG
Definition: scsi_message.h:39
#define MSG_SIMPLE_Q_TAG
Definition: scsi_message.h:35
struct ata_params ident_data
struct cam_ed * device
char * periph_name
Definition: cam_periph.h:123
u_int32_t unit_number
Definition: cam_periph.h:127
uma_zone_t ccb_zone
Definition: cam_periph.h:152
void * softc
Definition: cam_periph.h:125
struct cam_path * path
Definition: cam_periph.h:124
u_int32_t flags
Definition: cam_periph.h:129
struct cam_sim * sim
Definition: cam_periph.h:126
u_int32_t priority
Definition: cam.h:86
u_int32_t block_size
Definition: cam_ccb.h:1136
u_int64_t volume_size
Definition: cam_ccb.h:1137
u_int32_t cylinders
Definition: cam_ccb.h:1138
u_int8_t heads
Definition: cam_ccb.h:1139
struct ccb_hdr ccb_h
Definition: cam_ccb.h:1135
u_int8_t secs_per_track
Definition: cam_ccb.h:1140
struct ccb_hdr ccb_h
Definition: cam_ccb.h:1309
uint32_t buftype
Definition: cam_ccb.h:1313
uint8_t * buf
Definition: cam_ccb.h:1326
uint32_t flags
Definition: cam_ccb.h:1310
cam_proto protocol
Definition: cam_ccb.h:380
struct ccb_hdr ccb_h
Definition: cam_ccb.h:379
struct scsi_inquiry_data inq_data
Definition: cam_ccb.h:381
u_int8_t inq_flags
Definition: cam_ccb.h:384
u_int32_t flags
Definition: cam_ccb.h:368
struct cam_path * path
Definition: cam_ccb.h:364
cam_pinfo pinfo
Definition: cam_ccb.h:349
xpt_opcode func_code
Definition: cam_ccb.h:362
u_int32_t status
Definition: cam_ccb.h:363
struct timeval softtimeout
Definition: cam_ccb.h:374
u_int32_t unit_number
Definition: cam_ccb.h:677
u_int16_t hba_vendor
Definition: cam_ccb.h:692
char dev_name[DEV_IDLEN]
Definition: cam_ccb.h:676
u_int32_t hba_misc
Definition: cam_ccb.h:665
u_int16_t hba_device
Definition: cam_ccb.h:693
struct ccb_hdr ccb_h
Definition: cam_ccb.h:661
cam_xport transport
Definition: cam_ccb.h:682
u_int16_t hba_subvendor
Definition: cam_ccb.h:694
u_int maxio
Definition: cam_ccb.h:691
u_int16_t hba_subdevice
Definition: cam_ccb.h:695
cdb_t cdb_io
Definition: cam_ccb.h:763
struct ccb_hdr ccb_h
Definition: cam_ccb.h:750
uint8_t priority
Definition: cam_ccb.h:773
u_int8_t * data_ptr
Definition: cam_ccb.h:753
u_int32_t dxfer_len
Definition: cam_ccb.h:754
u_int8_t cdb_len
Definition: cam_ccb.h:758
u_int32_t resid
Definition: cam_ccb.h:762
Definition: scsi_da.c:409
struct scsi_inquiry_pattern inq_pat
Definition: scsi_da.c:410
da_quirks quirks
Definition: scsi_da.c:411
struct bio_queue_head delete_run_queue
Definition: scsi_da.c:342
struct cam_iosched_softc * cam_iosched
Definition: scsi_da.c:341
da_zone_flags value
Definition: scsi_da.c:268
const char * desc
Definition: scsi_da.c:269
u_int32_t secsize
Definition: ata_da.c:239
u_int8_t heads
Definition: ata_da.c:236
u_int32_t cylinders
Definition: ata_da.c:238
u_int8_t secs_per_track
Definition: ata_da.c:237
u_int stripeoffset
Definition: scsi_da.c:311
u_int64_t sectors
Definition: ata_da.c:240
u_int stripesize
Definition: scsi_da.c:310
u_int8_t opcode
Definition: scsi_da.h:72
u_int8_t interleave[2]
Definition: scsi_da.h:81
u_int8_t byte2
Definition: scsi_da.h:73
char vendor[SID_VENDOR_SIZE]
Definition: scsi_all.h:2269
char product[SID_PRODUCT_SIZE]
Definition: scsi_all.h:2271
u_int8_t data_length[2]
Definition: scsi_all.h:3652
u_int8_t blk_desc_len[2]
Definition: scsi_all.h:3658
u_int8_t dev_spec
Definition: scsi_all.h:3646
u_int8_t blk_desc_len
Definition: scsi_all.h:3647
u_int8_t data_length
Definition: scsi_all.h:3644
uint8_t alloc_length[2]
Definition: scsi_da.h:109
uint8_t address_descriptor_index[4]
Definition: scsi_da.h:149
uint8_t alloc_length[4]
Definition: scsi_da.h:150
uint8_t write_pointer_lba[8]
Definition: scsi_da.h:217
uint8_t zone_length[8]
Definition: scsi_da.h:215
uint8_t zone_start_lba[8]
Definition: scsi_da.h:216
struct scsi_report_zones_desc desc_list[]
Definition: scsi_da.h:232
uint8_t maximum_lba[8]
Definition: scsi_da.h:230
uint8_t length[4]
Definition: scsi_da.h:222
u_int8_t length[2]
Definition: scsi_all.h:1288
u_int8_t control
Definition: scsi_all.h:1289
u_int8_t addr[4]
Definition: scsi_all.h:1286
u_int8_t reserved
Definition: scsi_all.h:1287
u_int8_t opcode
Definition: scsi_all.h:1279
u_int8_t byte2
Definition: scsi_all.h:1285
u_int8_t opcode
Definition: scsi_all.h:1269
u_int8_t control
Definition: scsi_all.h:1274
u_int8_t addr[3]
Definition: scsi_all.h:1270
u_int8_t length
Definition: scsi_all.h:1273
u_int8_t opcode
Definition: scsi_da.h:116
u_int8_t byte2
Definition: scsi_da.h:117
u_int8_t length[2]
Definition: scsi_da.h:125
u_int8_t control
Definition: scsi_da.h:126
u_int8_t max_write_same_length[8]
Definition: scsi_all.h:2878
u_int8_t opt_unmap_grain[4]
Definition: scsi_all.h:2876
u_int8_t max_txfer_len[4]
Definition: scsi_all.h:2871
u_int8_t max_unmap_blk_cnt[4]
Definition: scsi_all.h:2875
u_int8_t unmap_grain_align[4]
Definition: scsi_all.h:2877
u_int8_t max_unmap_lba_cnt[4]
Definition: scsi_all.h:2874
uint8_t optimal_seq_zones[4]
Definition: scsi_all.h:2899
uint8_t max_seq_req_zones[4]
Definition: scsi_all.h:2903
uint8_t page_length[2]
Definition: scsi_all.h:2894
uint8_t optimal_nonseq_zones[4]
Definition: scsi_all.h:2901
uint8_t zone_options
Definition: scsi_da.h:178
uint8_t zone_start_lba[8]
Definition: scsi_da.h:176
uint8_t opcode
Definition: scsi_da.h:173
uint8_t service_action
Definition: scsi_da.h:174
uint8_t length[4]
Definition: scsi_da.h:177
uint8_t zone_flags
Definition: scsi_da.h:166
uint8_t zone_id[8]
Definition: scsi_da.h:164
uint8_t opcode
Definition: scsi_da.h:158
uint8_t service_action
Definition: scsi_da.h:159
Definition: cam_ccb.h:1345
struct ccb_hdr ccb_h
Definition: cam_ccb.h:1346
struct ccb_scsiio csio
Definition: cam_ccb.h:1347
u_int8_t cdb_bytes[IOCDBLEN]
Definition: cam_ccb.h:742