FreeBSD kernel /amd64 XEN device code
blkfront.c
1/*
2 * XenBSD block device driver
3 *
4 * Copyright (c) 2010-2013 Spectra Logic Corporation
5 * Copyright (c) 2009 Scott Long, Yahoo!
6 * Copyright (c) 2009 Frank Suchomel, Citrix
7 * Copyright (c) 2009 Doug F. Rabson, Citrix
8 * Copyright (c) 2005 Kip Macy
9 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
10 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
11 *
12 *
13 * Permission is hereby granted, free of charge, to any person obtaining a copy
14 * of this software and associated documentation files (the "Software"), to
15 * deal in the Software without restriction, including without limitation the
16 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
17 * sell copies of the Software, and to permit persons to whom the Software is
18 * furnished to do so, subject to the following conditions:
19 *
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
28 * DEALINGS IN THE SOFTWARE.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD$");
33
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/malloc.h>
37#include <sys/kernel.h>
38#include <vm/vm.h>
39#include <vm/pmap.h>
40
41#include <sys/bio.h>
42#include <sys/bus.h>
43#include <sys/conf.h>
44#include <sys/module.h>
45#include <sys/sysctl.h>
46
47#include <machine/bus.h>
48#include <sys/rman.h>
49#include <machine/resource.h>
50#include <machine/vmparam.h>
51
52#include <xen/xen-os.h>
53#include <xen/hypervisor.h>
54#include <xen/xen_intr.h>
55#include <xen/gnttab.h>
56#include <contrib/xen/grant_table.h>
57#include <contrib/xen/io/protocols.h>
58#include <xen/xenbus/xenbusvar.h>
59
60#include <machine/_inttypes.h>
61
62#include <geom/geom_disk.h>
63
64#include <dev/xen/blkfront/block.h>
65
66#include "xenbus_if.h"
67
68/*--------------------------- Forward Declarations ---------------------------*/
69static void xbd_closing(device_t);
70static void xbd_startio(struct xbd_softc *sc);
71
72/*---------------------------------- Macros ----------------------------------*/
73#if 0
74#define DPRINTK(fmt, args...) printf("[XEN] %s:%d: " fmt ".\n", __func__, __LINE__, ##args)
75#else
76#define DPRINTK(fmt, args...)
77#endif
78
79#define XBD_SECTOR_SHFT 9
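/* 1 << XBD_SECTOR_SHFT == 512: the sector size assumed by the segment code below. */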
80
81/*---------------------------- Global Static Data ----------------------------*/
82static MALLOC_DEFINE(M_XENBLOCKFRONT, "xbd", "Xen Block Front driver data");
83
84static int xbd_enable_indirect = 1;
85SYSCTL_NODE(_hw, OID_AUTO, xbd, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
86 "xbd driver parameters");
87SYSCTL_INT(_hw_xbd, OID_AUTO, xbd_enable_indirect, CTLFLAG_RDTUN,
88 &xbd_enable_indirect, 0, "Enable xbd indirect segments");
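/*
 * xbd_enable_indirect is exposed read-only as hw.xbd.xbd_enable_indirect and,
 * because of CTLFLAG_RDTUN, can be preset as a loader tunable, e.g. in
 * loader.conf:
 *
 *     hw.xbd.xbd_enable_indirect=0
 */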
89
90/*---------------------------- Command Processing ----------------------------*/
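/*
 * Queue freeze/thaw: xbd_freeze() bumps xbd_qfrozen_cnt and records an
 * XBDF_* reason flag so the same condition is only counted once;
 * xbd_startio() stops dispatching while the count is non-zero, and
 * xbd_thaw() drops the count once the recorded condition clears.
 */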
91static void
92xbd_freeze(struct xbd_softc *sc, xbd_flag_t xbd_flag)
93{
94 if (xbd_flag != XBDF_NONE && (sc->xbd_flags & xbd_flag) != 0)
95 return;
96
97 sc->xbd_flags |= xbd_flag;
98 sc->xbd_qfrozen_cnt++;
99}
100
101static void
102xbd_thaw(struct xbd_softc *sc, xbd_flag_t xbd_flag)
103{
104 if (xbd_flag != XBDF_NONE && (sc->xbd_flags & xbd_flag) == 0)
105 return;
106
107 if (sc->xbd_qfrozen_cnt == 0)
108 panic("%s: Thaw with flag 0x%x while not frozen.",
109 __func__, xbd_flag);
110
111 sc->xbd_flags &= ~xbd_flag;
112 sc->xbd_qfrozen_cnt--;
113}
114
115static void
116xbd_cm_freeze(struct xbd_softc *sc, struct xbd_command *cm, xbdc_flag_t cm_flag)
117{
118 if ((cm->cm_flags & XBDCF_FROZEN) != 0)
119 return;
120
121 cm->cm_flags |= XBDCF_FROZEN|cm_flag;
122 xbd_freeze(sc, XBDF_NONE);
123}
124
125static void
126xbd_cm_thaw(struct xbd_softc *sc, struct xbd_command *cm)
127{
128 if ((cm->cm_flags & XBDCF_FROZEN) == 0)
129 return;
130
131 cm->cm_flags &= ~XBDCF_FROZEN;
132 xbd_thaw(sc, XBDF_NONE);
133}
134
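/*
 * Publish any requests produced on the shared ring and signal the event
 * channel only when the backend has asked to be notified.
 */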
135static inline void
136xbd_flush_requests(struct xbd_softc *sc)
137{
138 int notify;
139
140 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->xbd_ring, notify);
141
142 if (notify)
143 xen_intr_signal(sc->xen_intr_handle);
144}
145
146static void
147xbd_free_command(struct xbd_command *cm)
148{
149
150 KASSERT((cm->cm_flags & XBDCF_Q_MASK) == XBD_Q_NONE,
151 ("Freeing command that is still on queue %d.",
152 cm->cm_flags & XBDCF_Q_MASK));
153
154 cm->cm_flags = XBDCF_INITIALIZER;
155 cm->cm_bp = NULL;
156 cm->cm_complete = NULL;
157 xbd_enqueue_cm(cm, XBD_Q_FREE);
158 xbd_thaw(cm->cm_sc, XBDF_CM_SHORTAGE);
159}
160
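/*
 * Convert a bus_dma segment list into blkif request segments: each page is
 * granted to the backend (read-only when the guest is writing) and described
 * by its grant reference plus the first/last 512-byte sector within the page.
 */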
161static void
162xbd_mksegarray(bus_dma_segment_t *segs, int nsegs,
163 grant_ref_t * gref_head, int otherend_id, int readonly,
164 grant_ref_t * sg_ref, struct blkif_request_segment *sg)
165{
166 struct blkif_request_segment *last_block_sg = sg + nsegs;
167 vm_paddr_t buffer_ma;
168 uint64_t fsect, lsect;
169 int ref;
170
171 while (sg < last_block_sg) {
172 KASSERT(segs->ds_addr % (1 << XBD_SECTOR_SHFT) == 0,
173 ("XEN disk driver I/O must be sector aligned"));
174 KASSERT(segs->ds_len % (1 << XBD_SECTOR_SHFT) == 0,
175 ("XEN disk driver I/Os must be a multiple of "
176 "the sector length"));
177 buffer_ma = segs->ds_addr;
178 fsect = (buffer_ma & PAGE_MASK) >> XBD_SECTOR_SHFT;
179 lsect = fsect + (segs->ds_len >> XBD_SECTOR_SHFT) - 1;
180
181 KASSERT(lsect <= 7, ("XEN disk driver data cannot "
182 "cross a page boundary"));
183
184 /* install a grant reference. */
185 ref = gnttab_claim_grant_reference(gref_head);
186
187 /*
188 * GNTTAB_LIST_END == 0xffffffff, but it is private
189 * to gnttab.c.
190 */
191 KASSERT(ref != ~0, ("grant_reference failed"));
192
193 gnttab_grant_foreign_access_ref(
194 ref,
195 otherend_id,
196 buffer_ma >> PAGE_SHIFT,
197 readonly);
198
199 *sg_ref = ref;
200 *sg = (struct blkif_request_segment) {
201 .gref = ref,
202 .first_sect = fsect,
203 .last_sect = lsect
204 };
205 sg++;
206 sg_ref++;
207 segs++;
208 }
209}
210
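/*
 * bus_dma callback that turns a mapped command into a ring request: a plain
 * blkif_request_t when the segment count fits in a single ring slot, or a
 * BLKIF_OP_INDIRECT request whose segments live in pre-granted indirect
 * pages otherwise.
 */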
211static void
212xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
213{
214 struct xbd_softc *sc;
215 struct xbd_command *cm;
216 int op;
217
218 cm = arg;
219 sc = cm->cm_sc;
220
221 if (error) {
222 cm->cm_bp->bio_error = EIO;
223 biodone(cm->cm_bp);
224 xbd_free_command(cm);
225 return;
226 }
227
228 KASSERT(nsegs <= sc->xbd_max_request_segments,
229 ("Too many segments in a blkfront I/O"));
230
231 if (nsegs <= BLKIF_MAX_SEGMENTS_PER_REQUEST) {
232 blkif_request_t *ring_req;
233
234 /* Fill out a blkif_request_t structure. */
235 ring_req = (blkif_request_t *)
236 RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt);
237 sc->xbd_ring.req_prod_pvt++;
238 ring_req->id = cm->cm_id;
239 ring_req->operation = cm->cm_operation;
240 ring_req->sector_number = cm->cm_sector_number;
241 ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk;
242 ring_req->nr_segments = nsegs;
243 cm->cm_nseg = nsegs;
244 xbd_mksegarray(segs, nsegs, &cm->cm_gref_head,
245 xenbus_get_otherend_id(sc->xbd_dev),
246 cm->cm_operation == BLKIF_OP_WRITE,
247 cm->cm_sg_refs, ring_req->seg);
248 } else {
249 blkif_request_indirect_t *ring_req;
250
251 /* Fill out a blkif_request_indirect_t structure. */
252 ring_req = (blkif_request_indirect_t *)
253 RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt);
254 sc->xbd_ring.req_prod_pvt++;
255 ring_req->id = cm->cm_id;
256 ring_req->operation = BLKIF_OP_INDIRECT;
257 ring_req->indirect_op = cm->cm_operation;
258 ring_req->sector_number = cm->cm_sector_number;
259 ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk;
260 ring_req->nr_segments = nsegs;
261 cm->cm_nseg = nsegs;
262 xbd_mksegarray(segs, nsegs, &cm->cm_gref_head,
263 xenbus_get_otherend_id(sc->xbd_dev),
264 cm->cm_operation == BLKIF_OP_WRITE,
265 cm->cm_sg_refs, cm->cm_indirectionpages);
266 memcpy(ring_req->indirect_grefs, &cm->cm_indirectionrefs,
267 sizeof(grant_ref_t) * sc->xbd_max_request_indirectpages);
268 }
269
270 if (cm->cm_operation == BLKIF_OP_READ)
271 op = BUS_DMASYNC_PREREAD;
272 else if (cm->cm_operation == BLKIF_OP_WRITE)
273 op = BUS_DMASYNC_PREWRITE;
274 else
275 op = 0;
276 bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op);
277
278 gnttab_free_grant_references(cm->cm_gref_head);
279
280 xbd_enqueue_cm(cm, XBD_Q_BUSY);
281
282 /*
283 * If bus dma had to asynchronously call us back to dispatch
284 * this command, we are no longer executing in the context of
285 * xbd_startio(). Thus we cannot rely on xbd_startio()'s call to
286 * xbd_flush_requests() to publish this command to the backend
287 * along with any other commands that it could batch.
288 */
289 if ((cm->cm_flags & XBDCF_ASYNC_MAPPING) != 0)
290 xbd_flush_requests(sc);
291
292 return;
293}
294
295static int
296xbd_queue_request(struct xbd_softc *sc, struct xbd_command *cm)
297{
298 int error;
299
300 if (cm->cm_bp != NULL)
301 error = bus_dmamap_load_bio(sc->xbd_io_dmat, cm->cm_map,
302 cm->cm_bp, xbd_queue_cb, cm, 0);
303 else
304 error = bus_dmamap_load(sc->xbd_io_dmat, cm->cm_map,
305 cm->cm_data, cm->cm_datalen, xbd_queue_cb, cm, 0);
306 if (error == EINPROGRESS) {
307 /*
308 * Maintain queuing order by freezing the queue. The next
309 * command may not require as many resources as the command
310 * we just attempted to map, so we can't rely on bus dma
311 * blocking for it too.
312 */
313 xbd_cm_freeze(sc, cm, XBDCF_ASYNC_MAPPING);
314 return (0);
315 }
316
317 return (error);
318}
319
320static void
321xbd_restart_queue_callback(void *arg)
322{
323 struct xbd_softc *sc = arg;
324
325 mtx_lock(&sc->xbd_io_lock);
326
327 xbd_thaw(sc, XBDF_GNT_SHORTAGE);
328
329 xbd_startio(sc);
330
331 mtx_unlock(&sc->xbd_io_lock);
332}
333
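/*
 * Pull the next bio off the disk queue and pair it with a free command,
 * freezing the queue (command or grant shortage, ordered writes) whenever a
 * needed resource is not yet available.
 */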
334static struct xbd_command *
335xbd_bio_command(struct xbd_softc *sc)
336{
337 struct xbd_command *cm;
338 struct bio *bp;
339
340 if (__predict_false(sc->xbd_state != XBD_STATE_CONNECTED))
341 return (NULL);
342
343 bp = xbd_dequeue_bio(sc);
344 if (bp == NULL)
345 return (NULL);
346
347 if ((cm = xbd_dequeue_cm(sc, XBD_Q_FREE)) == NULL) {
348 xbd_freeze(sc, XBDF_CM_SHORTAGE);
349 xbd_requeue_bio(sc, bp);
350 return (NULL);
351 }
352
353 if (gnttab_alloc_grant_references(sc->xbd_max_request_segments,
354 &cm->cm_gref_head) != 0) {
355 gnttab_request_free_callback(&sc->xbd_callback,
356 xbd_restart_queue_callback, sc,
357 sc->xbd_max_request_segments);
358 xbd_freeze(sc, XBDF_GNT_SHORTAGE);
359 xbd_requeue_bio(sc, bp);
360 xbd_enqueue_cm(cm, XBD_Q_FREE);
361 return (NULL);
362 }
363
364 cm->cm_bp = bp;
365 cm->cm_sector_number = (blkif_sector_t)bp->bio_pblkno;
366
367 switch (bp->bio_cmd) {
368 case BIO_READ:
369 cm->cm_operation = BLKIF_OP_READ;
370 break;
371 case BIO_WRITE:
372 cm->cm_operation = BLKIF_OP_WRITE;
373 if ((bp->bio_flags & BIO_ORDERED) != 0) {
374 if ((sc->xbd_flags & XBDF_BARRIER) != 0) {
375 cm->cm_operation = BLKIF_OP_WRITE_BARRIER;
376 } else {
377 /*
378 * Single step this command.
379 */
380 cm->cm_flags |= XBDCF_Q_FREEZE;
381 if (xbd_queue_length(sc, XBD_Q_BUSY) != 0) {
382 /*
383 * Wait for in-flight requests to
384 * finish.
385 */
386 xbd_freeze(sc, XBDF_WAIT_IDLE);
387 xbd_requeue_cm(cm, XBD_Q_READY);
388 return (NULL);
389 }
390 }
391 }
392 break;
393 case BIO_FLUSH:
394 if ((sc->xbd_flags & XBDF_FLUSH) != 0)
395 cm->cm_operation = BLKIF_OP_FLUSH_DISKCACHE;
396 else if ((sc->xbd_flags & XBDF_BARRIER) != 0)
397 cm->cm_operation = BLKIF_OP_WRITE_BARRIER;
398 else
399 panic("flush request, but no flush support available");
400 break;
401 default:
402 biofinish(bp, NULL, EOPNOTSUPP);
403 xbd_free_command(cm);
404 return (NULL);
405 }
406
407 return (cm);
408}
409
410/*
411 * Dequeue buffers and place them in the shared communication ring.
412 * Return when no more requests can be accepted or all buffers have
413 * been queued.
414 *
415 * Signal XEN once the ring has been filled out.
416 */
417static void
418xbd_startio(struct xbd_softc *sc)
419{
420 struct xbd_command *cm;
421 int error, queued = 0;
422
423 mtx_assert(&sc->xbd_io_lock, MA_OWNED);
424
424
425 if (sc->xbd_state != XBD_STATE_CONNECTED)
426 return;
427
428 while (!RING_FULL(&sc->xbd_ring)) {
429 if (sc->xbd_qfrozen_cnt != 0)
430 break;
431
432 cm = xbd_dequeue_cm(sc, XBD_Q_READY);
433
434 if (cm == NULL)
435 cm = xbd_bio_command(sc);
436
437 if (cm == NULL)
438 break;
439
440 if ((cm->cm_flags & XBDCF_Q_FREEZE) != 0) {
441 /*
442 * Single step command. Future work is
443 * held off until this command completes.
444 */
445 xbd_cm_freeze(sc, cm, XBDCF_Q_FREEZE);
446 }
447
448 if ((error = xbd_queue_request(sc, cm)) != 0) {
449 printf("xbd_queue_request returned %d\n", error);
450 break;
451 }
452 queued++;
453 }
454
455 if (queued != 0)
456 xbd_flush_requests(sc);
457}
458
459static void
460xbd_bio_complete(struct xbd_softc *sc, struct xbd_command *cm)
461{
462 struct bio *bp;
463
464 bp = cm->cm_bp;
465
466 if (__predict_false(cm->cm_status != BLKIF_RSP_OKAY)) {
467 disk_err(bp, "disk error" , -1, 0);
468 printf(" status: %x\n", cm->cm_status);
469 bp->bio_flags |= BIO_ERROR;
470 }
471
472 if (bp->bio_flags & BIO_ERROR)
473 bp->bio_error = EIO;
474 else
475 bp->bio_resid = 0;
476
477 xbd_free_command(cm);
478 biodone(bp);
479}
480
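/*
 * Interrupt handler: drain completed responses from the ring, release the
 * per-command grants and DMA mappings, finish the associated bios, and then
 * try to dispatch more work.
 */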
481static void
482xbd_int(void *xsc)
483{
484 struct xbd_softc *sc = xsc;
485 struct xbd_command *cm;
486 blkif_response_t *bret;
487 RING_IDX i, rp;
488 int op;
489
490 mtx_lock(&sc->xbd_io_lock);
491
492 if (__predict_false(sc->xbd_state == XBD_STATE_DISCONNECTED)) {
493 mtx_unlock(&sc->xbd_io_lock);
494 return;
495 }
496
497 again:
498 rp = sc->xbd_ring.sring->rsp_prod;
499 rmb(); /* Ensure we see queued responses up to 'rp'. */
500
501 for (i = sc->xbd_ring.rsp_cons; i != rp;) {
502 bret = RING_GET_RESPONSE(&sc->xbd_ring, i);
503 cm = &sc->xbd_shadow[bret->id];
504
505 xbd_remove_cm(cm, XBD_Q_BUSY);
506 gnttab_end_foreign_access_references(cm->cm_nseg,
507 cm->cm_sg_refs);
508 i++;
509
510 if (cm->cm_operation == BLKIF_OP_READ)
511 op = BUS_DMASYNC_POSTREAD;
512 else if (cm->cm_operation == BLKIF_OP_WRITE ||
513 cm->cm_operation == BLKIF_OP_WRITE_BARRIER)
514 op = BUS_DMASYNC_POSTWRITE;
515 else
516 op = 0;
517 bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op);
518 bus_dmamap_unload(sc->xbd_io_dmat, cm->cm_map);
519
520 /*
521 * Release any hold this command has on future command
522 * dispatch.
523 */
524 xbd_cm_thaw(sc, cm);
525
526 /*
527 * Directly call the i/o complete routine to save an
528 * indirection in the common case.
529 */
530 cm->cm_status = bret->status;
531 if (cm->cm_bp)
532 xbd_bio_complete(sc, cm);
533 else if (cm->cm_complete != NULL)
534 cm->cm_complete(cm);
535 else
536 xbd_free_command(cm);
537 }
538
539 sc->xbd_ring.rsp_cons = i;
540
541 if (i != sc->xbd_ring.req_prod_pvt) {
542 int more_to_do;
543 RING_FINAL_CHECK_FOR_RESPONSES(&sc->xbd_ring, more_to_do);
544 if (more_to_do)
545 goto again;
546 } else {
547 sc->xbd_ring.sring->rsp_event = i + 1;
548 }
549
550 if (xbd_queue_length(sc, XBD_Q_BUSY) == 0)
551 xbd_thaw(sc, XBDF_WAIT_IDLE);
552
553 xbd_startio(sc);
554
555 if (__predict_false(sc->xbd_state == XBD_STATE_SUSPENDED))
556 wakeup(&sc->xbd_cm_q[XBD_Q_BUSY]);
557
558 mtx_unlock(&sc->xbd_io_lock);
559}
560
561/*------------------------------- Dump Support -------------------------------*/
565static void
566xbd_quiesce(struct xbd_softc *sc)
567{
568 int mtd;
569
570 // While there are outstanding requests
571 while (xbd_queue_length(sc, XBD_Q_BUSY) != 0) {
572 RING_FINAL_CHECK_FOR_RESPONSES(&sc->xbd_ring, mtd);
573 if (mtd) {
574 /* Received request completions, update queue. */
575 xbd_int(sc);
576 }
577 if (xbd_queue_length(sc, XBD_Q_BUSY) != 0) {
578 /*
579 * Still pending requests, wait for the disk i/o
580 * to complete.
581 */
582 HYPERVISOR_yield();
583 }
584 }
585}
586
587/* Kernel dump function for a paravirtualized disk device */
588static void
589xbd_dump_complete(struct xbd_command *cm)
590{
591
592 xbd_enqueue_cm(cm, XBD_Q_COMPLETE);
593}
594
595static int
596xbd_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
597 size_t length)
598{
599 struct disk *dp = arg;
600 struct xbd_softc *sc = dp->d_drv1;
601 struct xbd_command *cm;
602 size_t chunk;
603 int sbp;
604 int rc = 0;
605
606 if (length == 0)
607 return (0);
608
609 xbd_quiesce(sc); /* All quiet on the western front. */
610
611 /*
612 * If this lock is held, then this module is failing, and a
613 * successful kernel dump is highly unlikely anyway.
614 */
615 mtx_lock(&sc->xbd_io_lock);
616
617 /* Split the 64KB block as needed */
618 for (sbp=0; length > 0; sbp++) {
619 cm = xbd_dequeue_cm(sc, XBD_Q_FREE);
620 if (cm == NULL) {
621 mtx_unlock(&sc->xbd_io_lock);
622 device_printf(sc->xbd_dev, "dump: no more commands?\n");
623 return (EBUSY);
624 }
625
626 if (gnttab_alloc_grant_references(sc->xbd_max_request_segments,
627 &cm->cm_gref_head) != 0) {
628 xbd_free_command(cm);
629 mtx_unlock(&sc->xbd_io_lock);
630 device_printf(sc->xbd_dev, "no more grant allocs?\n");
631 return (EBUSY);
632 }
633
634 chunk = length > sc->xbd_max_request_size ?
635 sc->xbd_max_request_size : length;
636 cm->cm_data = virtual;
637 cm->cm_datalen = chunk;
638 cm->cm_operation = BLKIF_OP_WRITE;
639 cm->cm_sector_number = offset / dp->d_sectorsize;
640 cm->cm_complete = xbd_dump_complete;
641
642 xbd_enqueue_cm(cm, XBD_Q_READY);
643
644 length -= chunk;
645 offset += chunk;
646 virtual = (char *) virtual + chunk;
647 }
648
649 /* Tell DOM0 to do the I/O */
650 xbd_startio(sc);
651 mtx_unlock(&sc->xbd_io_lock);
652
653 /* Poll for the completion. */
654 xbd_quiesce(sc); /* All quiet on the eastern front */
655
656 /* If there were any errors, bail out... */
657 while ((cm = xbd_dequeue_cm(sc, XBD_Q_COMPLETE)) != NULL) {
658 if (cm->cm_status != BLKIF_RSP_OKAY) {
659 device_printf(sc->xbd_dev,
660 "Dump I/O failed at sector %jd\n",
661 cm->cm_sector_number);
662 rc = EIO;
663 }
664 xbd_free_command(cm);
665 }
666
667 return (rc);
668}
669
670/*----------------------------- Disk Entrypoints -----------------------------*/
671static int
672xbd_open(struct disk *dp)
673{
674 struct xbd_softc *sc = dp->d_drv1;
675
676 if (sc == NULL) {
677 printf("xbd%d: not found", dp->d_unit);
678 return (ENXIO);
679 }
680
681 sc->xbd_flags |= XBDF_OPEN;
682 sc->xbd_users++;
683 return (0);
684}
685
686static int
687xbd_close(struct disk *dp)
688{
689 struct xbd_softc *sc = dp->d_drv1;
690
691 if (sc == NULL)
692 return (ENXIO);
693 sc->xbd_flags &= ~XBDF_OPEN;
694 if (--(sc->xbd_users) == 0) {
695 /*
696 * Check whether we have been instructed to close. We will
697 * have ignored this request initially, as the device was
698 * still mounted.
699 */
700 if (xenbus_get_otherend_state(sc->xbd_dev) ==
701 XenbusStateClosing)
702 xbd_closing(sc->xbd_dev);
703 }
704 return (0);
705}
706
707static int
708xbd_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td)
709{
710 struct xbd_softc *sc = dp->d_drv1;
711
712 if (sc == NULL)
713 return (ENXIO);
714
715 return (ENOTTY);
716}
717
718/*
719 * Read/write routine for a buffer. Finds the proper unit, places it on
720 * the sort queue, and kicks the controller.
721 */
722static void
723xbd_strategy(struct bio *bp)
724{
725 struct xbd_softc *sc = bp->bio_disk->d_drv1;
726
727 /* bogus disk? */
728 if (sc == NULL) {
729 bp->bio_error = EINVAL;
730 bp->bio_flags |= BIO_ERROR;
731 bp->bio_resid = bp->bio_bcount;
732 biodone(bp);
733 return;
734 }
735
736 /*
737 * Place it in the queue of disk activities for this disk
738 */
739 mtx_lock(&sc->xbd_io_lock);
740
741 xbd_enqueue_bio(sc, bp);
742 xbd_startio(sc);
743
744 mtx_unlock(&sc->xbd_io_lock);
745 return;
746}
747
748/*------------------------------ Ring Management -----------------------------*/
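/*
 * Allocate the shared ring, grant each of its pages to the backend, publish
 * the grant references ("ring-ref" or "ring-ref%u") in the XenStore, and bind
 * the event channel used to signal the backend.
 */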
749static int
750xbd_alloc_ring(struct xbd_softc *sc)
751{
752 blkif_sring_t *sring;
753 uintptr_t sring_page_addr;
754 int error;
755 int i;
756
757 sring = malloc(sc->xbd_ring_pages * PAGE_SIZE, M_XENBLOCKFRONT,
758 M_NOWAIT|M_ZERO);
759 if (sring == NULL) {
760 xenbus_dev_fatal(sc->xbd_dev, ENOMEM, "allocating shared ring");
761 return (ENOMEM);
762 }
763 SHARED_RING_INIT(sring);
764 FRONT_RING_INIT(&sc->xbd_ring, sring, sc->xbd_ring_pages * PAGE_SIZE);
765
766 for (i = 0, sring_page_addr = (uintptr_t)sring;
767 i < sc->xbd_ring_pages;
768 i++, sring_page_addr += PAGE_SIZE) {
769 error = xenbus_grant_ring(sc->xbd_dev,
770 (vtophys(sring_page_addr) >> PAGE_SHIFT),
771 &sc->xbd_ring_ref[i]);
772 if (error) {
773 xenbus_dev_fatal(sc->xbd_dev, error,
774 "granting ring_ref(%d)", i);
775 return (error);
776 }
777 }
778 if (sc->xbd_ring_pages == 1) {
779 error = xs_printf(XST_NIL, xenbus_get_node(sc->xbd_dev),
780 "ring-ref", "%u", sc->xbd_ring_ref[0]);
781 if (error) {
782 xenbus_dev_fatal(sc->xbd_dev, error,
783 "writing %s/ring-ref",
784 xenbus_get_node(sc->xbd_dev));
785 return (error);
786 }
787 } else {
788 for (i = 0; i < sc->xbd_ring_pages; i++) {
789 char ring_ref_name[]= "ring_refXX";
790
791 snprintf(ring_ref_name, sizeof(ring_ref_name),
792 "ring-ref%u", i);
793 error = xs_printf(XST_NIL, xenbus_get_node(sc->xbd_dev),
794 ring_ref_name, "%u", sc->xbd_ring_ref[i]);
795 if (error) {
796 xenbus_dev_fatal(sc->xbd_dev, error,
797 "writing %s/%s",
798 xenbus_get_node(sc->xbd_dev),
799 ring_ref_name);
800 return (error);
801 }
802 }
803 }
804
805 error = xen_intr_alloc_and_bind_local_port(sc->xbd_dev,
806 xenbus_get_otherend_id(sc->xbd_dev), NULL, xbd_int, sc,
807 INTR_TYPE_BIO | INTR_MPSAFE, &sc->xen_intr_handle);
808 if (error) {
809 xenbus_dev_fatal(sc->xbd_dev, error,
810 "xen_intr_alloc_and_bind_local_port failed");
811 return (error);
812 }
813
814 return (0);
815}
816
817static void
818xbd_free_ring(struct xbd_softc *sc)
819{
820 int i;
821
822 if (sc->xbd_ring.sring == NULL)
823 return;
824
825 for (i = 0; i < sc->xbd_ring_pages; i++) {
826 if (sc->xbd_ring_ref[i] != GRANT_REF_INVALID) {
827 gnttab_end_foreign_access_ref(sc->xbd_ring_ref[i]);
828 sc->xbd_ring_ref[i] = GRANT_REF_INVALID;
829 }
830 }
831 free(sc->xbd_ring.sring, M_XENBLOCKFRONT);
832 sc->xbd_ring.sring = NULL;
833}
834
835/*-------------------------- Initialization/Teardown -------------------------*/
836static int
837xbd_feature_string(struct xbd_softc *sc, char *features, size_t len)
838{
839 struct sbuf sb;
840 int feature_cnt;
841
842 sbuf_new(&sb, features, len, SBUF_FIXEDLEN);
843
844 feature_cnt = 0;
845 if ((sc->xbd_flags & XBDF_FLUSH) != 0) {
846 sbuf_printf(&sb, "flush");
847 feature_cnt++;
848 }
849
850 if ((sc->xbd_flags & XBDF_BARRIER) != 0) {
851 if (feature_cnt != 0)
852 sbuf_printf(&sb, ", ");
853 sbuf_printf(&sb, "write_barrier");
854 feature_cnt++;
855 }
856
857 if ((sc->xbd_flags & XBDF_DISCARD) != 0) {
858 if (feature_cnt != 0)
859 sbuf_printf(&sb, ", ");
860 sbuf_printf(&sb, "discard");
861 feature_cnt++;
862 }
863
864 if ((sc->xbd_flags & XBDF_PERSISTENT) != 0) {
865 if (feature_cnt != 0)
866 sbuf_printf(&sb, ", ");
867 sbuf_printf(&sb, "persistent_grants");
868 feature_cnt++;
869 }
870
871 (void) sbuf_finish(&sb);
872 return (sbuf_len(&sb));
873}
874
875static int
876xbd_sysctl_features(SYSCTL_HANDLER_ARGS)
877{
878 char features[80];
879 struct xbd_softc *sc = arg1;
880 int error;
881 int len;
882
883 error = sysctl_wire_old_buffer(req, 0);
884 if (error != 0)
885 return (error);
886
887 len = xbd_feature_string(sc, features, sizeof(features));
888
889 /* len is -1 on error, which will make the SYSCTL_OUT a no-op. */
890 return (SYSCTL_OUT(req, features, len + 1/*NUL*/));
891}
892
893static void
894xbd_setup_sysctl(struct xbd_softc *xbd)
895{
896 struct sysctl_ctx_list *sysctl_ctx = NULL;
897 struct sysctl_oid *sysctl_tree = NULL;
898 struct sysctl_oid_list *children;
899
900 sysctl_ctx = device_get_sysctl_ctx(xbd->xbd_dev);
901 if (sysctl_ctx == NULL)
902 return;
903
904 sysctl_tree = device_get_sysctl_tree(xbd->xbd_dev);
905 if (sysctl_tree == NULL)
906 return;
907
908 children = SYSCTL_CHILDREN(sysctl_tree);
909 SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO,
910 "max_requests", CTLFLAG_RD, &xbd->xbd_max_requests, -1,
911 "maximum outstanding requests (negotiated)");
912
913 SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO,
914 "max_request_segments", CTLFLAG_RD,
916 "maximum number of pages per requests (negotiated)");
917
918 SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO,
919 "max_request_size", CTLFLAG_RD, &xbd->xbd_max_request_size, 0,
920 "maximum size in bytes of a request (negotiated)");
921
922 SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO,
923 "ring_pages", CTLFLAG_RD, &xbd->xbd_ring_pages, 0,
924 "communication channel pages (negotiated)");
925
926 SYSCTL_ADD_PROC(sysctl_ctx, children, OID_AUTO,
927 "features", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, xbd,
928 0, xbd_sysctl_features, "A", "protocol features (negotiated)");
929}
930
931/*
932 * Translate Linux major/minor to an appropriate name and unit
933 * number. For HVM guests, this allows us to use the same drive names
934 * with blkfront as the emulated drives, easing transition slightly.
935 */
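/*
 * For example, using the table below: Linux major 8, minor 16 ("sdb",
 * vdevice 0x810) maps to da1, while the Xen virtual-disk major 202 with
 * minor 16 ("xvdb") maps to xbd1.
 */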
936static void
937xbd_vdevice_to_unit(uint32_t vdevice, int *unit, const char **name)
938{
939 static struct vdev_info {
940 int major;
941 int shift;
942 int base;
943 const char *name;
944 } info[] = {
945 {3, 6, 0, "ada"}, /* ide0 */
946 {22, 6, 2, "ada"}, /* ide1 */
947 {33, 6, 4, "ada"}, /* ide2 */
948 {34, 6, 6, "ada"}, /* ide3 */
949 {56, 6, 8, "ada"}, /* ide4 */
950 {57, 6, 10, "ada"}, /* ide5 */
951 {88, 6, 12, "ada"}, /* ide6 */
952 {89, 6, 14, "ada"}, /* ide7 */
953 {90, 6, 16, "ada"}, /* ide8 */
954 {91, 6, 18, "ada"}, /* ide9 */
955
956 {8, 4, 0, "da"}, /* scsi disk0 */
957 {65, 4, 16, "da"}, /* scsi disk1 */
958 {66, 4, 32, "da"}, /* scsi disk2 */
959 {67, 4, 48, "da"}, /* scsi disk3 */
960 {68, 4, 64, "da"}, /* scsi disk4 */
961 {69, 4, 80, "da"}, /* scsi disk5 */
962 {70, 4, 96, "da"}, /* scsi disk6 */
963 {71, 4, 112, "da"}, /* scsi disk7 */
964 {128, 4, 128, "da"}, /* scsi disk8 */
965 {129, 4, 144, "da"}, /* scsi disk9 */
966 {130, 4, 160, "da"}, /* scsi disk10 */
967 {131, 4, 176, "da"}, /* scsi disk11 */
968 {132, 4, 192, "da"}, /* scsi disk12 */
969 {133, 4, 208, "da"}, /* scsi disk13 */
970 {134, 4, 224, "da"}, /* scsi disk14 */
971 {135, 4, 240, "da"}, /* scsi disk15 */
972
973 {202, 4, 0, "xbd"}, /* xbd */
974
975 {0, 0, 0, NULL},
976 };
977 int major = vdevice >> 8;
978 int minor = vdevice & 0xff;
979 int i;
980
981 if (vdevice & (1 << 28)) {
982 *unit = (vdevice & ((1 << 28) - 1)) >> 8;
983 *name = "xbd";
984 return;
985 }
986
987 for (i = 0; info[i].major; i++) {
988 if (info[i].major == major) {
989 *unit = info[i].base + (minor >> info[i].shift);
990 *name = info[i].name;
991 return;
992 }
993 }
994
995 *unit = minor >> 4;
996 *name = "xbd";
997}
998
999int
1000xbd_instance_create(struct xbd_softc *sc, blkif_sector_t sectors,
1001 int vdevice, uint16_t vdisk_info, unsigned long sector_size,
1002 unsigned long phys_sector_size)
1003{
1004 char features[80];
1005 int unit, error = 0;
1006 const char *name;
1007
1008 xbd_vdevice_to_unit(vdevice, &unit, &name);
1009
1010 sc->xbd_unit = unit;
1011
1012 if (strcmp(name, "xbd") != 0)
1013 device_printf(sc->xbd_dev, "attaching as %s%d\n", name, unit);
1014
1015 if (xbd_feature_string(sc, features, sizeof(features)) > 0) {
1016 device_printf(sc->xbd_dev, "features: %s\n",
1017 features);
1018 }
1019
1020 sc->xbd_disk = disk_alloc();
1021 sc->xbd_disk->d_unit = sc->xbd_unit;
1022 sc->xbd_disk->d_open = xbd_open;
1023 sc->xbd_disk->d_close = xbd_close;
1024 sc->xbd_disk->d_ioctl = xbd_ioctl;
1025 sc->xbd_disk->d_strategy = xbd_strategy;
1026 sc->xbd_disk->d_dump = xbd_dump;
1027 sc->xbd_disk->d_name = name;
1028 sc->xbd_disk->d_drv1 = sc;
1029 sc->xbd_disk->d_sectorsize = sector_size;
1030 sc->xbd_disk->d_stripesize = phys_sector_size;
1031 sc->xbd_disk->d_stripeoffset = 0;
1032
1033 sc->xbd_disk->d_mediasize = sectors * sector_size;
1034 sc->xbd_disk->d_maxsize = sc->xbd_max_request_size;
1035 sc->xbd_disk->d_flags = DISKFLAG_UNMAPPED_BIO;
1036 if ((sc->xbd_flags & (XBDF_FLUSH|XBDF_BARRIER)) != 0) {
1037 sc->xbd_disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
1038 device_printf(sc->xbd_dev,
1039 "synchronize cache commands enabled.\n");
1040 }
1041 disk_create(sc->xbd_disk, DISK_VERSION);
1042
1043 return error;
1044}
1045
1046static void
1047xbd_free(struct xbd_softc *sc)
1048{
1049 int i;
1050
1051 /* Prevent new requests being issued until we fix things up. */
1052 mtx_lock(&sc->xbd_io_lock);
1053 sc->xbd_state = XBD_STATE_DISCONNECTED;
1054 mtx_unlock(&sc->xbd_io_lock);
1055
1056 /* Free resources associated with old device channel. */
1057 xbd_free_ring(sc);
1058 if (sc->xbd_shadow) {
1059 for (i = 0; i < sc->xbd_max_requests; i++) {
1060 struct xbd_command *cm;
1061
1062 cm = &sc->xbd_shadow[i];
1063 if (cm->cm_sg_refs != NULL) {
1064 free(cm->cm_sg_refs, M_XENBLOCKFRONT);
1065 cm->cm_sg_refs = NULL;
1066 }
1067
1068 if (cm->cm_indirectionpages != NULL) {
1069 gnttab_end_foreign_access_references(
1070 sc->xbd_max_request_indirectpages,
1071 &cm->cm_indirectionrefs[0]);
1072 contigfree(cm->cm_indirectionpages, PAGE_SIZE *
1073 sc->xbd_max_request_indirectpages,
1074 M_XENBLOCKFRONT);
1075 cm->cm_indirectionpages = NULL;
1076 }
1077
1078 bus_dmamap_destroy(sc->xbd_io_dmat, cm->cm_map);
1079 }
1080 free(sc->xbd_shadow, M_XENBLOCKFRONT);
1081 sc->xbd_shadow = NULL;
1082
1083 bus_dma_tag_destroy(sc->xbd_io_dmat);
1084
1085 xbd_initq_cm(sc, XBD_Q_FREE);
1086 xbd_initq_cm(sc, XBD_Q_READY);
1087 xbd_initq_cm(sc, XBD_Q_COMPLETE);
1088 }
1089
1090 xen_intr_unbind(&sc->xen_intr_handle);
1091
1092}
1093
1094/*--------------------------- State Change Handlers --------------------------*/
1095static void
1096xbd_initialize(struct xbd_softc *sc)
1097{
1098 const char *otherend_path;
1099 const char *node_path;
1100 uint32_t max_ring_page_order;
1101 int error;
1102
1103 if (xenbus_get_state(sc->xbd_dev) != XenbusStateInitialising) {
1104 /* Initialization has already been performed. */
1105 return;
1106 }
1107
1108 /*
1109 * Protocol defaults valid even if negotiation for a
1110 * setting fails.
1111 */
1112 max_ring_page_order = 0;
1113 sc->xbd_ring_pages = 1;
1114
1115 /*
1116 * Protocol negotiation.
1117 *
1118 * \note xs_gather() returns on the first encountered error, so
1119 * we must use independent calls in order to guarantee
1120 * we don't miss information in a sparsly populated back-end
1121 * tree.
1122 *
1123 * \note xs_scanf() does not update variables for unmatched
1124 * fields.
1125 */
1126 otherend_path = xenbus_get_otherend_path(sc->xbd_dev);
1127 node_path = xenbus_get_node(sc->xbd_dev);
1128
1129 /* Support both backend schemes for relaying ring page limits. */
1130 (void)xs_scanf(XST_NIL, otherend_path,
1131 "max-ring-page-order", NULL, "%" PRIu32,
1132 &max_ring_page_order);
1133 sc->xbd_ring_pages = 1 << max_ring_page_order;
1134 (void)xs_scanf(XST_NIL, otherend_path,
1135 "max-ring-pages", NULL, "%" PRIu32,
1136 &sc->xbd_ring_pages);
1137 if (sc->xbd_ring_pages < 1)
1138 sc->xbd_ring_pages = 1;
1139
1140 if (sc->xbd_ring_pages > XBD_MAX_RING_PAGES) {
1141 device_printf(sc->xbd_dev,
1142 "Back-end specified ring-pages of %u "
1143 "limited to front-end limit of %u.\n",
1144 sc->xbd_ring_pages, XBD_MAX_RING_PAGES);
1145 sc->xbd_ring_pages = XBD_MAX_RING_PAGES;
1146 }
1147
1148 if (powerof2(sc->xbd_ring_pages) == 0) {
1149 uint32_t new_page_limit;
1150
1151 new_page_limit = 0x01 << (fls(sc->xbd_ring_pages) - 1);
1152 device_printf(sc->xbd_dev,
1153 "Back-end specified ring-pages of %u "
1154 "is not a power of 2. Limited to %u.\n",
1155 sc->xbd_ring_pages, new_page_limit);
1156 sc->xbd_ring_pages = new_page_limit;
1157 }
1158
1159 sc->xbd_max_requests =
1160 BLKIF_MAX_RING_REQUESTS(sc->xbd_ring_pages * PAGE_SIZE);
1161 if (sc->xbd_max_requests > XBD_MAX_REQUESTS) {
1162 device_printf(sc->xbd_dev,
1163 "Back-end specified max_requests of %u "
1164 "limited to front-end limit of %zu.\n",
1165 sc->xbd_max_requests, (size_t)XBD_MAX_REQUESTS);
1166 sc->xbd_max_requests = XBD_MAX_REQUESTS;
1167 }
1168
1169 if (xbd_alloc_ring(sc) != 0)
1170 return;
1171
1172 /* Support both backend schemes for relaying ring page limits. */
1173 if (sc->xbd_ring_pages > 1) {
1174 error = xs_printf(XST_NIL, node_path,
1175 "num-ring-pages","%u",
1176 sc->xbd_ring_pages);
1177 if (error) {
1178 xenbus_dev_fatal(sc->xbd_dev, error,
1179 "writing %s/num-ring-pages",
1180 node_path);
1181 return;
1182 }
1183
1184 error = xs_printf(XST_NIL, node_path,
1185 "ring-page-order", "%u",
1186 fls(sc->xbd_ring_pages) - 1);
1187 if (error) {
1188 xenbus_dev_fatal(sc->xbd_dev, error,
1189 "writing %s/ring-page-order",
1190 node_path);
1191 return;
1192 }
1193 }
1194
1195 error = xs_printf(XST_NIL, node_path, "event-channel",
1196 "%u", xen_intr_port(sc->xen_intr_handle));
1197 if (error) {
1198 xenbus_dev_fatal(sc->xbd_dev, error,
1199 "writing %s/event-channel",
1200 node_path);
1201 return;
1202 }
1203
1204 error = xs_printf(XST_NIL, node_path, "protocol",
1205 "%s", XEN_IO_PROTO_ABI_NATIVE);
1206 if (error) {
1207 xenbus_dev_fatal(sc->xbd_dev, error,
1208 "writing %s/protocol",
1209 node_path);
1210 return;
1211 }
1212
1213 xenbus_set_state(sc->xbd_dev, XenbusStateInitialised);
1214}
1215
1216/*
1217 * Invoked when the backend is finally 'ready' (and has published
1218 * the details about the physical device - #sectors, size, etc).
1219 */
1220static void
1221xbd_connect(struct xbd_softc *sc)
1222{
1223 device_t dev = sc->xbd_dev;
1224 blkif_sector_t sectors;
1225 unsigned long sector_size, phys_sector_size;
1226 unsigned int binfo;
1227 int err, feature_barrier, feature_flush;
1228 int i, j;
1229
1230 DPRINTK("blkfront.c:connect:%s.\n", xenbus_get_otherend_path(dev));
1231
1232 if (sc->xbd_state == XBD_STATE_SUSPENDED) {
1233 return;
1234 }
1235
1236 if (sc->xbd_state == XBD_STATE_CONNECTED) {
1237 struct disk *disk;
1238
1239 disk = sc->xbd_disk;
1240 if (disk == NULL) {
1241 return;
1242 }
1243 err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
1244 "sectors", "%"PRIu64, &sectors, NULL);
1245 if (err != 0) {
1246 xenbus_dev_error(dev, err,
1247 "reading sectors at %s",
1248 xenbus_get_otherend_path(dev));
1249 return;
1250 }
1251 disk->d_mediasize = disk->d_sectorsize * sectors;
1252 err = disk_resize(disk, M_NOWAIT);
1253 if (err) {
1254 xenbus_dev_error(dev, err,
1255 "unable to resize disk %s%u",
1256 disk->d_name, disk->d_unit);
1257 return;
1258 }
1259 device_printf(sc->xbd_dev,
1260 "changed capacity to %jd\n",
1261 (intmax_t)disk->d_mediasize);
1262 return;
1263 }
1264
1265 err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
1266 "sectors", "%"PRIu64, &sectors,
1267 "info", "%u", &binfo,
1268 "sector-size", "%lu", &sector_size,
1269 NULL);
1270 if (err) {
1271 xenbus_dev_fatal(dev, err,
1272 "reading backend fields at %s",
1273 xenbus_get_otherend_path(dev));
1274 return;
1275 }
1276 if ((sectors == 0) || (sector_size == 0)) {
1277 xenbus_dev_fatal(dev, 0,
1278 "invalid parameters from %s:"
1279 " sectors = %"PRIu64", sector_size = %lu",
1280 xenbus_get_otherend_path(dev),
1281 sectors, sector_size);
1282 return;
1283 }
1284 err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
1285 "physical-sector-size", "%lu", &phys_sector_size,
1286 NULL);
1287 if (err || phys_sector_size <= sector_size)
1288 phys_sector_size = 0;
1289 err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
1290 "feature-barrier", "%d", &feature_barrier,
1291 NULL);
1292 if (err == 0 && feature_barrier != 0)
1293 sc->xbd_flags |= XBDF_BARRIER;
1294
1295 err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
1296 "feature-flush-cache", "%d", &feature_flush,
1297 NULL);
1298 if (err == 0 && feature_flush != 0)
1299 sc->xbd_flags |= XBDF_FLUSH;
1300
1301 err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev),
1302 "feature-max-indirect-segments", "%" PRIu32,
1303 &sc->xbd_max_request_segments, NULL);
1304 if ((err != 0) || (xbd_enable_indirect == 0))
1305 sc->xbd_max_request_segments = 0;
1306 if (sc->xbd_max_request_segments > XBD_MAX_INDIRECT_SEGMENTS)
1307 sc->xbd_max_request_segments = XBD_MAX_INDIRECT_SEGMENTS;
1308 if (sc->xbd_max_request_segments > XBD_SIZE_TO_SEGS(maxphys))
1309 sc->xbd_max_request_segments = XBD_SIZE_TO_SEGS(maxphys);
1310 sc->xbd_max_request_indirectpages =
1311 XBD_INDIRECT_SEGS_TO_PAGES(sc->xbd_max_request_segments);
1312 if (sc->xbd_max_request_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
1313 sc->xbd_max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
1314 sc->xbd_max_request_size =
1315 XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments);
1316
1317 /* Allocate datastructures based on negotiated values. */
1318 err = bus_dma_tag_create(
1319 bus_get_dma_tag(sc->xbd_dev), /* parent */
1320 512, PAGE_SIZE, /* algnmnt, boundary */
1321 BUS_SPACE_MAXADDR, /* lowaddr */
1322 BUS_SPACE_MAXADDR, /* highaddr */
1323 NULL, NULL, /* filter, filterarg */
1324 sc->xbd_max_request_size, /* maxsize */
1325 sc->xbd_max_request_segments, /* nsegments */
1326 PAGE_SIZE, /* maxsegsize */
1327 BUS_DMA_ALLOCNOW, /* flags */
1328 busdma_lock_mutex, /* lockfunc */
1329 &sc->xbd_io_lock, /* lockarg */
1330 &sc->xbd_io_dmat);
1331 if (err != 0) {
1332 xenbus_dev_fatal(sc->xbd_dev, err,
1333 "Cannot allocate parent DMA tag\n");
1334 return;
1335 }
1336
1337 /* Per-transaction data allocation. */
1338 sc->xbd_shadow = malloc(sizeof(*sc->xbd_shadow) * sc->xbd_max_requests,
1339 M_XENBLOCKFRONT, M_NOWAIT|M_ZERO);
1340 if (sc->xbd_shadow == NULL) {
1341 bus_dma_tag_destroy(sc->xbd_io_dmat);
1342 xenbus_dev_fatal(sc->xbd_dev, ENOMEM,
1343 "Cannot allocate request structures\n");
1344 return;
1345 }
1346
1347 for (i = 0; i < sc->xbd_max_requests; i++) {
1348 struct xbd_command *cm;
1349 void * indirectpages;
1350
1351 cm = &sc->xbd_shadow[i];
1352 cm->cm_sg_refs = malloc(
1353 sizeof(grant_ref_t) * sc->xbd_max_request_segments,
1354 M_XENBLOCKFRONT, M_NOWAIT);
1355 if (cm->cm_sg_refs == NULL)
1356 break;
1357 cm->cm_id = i;
1358 cm->cm_flags = XBDCF_INITIALIZER;
1359 cm->cm_sc = sc;
1360 if (bus_dmamap_create(sc->xbd_io_dmat, 0, &cm->cm_map) != 0)
1361 break;
1362 if (sc->xbd_max_request_indirectpages > 0) {
1363 indirectpages = contigmalloc(
1364 PAGE_SIZE * sc->xbd_max_request_indirectpages,
1365 M_XENBLOCKFRONT, M_ZERO | M_NOWAIT, 0, ~0,
1366 PAGE_SIZE, 0);
1367 if (indirectpages == NULL)
1368 sc->xbd_max_request_indirectpages = 0;
1369 } else {
1370 indirectpages = NULL;
1371 }
1372 for (j = 0; j < sc->xbd_max_request_indirectpages; j++) {
1373 if (gnttab_grant_foreign_access(
1374 xenbus_get_otherend_id(sc->xbd_dev),
1375 (vtophys(indirectpages) >> PAGE_SHIFT) + j,
1376 1 /* grant read-only access */,
1377 &cm->cm_indirectionrefs[j]))
1378 break;
1379 }
1380 if (j < sc->xbd_max_request_indirectpages) {
1381 contigfree(indirectpages,
1382 PAGE_SIZE * sc->xbd_max_request_indirectpages,
1383 M_XENBLOCKFRONT);
1384 break;
1385 }
1386 cm->cm_indirectionpages = indirectpages;
1387 xbd_free_command(cm);
1388 }
1389
1390 if (sc->xbd_disk == NULL) {
1391 device_printf(dev, "%juMB <%s> at %s",
1392 (uintmax_t) sectors / (1048576 / sector_size),
1393 device_get_desc(dev),
1394 xenbus_get_node(dev));
1395 bus_print_child_footer(device_get_parent(dev), dev);
1396
1397 xbd_instance_create(sc, sectors, sc->xbd_vdevice, binfo,
1398 sector_size, phys_sector_size);
1399 }
1400
1401 (void)xenbus_set_state(dev, XenbusStateConnected);
1402
1403 /* Kick pending requests. */
1404 mtx_lock(&sc->xbd_io_lock);
1405 sc->xbd_state = XBD_STATE_CONNECTED;
1406 xbd_startio(sc);
1407 sc->xbd_flags |= XBDF_READY;
1408 mtx_unlock(&sc->xbd_io_lock);
1409}
1410
1417static void
1418xbd_closing(device_t dev)
1419{
1420 struct xbd_softc *sc = device_get_softc(dev);
1421
1422 xenbus_set_state(dev, XenbusStateClosing);
1423
1424 DPRINTK("xbd_closing: %s removed\n", xenbus_get_node(dev));
1425
1426 if (sc->xbd_disk != NULL) {
1427 disk_destroy(sc->xbd_disk);
1428 sc->xbd_disk = NULL;
1429 }
1430
1431 xenbus_set_state(dev, XenbusStateClosed);
1432}
1433
1434/*---------------------------- NewBus Entrypoints ----------------------------*/
1435static int
1436xbd_probe(device_t dev)
1437{
1438 if (strcmp(xenbus_get_type(dev), "vbd") != 0)
1439 return (ENXIO);
1440
1441 if (xen_pv_disks_disabled())
1442 return (ENXIO);
1443
1444 if (xen_hvm_domain()) {
1445 int error;
1446 char *type;
1447
1448 /*
1449 * When running in an HVM domain, IDE disk emulation is
1450 * disabled early in boot so that native drivers will
1451 * not see emulated hardware. However, CDROM device
1452 * emulation cannot be disabled.
1453 *
1454 * Through use of FreeBSD's vm_guest and xen_hvm_domain()
1455 * APIs, we could modify the native CDROM driver to fail its
1456 * probe when running under Xen. Unfortunately, the PV
1457 * CDROM support in XenServer (up through at least version
1458 * 6.2) isn't functional, so we instead rely on the emulated
1459 * CDROM instance, and fail to attach the PV one here in
1460 * the blkfront driver.
1461 */
1462 error = xs_read(XST_NIL, xenbus_get_node(dev),
1463 "device-type", NULL, (void **) &type);
1464 if (error)
1465 return (ENXIO);
1466
1467 if (strncmp(type, "cdrom", 5) == 0) {
1468 free(type, M_XENSTORE);
1469 return (ENXIO);
1470 }
1471 free(type, M_XENSTORE);
1472 }
1473
1474 device_set_desc(dev, "Virtual Block Device");
1475 device_quiet(dev);
1476 return (0);
1477}
1478
1479/*
1480 * Setup supplies the backend directory and virtual device number. We
1481 * place an event channel and shared frame entries, then watch the
1482 * backend to see when it is ready.
1483 */
1484static int
1485xbd_attach(device_t dev)
1486{
1487 struct xbd_softc *sc;
1488 const char *name;
1489 uint32_t vdevice;
1490 int error;
1491 int i;
1492 int unit;
1493
1494 /* FIXME: Use dynamic device id if this is not set. */
1495 error = xs_scanf(XST_NIL, xenbus_get_node(dev),
1496 "virtual-device", NULL, "%" PRIu32, &vdevice);
1497 if (error)
1498 error = xs_scanf(XST_NIL, xenbus_get_node(dev),
1499 "virtual-device-ext", NULL, "%" PRIu32, &vdevice);
1500 if (error) {
1501 xenbus_dev_fatal(dev, error, "reading virtual-device");
1502 device_printf(dev, "Couldn't determine virtual device.\n");
1503 return (error);
1504 }
1505
1506 xbd_vdevice_to_unit(vdevice, &unit, &name);
1507 if (!strcmp(name, "xbd"))
1508 device_set_unit(dev, unit);
1509
1510 sc = device_get_softc(dev);
1511 mtx_init(&sc->xbd_io_lock, "blkfront i/o lock", NULL, MTX_DEF);
1512 xbd_initqs(sc);
1513 for (i = 0; i < XBD_MAX_RING_PAGES; i++)
1514 sc->xbd_ring_ref[i] = GRANT_REF_INVALID;
1515
1516 sc->xbd_dev = dev;
1517 sc->xbd_vdevice = vdevice;
1518 sc->xbd_state = XBD_STATE_DISCONNECTED;
1519
1520 xbd_setup_sysctl(sc);
1521
1522 /* Wait for backend device to publish its protocol capabilities. */
1523 xenbus_set_state(dev, XenbusStateInitialising);
1524
1525 return (0);
1526}
1527
1528static int
1529xbd_detach(device_t dev)
1530{
1531 struct xbd_softc *sc = device_get_softc(dev);
1532
1533 DPRINTK("%s: %s removed\n", __func__, xenbus_get_node(dev));
1534
1535 xbd_free(sc);
1536 mtx_destroy(&sc->xbd_io_lock);
1537
1538 return 0;
1539}
1540
1541static int
1542xbd_suspend(device_t dev)
1543{
1544 struct xbd_softc *sc = device_get_softc(dev);
1545 int retval;
1546 int saved_state;
1547
1548 /* Prevent new requests being issued until we fix things up. */
1549 mtx_lock(&sc->xbd_io_lock);
1550 saved_state = sc->xbd_state;
1551 sc->xbd_state = XBD_STATE_SUSPENDED;
1552
1553 /* Wait for outstanding I/O to drain. */
1554 retval = 0;
1555 while (xbd_queue_length(sc, XBD_Q_BUSY) != 0) {
1556 if (msleep(&sc->xbd_cm_q[XBD_Q_BUSY], &sc->xbd_io_lock,
1557 PRIBIO, "blkf_susp", 30 * hz) == EWOULDBLOCK) {
1558 retval = EBUSY;
1559 break;
1560 }
1561 }
1562 mtx_unlock(&sc->xbd_io_lock);
1563
1564 if (retval != 0)
1565 sc->xbd_state = saved_state;
1566
1567 return (retval);
1568}
1569
1570static int
1571xbd_resume(device_t dev)
1572{
1573 struct xbd_softc *sc = device_get_softc(dev);
1574
1575 if (xen_suspend_cancelled) {
1576 sc->xbd_state = XBD_STATE_CONNECTED;
1577 return (0);
1578 }
1579
1580 DPRINTK("xbd_resume: %s\n", xenbus_get_node(dev));
1581
1582 xbd_free(sc);
1583 xbd_initialize(sc);
1584 return (0);
1585}
1586
1590static void
1591xbd_backend_changed(device_t dev, XenbusState backend_state)
1592{
1593 struct xbd_softc *sc = device_get_softc(dev);
1594
1595 DPRINTK("backend_state=%d\n", backend_state);
1596
1597 switch (backend_state) {
1598 case XenbusStateUnknown:
1599 case XenbusStateInitialising:
1600 case XenbusStateReconfigured:
1601 case XenbusStateReconfiguring:
1602 case XenbusStateClosed:
1603 break;
1604
1605 case XenbusStateInitWait:
1606 case XenbusStateInitialised:
1607 xbd_initialize(sc);
1608 break;
1609
1610 case XenbusStateConnected:
1611 xbd_initialize(sc);
1612 xbd_connect(sc);
1613 break;
1614
1615 case XenbusStateClosing:
1616 if (sc->xbd_users > 0) {
1617 device_printf(dev, "detaching with pending users\n");
1618 KASSERT(sc->xbd_disk != NULL,
1619 ("NULL disk with pending users\n"));
1620 disk_gone(sc->xbd_disk);
1621 } else {
1622 xbd_closing(dev);
1623 }
1624 break;
1625 }
1626}
1627
1628/*---------------------------- NewBus Registration ---------------------------*/
1629static device_method_t xbd_methods[] = {
1630 /* Device interface */
1631 DEVMETHOD(device_probe, xbd_probe),
1632 DEVMETHOD(device_attach, xbd_attach),
1633 DEVMETHOD(device_detach, xbd_detach),
1634 DEVMETHOD(device_shutdown, bus_generic_shutdown),
1635 DEVMETHOD(device_suspend, xbd_suspend),
1636 DEVMETHOD(device_resume, xbd_resume),
1637
1638 /* Xenbus interface */
1639 DEVMETHOD(xenbus_otherend_changed, xbd_backend_changed),
1640
1641 { 0, 0 }
1642};
1643
1644static driver_t xbd_driver = {
1645 "xbd",
1646 xbd_methods,
1647 sizeof(struct xbd_softc),
1648};
1649devclass_t xbd_devclass;
1650
1651DRIVER_MODULE(xbd, xenbusb_front, xbd_driver, xbd_devclass, 0, 0);