FreeBSD kernel /amd64 XEN device code
block.h
/*
 * XenBSD block device driver
 *
 * Copyright (c) 2010-2013 Spectra Logic Corporation
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD$
 */

#ifndef __XEN_BLKFRONT_BLOCK_H__
#define __XEN_BLKFRONT_BLOCK_H__
#include <xen/blkif.h>

/*
 * Given a number of blkif segments, compute the maximum I/O size supported.
 * A segment is reserved from the maximum supported by the transport so that
 * an unaligned transfer can be handled without a bounce buffer.
 */
#define XBD_SEGS_TO_SIZE(segs)                                  \
        (((segs) - 1) * PAGE_SIZE)

/*
 * Compute the maximum number of blkif segments required to represent an I/O
 * of the given size.
 */
#define XBD_SIZE_TO_SEGS(size)                                  \
        ((size / PAGE_SIZE) + 1)
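
/*
 * Worked example (illustrative, assuming a 4 KiB PAGE_SIZE):
 *
 *      XBD_SEGS_TO_SIZE(17)    = 16 * 4096          = 65536 bytes (64 KiB)
 *      XBD_SIZE_TO_SEGS(65536) = (65536 / 4096) + 1 = 17 segments
 *
 * The extra segment in each direction accounts for a transfer whose first
 * page is not page aligned.
 */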

/*
 * The maximum number of shared memory ring pages we support.
 */
#define XBD_MAX_RING_PAGES      32

/*
 * The maximum number of outstanding requests we support on a single ring.
 */
#define XBD_MAX_REQUESTS                                        \
        __CONST_RING_SIZE(blkif, PAGE_SIZE * XBD_MAX_RING_PAGES)

/*
 * The number of segment descriptors that fit in a single indirection page.
 */
#define XBD_MAX_SEGMENTS_PER_PAGE                               \
        (PAGE_SIZE / sizeof(struct blkif_request_segment))

/*
 * The maximum number of segments a single request may reference when using
 * indirect descriptors.
 */
#define XBD_MAX_INDIRECT_SEGMENTS                               \
        (BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST * XBD_MAX_SEGMENTS_PER_PAGE)

/*
 * The number of indirection pages needed to describe the given number of
 * segments.
 */
#define XBD_INDIRECT_SEGS_TO_PAGES(segs)                        \
        ((segs + XBD_MAX_SEGMENTS_PER_PAGE - 1) / XBD_MAX_SEGMENTS_PER_PAGE)

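/*
 * Worked example (illustrative): assuming a 4 KiB PAGE_SIZE and an 8-byte
 * struct blkif_request_segment, XBD_MAX_SEGMENTS_PER_PAGE is
 * 4096 / 8 = 512, so:
 *
 *      XBD_INDIRECT_SEGS_TO_PAGES(512) = (512 + 511) / 512 = 1 page
 *      XBD_INDIRECT_SEGS_TO_PAGES(513) = (513 + 511) / 512 = 2 pages
 */
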
typedef enum {
        XBDCF_Q_MASK            = 0xFF,
        /* This command has contributed to xbd_qfrozen_cnt. */
        XBDCF_FROZEN            = 1 << 8,
        /* Freeze the command queue on dispatch (i.e. single step command). */
        XBDCF_Q_FREEZE          = 1 << 9,
        /* Bus DMA returned EINPROGRESS for this command. */
        XBDCF_ASYNC_MAPPING     = 1 << 10,
        XBDCF_INITIALIZER       = XBDCF_Q_MASK
} xbdc_flag_t;

struct xbd_command;
typedef void xbd_cbcf_t(struct xbd_command *);

struct xbd_command {
        TAILQ_ENTRY(xbd_command) cm_link;
        struct xbd_softc        *cm_sc;
        xbdc_flag_t              cm_flags;
        bus_dmamap_t             cm_map;
        uint64_t                 cm_id;
        grant_ref_t             *cm_sg_refs;
        struct bio              *cm_bp;
        grant_ref_t              cm_gref_head;
        void                    *cm_data;
        size_t                   cm_datalen;
        u_int                    cm_nseg;
        int                      cm_operation;
        blkif_sector_t           cm_sector_number;
        int                      cm_status;
        xbd_cbcf_t              *cm_complete;
        void                    *cm_indirectionpages;
        grant_ref_t              cm_indirectionrefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
};

typedef enum {
        XBD_Q_FREE,
        XBD_Q_READY,
        XBD_Q_BUSY,
        XBD_Q_COMPLETE,
        XBD_Q_BIO,
        XBD_Q_COUNT,
        XBD_Q_NONE = XBDCF_Q_MASK
} xbd_q_index_t;

typedef struct xbd_cm_q {
        TAILQ_HEAD(, xbd_command) q_tailq;
        uint32_t        q_length;
        uint32_t        q_max;
} xbd_cm_q_t;

typedef enum {
        XBD_STATE_DISCONNECTED,
        XBD_STATE_CONNECTED,
        XBD_STATE_SUSPENDED
} xbd_state_t;

typedef enum {
        XBDF_NONE         = 0,
        XBDF_OPEN         = 1 << 0,     /* drive is open (can't shut down) */
        XBDF_BARRIER      = 1 << 1,     /* backend supports barriers */
        XBDF_FLUSH        = 1 << 2,     /* backend supports flush */
        XBDF_READY        = 1 << 3,     /* device is ready for I/O */
        XBDF_CM_SHORTAGE  = 1 << 4,     /* free cm resource shortage active */
        XBDF_GNT_SHORTAGE = 1 << 5,     /* grant ref resource shortage active */
        XBDF_WAIT_IDLE    = 1 << 6,     /*
                                         * No new work until outstanding work
                                         * completes.
                                         */
        XBDF_DISCARD      = 1 << 7,     /* backend supports discard */
        XBDF_PERSISTENT   = 1 << 8      /* backend supports persistent grants */
} xbd_flag_t;

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.
 */
struct xbd_softc {
        device_t                         xbd_dev;
        struct disk                     *xbd_disk;      /* disk params */
        struct bio_queue_head            xbd_bioq;      /* sort queue */
        int                              xbd_unit;
        xbd_flag_t                       xbd_flags;
        int                              xbd_qfrozen_cnt;
        int                              xbd_vdevice;
        xbd_state_t                      xbd_state;
        u_int                            xbd_ring_pages;
        uint32_t                         xbd_max_requests;
        uint32_t                         xbd_max_request_segments;
        uint32_t                         xbd_max_request_size;
        uint32_t                         xbd_max_request_indirectpages;
        grant_ref_t                      xbd_ring_ref[XBD_MAX_RING_PAGES];
        blkif_front_ring_t               xbd_ring;
        xen_intr_handle_t                xen_intr_handle;
        struct gnttab_free_callback      xbd_callback;
        xbd_cm_q_t                       xbd_cm_q[XBD_Q_COUNT];
        bus_dma_tag_t                    xbd_io_dmat;

        /*
         * The number of people holding this device open.  We won't allow a
         * hot-unplug unless this is 0.
         */
        int                              xbd_users;
        struct mtx                       xbd_io_lock;

        struct xbd_command              *xbd_shadow;
};

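/*
 * Illustrative note (an assumption drawn from the field names above, not
 * text from this header): xbd_io_lock serializes access to the ring and
 * the command queues, so the queue helpers below are expected to run with
 * it held, e.g.:
 *
 *      mtx_lock(&sc->xbd_io_lock);
 *      bp = xbd_dequeue_bio(sc);
 *      mtx_unlock(&sc->xbd_io_lock);
 */
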
int xbd_instance_create(struct xbd_softc *, blkif_sector_t sectors, int device,
                        uint16_t vdisk_info, unsigned long sector_size,
                        unsigned long phys_sector_size);

static inline void
xbd_added_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
        struct xbd_cm_q *cmq;

        cmq = &sc->xbd_cm_q[index];
        cmq->q_length++;
        if (cmq->q_length > cmq->q_max)
                cmq->q_max = cmq->q_length;
}

static inline void
xbd_removed_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
        sc->xbd_cm_q[index].q_length--;
}

static inline uint32_t
xbd_queue_length(struct xbd_softc *sc, xbd_q_index_t index)
{
        return (sc->xbd_cm_q[index].q_length);
}

static inline void
xbd_initq_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
        struct xbd_cm_q *cmq;

        cmq = &sc->xbd_cm_q[index];
        TAILQ_INIT(&cmq->q_tailq);
        cmq->q_length = 0;
        cmq->q_max = 0;
}

static inline void
xbd_enqueue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
        KASSERT(index != XBD_Q_BIO,
            ("%s: Commands cannot access the bio queue.", __func__));
        if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
                panic("%s: command %p is already on queue %d.",
                    __func__, cm, cm->cm_flags & XBDCF_Q_MASK);
        TAILQ_INSERT_TAIL(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
        cm->cm_flags &= ~XBDCF_Q_MASK;
        cm->cm_flags |= index;
        xbd_added_qentry(cm->cm_sc, index);
}

static inline void
xbd_requeue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
        KASSERT(index != XBD_Q_BIO,
            ("%s: Commands cannot access the bio queue.", __func__));
        if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
                panic("%s: command %p is already on queue %d.",
                    __func__, cm, cm->cm_flags & XBDCF_Q_MASK);
        TAILQ_INSERT_HEAD(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
        cm->cm_flags &= ~XBDCF_Q_MASK;
        cm->cm_flags |= index;
        xbd_added_qentry(cm->cm_sc, index);
}

static inline struct xbd_command *
xbd_dequeue_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
        struct xbd_command *cm;

        KASSERT(index != XBD_Q_BIO,
            ("%s: Commands cannot access the bio queue.", __func__));

        if ((cm = TAILQ_FIRST(&sc->xbd_cm_q[index].q_tailq)) != NULL) {
                if ((cm->cm_flags & XBDCF_Q_MASK) != index) {
                        panic("%s: command %p is on queue %d, "
                            "not specified queue %d",
                            __func__, cm,
                            cm->cm_flags & XBDCF_Q_MASK,
                            index);
                }
                TAILQ_REMOVE(&sc->xbd_cm_q[index].q_tailq, cm, cm_link);
                cm->cm_flags &= ~XBDCF_Q_MASK;
                cm->cm_flags |= XBD_Q_NONE;
                xbd_removed_qentry(cm->cm_sc, index);
        }
        return (cm);
}

static inline void
xbd_remove_cm(struct xbd_command *cm, xbd_q_index_t expected_index)
{
        xbd_q_index_t index;

        index = cm->cm_flags & XBDCF_Q_MASK;

        KASSERT(index != XBD_Q_BIO,
            ("%s: Commands cannot access the bio queue.", __func__));

        if (index != expected_index) {
                panic("%s: command %p is on queue %d, not specified queue %d",
                    __func__, cm, index, expected_index);
        }
        TAILQ_REMOVE(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
        cm->cm_flags &= ~XBDCF_Q_MASK;
        cm->cm_flags |= XBD_Q_NONE;
        xbd_removed_qentry(cm->cm_sc, index);
}

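/*
 * Illustrative sketch (not part of the original header): how a command
 * typically moves through the queues above during one I/O.  The flow is
 * an assumption based on the queue names; error paths are omitted.
 *
 *      struct xbd_command *cm;
 *
 *      cm = xbd_dequeue_cm(sc, XBD_Q_FREE);    // claim an idle command
 *      if (cm == NULL)
 *              return;                         // free-command shortage
 *      // ... fill in cm_bp, cm_data, cm_datalen, cm_operation ...
 *      xbd_enqueue_cm(cm, XBD_Q_READY);        // stage for dispatch
 *
 *      cm = xbd_dequeue_cm(sc, XBD_Q_READY);   // push onto the shared ring
 *      xbd_enqueue_cm(cm, XBD_Q_BUSY);         // outstanding at the backend
 *
 *      xbd_remove_cm(cm, XBD_Q_BUSY);          // response received
 *      xbd_enqueue_cm(cm, XBD_Q_COMPLETE);     // await completion handling
 */
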
static inline void
xbd_initq_bio(struct xbd_softc *sc)
{
        bioq_init(&sc->xbd_bioq);
}

static inline void
xbd_enqueue_bio(struct xbd_softc *sc, struct bio *bp)
{
        bioq_insert_tail(&sc->xbd_bioq, bp);
        xbd_added_qentry(sc, XBD_Q_BIO);
}

static inline void
xbd_requeue_bio(struct xbd_softc *sc, struct bio *bp)
{
        bioq_insert_head(&sc->xbd_bioq, bp);
        xbd_added_qentry(sc, XBD_Q_BIO);
}

static inline struct bio *
xbd_dequeue_bio(struct xbd_softc *sc)
{
        struct bio *bp;

        if ((bp = bioq_first(&sc->xbd_bioq)) != NULL) {
                bioq_remove(&sc->xbd_bioq, bp);
                xbd_removed_qentry(sc, XBD_Q_BIO);
        }
        return (bp);
}

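/*
 * Illustrative sketch (an assumption, not text from this header): the bio
 * queue feeds the command queues; on a resource shortage the bio is pushed
 * back to the head so ordering is preserved for a later retry.
 *
 *      struct bio *bp;
 *
 *      while ((bp = xbd_dequeue_bio(sc)) != NULL) {
 *              struct xbd_command *cm;
 *
 *              cm = xbd_dequeue_cm(sc, XBD_Q_FREE);
 *              if (cm == NULL) {
 *                      xbd_requeue_bio(sc, bp);        // retry later
 *                      break;
 *              }
 *              // ... map bp into cm and hand it to the backend ...
 *      }
 */
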
static inline void
xbd_initqs(struct xbd_softc *sc)
{
        u_int index;

        for (index = 0; index < XBD_Q_COUNT; index++)
                xbd_initq_cm(sc, index);

        xbd_initq_bio(sc);
}
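
/*
 * Illustrative sketch (hypothetical attach-time usage; the malloc type
 * M_XENBLOCKFRONT and the surrounding logic are assumptions, not part of
 * this header):
 *
 *      xbd_initqs(sc);
 *      sc->xbd_shadow = malloc(sizeof(*sc->xbd_shadow) * sc->xbd_max_requests,
 *          M_XENBLOCKFRONT, M_NOWAIT | M_ZERO);
 *      for (i = 0; i < sc->xbd_max_requests; i++) {
 *              sc->xbd_shadow[i].cm_sc = sc;
 *              sc->xbd_shadow[i].cm_flags = XBDCF_INITIALIZER;
 *              xbd_enqueue_cm(&sc->xbd_shadow[i], XBD_Q_FREE);
 *      }
 */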

#endif /* __XEN_BLKFRONT_BLOCK_H__ */