#ifndef __XEN_BLKFRONT_BLOCK_H__
#define __XEN_BLKFRONT_BLOCK_H__
/*
 * Given a number of blkif segments, compute the maximum I/O size those
 * segments can carry.  One segment is held in reserve so that an
 * unaligned transfer can be handled without resorting to a bounce buffer.
 */
#define XBD_SEGS_TO_SIZE(segs) \
        (((segs) - 1) * PAGE_SIZE)
/*
 * Compute the number of blkif segments required to back an I/O of the
 * given size, again reserving one segment for unaligned transfers.
 */
#define XBD_SIZE_TO_SEGS(size) \
        (((size) / PAGE_SIZE) + 1)
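/*
 * Worked example (illustrative, assuming 4 KiB pages; not part of the
 * original header): XBD_SIZE_TO_SEGS(65536) = 65536 / 4096 + 1 = 17,
 * and XBD_SEGS_TO_SIZE(17) = (17 - 1) * 4096 = 65536, so the two
 * macros round-trip for page-multiple sizes.  A compile-time sanity
 * check along these lines could read:
 *
 *      CTASSERT(XBD_SEGS_TO_SIZE(XBD_SIZE_TO_SEGS(65536)) >= 65536);
 */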
/*
 * The maximum number of shared memory ring pages we will allow in a
 * negotiated block-front/back communication channel.
 */
#define XBD_MAX_RING_PAGES 32
/*
 * The maximum number of outstanding requests we will allow in a
 * negotiated block-front/back communication channel.
 */
#define XBD_MAX_REQUESTS \
        __CONST_RING_SIZE(blkif, PAGE_SIZE * XBD_MAX_RING_PAGES)
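/*
 * Sizing sketch (illustrative arithmetic; the concrete sizes below are
 * assumptions, not taken from this header): __CONST_RING_SIZE() divides
 * the shared ring area, minus the ring header, by the size of one
 * request/response union entry and rounds the result down to a power of
 * two.  Assuming 4 KiB pages, a 64-byte ring header, and a 112-byte
 * ring entry, 32 ring pages yield (32 * 4096 - 64) / 112 = 1169 slots,
 * rounded down to 1024 outstanding requests.
 */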
/*
 * The number of blkif segment descriptors that fit in a single page of
 * an indirection table.
 */
#define XBD_MAX_SEGMENTS_PER_PAGE \
        (PAGE_SIZE / sizeof(struct blkif_request_segment))
/*
 * The maximum number of blkif segments which can be provided by a
 * single indirect request.
 */
#define XBD_MAX_INDIRECT_SEGMENTS \
        (BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST * XBD_MAX_SEGMENTS_PER_PAGE)
/*
 * Compute the number of indirection pages required to describe the
 * given number of segments, rounding up to a whole page.
 */
#define XBD_INDIRECT_SEGS_TO_PAGES(segs) \
        (((segs) + XBD_MAX_SEGMENTS_PER_PAGE - 1) / XBD_MAX_SEGMENTS_PER_PAGE)
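/*
 * Worked example (illustrative, assuming 4 KiB pages and an 8-byte
 * struct blkif_request_segment; not part of the original header):
 * XBD_MAX_SEGMENTS_PER_PAGE is 4096 / 8 = 512, so
 * XBD_INDIRECT_SEGS_TO_PAGES(512) = 1 while
 * XBD_INDIRECT_SEGS_TO_PAGES(513) = 2; the "+ N - 1" term is the usual
 * round-up-division idiom.
 */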
int xbd_instance_create(struct xbd_softc *, blkif_sector_t sectors,
    int device, uint16_t vdisk_info, unsigned long sector_size,
    unsigned long phys_sector_size);
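/*
 * Hypothetical attach-path call (a sketch only; the local variable
 * names are illustrative and the surrounding XenStore negotiation is
 * omitted):
 *
 *      if (xbd_instance_create(sc, sectors, vdevice, vdisk_info,
 *          sector_size, phys_sector_size) != 0)
 *              return (ENXIO);
 */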
static inline uint32_t
xbd_queue_length(struct xbd_softc *sc, xbd_q_index_t index)
{
        return (sc->xbd_cm_q[index].q_length);
}

static inline void
xbd_initq_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
        struct xbd_cm_q *cmq;

        cmq = &sc->xbd_cm_q[index];
        cmq->q_length = 0;
        cmq->q_max = 0;
        TAILQ_INIT(&cmq->q_tailq);
}
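/*
 * The queue-accounting helpers called by the functions below were lost
 * from this extract; the bodies here are a minimal sketch consistent
 * with the call sites and the index entries (the q_length and q_max
 * counters are assumed).
 */
static inline void
xbd_added_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
        struct xbd_cm_q *cmq;

        cmq = &sc->xbd_cm_q[index];
        cmq->q_length++;
        if (cmq->q_length > cmq->q_max)
                cmq->q_max = cmq->q_length;
}

static inline void
xbd_removed_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
        sc->xbd_cm_q[index].q_length--;
}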
static inline void
xbd_enqueue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
        KASSERT(index != XBD_Q_BIO,
            ("%s: Commands cannot access the bio queue.", __func__));
        if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
                panic("%s: command %p is already on queue %d.",
                    __func__, cm, cm->cm_flags & XBDCF_Q_MASK);
        TAILQ_INSERT_TAIL(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
        cm->cm_flags &= ~XBDCF_Q_MASK;
        cm->cm_flags |= index;
        xbd_added_qentry(cm->cm_sc, index);
}
static inline void
xbd_requeue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
        KASSERT(index != XBD_Q_BIO,
            ("%s: Commands cannot access the bio queue.", __func__));
        if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
                panic("%s: command %p is already on queue %d.",
                    __func__, cm, cm->cm_flags & XBDCF_Q_MASK);
        TAILQ_INSERT_HEAD(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
        cm->cm_flags &= ~XBDCF_Q_MASK;
        cm->cm_flags |= index;
        xbd_added_qentry(cm->cm_sc, index);
}
static inline struct xbd_command *
xbd_dequeue_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
        struct xbd_command *cm;

        KASSERT(index != XBD_Q_BIO,
            ("%s: Commands cannot access the bio queue.", __func__));

        if ((cm = TAILQ_FIRST(&sc->xbd_cm_q[index].q_tailq)) != NULL) {
                if ((cm->cm_flags & XBDCF_Q_MASK) != index) {
                        panic("%s: command %p is on queue %d, "
                            "not specified queue %d",
                            __func__, cm,
                            cm->cm_flags & XBDCF_Q_MASK, index);
                }
                TAILQ_REMOVE(&sc->xbd_cm_q[index].q_tailq, cm, cm_link);
                cm->cm_flags &= ~XBDCF_Q_MASK;
                cm->cm_flags |= XBD_Q_NONE;
                xbd_removed_qentry(sc, index);
        }
        return (cm);
}
static inline void
xbd_remove_cm(struct xbd_command *cm, xbd_q_index_t expected_index)
{
        xbd_q_index_t index;

        index = cm->cm_flags & XBDCF_Q_MASK;

        KASSERT(index != XBD_Q_BIO,
            ("%s: Commands cannot access the bio queue.", __func__));

        if (index != expected_index) {
                panic("%s: command %p is on queue %d, not specified queue %d",
                    __func__, cm, index, expected_index);
        }
        TAILQ_REMOVE(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
        cm->cm_flags &= ~XBDCF_Q_MASK;
        cm->cm_flags |= XBD_Q_NONE;
        xbd_removed_qentry(cm->cm_sc, index);
}
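/*
 * Typical command flow (an illustrative sketch, not driver code): a
 * command is taken off the free queue, dispatched, and parked on the
 * busy queue until the backend responds.  The XBD_Q_FREE and XBD_Q_BUSY
 * indexes are assumed from the driver's xbd_q_index_t enumeration.
 *
 *      cm = xbd_dequeue_cm(sc, XBD_Q_FREE);
 *      if (cm != NULL) {
 *              ... build the blkif ring request ...
 *              xbd_enqueue_cm(cm, XBD_Q_BUSY);
 *      }
 */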
static inline void
xbd_enqueue_bio(struct xbd_softc *sc, struct bio *bp)
{
        bioq_insert_tail(&sc->xbd_bioq, bp);
        xbd_added_qentry(sc, XBD_Q_BIO);
}

static inline void
xbd_requeue_bio(struct xbd_softc *sc, struct bio *bp)
{
        bioq_insert_head(&sc->xbd_bioq, bp);
        xbd_added_qentry(sc, XBD_Q_BIO);
}
static inline struct bio *
xbd_dequeue_bio(struct xbd_softc *sc)
{
        struct bio *bp;

        if ((bp = bioq_first(&sc->xbd_bioq)) != NULL) {
                bioq_remove(&sc->xbd_bioq, bp);
                xbd_removed_qentry(sc, XBD_Q_BIO);
        }
        return (bp);
}
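/*
 * Bio path sketch (illustrative; only the helpers defined above are
 * taken from this header): the strategy routine queues incoming
 * struct bio requests with xbd_enqueue_bio(), and the start-I/O path
 * later drains them:
 *
 *      while ((bp = xbd_dequeue_bio(sc)) != NULL) {
 *              ... translate bp into a blkif ring request ...
 *      }
 */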
/*
 * Symbol summary, reconstructed from the generated documentation index
 * that accompanied this extract (data fields are grouped under the
 * structures they belong to in the full header):
 *
 * Typedefs:
 *      typedef struct xbd_cm_q xbd_cm_q_t;
 *      typedef void xbd_cbcf_t(struct xbd_command *);
 *
 * Functions:
 *      static void xbd_initq_cm(struct xbd_softc *sc, xbd_q_index_t index);
 *      static void xbd_initq_bio(struct xbd_softc *sc);
 *      static void xbd_initqs(struct xbd_softc *sc);
 *      static uint32_t xbd_queue_length(struct xbd_softc *sc, xbd_q_index_t index);
 *      static void xbd_added_qentry(struct xbd_softc *sc, xbd_q_index_t index);
 *      static void xbd_removed_qentry(struct xbd_softc *sc, xbd_q_index_t index);
 *      static void xbd_enqueue_cm(struct xbd_command *cm, xbd_q_index_t index);
 *      static void xbd_requeue_cm(struct xbd_command *cm, xbd_q_index_t index);
 *      static struct xbd_command *xbd_dequeue_cm(struct xbd_softc *sc, xbd_q_index_t index);
 *      static void xbd_remove_cm(struct xbd_command *cm, xbd_q_index_t expected_index);
 *      static void xbd_enqueue_bio(struct xbd_softc *sc, struct bio *bp);
 *      static void xbd_requeue_bio(struct xbd_softc *sc, struct bio *bp);
 *      static struct bio *xbd_dequeue_bio(struct xbd_softc *sc);
 *      int xbd_instance_create(struct xbd_softc *, blkif_sector_t sectors,
 *          int device, uint16_t vdisk_info, unsigned long sector_size,
 *          unsigned long phys_sector_size);
 *
 * Data fields:
 *      struct xbd_cm_q:
 *              TAILQ_HEAD(, xbd_command) q_tailq;
 *      struct xbd_command:
 *              TAILQ_ENTRY(xbd_command) cm_link;
 *              blkif_sector_t cm_sector_number;
 *              void *cm_indirectionpages;
 *              grant_ref_t cm_indirectionrefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
 *      struct xbd_softc:
 *              xbd_cm_q_t xbd_cm_q[XBD_Q_COUNT];
 *              struct bio_queue_head xbd_bioq;
 *              blkif_front_ring_t xbd_ring;
 *              grant_ref_t xbd_ring_ref[XBD_MAX_RING_PAGES];
 *              xen_intr_handle_t xen_intr_handle;
 *              bus_dma_tag_t xbd_io_dmat;
 *              struct gnttab_free_callback xbd_callback;
 *              struct xbd_command *xbd_shadow;
 *              uint32_t xbd_max_requests;
 *              uint32_t xbd_max_request_segments;
 *              uint32_t xbd_max_request_indirectpages;
 *              uint32_t xbd_max_request_size;
 */

#endif /* __XEN_BLKFRONT_BLOCK_H__ */