FreeBSD kernel CXGBE device code
iw_cxgbe.h
Go to the documentation of this file.
1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 * $FreeBSD$
34 */
35#ifndef __IW_CXGB4_H__
36#define __IW_CXGB4_H__
37
38#include <linux/list.h>
39#include <linux/spinlock.h>
40#include <linux/idr.h>
41#include <linux/completion.h>
42#include <linux/sched.h>
43#include <linux/pci.h>
44#include <linux/dma-mapping.h>
45#include <linux/wait.h>
46#include <linux/kref.h>
47#include <linux/timer.h>
48#include <linux/io.h>
49#include <sys/vmem.h>
50
51#include <asm/byteorder.h>
52
53#include <netinet/in.h>
54#include <netinet/toecore.h>
55
56#include <rdma/ib_verbs.h>
57#include <rdma/iw_cm.h>
58#include <rdma/uverbs_ioctl.h>
59
60#include "common/common.h"
61#include "common/t4_msg.h"
62#include "common/t4_regs.h"
63#include "common/t4_tcb.h"
64#include "t4_l2t.h"
65
66#define DRV_NAME "iw_cxgbe"
67#define MOD DRV_NAME ":"
68#define KTR_IW_CXGBE KTR_SPARE3
69
70extern int c4iw_debug;
71extern int use_dsgl;
72extern int inline_threshold;
73
74#define PDBG(fmt, args...) \
75do { \
76 if (c4iw_debug) \
77 printf(MOD fmt, ## args); \
78} while (0)
79
80#include "t4.h"
81
82static inline void *cplhdr(struct mbuf *m)
83{
84 return mtod(m, void*);
85}
86
87#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->adap->vres.pbl.start)
88#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->adap->vres.rq.start)
89
90#define C4IW_ID_TABLE_F_RANDOM 1 /* Pseudo-randomize the id's returned */
91#define C4IW_ID_TABLE_F_EMPTY 2 /* Table is initially empty */
92#define C4IW_MAX_PAGE_SIZE 0x8000000
93
96 u32 start; /* logical minimal id */
97 u32 last; /* hint for find */
99 spinlock_t lock;
100 unsigned long *table;
101};
102
107};
108
110 struct list_head entry;
112};
113
115 struct list_head qpids;
116 struct list_head cqids;
117 struct mutex lock;
118};
119
123};
124
125struct c4iw_stat {
130};
131
133 struct mutex lock;
135 struct c4iw_stat pd;
139};
140
150};
151
152struct c4iw_rdev {
153 struct adapter *adap;
155 unsigned long qpshift;
157 unsigned long cqshift;
160 vmem_t *rqt_arena;
161 vmem_t *pbl_arena;
166 unsigned long bar2_pa;
167 void __iomem *bar2_kva;
168 unsigned int bar2_len;
169 struct workqueue_struct *free_workq;
170};
171
172static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
173{
174 return rdev->flags & T4_FATAL_ERROR;
175}
176
177static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
178{
179 return (int)(rdev->adap->vres.stag.size >> 5);
180}
181
182static inline int t4_max_fr_depth(struct c4iw_rdev *rdev, bool use_dsgl)
183{
186 else
188}
189
190#define C4IW_WR_TO (60*HZ)
191
193 int ret;
195};
196
197static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
198{
199 wr_waitp->ret = 0;
200 init_completion(&wr_waitp->completion);
201}
202
/*
 * Deliver the firmware reply status and wake the thread blocked in
 * c4iw_wait_for_reply().  'ret' is stored before complete() so the
 * waiter observes it once woken.
 */
static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
{
	wr_waitp->ret = ret;
	complete(&wr_waitp->completion);
}
208
/*
 * Block until the firmware replies to a previously posted work request
 * (signalled via c4iw_wake_up()), the connection socket 'so' reports an
 * error, or the device goes fatal.  Returns wr_waitp->ret (0 or -errno).
 * Each timeout is logged and the wait retried with a timeout that grows
 * by a factor of four.  'hwtid'/'qpid'/'func' are used for diagnostics
 * only.
 */
static inline int
c4iw_wait_for_reply(struct c4iw_rdev *rdev, struct c4iw_wr_wait *wr_waitp,
		u32 hwtid, u32 qpid, struct socket *so, const char *func)
{
	struct adapter *sc = rdev->adap;
	unsigned to = C4IW_WR_TO;
	int ret;
	int timedout = 0;
	struct timeval t1, t2;

	/* Device already dead: no reply will ever arrive. */
	if (c4iw_fatal_error(rdev)) {
		wr_waitp->ret = -EIO;
		goto out;
	}

	getmicrotime(&t1);
	do {
		/* If waiting for reply in rdma_init()/rdma_fini() threads, then
		 * check if there are any connection errors.
		 */
		if (so && so->so_error) {
			wr_waitp->ret = -ECONNRESET;
			CTR5(KTR_IW_CXGBE, "%s - Connection ERROR %u for sock %p"
			    "tid %u qpid %u", func,
			    so->so_error, so, hwtid, qpid);
			break;
		}

		ret = wait_for_completion_timeout(&wr_waitp->completion, to);
		if (!ret) {
			/* Timed out: log how long we have been waiting. */
			getmicrotime(&t2);
			timevalsub(&t2, &t1);
			printf("%s - Device %s not responding after %ld.%06ld "
			    "seconds - tid %u qpid %u\n", func,
			    device_get_nameunit(sc->dev), t2.tv_sec, t2.tv_usec,
			    hwtid, qpid);
			if (c4iw_fatal_error(rdev)) {
				wr_waitp->ret = -EIO;
				break;
			}
			/* Back off: quadruple the timeout and wait again. */
			to = to << 2;
			timedout = 1;
		}
	} while (!ret);

out:
	if (timedout) {
		/* A reply did arrive eventually; report the total latency. */
		getmicrotime(&t2);
		timevalsub(&t2, &t1);
		printf("%s - Device %s reply after %ld.%06ld seconds - "
		    "tid %u qpid %u\n", func, device_get_nameunit(sc->dev),
		    t2.tv_sec, t2.tv_usec, hwtid, qpid);
	}
	if (wr_waitp->ret)
		CTR4(KTR_IW_CXGBE, "%p: FW reply %d tid %u qpid %u", sc,
		    wr_waitp->ret, hwtid, qpid);
	return (wr_waitp->ret);
}
267
268struct c4iw_dev {
269 struct ib_device ibdev;
270 struct pci_dev pdev;
273 struct idr cqidr;
274 struct idr qpidr;
275 struct idr mmidr;
276 spinlock_t lock;
277 struct dentry *debugfs_root;
279};
280
281static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
282{
283 return container_of(ibdev, struct c4iw_dev, ibdev);
284}
285
286static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
287{
288 return container_of(rdev, struct c4iw_dev, rdev);
289}
290
291static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
292{
293 return idr_find(&rhp->cqidr, cqid);
294}
295
296static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
297{
298 return idr_find(&rhp->qpidr, qpid);
299}
300
301static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
302{
303 return idr_find(&rhp->mmidr, mmid);
304}
305
/*
 * Insert 'handle' into 'idr' at exactly id 'id'.  When 'lock' is set the
 * device lock is taken around the idr update (and preallocation may
 * sleep); nolock callers must already hold rhp->lock.  Uses the legacy
 * two-step idr API: preload, then idr_get_new_above(), retrying while it
 * reports -EAGAIN.  Returns 0 on success or -ENOMEM.
 */
static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
				 void *handle, u32 id, int lock)
{
	int ret;
	int newid;

	do {
		if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
			return -ENOMEM;
		if (lock)
			spin_lock_irq(&rhp->lock);
		ret = idr_get_new_above(idr, handle, id, &newid);
		/* The slot at 'id' must be free; any other id is a bug. */
		BUG_ON(!ret && newid != id);
		if (lock)
			spin_unlock_irq(&rhp->lock);
	} while (ret == -EAGAIN);

	return ret;
}
325
326static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
327 void *handle, u32 id)
328{
329 return _insert_handle(rhp, idr, handle, id, 1);
330}
331
332static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
333 void *handle, u32 id)
334{
335 return _insert_handle(rhp, idr, handle, id, 0);
336}
337
/*
 * Remove the entry stored under 'id' from 'idr'.  When 'lock' is set the
 * device lock is taken with interrupts disabled; nolock callers must
 * already hold rhp->lock.
 */
static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
				  u32 id, int lock)
{
	if (lock)
		spin_lock_irq(&rhp->lock);
	idr_remove(idr, id);
	if (lock)
		spin_unlock_irq(&rhp->lock);
}
347
348static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
349{
350 _remove_handle(rhp, idr, id, 1);
351}
352
353static inline void remove_handle_nolock(struct c4iw_dev *rhp,
354 struct idr *idr, u32 id)
355{
356 _remove_handle(rhp, idr, id, 0);
357}
358
359extern int c4iw_max_read_depth;
360
361static inline int cur_max_read_depth(struct c4iw_dev *dev)
362{
364}
365
366struct c4iw_pd {
367 struct ib_pd ibpd;
369 struct c4iw_dev *rhp;
370};
371
372static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
373{
374 return container_of(ibpd, struct c4iw_pd, ibpd);
375}
376
393};
394
395struct c4iw_mr {
396 struct ib_mr ibmr;
397 struct ib_umem *umem;
398 struct c4iw_dev *rhp;
402 dma_addr_t mpl_addr;
405};
406
407static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
408{
409 return container_of(ibmr, struct c4iw_mr, ibmr);
410}
411
412struct c4iw_mw {
413 struct ib_mw ibmw;
414 struct c4iw_dev *rhp;
417};
418
419static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
420{
421 return container_of(ibmw, struct c4iw_mw, ibmw);
422}
423
424struct c4iw_cq {
425 struct ib_cq ibcq;
426 struct c4iw_dev *rhp;
427 struct t4_cq cq;
428 spinlock_t lock;
430 atomic_t refcnt;
431 wait_queue_head_t wait;
432};
433
434static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
435{
436 return container_of(ibcq, struct c4iw_cq, ibcq);
437}
438
447};
448
476};
477
479 struct ib_srq ibsrq;
480};
481
483 struct ib_ah ibah;
484};
485
486struct c4iw_qp {
487 struct ib_qp ibqp;
488 struct c4iw_dev *rhp;
489 struct c4iw_ep *ep;
491 struct t4_wq wq;
492 spinlock_t lock;
493 struct mutex mutex;
494 struct kref kref;
495 wait_queue_head_t wait;
496 struct timer_list timer;
498 struct work_struct free_work;
500};
501
502static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
503{
504 return container_of(ibqp, struct c4iw_qp, ibqp);
505}
506
508 struct ib_ucontext ibucontext;
511 spinlock_t mmap_lock;
512 struct list_head mmaps;
513};
514
515static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
516{
517 return container_of(c, struct c4iw_ucontext, ibucontext);
518}
519
521 struct list_head entry;
524 unsigned len;
525};
526
/*
 * Find, unlink and return the mmap cookie matching (key, len), or NULL
 * if no such entry exists.  The search and removal happen under
 * ucontext->mmap_lock; on success the entry is no longer on the list and
 * the caller owns it.
 */
static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
						u32 key, unsigned len)
{
	struct list_head *pos, *nxt;
	struct c4iw_mm_entry *mm;

	spin_lock(&ucontext->mmap_lock);
	list_for_each_safe(pos, nxt, &ucontext->mmaps) {

		mm = list_entry(pos, struct c4iw_mm_entry, entry);
		if (mm->key == key && mm->len == len) {
			/* Drop the lock before logging; entry is ours now. */
			list_del_init(&mm->entry);
			spin_unlock(&ucontext->mmap_lock);
			CTR4(KTR_IW_CXGBE, "%s key 0x%x addr 0x%llx len %d",
			    __func__, key, (unsigned long long) mm->addr,
			    mm->len);
			return mm;
		}
	}
	spin_unlock(&ucontext->mmap_lock);
	return NULL;
}
549
/*
 * Append the mmap cookie 'mm' to the ucontext's list under mmap_lock so
 * a later mmap() call can match it via remove_mmap().
 */
static inline void insert_mmap(struct c4iw_ucontext *ucontext,
			       struct c4iw_mm_entry *mm)
{
	spin_lock(&ucontext->mmap_lock);
	CTR4(KTR_IW_CXGBE, "%s key 0x%x addr 0x%llx len %d", __func__, mm->key,
	    (unsigned long long) mm->addr, mm->len);
	list_add_tail(&mm->entry, &ucontext->mmaps);
	spin_unlock(&ucontext->mmap_lock);
}
559
582
583int c4iw_modify_qp(struct c4iw_dev *rhp,
584 struct c4iw_qp *qhp,
585 enum c4iw_qp_attr_mask mask,
586 struct c4iw_qp_attributes *attrs,
587 int internal);
588
597
598/*
599 * IW_CXGBE event bits.
600 * These bits are used for handling all events for a particular 'ep' serially.
601 */
602#define C4IW_EVENT_SOCKET 0x0001
603#define C4IW_EVENT_TIMEOUT 0x0002
604#define C4IW_EVENT_TERM 0x0004
605
606static inline int c4iw_convert_state(enum ib_qp_state ib_state)
607{
608 switch (ib_state) {
609 case IB_QPS_RESET:
610 case IB_QPS_INIT:
611 return C4IW_QP_STATE_IDLE;
612 case IB_QPS_RTS:
613 return C4IW_QP_STATE_RTS;
614 case IB_QPS_SQD:
616 case IB_QPS_SQE:
618 case IB_QPS_ERR:
619 return C4IW_QP_STATE_ERROR;
620 default:
621 return -1;
622 }
623}
624
625static inline int to_ib_qp_state(int c4iw_qp_state)
626{
627 switch (c4iw_qp_state) {
629 return IB_QPS_INIT;
631 return IB_QPS_RTS;
633 return IB_QPS_SQD;
635 return IB_QPS_SQE;
637 return IB_QPS_ERR;
638 }
639 return IB_QPS_ERR;
640}
641
642#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
643
644static inline u32 c4iw_ib_to_tpt_access(int a)
645{
646 return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
647 (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
648 (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
650}
651
652static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
653{
654 return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
655 (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
656}
657
662
663#define C4IW_NODE_DESC "iw_cxgbe Chelsio Communications"
664
665#define MPA_KEY_REQ "MPA ID Req Frame"
666#define MPA_KEY_REP "MPA ID Rep Frame"
667
668#define MPA_MAX_PRIVATE_DATA 256
669#define MPA_ENHANCED_RDMA_CONN 0x10
670#define MPA_REJECT 0x20
671#define MPA_CRC 0x40
672#define MPA_MARKERS 0x80
673#define MPA_FLAGS_MASK 0xE0
674
675#define MPA_V2_PEER2PEER_MODEL 0x8000
676#define MPA_V2_ZERO_LEN_FPDU_RTR 0x4000
677#define MPA_V2_RDMA_WRITE_RTR 0x8000
678#define MPA_V2_RDMA_READ_RTR 0x4000
679#define MPA_V2_IRD_ORD_MASK 0x3FFF
680
681#define c4iw_put_ep(ep) { \
682 CTR4(KTR_IW_CXGBE, "put_ep (%s:%u) ep %p, refcnt %d", \
683 __func__, __LINE__, ep, atomic_read(&(ep)->kref.refcount)); \
684 WARN_ON(atomic_read(&(ep)->kref.refcount) < 1); \
685 kref_put(&((ep)->kref), _c4iw_free_ep); \
686}
687
688#define c4iw_get_ep(ep) { \
689 CTR4(KTR_IW_CXGBE, "get_ep (%s:%u) ep %p, refcnt %d", \
690 __func__, __LINE__, ep, atomic_read(&(ep)->kref.refcount)); \
691 kref_get(&((ep)->kref)); \
692}
693
694void _c4iw_free_ep(struct kref *kref);
695
697 u8 key[16];
702};
703
707};
708
714};
715
716#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
717
720 LAYER_DDP = 0x10,
721 LAYER_MPA = 0x20,
728 DDP_LLP = 0x03
730
742 RDMAP_UNSPECIFIED = 0xff
744
756 DDPU_INV_VERS = 0x06
758
765};
766
768 IDLE = 0,
780};
781
790};
791
820 CM_ID_DEREFED = 28
822
824 TAILQ_ENTRY(c4iw_ep_common) entry; /* Work queue attachment */
825 struct iw_cm_id *cm_id;
826 struct c4iw_qp *qp;
827 struct c4iw_dev *dev;
829 struct kref kref;
830 struct mutex mutex;
831 struct sockaddr_storage local_addr;
832 struct sockaddr_storage remote_addr;
834 unsigned long flags;
835 unsigned long history;
838 struct thread *thread;
839 struct socket *so;
841};
842
845 unsigned int stid;
847 struct list_head listen_ep_list; /* list of all listener ep's bound
848 to one port address */
849};
850
851struct c4iw_ep {
854 struct timer_list timer;
855 unsigned int atid;
859 struct l2t_entry *l2t;
860 struct dst_entry *dst;
863 unsigned int mpa_pkt_len;
876};
877
878static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
879{
880 return cm_id->provider_data;
881}
882
883static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
884{
885 return cm_id->provider_data;
886}
887
/*
 * Compute the smallest TCP window-scale shift (capped at 14, per
 * RFC 7323) such that a 16-bit window field shifted by it can represent
 * the requested window 'win'.
 */
static inline int compute_wscale(int win)
{
	int shift;

	for (shift = 0; shift < 14; shift++) {
		if ((65535 << shift) >= win)
			break;
	}
	return shift;
}
896
898void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
899int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
900 u32 reserved, u32 flags);
902
903typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct mbuf *m);
904
905int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
906 struct l2t_entry *l2t);
908void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
909int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
910int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
919int __init c4iw_cm_init(void);
920void __exit c4iw_cm_term(void);
922 struct c4iw_dev_ucontext *uctx);
924 struct c4iw_dev_ucontext *uctx);
925int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
926int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
927 const struct ib_send_wr **bad_wr);
928int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
929 const struct ib_recv_wr **bad_wr);
930int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
931int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
932int c4iw_destroy_listen(struct iw_cm_id *cm_id);
933int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
934int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
935void c4iw_qp_add_ref(struct ib_qp *qp);
936void c4iw_qp_rem_ref(struct ib_qp *qp);
937struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
938 u32 max_num_sg, struct ib_udata *udata);
939int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
940 int sg_nents, unsigned int *sg_offset);
941int c4iw_dealloc_mw(struct ib_mw *mw);
942struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
943 struct ib_udata *udata);
944struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64
945 virt, int acc, struct ib_udata *udata);
946struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
947int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
948void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
949void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
950int c4iw_create_cq(struct ib_cq *ibcq,
951 const struct ib_cq_init_attr *attr,
952 struct ib_udata *udata);
953int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
954int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
955int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata);
956struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
957 struct ib_qp_init_attr *attrs,
958 struct ib_udata *udata);
959int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
960 int attr_mask, struct ib_udata *udata);
961int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
962 int attr_mask, struct ib_qp_init_attr *init_attr);
963struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
964u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
965void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
966u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
967void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
968int c4iw_ofld_send(struct c4iw_rdev *rdev, struct mbuf *m);
969void c4iw_flush_hw_cq(struct c4iw_cq *cq);
970void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
971int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
972int __c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
973int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
974int c4iw_flush_sq(struct c4iw_qp *qhp);
975int c4iw_ev_handler(struct sge_iq *, const struct rsp_ctrl *);
977int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
978u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
979void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
980 struct c4iw_dev_ucontext *uctx);
981u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
982void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
983 struct c4iw_dev_ucontext *uctx);
984void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
985#endif
struct ib_mr * c4iw_get_dma_mr(struct ib_pd *pd, int acc)
int c4iw_dealloc_mw(struct ib_mw *mw)
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_qp_init_attr *init_attr)
static int c4iw_wait_for_reply(struct c4iw_rdev *rdev, struct c4iw_wr_wait *wr_waitp, u32 hwtid, u32 qpid, struct socket *so, const char *func)
Definition: iw_cxgbe.h:210
struct ib_mr * c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg, struct ib_udata *udata)
static u32 c4iw_ib_to_tpt_access(int a)
Definition: iw_cxgbe.h:644
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
int inline_threshold
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev)
int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr)
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
int use_dsgl
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
static struct c4iw_mr * get_mhp(struct c4iw_dev *rhp, u32 mmid)
Definition: iw_cxgbe.h:301
struct ib_mr * c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt, int acc, struct ib_udata *udata)
static void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
Definition: iw_cxgbe.h:203
static int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr, void *handle, u32 id)
Definition: iw_cxgbe.h:332
int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
void c4iw_destroy_resource(struct c4iw_resource *rscp)
static struct c4iw_qp * to_c4iw_qp(struct ib_qp *ibqp)
Definition: iw_cxgbe.h:502
int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
static int to_ib_qp_state(int c4iw_qp_state)
Definition: iw_cxgbe.h:625
static void _remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id, int lock)
Definition: iw_cxgbe.h:338
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
static int _insert_handle(struct c4iw_dev *rhp, struct idr *idr, void *handle, u32 id, int lock)
Definition: iw_cxgbe.h:306
c4iw_ep_flags
Definition: iw_cxgbe.h:782
@ STOP_MPA_TIMER
Definition: iw_cxgbe.h:789
@ PEER_ABORT_IN_PROGRESS
Definition: iw_cxgbe.h:783
@ RELEASE_RESOURCES
Definition: iw_cxgbe.h:785
@ QP_REFERENCED
Definition: iw_cxgbe.h:788
@ CLOSE_SENT
Definition: iw_cxgbe.h:786
@ TIMEOUT
Definition: iw_cxgbe.h:787
@ ABORT_REQ_IN_PROGRESS
Definition: iw_cxgbe.h:784
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj)
static struct c4iw_listen_ep * to_listen_ep(struct iw_cm_id *cm_id)
Definition: iw_cxgbe.h:883
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, enum c4iw_qp_attr_mask mask, struct c4iw_qp_attributes *attrs, int internal)
static struct c4iw_ep * to_ep(struct iw_cm_id *cm_id)
Definition: iw_cxgbe.h:878
static int cur_max_read_depth(struct c4iw_dev *dev)
Definition: iw_cxgbe.h:361
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
void c4iw_id_table_free(struct c4iw_id_table *alloc)
struct ib_qp * c4iw_get_qp(struct ib_device *dev, int qpn)
static struct c4iw_mm_entry * remove_mmap(struct c4iw_ucontext *ucontext, u32 key, unsigned len)
Definition: iw_cxgbe.h:527
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
int c4iw_max_read_depth
static void insert_mmap(struct c4iw_ucontext *ucontext, struct c4iw_mm_entry *mm)
Definition: iw_cxgbe.h:550
static struct c4iw_qp * get_qhp(struct c4iw_dev *rhp, u32 qpid)
Definition: iw_cxgbe.h:296
static struct c4iw_mw * to_c4iw_mw(struct ib_mw *ibmw)
Definition: iw_cxgbe.h:419
void c4iw_flush_hw_cq(struct c4iw_cq *cq)
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
int c4iw_register_device(struct c4iw_dev *dev)
static struct c4iw_cq * get_chp(struct c4iw_dev *rhp, u32 cqid)
Definition: iw_cxgbe.h:291
c4iw_ddp_ecodes
Definition: iw_cxgbe.h:745
@ DDPT_STAG_NOT_ASSOC
Definition: iw_cxgbe.h:748
@ DDPU_INV_MO
Definition: iw_cxgbe.h:754
@ DDPT_INV_STAG
Definition: iw_cxgbe.h:746
@ DDPU_INV_MSN_NOBUF
Definition: iw_cxgbe.h:752
@ DDPU_INV_VERS
Definition: iw_cxgbe.h:756
@ DDPU_INV_MSN_RANGE
Definition: iw_cxgbe.h:753
@ DDPU_INV_QN
Definition: iw_cxgbe.h:751
@ DDPT_INV_VERS
Definition: iw_cxgbe.h:750
@ DDPU_MSG_TOOBIG
Definition: iw_cxgbe.h:755
@ DDPT_TO_WRAP
Definition: iw_cxgbe.h:749
@ DDPT_BASE_BOUNDS
Definition: iw_cxgbe.h:747
int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr)
int c4iw_ev_handler(struct sge_iq *, const struct rsp_ctrl *)
int(* c4iw_handler_func)(struct c4iw_dev *dev, struct mbuf *m)
Definition: iw_cxgbe.h:903
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
c4iw_rdev_flags
Definition: iw_cxgbe.h:120
@ T4_FATAL_ERROR
Definition: iw_cxgbe.h:121
@ T4_STATUS_PAGE_DISABLED
Definition: iw_cxgbe.h:122
int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
c4iw_rdma_ecodes
Definition: iw_cxgbe.h:731
@ RDMAP_INV_OPCODE
Definition: iw_cxgbe.h:738
@ RDMAP_TO_WRAP
Definition: iw_cxgbe.h:736
@ RDMAP_INV_STAG
Definition: iw_cxgbe.h:732
@ RDMAP_GLOBAL_CATA
Definition: iw_cxgbe.h:740
@ RDMAP_CANT_INV_STAG
Definition: iw_cxgbe.h:741
@ RDMAP_ACC_VIOL
Definition: iw_cxgbe.h:734
@ RDMAP_STAG_NOT_ASSOC
Definition: iw_cxgbe.h:735
@ RDMAP_INV_VERS
Definition: iw_cxgbe.h:737
@ RDMAP_UNSPECIFIED
Definition: iw_cxgbe.h:742
@ RDMAP_STREAM_CATA
Definition: iw_cxgbe.h:739
@ RDMAP_BASE_BOUNDS
Definition: iw_cxgbe.h:733
#define MPA_MAX_PRIVATE_DATA
Definition: iw_cxgbe.h:668
struct ib_qp * c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, struct ib_udata *udata)
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
c4iw_ep_state
Definition: iw_cxgbe.h:767
@ CONNECTING
Definition: iw_cxgbe.h:770
@ MPA_REP_SENT
Definition: iw_cxgbe.h:774
@ ABORTING
Definition: iw_cxgbe.h:776
@ DEAD
Definition: iw_cxgbe.h:779
@ LISTEN
Definition: iw_cxgbe.h:769
@ MORIBUND
Definition: iw_cxgbe.h:778
@ FPDU_MODE
Definition: iw_cxgbe.h:775
@ MPA_REQ_SENT
Definition: iw_cxgbe.h:772
@ MPA_REQ_WAIT
Definition: iw_cxgbe.h:771
@ CLOSING
Definition: iw_cxgbe.h:777
@ MPA_REQ_RCVD
Definition: iw_cxgbe.h:773
@ IDLE
Definition: iw_cxgbe.h:768
static int c4iw_convert_state(enum ib_qp_state ib_state)
Definition: iw_cxgbe.h:606
#define C4IW_WR_TO
Definition: iw_cxgbe.h:190
static void * cplhdr(struct mbuf *m)
Definition: iw_cxgbe.h:82
static struct c4iw_cq * to_c4iw_cq(struct ib_cq *ibcq)
Definition: iw_cxgbe.h:434
int __init c4iw_cm_init(void)
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct mbuf *m)
c4iw_mmid_state
Definition: iw_cxgbe.h:658
@ C4IW_STAG_STATE_VALID
Definition: iw_cxgbe.h:659
@ C4IW_STAG_STATE_INVALID
Definition: iw_cxgbe.h:660
static void remove_handle_nolock(struct c4iw_dev *rhp, struct idr *idr, u32 id)
Definition: iw_cxgbe.h:353
int __c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev)
u32 c4iw_get_resource(struct c4iw_id_table *id_table)
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
static struct c4iw_mr * to_c4iw_mr(struct ib_mr *ibmr)
Definition: iw_cxgbe.h:407
static struct c4iw_dev * to_c4iw_dev(struct ib_device *ibdev)
Definition: iw_cxgbe.h:281
static int c4iw_fatal_error(struct c4iw_rdev *rdev)
Definition: iw_cxgbe.h:172
static int insert_handle(struct c4iw_dev *rhp, struct idr *idr, void *handle, u32 id)
Definition: iw_cxgbe.h:326
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid, struct c4iw_dev_ucontext *uctx)
u16 c4iw_rqes_posted(struct c4iw_qp *qhp)
u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num, u32 reserved, u32 flags)
c4iw_layers_types
Definition: iw_cxgbe.h:718
@ LAYER_RDMAP
Definition: iw_cxgbe.h:719
@ DDP_LOCAL_CATA
Definition: iw_cxgbe.h:725
@ DDP_TAGGED_ERR
Definition: iw_cxgbe.h:726
@ DDP_UNTAGGED_ERR
Definition: iw_cxgbe.h:727
@ RDMAP_REMOTE_OP
Definition: iw_cxgbe.h:724
@ LAYER_MPA
Definition: iw_cxgbe.h:721
@ RDMAP_LOCAL_CATA
Definition: iw_cxgbe.h:722
@ LAYER_DDP
Definition: iw_cxgbe.h:720
@ RDMAP_REMOTE_PROT
Definition: iw_cxgbe.h:723
@ DDP_LLP
Definition: iw_cxgbe.h:728
void c4iw_qp_rem_ref(struct ib_qp *qp)
static int compute_wscale(int win)
Definition: iw_cxgbe.h:888
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
c4iw_ep_history
Definition: iw_cxgbe.h:792
@ ACT_OFLD_CONN
Definition: iw_cxgbe.h:794
@ ULP_REJECT
Definition: iw_cxgbe.h:803
@ DISCONN_UPCALL
Definition: iw_cxgbe.h:809
@ PEER_CLOSE
Definition: iw_cxgbe.h:806
@ CLOSE_CON_RPL
Definition: iw_cxgbe.h:815
@ CONNREQ_UPCALL
Definition: iw_cxgbe.h:807
@ PEER_ABORT
Definition: iw_cxgbe.h:805
@ ULP_ACCEPT
Definition: iw_cxgbe.h:802
@ EP_DISC_CLOSE
Definition: iw_cxgbe.h:810
@ ACT_OPEN_RPL
Definition: iw_cxgbe.h:795
@ CM_ID_DEREFED
Definition: iw_cxgbe.h:820
@ ABORT_CONN
Definition: iw_cxgbe.h:808
@ EP_DISC_ABORT
Definition: iw_cxgbe.h:811
@ ESTAB_UPCALL
Definition: iw_cxgbe.h:800
@ ACT_ESTAB
Definition: iw_cxgbe.h:796
@ PASS_ESTAB
Definition: iw_cxgbe.h:798
@ ACT_RETRY_NOMEM
Definition: iw_cxgbe.h:813
@ EP_DISC_FAIL
Definition: iw_cxgbe.h:816
@ ACT_OPEN_REQ
Definition: iw_cxgbe.h:793
@ PASS_ACCEPT_REQ
Definition: iw_cxgbe.h:797
@ CONN_RPL_UPCALL
Definition: iw_cxgbe.h:812
@ QP_REFED
Definition: iw_cxgbe.h:817
@ QP_DEREFED
Definition: iw_cxgbe.h:818
@ ABORT_UPCALL
Definition: iw_cxgbe.h:799
@ CM_ID_REFED
Definition: iw_cxgbe.h:819
@ CLOSE_UPCALL
Definition: iw_cxgbe.h:801
@ ACT_RETRY_INUSE
Definition: iw_cxgbe.h:814
@ TIMEDOUT
Definition: iw_cxgbe.h:804
int c4iw_destroy_listen(struct iw_cm_id *cm_id)
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata)
static void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
Definition: iw_cxgbe.h:348
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
c4iw_qp_state
Definition: iw_cxgbe.h:589
@ C4IW_QP_STATE_TOT
Definition: iw_cxgbe.h:595
@ C4IW_QP_STATE_TERMINATE
Definition: iw_cxgbe.h:593
@ C4IW_QP_STATE_ERROR
Definition: iw_cxgbe.h:592
@ C4IW_QP_STATE_CLOSING
Definition: iw_cxgbe.h:594
@ C4IW_QP_STATE_IDLE
Definition: iw_cxgbe.h:590
@ C4IW_QP_STATE_RTS
Definition: iw_cxgbe.h:591
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid, struct c4iw_dev_ucontext *uctx)
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
static int t4_max_fr_depth(struct c4iw_rdev *rdev, bool use_dsgl)
Definition: iw_cxgbe.h:182
struct ib_mw * c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, struct ib_udata *udata)
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata)
static int c4iw_num_stags(struct c4iw_rdev *rdev)
Definition: iw_cxgbe.h:177
c4iw_mpa_ecodes
Definition: iw_cxgbe.h:759
@ MPA_CRC_ERR
Definition: iw_cxgbe.h:760
@ MPA_LOCAL_CATA
Definition: iw_cxgbe.h:762
@ MPA_INSUFF_IRD
Definition: iw_cxgbe.h:763
@ MPA_NOMATCH_RTR
Definition: iw_cxgbe.h:764
@ MPA_MARKER_ERR
Definition: iw_cxgbe.h:761
static struct c4iw_pd * to_c4iw_pd(struct ib_pd *ibpd)
Definition: iw_cxgbe.h:372
void c4iw_unregister_device(struct c4iw_dev *dev)
c4iw_qp_attr_mask
Definition: iw_cxgbe.h:560
@ C4IW_QP_ATTR_ENABLE_RDMA_BIND
Definition: iw_cxgbe.h:566
@ C4IW_QP_ATTR_MAX_IRD
Definition: iw_cxgbe.h:568
@ C4IW_QP_ATTR_ENABLE_RDMA_READ
Definition: iw_cxgbe.h:564
@ C4IW_QP_ATTR_MPA_ATTR
Definition: iw_cxgbe.h:571
@ C4IW_QP_ATTR_RQ_DB
Definition: iw_cxgbe.h:563
@ C4IW_QP_ATTR_NEXT_STATE
Definition: iw_cxgbe.h:561
@ C4IW_QP_ATTR_MAX_ORD
Definition: iw_cxgbe.h:567
@ C4IW_QP_ATTR_SQ_DB
Definition: iw_cxgbe.h:562
@ C4IW_QP_ATTR_ENABLE_RDMA_WRITE
Definition: iw_cxgbe.h:565
@ C4IW_QP_ATTR_STREAM_MSG_BUFFER
Definition: iw_cxgbe.h:570
@ C4IW_QP_ATTR_VALID_MODIFY
Definition: iw_cxgbe.h:573
@ C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE
Definition: iw_cxgbe.h:572
@ C4IW_QP_ATTR_LLP_STREAM_HANDLE
Definition: iw_cxgbe.h:569
int c4iw_flush_sq(struct c4iw_qp *qhp)
static u32 c4iw_ib_to_tpt_bind_access(int acc)
Definition: iw_cxgbe.h:652
static void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
Definition: iw_cxgbe.h:197
void __exit c4iw_cm_term(void)
static struct c4iw_ucontext * to_c4iw_ucontext(struct ib_ucontext *c)
Definition: iw_cxgbe.h:515
int c4iw_pblpool_create(struct c4iw_rdev *rdev)
int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset)
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe)
int c4iw_debug
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
void c4iw_qp_add_ref(struct ib_qp *qp)
#define KTR_IW_CXGBE
Definition: iw_cxgbe.h:68
static struct c4iw_dev * rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
Definition: iw_cxgbe.h:286
void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, struct l2t_entry *l2t)
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
void _c4iw_free_ep(struct kref *kref)
uint64_t u64
Definition: osdep.h:62
uint8_t u8
Definition: osdep.h:59
uint16_t __be16
Definition: osdep.h:68
uint16_t u16
Definition: osdep.h:60
#define container_of(p, s, f)
Definition: osdep.h:95
uint32_t u32
Definition: osdep.h:61
bool ulptx_memwrite_dsgl
Definition: common.h:406
bool dev_512sgl_mr
Definition: common.h:408
unsigned int max_ordird_qp
Definition: common.h:401
struct adapter_params params
Definition: adapter.h:958
struct t4_virt_res vres
Definition: adapter.h:960
device_t dev
Definition: adapter.h:866
spinlock_t lock
Definition: iw_cxgbe.h:428
wait_queue_head_t wait
Definition: iw_cxgbe.h:431
atomic_t refcnt
Definition: iw_cxgbe.h:430
struct ib_cq ibcq
Definition: iw_cxgbe.h:425
struct t4_cq cq
Definition: iw_cxgbe.h:427
struct c4iw_dev * rhp
Definition: iw_cxgbe.h:426
spinlock_t comp_handler_lock
Definition: iw_cxgbe.h:429
struct mutex lock
Definition: iw_cxgbe.h:117
struct list_head qpids
Definition: iw_cxgbe.h:115
struct list_head cqids
Definition: iw_cxgbe.h:116
spinlock_t lock
Definition: iw_cxgbe.h:276
struct pci_dev pdev
Definition: iw_cxgbe.h:270
struct idr qpidr
Definition: iw_cxgbe.h:274
struct dentry * debugfs_root
Definition: iw_cxgbe.h:277
struct c4iw_rdev rdev
Definition: iw_cxgbe.h:271
u32 avail_ird
Definition: iw_cxgbe.h:278
struct ib_device ibdev
Definition: iw_cxgbe.h:269
struct idr cqidr
Definition: iw_cxgbe.h:273
u32 device_cap_flags
Definition: iw_cxgbe.h:272
struct idr mmidr
Definition: iw_cxgbe.h:275
struct iw_cm_id * cm_id
Definition: iw_cxgbe.h:825
unsigned long flags
Definition: iw_cxgbe.h:834
unsigned long history
Definition: iw_cxgbe.h:835
struct socket * so
Definition: iw_cxgbe.h:839
TAILQ_ENTRY(c4iw_ep_common) entry
struct sockaddr_storage local_addr
Definition: iw_cxgbe.h:831
struct kref kref
Definition: iw_cxgbe.h:829
struct mutex mutex
Definition: iw_cxgbe.h:830
struct c4iw_wr_wait wr_wait
Definition: iw_cxgbe.h:833
struct c4iw_qp * qp
Definition: iw_cxgbe.h:826
struct c4iw_dev * dev
Definition: iw_cxgbe.h:827
struct sockaddr_storage remote_addr
Definition: iw_cxgbe.h:832
struct thread * thread
Definition: iw_cxgbe.h:838
enum c4iw_ep_state state
Definition: iw_cxgbe.h:828
struct l2t_entry * l2t
Definition: iw_cxgbe.h:859
u32 hwtid
Definition: iw_cxgbe.h:856
u32 snd_seq
Definition: iw_cxgbe.h:857
struct c4iw_mpa_attributes mpa_attr
Definition: iw_cxgbe.h:861
u8 mpa_pkt[sizeof(struct mpa_message)+MPA_MAX_PRIVATE_DATA]
Definition: iw_cxgbe.h:862
struct c4iw_listen_ep * parent_ep
Definition: iw_cxgbe.h:853
u16 txq_idx
Definition: iw_cxgbe.h:871
u32 mtu
Definition: iw_cxgbe.h:867
u8 retry_with_mpa_v1
Definition: iw_cxgbe.h:874
u16 rss_qid
Definition: iw_cxgbe.h:870
u32 ird
Definition: iw_cxgbe.h:864
unsigned int mpa_pkt_len
Definition: iw_cxgbe.h:863
struct dst_entry * dst
Definition: iw_cxgbe.h:860
u16 plen
Definition: iw_cxgbe.h:869
unsigned int atid
Definition: iw_cxgbe.h:855
struct timer_list timer
Definition: iw_cxgbe.h:854
u32 tx_chan
Definition: iw_cxgbe.h:866
u32 ord
Definition: iw_cxgbe.h:865
u16 ctrlq_idx
Definition: iw_cxgbe.h:872
struct c4iw_ep_common com
Definition: iw_cxgbe.h:852
u16 mss
Definition: iw_cxgbe.h:868
u8 tried_with_mpa_v1
Definition: iw_cxgbe.h:875
u8 tos
Definition: iw_cxgbe.h:873
u32 rcv_seq
Definition: iw_cxgbe.h:858
int t4_max_qp_depth
Definition: iw_cxgbe.h:147
int t4_stat_len
Definition: iw_cxgbe.h:149
int t4_max_cq_depth
Definition: iw_cxgbe.h:148
int t4_max_rq_size
Definition: iw_cxgbe.h:145
int t4_eq_status_entries
Definition: iw_cxgbe.h:142
int t4_max_sq_size
Definition: iw_cxgbe.h:146
int t4_max_eq_size
Definition: iw_cxgbe.h:143
int t4_max_iq_size
Definition: iw_cxgbe.h:144
struct ib_ah ibah
Definition: iw_cxgbe.h:483
struct ib_srq ibsrq
Definition: iw_cxgbe.h:479
unsigned long * table
Definition: iw_cxgbe.h:100
spinlock_t lock
Definition: iw_cxgbe.h:99
struct c4iw_ep_common com
Definition: iw_cxgbe.h:844
struct list_head listen_ep_list
Definition: iw_cxgbe.h:847
unsigned int stid
Definition: iw_cxgbe.h:845
Definition: iw_cxgbe.h:520
struct list_head entry
Definition: iw_cxgbe.h:521
u64 addr
Definition: iw_cxgbe.h:522
unsigned len
Definition: iw_cxgbe.h:524
u32 key
Definition: iw_cxgbe.h:523
struct c4iw_dev * rhp
Definition: iw_cxgbe.h:398
u64 kva
Definition: iw_cxgbe.h:399
struct ib_mr ibmr
Definition: iw_cxgbe.h:396
struct ib_umem * umem
Definition: iw_cxgbe.h:397
struct tpt_attributes attr
Definition: iw_cxgbe.h:400
u64 * mpl
Definition: iw_cxgbe.h:401
dma_addr_t mpl_addr
Definition: iw_cxgbe.h:402
u32 mpl_len
Definition: iw_cxgbe.h:404
u32 max_mpl_len
Definition: iw_cxgbe.h:403
struct tpt_attributes attr
Definition: iw_cxgbe.h:416
struct c4iw_dev * rhp
Definition: iw_cxgbe.h:414
struct ib_mw ibmw
Definition: iw_cxgbe.h:413
u64 kva
Definition: iw_cxgbe.h:415
struct ib_pd ibpd
Definition: iw_cxgbe.h:367
struct c4iw_dev * rhp
Definition: iw_cxgbe.h:369
u32 pdid
Definition: iw_cxgbe.h:368
struct list_head entry
Definition: iw_cxgbe.h:110
u32 sq_max_sges_rdma_write
Definition: iw_cxgbe.h:455
struct c4iw_mpa_attributes mpa_attr
Definition: iw_cxgbe.h:469
char terminate_buffer[52]
Definition: iw_cxgbe.h:466
struct c4iw_ep * llp_stream_handle
Definition: iw_cxgbe.h:470
struct c4iw_dev * rhp
Definition: iw_cxgbe.h:488
spinlock_t lock
Definition: iw_cxgbe.h:492
struct t4_wq wq
Definition: iw_cxgbe.h:491
struct mutex mutex
Definition: iw_cxgbe.h:493
struct work_struct free_work
Definition: iw_cxgbe.h:498
struct c4iw_qp_attributes attr
Definition: iw_cxgbe.h:490
struct ib_qp ibqp
Definition: iw_cxgbe.h:487
struct c4iw_ucontext * ucontext
Definition: iw_cxgbe.h:499
int sq_sig_all
Definition: iw_cxgbe.h:497
struct kref kref
Definition: iw_cxgbe.h:494
wait_queue_head_t wait
Definition: iw_cxgbe.h:495
struct c4iw_ep * ep
Definition: iw_cxgbe.h:489
struct timer_list timer
Definition: iw_cxgbe.h:496
struct c4iw_resource resource
Definition: iw_cxgbe.h:154
unsigned long cqshift
Definition: iw_cxgbe.h:157
struct t4_dev_status_page * status_page
Definition: iw_cxgbe.h:165
unsigned long qpshift
Definition: iw_cxgbe.h:155
unsigned long bar2_pa
Definition: iw_cxgbe.h:166
vmem_t * rqt_arena
Definition: iw_cxgbe.h:160
u32 cqmask
Definition: iw_cxgbe.h:158
struct c4iw_stats stats
Definition: iw_cxgbe.h:163
struct adapter * adap
Definition: iw_cxgbe.h:153
struct c4iw_hw_queue hw_queue
Definition: iw_cxgbe.h:164
unsigned int bar2_len
Definition: iw_cxgbe.h:168
u32 flags
Definition: iw_cxgbe.h:162
u32 qpmask
Definition: iw_cxgbe.h:156
vmem_t * pbl_arena
Definition: iw_cxgbe.h:161
struct workqueue_struct * free_workq
Definition: iw_cxgbe.h:169
struct c4iw_dev_ucontext uctx
Definition: iw_cxgbe.h:159
void __iomem * bar2_kva
Definition: iw_cxgbe.h:167
struct c4iw_id_table tpt_table
Definition: iw_cxgbe.h:104
struct c4iw_id_table qid_table
Definition: iw_cxgbe.h:105
struct c4iw_id_table pdid_table
Definition: iw_cxgbe.h:106
u64 cur
Definition: iw_cxgbe.h:127
u64 fail
Definition: iw_cxgbe.h:129
u64 max
Definition: iw_cxgbe.h:128
u64 total
Definition: iw_cxgbe.h:126
struct c4iw_stat pbl
Definition: iw_cxgbe.h:137
struct c4iw_stat pd
Definition: iw_cxgbe.h:135
struct c4iw_stat stag
Definition: iw_cxgbe.h:136
struct c4iw_stat qid
Definition: iw_cxgbe.h:134
struct mutex lock
Definition: iw_cxgbe.h:133
struct c4iw_stat rqt
Definition: iw_cxgbe.h:138
struct c4iw_dev_ucontext uctx
Definition: iw_cxgbe.h:509
struct list_head mmaps
Definition: iw_cxgbe.h:512
struct ib_ucontext ibucontext
Definition: iw_cxgbe.h:508
spinlock_t mmap_lock
Definition: iw_cxgbe.h:511
struct completion completion
Definition: iw_cxgbe.h:194
Definition: t4_l2t.h:63
u8 key[16]
Definition: iw_cxgbe.h:697
u8 private_data[0]
Definition: iw_cxgbe.h:701
__be16 private_data_size
Definition: iw_cxgbe.h:700
Definition: t4.h:546
Definition: t4.h:188
u_int size
Definition: offload.h:187
struct t4_range stag
Definition: offload.h:193
Definition: t4.h:362
__be16 hdrct_rsvd
Definition: iw_cxgbe.h:712
u32 remote_invaliate_disable
Definition: iw_cxgbe.h:389
u32 mw_bind_enable
Definition: iw_cxgbe.h:391
enum fw_ri_mem_perms perms
Definition: iw_cxgbe.h:380
#define T4_MAX_FR_FW_DSGL_DEPTH
Definition: t4.h:107
#define T4_MAX_FR_IMMD_DEPTH
Definition: t4.h:103
#define T4_MAX_FR_DSGL_DEPTH
Definition: t4.h:105
fw_ri_mem_perms
@ FW_RI_MEM_ACCESS_REM_WRITE
@ FW_RI_MEM_ACCESS_REM_READ
@ FW_RI_MEM_ACCESS_LOCAL_WRITE
@ FW_RI_MEM_ACCESS_LOCAL_READ