FreeBSD kernel IXGBE device code
if_sriov.c
/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef PCI_IOV

#include <sys/ktr.h>

MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");

/************************************************************************
 * ixgbe_pci_iov_detach
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
	return pci_iov_detach(dev);
}

/************************************************************************
 * ixgbe_define_iov_schemas
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, true);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, false);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, false);
	*error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (*error != 0) {
		device_printf(dev,
		    "Error %d setting up SR-IOV\n", *error);
	}
} /* ixgbe_define_iov_schemas */
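
/*
 * Illustrative sketch (not part of the original source): the schema keys
 * registered above surface to the administrator through iovctl(8).  A
 * hypothetical /etc/iovctl.conf for a PF attached as ix0 might read:
 *
 *	PF {
 *		device: "ix0";
 *		num_vfs: 4;
 *	}
 *
 *	VF-0 {
 *		mac-addr: "02:00:00:00:00:01";
 *		allow-set-mac: true;
 *	}
 *
 * Keys left unset fall back to the IOV_SCHEMA_HASDEFAULT values above:
 * "mac-anti-spoof" defaults to true, "allow-set-mac" and "allow-promisc"
 * to false.
 */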

/************************************************************************
 * ixgbe_align_all_queue_indices
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct ixgbe_softc *sc)
{
	int i;
	int index;

	for (i = 0; i < sc->num_rx_queues; i++) {
		index = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
		sc->rx_queues[i].rxr.me = index;
	}

	for (i = 0; i < sc->num_tx_queues; i++) {
		index = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
		sc->tx_queues[i].txr.me = index;
	}
}

/* Support functions for SR-IOV/VF management */
static inline void
ixgbe_send_vf_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
{
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	sc->hw.mbx.ops.write(&sc->hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(sc, vf, msg | IXGBE_VT_MSGTYPE_ACK);
}

static inline void
ixgbe_send_vf_nack(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(sc, vf, msg | IXGBE_VT_MSGTYPE_NACK);
}
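
/*
 * Note (added for exposition): a mailbox word carries the message type
 * in its low bits (IXGBE_VT_MSG_MASK) and handshake flags in its high
 * bits, so an ACK for IXGBE_VF_SET_VLAN is simply
 * IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK, with IXGBE_VT_MSGTYPE_CTS
 * OR'd in once the VF has completed its reset handshake.
 */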

static inline void
ixgbe_process_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_nack(sc, vf, 0);
}

static inline boolean_t
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

static inline int
ixgbe_vf_queues(int mode)
{
	switch (mode) {
	case IXGBE_64_VM:
		return (2);
	case IXGBE_32_VM:
		return (4);
	case IXGBE_NO_VM:
	default:
		return (0);
	}
}

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}
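
/*
 * Worked example (added for exposition): hardware queues are carved into
 * per-pool groups, so in IXGBE_32_VM mode each pool owns
 * ixgbe_vf_queues() == 4 queues and pool 3's queue 1 maps to absolute
 * index 3 * 4 + 1 = 13; in IXGBE_64_VM mode the same queue would be
 * 3 * 2 + 1 = 7.
 */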

static inline void
ixgbe_update_max_frame(struct ixgbe_softc *sc, int max_frame)
{
	if (sc->max_frame_size < max_frame)
		sc->max_frame_size = max_frame;
}

inline u32
ixgbe_get_mrqc(int iov_mode)
{
	u32 mrqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mrqc = IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc = IXGBE_MRQC_VMDQRSS32EN;
		break;
	case IXGBE_NO_VM:
		mrqc = 0;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mrqc;
}


inline u32
ixgbe_get_mtqc(int iov_mode)
{
	uint32_t mtqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_32_VM:
		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_NO_VM:
		mtqc = IXGBE_MTQC_64Q_1PB;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mtqc;
}

void
ixgbe_ping_all_vfs(struct ixgbe_softc *sc)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < sc->num_vfs; i++) {
		vf = &sc->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(sc, vf, IXGBE_PF_CONTROL_MSG);
	}
} /* ixgbe_ping_all_vfs */


static void
ixgbe_vf_set_default_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
    uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &sc->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */
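
/*
 * Worked example (added for exposition): with tag = 100, VMVIR becomes
 * 100 | IXGBE_VMVIR_VLANA_DEFAULT, so the hardware inserts VLAN 100 into
 * every frame the VF transmits, while clearing VMOLR_AUPE makes the pool
 * reject untagged receive traffic.
 */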


static boolean_t
ixgbe_vf_frame_size_compatible(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{

	/*
	 * Frame size compatibility between PF and VF is only a problem on
	 * 82599-based cards.  X540 and later support any combination of jumbo
	 * frames on PFs and VFs.
	 */
	if (sc->hw.mac.type != ixgbe_mac_82599EB)
		return (true);

	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		/*
		 * On legacy (1.0 and older) VF versions, we don't support jumbo
		 * frames on either the PF or the VF.
		 */
		if (sc->max_frame_size > ETHER_MAX_LEN ||
		    vf->maximum_frame_size > ETHER_MAX_LEN)
			return (false);

		return (true);

		break;
	case IXGBE_API_VER_1_1:
	default:
		/*
		 * 1.1 or later VF versions always work if they aren't using
		 * jumbo frames.
		 */
		if (vf->maximum_frame_size <= ETHER_MAX_LEN)
			return (true);

		/*
		 * Jumbo frames only work with VFs if the PF is also using jumbo
		 * frames.
		 */
		if (sc->max_frame_size <= ETHER_MAX_LEN)
			return (true);

		return (false);
	}
} /* ixgbe_vf_frame_size_compatible */
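
/*
 * Note (added for exposition): ETHER_MAX_LEN is the standard 1518-byte
 * Ethernet maximum, so on 82599 a legacy (pre-1.1 API) VF is compatible
 * only while both the PF and the VF stay at standard frame sizes; jumbo
 * frames require negotiating the 1.1 mailbox API.
 */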


static void
ixgbe_process_vf_reset(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(sc, vf, vf->default_vlan);

	// XXX clear multicast addresses

	ixgbe_clear_rar(&sc->hw, vf->rar_index);

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */


static void
ixgbe_vf_enable_transmit(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfte;

	hw = &sc->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
	vfte |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */


static void
ixgbe_vf_enable_receive(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfre;

	hw = &sc->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
	if (ixgbe_vf_frame_size_compatible(sc, vf))
		vfre |= IXGBE_VF_BIT(vf->pool);
	else
		vfre &= ~IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */


static void
ixgbe_vf_reset_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &sc->hw;

	ixgbe_process_vf_reset(sc, vf);

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr,
		    vf->pool, true);
		ack = IXGBE_VT_MSGTYPE_ACK;
	} else
		ack = IXGBE_VT_MSGTYPE_NACK;

	ixgbe_vf_enable_transmit(sc, vf);
	ixgbe_vf_enable_receive(sc, vf);

	vf->flags |= IXGBE_VF_CTS;

	resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */


static void
ixgbe_vf_set_mac(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	mac = (uint8_t*)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr, vf->pool,
	    true);

	ixgbe_send_vf_ack(sc, vf, msg[0]);
} /* ixgbe_vf_set_mac */


/*
 * VF multicast addresses are set by using the appropriate bit in
 * one of 128 32-bit MTA registers (4096 possible bits).
 */
static void
ixgbe_vf_set_mc_addr(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 *msg)
{
	u16 *list = (u16*)&msg[1];
	int entries;
	u32 vmolr, vec_bit, vec_reg, mta_reg;

	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	entries = min(entries, IXGBE_MAX_VF_MC);

	vmolr = IXGBE_READ_REG(&sc->hw, IXGBE_VMOLR(vf->pool));

	vf->num_mc_hashes = entries;

	/* Set the appropriate MTA bit */
	for (int i = 0; i < entries; i++) {
		vf->mc_hash[i] = list[i];
		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
		vec_bit = vf->mc_hash[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(&sc->hw, IXGBE_MTA(vec_reg));
		mta_reg |= (1 << vec_bit);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_MTA(vec_reg), mta_reg);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(&sc->hw, IXGBE_VMOLR(vf->pool), vmolr);
	ixgbe_send_vf_ack(sc, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */
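
/*
 * Worked example (added for exposition): for a multicast hash of 0x0C31,
 * vec_reg = (0x0C31 >> 5) & 0x7F = 0x61 (register MTA[97]) and
 * vec_bit = 0x0C31 & 0x1F = 0x11 (bit 17), so the loop above sets bit 17
 * of MTA[97].  VMOLR_ROMPE then lets the pool receive packets that hit
 * any set MTA bit.
 */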


static void
ixgbe_vf_set_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &sc->hw;
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0) {
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
	ixgbe_send_vf_ack(sc, vf, msg[0]);
} /* ixgbe_vf_set_vlan */


static void
ixgbe_vf_set_lpe(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &sc->hw;
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(sc, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(sc, vf, msg[0]);
		return;
	}

	vf->maximum_frame_size = vf_max_size;
	ixgbe_update_max_frame(sc, vf->maximum_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(sc, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < sc->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(sc, vf, msg[0]);
} /* ixgbe_vf_set_lpe */
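
/*
 * Worked example (added for exposition): a VF asking for 1522-byte
 * frames (1518 plus a 4-byte VLAN tag, CRC included) sends msg[1] = 1522;
 * subtracting ETHER_CRC_LEN stores a maximum_frame_size of 1518.  If the
 * new sc->max_frame_size exceeds the MFS field currently in MHADD, the
 * PF raises the device-wide maximum frame size to match.
 */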


static void
ixgbe_vf_set_macvlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
    uint32_t *msg)
{
	//XXX implement this
	ixgbe_send_vf_nack(sc, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */


static void
ixgbe_vf_api_negotiate(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_ack(sc, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		break;
	}
} /* ixgbe_vf_api_negotiate */


static void
ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &sc->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (msg[0]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(sc->iov_mode);
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */
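
/*
 * Note (added for exposition): IXGBE_VF_TX_QUEUES, IXGBE_VF_RX_QUEUES,
 * IXGBE_VF_TRANS_VLAN and IXGBE_VF_DEF_QUEUE are word indices into
 * resp[], so the reply carries the queue counts, a flag telling the VF
 * whether the PF will insert a default VLAN tag for it, and the default
 * queue, all in one IXGBE_VF_GET_QUEUES_RESP_LEN-word mailbox write.
 */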


static void
ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
#ifdef KTR
	struct ifnet *ifp = iflib_get_ifp(ctx);
#endif
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &sc->hw;

	error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d", ifp->if_xname,
	    msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(sc, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(sc, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(sc, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(sc, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(sc, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(sc, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(sc, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(sc, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(sc, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */


/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	hw = &sc->hw;

	for (i = 0; i < sc->num_vfs; i++) {
		vf = &sc->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(sc, vf);

			if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(ctx, vf);

			if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(sc, vf);
		}
	}
} /* ixgbe_handle_mbx */

int
ixgbe_if_iov_init(if_ctx_t ctx, u16 num_vfs, const nvlist_t *config)
{
	struct ixgbe_softc *sc;
	int retval = 0;

	sc = iflib_get_softc(ctx);
	sc->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32+ VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
	if (num_vfs >= IXGBE_32_VM)
		sc->iov_mode = IXGBE_64_VM;
	else
		sc->iov_mode = IXGBE_32_VM;

	/* Again, reserving 1 VM's worth of queues for the PF */
	sc->pool = sc->iov_mode - 1;

	if ((num_vfs > sc->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	sc->vfs = malloc(sizeof(*sc->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (sc->vfs == NULL) {
		retval = ENOMEM;
		goto err_init_iov;
	}

	sc->num_vfs = num_vfs;
	ixgbe_if_init(sc->ctx);
	sc->feat_en |= IXGBE_FEATURE_SRIOV;

	return (retval);

err_init_iov:
	sc->num_vfs = 0;
	sc->pool = 0;
	sc->iov_mode = IXGBE_NO_VM;

	return (retval);
} /* ixgbe_if_iov_init */
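
/*
 * Worked example (added for exposition): a request for 8 VFs selects
 * IXGBE_32_VM mode, so the PF takes pool 31 and each VF can use up to
 * four queues; a request for 40 VFs selects IXGBE_64_VM mode, pool 63,
 * and two queues per VF.  Requests for 64 or more VFs fail with ENOSPC,
 * since one pool is always reserved for the PF.
 */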

void
ixgbe_if_iov_uninit(if_ctx_t ctx)
{
	struct ixgbe_hw *hw;
	struct ixgbe_softc *sc;
	uint32_t pf_reg, vf_reg;

	sc = iflib_get_softc(ctx);
	hw = &sc->hw;

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(sc->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(sc->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(sc->pool));

	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(sc->vfs, M_IXGBE_SRIOV);
	sc->vfs = NULL;
	sc->num_vfs = 0;
	sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
} /* ixgbe_if_iov_uninit */

static void
ixgbe_init_vf(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	hw = &sc->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(sc, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&sc->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, true);
	}

	ixgbe_vf_enable_transmit(sc, vf);
	ixgbe_vf_enable_receive(sc, vf);

	ixgbe_send_vf_msg(sc, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */

void
ixgbe_initialize_iov(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (sc->iov_mode == IXGBE_NO_VM)
		return;

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc = IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (sc->iov_mode) {
	case IXGBE_64_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", sc->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(sc->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(sc->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(sc->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (sc->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < sc->num_vfs; i++)
		ixgbe_init_vf(sc, &sc->vfs[i]);
} /* ixgbe_initialize_iov */
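
/*
 * Note (added for exposition): the VT_CTL value written above enables
 * VMDq (VT_ENABLE), turns on packet replication (REPLEN), and sets the
 * default pool to the PF's pool via IXGBE_VT_CTL_POOL_SHIFT, so frames
 * matching no VF filter are delivered to the PF instead of being
 * dropped.
 */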


/* Check the max frame setting of all active VFs */
void
ixgbe_recalculate_max_frame(struct ixgbe_softc *sc)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < sc->num_vfs; i++) {
		vf = &sc->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(sc, vf->maximum_frame_size);
	}
} /* ixgbe_recalculate_max_frame */

int
ixgbe_if_iov_vf_add(if_ctx_t ctx, u16 vfnum, const nvlist_t *config)
{
	struct ixgbe_softc *sc;
	struct ixgbe_vf *vf;
	const void *mac;

	sc = iflib_get_softc(ctx);

	KASSERT(vfnum < sc->num_vfs, ("VF index %d is out of range %d",
	    vfnum, sc->num_vfs));

	vf = &sc->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->maximum_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(sc, vf->maximum_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(sc, vf);

	return (0);
} /* ixgbe_if_iov_vf_add */

#else

void
ixgbe_handle_mbx(void *context)
{
	UNREFERENCED_PARAMETER(context);
} /* ixgbe_handle_mbx */

#endif
Definition: ixgbe.h:271