/*	$NetBSD: if_vmx.c,v 1.7 2016/06/10 13:27:13 ozaki-r Exp $	*/
/*	$OpenBSD: if_vmx.c,v 1.16 2014/01/22 06:04:17 brad Exp $	*/

/*
 * Copyright (c) 2013 Tsubai Masanari
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vmx.c,v 1.7 2016/06/10 13:27:13 ozaki-r Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>	/* for <netinet/ip.h> */
#include <netinet/in.h>		/* for <netinet/ip.h> */
#include <netinet/ip.h>		/* for struct ip */
#include <netinet/tcp.h>	/* for struct tcphdr */
#include <netinet/udp.h>	/* for struct udphdr */

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <arch/x86/pci/if_vmxreg.h>

#define NRXQUEUE 1
#define NTXQUEUE 1

#define NTXDESC 128		/* tx ring size */
#define NTXSEGS 8		/* tx descriptors per packet */
#define NRXDESC 128
#define NTXCOMPDESC NTXDESC
#define NRXCOMPDESC (NRXDESC * 2)	/* ring1 + ring2 */

#define VMXNET3_DRIVER_VERSION 0x00010000

struct vmxnet3_txring {
	struct mbuf *m[NTXDESC];
	bus_dmamap_t dmap[NTXDESC];
	struct vmxnet3_txdesc *txd;
	u_int head;
	u_int next;
	uint8_t gen;
};

struct vmxnet3_rxring {
	struct mbuf *m[NRXDESC];
	bus_dmamap_t dmap[NRXDESC];
	struct vmxnet3_rxdesc *rxd;
	u_int fill;
	uint8_t gen;
	uint8_t rid;
};

struct vmxnet3_comp_ring {
	union {
		struct vmxnet3_txcompdesc *txcd;
		struct vmxnet3_rxcompdesc *rxcd;
	};
	u_int next;
	uint8_t gen;
};

struct vmxnet3_txqueue {
	struct vmxnet3_txring cmd_ring;
	struct vmxnet3_comp_ring comp_ring;
	struct vmxnet3_txq_shared *ts;
};

struct vmxnet3_rxqueue {
	struct vmxnet3_rxring cmd_ring[2];
	struct vmxnet3_comp_ring comp_ring;
	struct vmxnet3_rxq_shared *rs;
};

struct vmxnet3_softc {
	device_t sc_dev;
	struct ethercom sc_ethercom;
	struct ifmedia sc_media;

	bus_space_tag_t sc_iot0;
	bus_space_tag_t sc_iot1;
	bus_space_handle_t sc_ioh0;
	bus_space_handle_t sc_ioh1;
	bus_dma_tag_t sc_dmat;

	struct vmxnet3_txqueue sc_txq[NTXQUEUE];
	struct vmxnet3_rxqueue sc_rxq[NRXQUEUE];
	struct vmxnet3_driver_shared *sc_ds;
	uint8_t *sc_mcast;
};

#define VMXNET3_STAT

#ifdef VMXNET3_STAT
struct {
	u_int ntxdesc;
	u_int nrxdesc;
	u_int txhead;
	u_int txdone;
	u_int maxtxlen;
	u_int rxdone;
	u_int rxfill;
	u_int intr;
} vmxstat = {
	.ntxdesc = NTXDESC,
	.nrxdesc = NRXDESC
};
#endif

#define JUMBO_LEN (MCLBYTES - ETHER_ALIGN)	/* XXX */
#define DMAADDR(map) ((map)->dm_segs[0].ds_addr)

#define READ_BAR0(sc, reg) bus_space_read_4((sc)->sc_iot0, (sc)->sc_ioh0, reg)
#define READ_BAR1(sc, reg) bus_space_read_4((sc)->sc_iot1, (sc)->sc_ioh1, reg)
#define WRITE_BAR0(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot0, (sc)->sc_ioh0, reg, val)
#define WRITE_BAR1(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot1, (sc)->sc_ioh1, reg, val)
#define WRITE_CMD(sc, cmd) WRITE_BAR1(sc, VMXNET3_BAR1_CMD, cmd)
#define vtophys(va) 0		/* XXX ok? */

int vmxnet3_match(device_t, cfdata_t, void *);
void vmxnet3_attach(device_t, device_t, void *);
int vmxnet3_dma_init(struct vmxnet3_softc *);
int vmxnet3_alloc_txring(struct vmxnet3_softc *, int);
int vmxnet3_alloc_rxring(struct vmxnet3_softc *, int);
void vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
void vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
void vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
void vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
void vmxnet3_link_state(struct vmxnet3_softc *);
void vmxnet3_enable_all_intrs(struct vmxnet3_softc *);
void vmxnet3_disable_all_intrs(struct vmxnet3_softc *);
int vmxnet3_intr(void *);
void vmxnet3_evintr(struct vmxnet3_softc *);
void vmxnet3_txintr(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
void vmxnet3_rxintr(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
void vmxnet3_iff(struct vmxnet3_softc *);
int vmxnet3_ifflags_cb(struct ethercom *);
void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
int vmxnet3_getbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *);
void vmxnet3_stop(struct ifnet *, int disable);
void vmxnet3_reset(struct ifnet *);
int vmxnet3_init(struct ifnet *);
int vmxnet3_ioctl(struct ifnet *, u_long, void *);
int vmxnet3_change_mtu(struct vmxnet3_softc *, int);
void vmxnet3_start(struct ifnet *);
int vmxnet3_load_mbuf(struct vmxnet3_softc *, struct mbuf *);
void vmxnet3_watchdog(struct ifnet *);
void vmxnet3_media_status(struct ifnet *, struct ifmediareq *);
int vmxnet3_media_change(struct ifnet *);
void *vmxnet3_dma_allocmem(struct vmxnet3_softc *, u_int, u_int, bus_addr_t *);

CFATTACH_DECL3_NEW(vmx, sizeof(struct vmxnet3_softc),
    vmxnet3_match, vmxnet3_attach, NULL, NULL, NULL, NULL, 0);

int
vmxnet3_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VMWARE &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VMWARE_VMXNET3)
		return 1;

	return 0;
}

void
vmxnet3_attach(device_t parent, device_t self, void *aux)
{
	struct vmxnet3_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_intr_handle_t ih;
	const char *intrstr;
	void *vih;
	u_int memtype, ver, macl, mach;
	pcireg_t preg;
	u_char enaddr[ETHER_ADDR_LEN];
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;

	pci_aprint_devinfo_fancy(pa, "Ethernet controller", "vmxnet3", 1);

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 0x10);
	if (pci_mapreg_map(pa, 0x10, memtype, 0, &sc->sc_iot0, &sc->sc_ioh0,
	    NULL, NULL)) {
		aprint_error_dev(sc->sc_dev, "failed to map BAR0\n");
		return;
	}
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 0x14);
	if (pci_mapreg_map(pa, 0x14, memtype, 0, &sc->sc_iot1, &sc->sc_ioh1,
	    NULL, NULL)) {
		aprint_error_dev(sc->sc_dev, "failed to map BAR1\n");
		return;
	}

	ver = READ_BAR1(sc, VMXNET3_BAR1_VRRS);
	if ((ver & 0x1) == 0) {
		aprint_error_dev(sc->sc_dev,
		    "unsupported hardware version 0x%x\n", ver);
		return;
	}
	WRITE_BAR1(sc, VMXNET3_BAR1_VRRS, 1);

	ver = READ_BAR1(sc, VMXNET3_BAR1_UVRS);
	if ((ver & 0x1) == 0) {
		aprint_error_dev(sc->sc_dev,
		    "incompatible UPT version 0x%x\n", ver);
		return;
	}
	WRITE_BAR1(sc, VMXNET3_BAR1_UVRS, 1);

	preg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;
	if (vmxnet3_dma_init(sc)) {
		aprint_error_dev(sc->sc_dev, "failed to setup DMA\n");
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "failed to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih, intrbuf, sizeof(intrbuf));
	vih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, vmxnet3_intr, sc);
	if (vih == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to establish interrupt%s%s\n",
		    intrstr ? " at " : "", intrstr ? intrstr : "");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	WRITE_CMD(sc, VMXNET3_CMD_GET_MACL);
	macl = READ_BAR1(sc, VMXNET3_BAR1_CMD);
	enaddr[0] = macl;
	enaddr[1] = macl >> 8;
	enaddr[2] = macl >> 16;
	enaddr[3] = macl >> 24;
	WRITE_CMD(sc, VMXNET3_CMD_GET_MACH);
	mach = READ_BAR1(sc, VMXNET3_BAR1_CMD);
	enaddr[4] = mach;
	enaddr[5] = mach >> 8;

	WRITE_BAR1(sc, VMXNET3_BAR1_MACL, macl);
	WRITE_BAR1(sc, VMXNET3_BAR1_MACH, mach);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
	ifp->if_ioctl = vmxnet3_ioctl;
	ifp->if_start = vmxnet3_start;
	ifp->if_watchdog = vmxnet3_watchdog;
	ifp->if_init = vmxnet3_init;
	ifp->if_stop = vmxnet3_stop;
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
	if (sc->sc_ds->upt_features & UPT1_F_CSUM)
		sc->sc_ethercom.ec_if.if_capabilities |=
		    IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
	if (sc->sc_ds->upt_features & UPT1_F_VLAN)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;

	IFQ_SET_MAXLEN(&ifp->if_snd, NTXDESC);
	IFQ_SET_READY(&ifp->if_snd);

	ifmedia_init(&sc->sc_media, IFM_IMASK, vmxnet3_media_change,
	    vmxnet3_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10G_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10G_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_1000_T, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, vmxnet3_ifflags_cb);
	vmxnet3_link_state(sc);
}

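/*
 * Allocate and wire up the shared DMA state: the per-queue shared areas,
 * the tx/rx descriptor rings, the multicast filter table and the
 * driver_shared structure itself, whose bus address is handed to the
 * device via the DSL/DSH registers.
 */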
int
vmxnet3_dma_init(struct vmxnet3_softc *sc)
{
	struct vmxnet3_driver_shared *ds;
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_rxq_shared *rs;
	bus_addr_t ds_pa, qs_pa, mcast_pa;
	int i, queue, qs_len;
	u_int major, minor, release_code, rev;

	qs_len = NTXQUEUE * sizeof *ts + NRXQUEUE * sizeof *rs;
	ts = vmxnet3_dma_allocmem(sc, qs_len, VMXNET3_DMADESC_ALIGN, &qs_pa);
	if (ts == NULL)
		return -1;
	for (queue = 0; queue < NTXQUEUE; queue++)
		sc->sc_txq[queue].ts = ts++;
	rs = (void *)ts;
	for (queue = 0; queue < NRXQUEUE; queue++)
		sc->sc_rxq[queue].rs = rs++;

	for (queue = 0; queue < NTXQUEUE; queue++)
		if (vmxnet3_alloc_txring(sc, queue))
			return -1;
	for (queue = 0; queue < NRXQUEUE; queue++)
		if (vmxnet3_alloc_rxring(sc, queue))
			return -1;

	sc->sc_mcast = vmxnet3_dma_allocmem(sc, 682 * ETHER_ADDR_LEN, 32, &mcast_pa);
	if (sc->sc_mcast == NULL)
		return -1;

	ds = vmxnet3_dma_allocmem(sc, sizeof *sc->sc_ds, 8, &ds_pa);
	if (ds == NULL)
		return -1;
	sc->sc_ds = ds;
	ds->magic = VMXNET3_REV1_MAGIC;
	ds->version = VMXNET3_DRIVER_VERSION;

	/*
	 * XXX The FreeBSD driver uses the following values:
	 * (Does the device behavior depend on them?)
	 *
	 * major = __FreeBSD_version / 100000;
	 * minor = (__FreeBSD_version / 1000) % 100;
	 * release_code = (__FreeBSD_version / 100) % 10;
	 * rev = __FreeBSD_version % 100;
	 */
	major = 0;
	minor = 0;
	release_code = 0;
	rev = 0;
#ifdef __LP64__
	ds->guest = release_code << 30 | rev << 22 | major << 14 | minor << 6
	    | VMXNET3_GOS_FREEBSD | VMXNET3_GOS_64BIT;
#else
	ds->guest = release_code << 30 | rev << 22 | major << 14 | minor << 6
	    | VMXNET3_GOS_FREEBSD | VMXNET3_GOS_32BIT;
#endif
	ds->vmxnet3_revision = 1;
	ds->upt_version = 1;
	ds->upt_features = UPT1_F_CSUM | UPT1_F_VLAN;
	ds->driver_data = vtophys(sc);
	ds->driver_data_len = sizeof(struct vmxnet3_softc);
	ds->queue_shared = qs_pa;
	ds->queue_shared_len = qs_len;
	ds->mtu = ETHERMTU;
	ds->ntxqueue = NTXQUEUE;
	ds->nrxqueue = NRXQUEUE;
	ds->mcast_table = mcast_pa;
	ds->automask = 1;
	ds->nintr = VMXNET3_NINTR;
	ds->evintr = 0;
	ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;
	for (i = 0; i < VMXNET3_NINTR; i++)
		ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;
	WRITE_BAR1(sc, VMXNET3_BAR1_DSL, ds_pa);
	WRITE_BAR1(sc, VMXNET3_BAR1_DSH, (uint64_t)ds_pa >> 32);
	return 0;
}

int
vmxnet3_alloc_txring(struct vmxnet3_softc *sc, int queue)
{
	struct vmxnet3_txqueue *tq = &sc->sc_txq[queue];
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
	bus_addr_t pa, comp_pa;
	int idx;

	ring->txd = vmxnet3_dma_allocmem(sc, NTXDESC * sizeof ring->txd[0], 512, &pa);
	if (ring->txd == NULL)
		return -1;
	comp_ring->txcd = vmxnet3_dma_allocmem(sc,
	    NTXCOMPDESC * sizeof comp_ring->txcd[0], 512, &comp_pa);
	if (comp_ring->txcd == NULL)
		return -1;

	for (idx = 0; idx < NTXDESC; idx++) {
		if (bus_dmamap_create(sc->sc_dmat, JUMBO_LEN, NTXSEGS,
		    JUMBO_LEN, 0, BUS_DMA_NOWAIT, &ring->dmap[idx]))
			return -1;
	}

	ts = tq->ts;
	memset(ts, 0, sizeof *ts);
	ts->npending = 0;
	ts->intr_threshold = 1;
	ts->cmd_ring = pa;
	ts->cmd_ring_len = NTXDESC;
	ts->comp_ring = comp_pa;
	ts->comp_ring_len = NTXCOMPDESC;
	ts->driver_data = vtophys(tq);
	ts->driver_data_len = sizeof *tq;
	ts->intr_idx = 0;
	ts->stopped = 1;
	ts->error = 0;
	return 0;
}

int
vmxnet3_alloc_rxring(struct vmxnet3_softc *sc, int queue)
{
	struct vmxnet3_rxqueue *rq = &sc->sc_rxq[queue];
	struct vmxnet3_rxq_shared *rs;
	struct vmxnet3_rxring *ring;
	struct vmxnet3_comp_ring *comp_ring;
	bus_addr_t pa[2], comp_pa;
	int i, idx;

	for (i = 0; i < 2; i++) {
		ring = &rq->cmd_ring[i];
		ring->rxd = vmxnet3_dma_allocmem(sc, NRXDESC * sizeof ring->rxd[0],
		    512, &pa[i]);
		if (ring->rxd == NULL)
			return -1;
	}
	comp_ring = &rq->comp_ring;
	comp_ring->rxcd = vmxnet3_dma_allocmem(sc,
	    NRXCOMPDESC * sizeof comp_ring->rxcd[0], 512, &comp_pa);
	if (comp_ring->rxcd == NULL)
		return -1;

	for (i = 0; i < 2; i++) {
		ring = &rq->cmd_ring[i];
		ring->rid = i;
		for (idx = 0; idx < NRXDESC; idx++) {
			if (bus_dmamap_create(sc->sc_dmat, JUMBO_LEN, 1,
			    JUMBO_LEN, 0, BUS_DMA_NOWAIT, &ring->dmap[idx]))
				return -1;
		}
	}

	rs = rq->rs;
	memset(rs, 0, sizeof *rs);
	rs->cmd_ring[0] = pa[0];
	rs->cmd_ring[1] = pa[1];
	rs->cmd_ring_len[0] = NRXDESC;
	rs->cmd_ring_len[1] = NRXDESC;
	rs->comp_ring = comp_pa;
	rs->comp_ring_len = NRXCOMPDESC;
	rs->driver_data = vtophys(rq);
	rs->driver_data_len = sizeof *rq;
	rs->intr_idx = 0;
	rs->stopped = 1;
	rs->error = 0;
	return 0;
}

void
vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
{
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;

	ring->head = ring->next = 0;
	ring->gen = 1;
	comp_ring->next = 0;
	comp_ring->gen = 1;
	memset(ring->txd, 0, NTXDESC * sizeof ring->txd[0]);
	memset(comp_ring->txcd, 0, NTXCOMPDESC * sizeof comp_ring->txcd[0]);
}

void
vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
{
	struct vmxnet3_rxring *ring;
	struct vmxnet3_comp_ring *comp_ring;
	int i, idx;

	for (i = 0; i < 2; i++) {
		ring = &rq->cmd_ring[i];
		ring->fill = 0;
		ring->gen = 1;
		memset(ring->rxd, 0, NRXDESC * sizeof ring->rxd[0]);
		for (idx = 0; idx < NRXDESC; idx++) {
			if (vmxnet3_getbuf(sc, ring))
				break;
		}
	}
	comp_ring = &rq->comp_ring;
	comp_ring->next = 0;
	comp_ring->gen = 1;
	memset(comp_ring->rxcd, 0, NRXCOMPDESC * sizeof comp_ring->rxcd[0]);
}

void
vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
{
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	int idx;

	for (idx = 0; idx < NTXDESC; idx++) {
		if (ring->m[idx]) {
			bus_dmamap_unload(sc->sc_dmat, ring->dmap[idx]);
			m_freem(ring->m[idx]);
			ring->m[idx] = NULL;
		}
	}
}

void
vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
{
	struct vmxnet3_rxring *ring;
	int i, idx;

	for (i = 0; i < 2; i++) {
		ring = &rq->cmd_ring[i];
		for (idx = 0; idx < NRXDESC; idx++) {
			if (ring->m[idx]) {
				m_freem(ring->m[idx]);
				ring->m[idx] = NULL;
			}
		}
	}
}

void
vmxnet3_link_state(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int x, link, speed;

	WRITE_CMD(sc, VMXNET3_CMD_GET_LINK);
	x = READ_BAR1(sc, VMXNET3_BAR1_CMD);
	speed = x >> 16;
	if (x & 1) {
		ifp->if_baudrate = IF_Mbps(speed);
		link = LINK_STATE_UP;
	} else
		link = LINK_STATE_DOWN;

	if_link_state_change(ifp, link);
}

static inline void
vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq)
{
	WRITE_BAR0(sc, VMXNET3_BAR0_IMASK(irq), 0);
}

static inline void
vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq)
{
	WRITE_BAR0(sc, VMXNET3_BAR0_IMASK(irq), 1);
}

void
vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc)
{
	int i;

	sc->sc_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL;
	for (i = 0; i < VMXNET3_NINTR; i++)
		vmxnet3_enable_intr(sc, i);
}

void
vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc)
{
	int i;

	sc->sc_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL;
	for (i = 0; i < VMXNET3_NINTR; i++)
		vmxnet3_disable_intr(sc, i);
}

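/*
 * Interrupt handler.  With ds->automask set the device masks the
 * interrupt when it raises it, so once the events and both queues have
 * been serviced the handler unmasks interrupt 0 again by hand.
 */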
int
vmxnet3_intr(void *arg)
{
	struct vmxnet3_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;
	if (READ_BAR1(sc, VMXNET3_BAR1_INTR) == 0)
		return 0;
	if (sc->sc_ds->event)
		vmxnet3_evintr(sc);
	vmxnet3_rxintr(sc, &sc->sc_rxq[0]);
	vmxnet3_txintr(sc, &sc->sc_txq[0]);
#ifdef VMXNET3_STAT
	vmxstat.intr++;
#endif
	vmxnet3_enable_intr(sc, 0);
	return 1;
}

void
vmxnet3_evintr(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int event = sc->sc_ds->event;
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_rxq_shared *rs;

	/* Clear events. */
	WRITE_BAR1(sc, VMXNET3_BAR1_EVENT, event);

	/* Link state change? */
	if (event & VMXNET3_EVENT_LINK)
		vmxnet3_link_state(sc);

	/* Queue error? */
	if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
		WRITE_CMD(sc, VMXNET3_CMD_GET_STATUS);

		ts = sc->sc_txq[0].ts;
		if (ts->stopped)
			printf("%s: TX error 0x%x\n", ifp->if_xname, ts->error);
		rs = sc->sc_rxq[0].rs;
		if (rs->stopped)
			printf("%s: RX error 0x%x\n", ifp->if_xname, rs->error);
		vmxnet3_reset(ifp);
	}

	if (event & VMXNET3_EVENT_DIC)
		printf("%s: device implementation change event\n",
		    ifp->if_xname);
	if (event & VMXNET3_EVENT_DEBUG)
		printf("%s: debug event\n", ifp->if_xname);
}

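/*
 * Transmit completion.  Walk the completion ring until a descriptor whose
 * generation bit no longer matches comp_ring->gen is found (i.e. one the
 * device has not written yet), freeing the mbuf and DMA map of each
 * completed packet and advancing the command ring's "next" index past the
 * end-of-packet descriptor reported by the device.
 */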
void
vmxnet3_txintr(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
{
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
	struct vmxnet3_txcompdesc *txcd;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int sop;

	for (;;) {
		txcd = &comp_ring->txcd[comp_ring->next];

		if (le32toh((txcd->txc_word3 >> VMXNET3_TXC_GEN_S) &
		    VMXNET3_TXC_GEN_M) != comp_ring->gen)
			break;

		comp_ring->next++;
		if (comp_ring->next == NTXCOMPDESC) {
			comp_ring->next = 0;
			comp_ring->gen ^= 1;
		}

		sop = ring->next;
		if (ring->m[sop] == NULL)
			panic("vmxnet3_txintr");
		m_freem(ring->m[sop]);
		ring->m[sop] = NULL;
		bus_dmamap_unload(sc->sc_dmat, ring->dmap[sop]);
		ring->next = (le32toh((txcd->txc_word0 >>
		    VMXNET3_TXC_EOPIDX_S) & VMXNET3_TXC_EOPIDX_M) + 1)
		    % NTXDESC;

		ifp->if_flags &= ~IFF_OACTIVE;
	}
	if (ring->head == ring->next)
		ifp->if_timer = 0;
	vmxnet3_start(ifp);
}

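/*
 * Receive completion.  Consume completed descriptors from the rx
 * completion ring, validate each packet (buffer type, error bit, minimum
 * length), attach checksum and VLAN information and pass it up the stack,
 * then top ring 1 up again with fresh buffers.
 */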
void
vmxnet3_rxintr(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
{
	struct vmxnet3_comp_ring *comp_ring = &rq->comp_ring;
	struct vmxnet3_rxring *ring;
	struct vmxnet3_rxdesc *rxd;
	struct vmxnet3_rxcompdesc *rxcd;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int idx, len;

	for (;;) {
		rxcd = &comp_ring->rxcd[comp_ring->next];
		if (le32toh((rxcd->rxc_word3 >> VMXNET3_RXC_GEN_S) &
		    VMXNET3_RXC_GEN_M) != comp_ring->gen)
			break;

		comp_ring->next++;
		if (comp_ring->next == NRXCOMPDESC) {
			comp_ring->next = 0;
			comp_ring->gen ^= 1;
		}

		idx = le32toh((rxcd->rxc_word0 >> VMXNET3_RXC_IDX_S) &
		    VMXNET3_RXC_IDX_M);
		if (le32toh((rxcd->rxc_word0 >> VMXNET3_RXC_QID_S) &
		    VMXNET3_RXC_QID_M) < NRXQUEUE)
			ring = &rq->cmd_ring[0];
		else
			ring = &rq->cmd_ring[1];
		rxd = &ring->rxd[idx];
		len = le32toh((rxcd->rxc_word2 >> VMXNET3_RXC_LEN_S) &
		    VMXNET3_RXC_LEN_M);
		m = ring->m[idx];
		ring->m[idx] = NULL;
		bus_dmamap_unload(sc->sc_dmat, ring->dmap[idx]);

		if (m == NULL)
			panic("NULL mbuf");

		if (le32toh((rxd->rx_word2 >> VMXNET3_RX_BTYPE_S) &
		    VMXNET3_RX_BTYPE_M) != VMXNET3_BTYPE_HEAD) {
			m_freem(m);
			goto skip_buffer;
		}
		if (le32toh(rxcd->rxc_word2 & VMXNET3_RXC_ERROR)) {
			ifp->if_ierrors++;
			m_freem(m);
			goto skip_buffer;
		}
		if (len < VMXNET3_MIN_MTU) {
			printf("%s: short packet (%d)\n", ifp->if_xname, len);
			m_freem(m);
			goto skip_buffer;
		}

		ifp->if_ipackets++;
		ifp->if_ibytes += len;

		vmxnet3_rx_csum(rxcd, m);
		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;
		if (le32toh(rxcd->rxc_word2 & VMXNET3_RXC_VLAN)) {
			VLAN_INPUT_TAG(ifp, m,
			    le32toh((rxcd->rxc_word2 >>
			    VMXNET3_RXC_VLANTAG_S) & VMXNET3_RXC_VLANTAG_M),
			    m_freem(m); goto skip_buffer);
		}

		bpf_mtap(ifp, m);

		if_percpuq_enqueue(ifp->if_percpuq, m);

skip_buffer:
#ifdef VMXNET3_STAT
		vmxstat.rxdone = idx;
#endif
		if (rq->rs->update_rxhead) {
			u_int qid = le32toh((rxcd->rxc_word0 >>
			    VMXNET3_RXC_QID_S) & VMXNET3_RXC_QID_M);

			idx = (idx + 1) % NRXDESC;
			if (qid < NRXQUEUE) {
				WRITE_BAR0(sc, VMXNET3_BAR0_RXH1(qid), idx);
			} else {
				qid -= NRXQUEUE;
				WRITE_BAR0(sc, VMXNET3_BAR0_RXH2(qid), idx);
			}
		}
	}

	/* XXX Should we (try to) allocate buffers for ring 2 too? */
	ring = &rq->cmd_ring[0];
	for (;;) {
		idx = ring->fill;
		if (ring->m[idx])
			return;
		if (vmxnet3_getbuf(sc, ring))
			return;
	}
}

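/*
 * Program the receive filter.  Unicast and broadcast are always accepted;
 * individual multicast addresses are copied into the 682-entry table
 * allocated in vmxnet3_dma_init(), and the driver falls back to
 * all-multicast (or promiscuous) mode when the table would overflow, a
 * multicast address range is requested, or IFF_PROMISC is set.
 */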
void
vmxnet3_iff(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct vmxnet3_driver_shared *ds = sc->sc_ds;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int mode;
	uint8_t *p;

	ds->mcast_tablelen = 0;
	CLR(ifp->if_flags, IFF_ALLMULTI);

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	mode = VMXNET3_RXMODE_BCAST | VMXNET3_RXMODE_UCAST;

	if (ISSET(ifp->if_flags, IFF_PROMISC) || ec->ec_multicnt > 682)
		goto allmulti;

	p = sc->sc_mcast;
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range. (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}
		memcpy(p, enm->enm_addrlo, ETHER_ADDR_LEN);

		p += ETHER_ADDR_LEN;

		ETHER_NEXT_MULTI(step, enm);
	}

	if (ec->ec_multicnt > 0) {
		SET(mode, VMXNET3_RXMODE_MCAST);
		ds->mcast_tablelen = p - sc->sc_mcast;
	}

	goto setit;

allmulti:
	SET(ifp->if_flags, IFF_ALLMULTI);
	SET(mode, (VMXNET3_RXMODE_ALLMULTI | VMXNET3_RXMODE_MCAST));
	if (ifp->if_flags & IFF_PROMISC)
		SET(mode, VMXNET3_RXMODE_PROMISC);

setit:
	WRITE_CMD(sc, VMXNET3_CMD_SET_FILTER);
	ds->rxmode = mode;
	WRITE_CMD(sc, VMXNET3_CMD_SET_RXMODE);
}

int
vmxnet3_ifflags_cb(struct ethercom *ec)
{

	vmxnet3_iff((struct vmxnet3_softc *)ec->ec_if.if_softc);

	return 0;
}

void
vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
{
	if (le32toh(rxcd->rxc_word0 & VMXNET3_RXC_NOCSUM))
		return;

	if (rxcd->rxc_word3 & VMXNET3_RXC_IPV4) {
		m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
		if ((rxcd->rxc_word3 & VMXNET3_RXC_IPSUM_OK) == 0)
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
	}

	if (rxcd->rxc_word3 & VMXNET3_RXC_FRAGMENT)
		return;

	if (rxcd->rxc_word3 & VMXNET3_RXC_TCP) {
		m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
		if ((rxcd->rxc_word3 & VMXNET3_RXC_CSUM_OK) == 0)
			m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
	}

	if (rxcd->rxc_word3 & VMXNET3_RXC_UDP) {
		m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
		if ((rxcd->rxc_word3 & VMXNET3_RXC_CSUM_OK) == 0)
			m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
	}
}

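/*
 * Post a fresh receive buffer at the ring's fill index: allocate an mbuf
 * cluster, load it into the slot's DMA map and write an rx descriptor
 * carrying the buffer address, length, buffer type and the ring's current
 * generation bit so the device will use it.
 */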
int
vmxnet3_getbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *ring)
{
	int idx = ring->fill;
	struct vmxnet3_rxdesc *rxd = &ring->rxd[idx];
	struct mbuf *m;
	int btype;

	if (ring->m[idx])
		panic("vmxnet3_getbuf: buffer has mbuf");

#if 1
	/* XXX Don't allocate buffers for ring 2 for now. */
	if (ring->rid != 0)
		return -1;
	btype = VMXNET3_BTYPE_HEAD;
#else
	if (ring->rid == 0)
		btype = VMXNET3_BTYPE_HEAD;
	else
		btype = VMXNET3_BTYPE_BODY;
#endif

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return -1;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return -1;
	}

	m->m_pkthdr.len = m->m_len = JUMBO_LEN;
	m_adj(m, ETHER_ALIGN);
	ring->m[idx] = m;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, ring->dmap[idx], m,
	    BUS_DMA_NOWAIT))
		panic("load mbuf");
	rxd->rx_addr = htole64(DMAADDR(ring->dmap[idx]));
	rxd->rx_word2 = htole32(((m->m_pkthdr.len & VMXNET3_RX_LEN_M) <<
	    VMXNET3_RX_LEN_S) | ((btype & VMXNET3_RX_BTYPE_M) <<
	    VMXNET3_RX_BTYPE_S) | ((ring->gen & VMXNET3_RX_GEN_M) <<
	    VMXNET3_RX_GEN_S));
	idx++;
	if (idx == NRXDESC) {
		idx = 0;
		ring->gen ^= 1;
	}
	ring->fill = idx;
#ifdef VMXNET3_STAT
	vmxstat.rxfill = ring->fill;
#endif
	return 0;
}

void
vmxnet3_stop(struct ifnet *ifp, int disable)
{
	struct vmxnet3_softc *sc = ifp->if_softc;
	int queue;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	vmxnet3_disable_all_intrs(sc);
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (!disable)
		return;

	WRITE_CMD(sc, VMXNET3_CMD_DISABLE);

	for (queue = 0; queue < NTXQUEUE; queue++)
		vmxnet3_txstop(sc, &sc->sc_txq[queue]);
	for (queue = 0; queue < NRXQUEUE; queue++)
		vmxnet3_rxstop(sc, &sc->sc_rxq[queue]);
}

void
vmxnet3_reset(struct ifnet *ifp)
{
	struct vmxnet3_softc *sc = ifp->if_softc;

	vmxnet3_stop(ifp, 1);
	WRITE_CMD(sc, VMXNET3_CMD_RESET);
	vmxnet3_init(ifp);
}

int
vmxnet3_init(struct ifnet *ifp)
{
	struct vmxnet3_softc *sc = ifp->if_softc;
	int queue;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	for (queue = 0; queue < NTXQUEUE; queue++)
		vmxnet3_txinit(sc, &sc->sc_txq[queue]);
	for (queue = 0; queue < NRXQUEUE; queue++)
		vmxnet3_rxinit(sc, &sc->sc_rxq[queue]);

	WRITE_CMD(sc, VMXNET3_CMD_ENABLE);
	if (READ_BAR1(sc, VMXNET3_BAR1_CMD)) {
		printf("%s: failed to initialize\n", ifp->if_xname);
		vmxnet3_stop(ifp, 1);
		return EIO;
	}

	for (queue = 0; queue < NRXQUEUE; queue++) {
		WRITE_BAR0(sc, VMXNET3_BAR0_RXH1(queue), 0);
		WRITE_BAR0(sc, VMXNET3_BAR0_RXH2(queue), 0);
	}

	vmxnet3_iff(sc);
	vmxnet3_enable_all_intrs(sc);
	vmxnet3_link_state(sc);
	return 0;
}

int
vmxnet3_change_mtu(struct vmxnet3_softc *sc, int mtu)
{
	struct vmxnet3_driver_shared *ds = sc->sc_ds;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int error;

	if (mtu < VMXNET3_MIN_MTU || mtu > VMXNET3_MAX_MTU)
		return EINVAL;
	vmxnet3_stop(ifp, 1);
	ifp->if_mtu = ds->mtu = mtu;
	error = vmxnet3_init(ifp);
	return error;
}

int
vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct vmxnet3_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMTU:
		error = vmxnet3_change_mtu(sc, ifr->ifr_mtu);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vmxnet3_iff(sc);
		error = 0;
	}

	splx(s);
	return error;
}

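/*
 * Transmit start routine.  Dequeue packets while at least NTXSEGS
 * descriptors are free, load each one into the tx ring, and finally write
 * the new ring head to the TXH doorbell register so the device picks up
 * the work.
 */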
void
vmxnet3_start(struct ifnet *ifp)
{
	struct vmxnet3_softc *sc = ifp->if_softc;
	struct vmxnet3_txqueue *tq = &sc->sc_txq[0];
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct mbuf *m;
	int n = 0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;
		if ((ring->next - ring->head - 1) % NTXDESC < NTXSEGS) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (vmxnet3_load_mbuf(sc, m) != 0) {
			ifp->if_oerrors++;
			continue;
		}
		bpf_mtap(ifp, m);

		ifp->if_timer = 5;
		ifp->if_opackets++;
		n++;
	}

	if (n > 0)
		WRITE_BAR0(sc, VMXNET3_BAR0_TXH(0), ring->head);
#ifdef VMXNET3_STAT
	vmxstat.txhead = ring->head;
	vmxstat.txdone = ring->next;
	vmxstat.maxtxlen =
	    max(vmxstat.maxtxlen, (ring->head - ring->next) % NTXDESC);
#endif
}

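/*
 * Map an outgoing mbuf chain onto consecutive tx descriptors.  Every
 * descriptor except the first is written with the ring's live generation
 * bit; the start-of-packet descriptor is first written with the inverted
 * bit and only flipped at the very end, so the device cannot pick up a
 * half-built chain.  For TCP/UDP checksum offload the IP header is pulled
 * up to find the header length and checksum field offset that the
 * start-of-packet descriptor must carry.
 */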
int
vmxnet3_load_mbuf(struct vmxnet3_softc *sc, struct mbuf *m)
{
	struct vmxnet3_txqueue *tq = &sc->sc_txq[0];
	struct vmxnet3_txring *ring = &tq->cmd_ring;
	struct vmxnet3_txdesc *txd = NULL, *sop;
	struct mbuf *mp;
	struct m_tag *mtag;
	struct ip *ip;
	bus_dmamap_t map = ring->dmap[ring->head];
	u_int hlen = ETHER_HDR_LEN, csum_off = 0;
	int offp, gen, i;

#if 0
	if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		printf("%s: IP checksum offloading is not supported\n",
		    sc->sc_dev.dv_xname);
		return -1;
	}
#endif
	if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4|M_CSUM_TCPv4)) {
		if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
			csum_off = offsetof(struct tcphdr, th_sum);
		else
			csum_off = offsetof(struct udphdr, uh_sum);

		mp = m_pulldown(m, hlen, sizeof(*ip), &offp);
		if (mp == NULL)
			return (-1);

		ip = (struct ip *)(mp->m_data + offp);
		hlen += ip->ip_hl << 2;

		mp = m_pulldown(m, 0, hlen + csum_off + 2, &offp);
		if (mp == NULL)
			return (-1);
	}

	switch (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
	case 0:
		break;
	case EFBIG:
		mp = m_defrag(m, M_DONTWAIT);
		if (mp != NULL) {
			m = mp;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
			    BUS_DMA_NOWAIT) == 0)
				break;
		}
		/* FALLTHROUGH */
	default:
		m_freem(m);
		return -1;
	}

	ring->m[ring->head] = m;
	sop = &ring->txd[ring->head];
	gen = ring->gen ^ 1;	/* still owned by the CPU */
	for (i = 0; i < map->dm_nsegs; i++) {
		txd = &ring->txd[ring->head];
		txd->tx_addr = htole64(map->dm_segs[i].ds_addr);
		txd->tx_word2 = htole32(((map->dm_segs[i].ds_len &
		    VMXNET3_TX_LEN_M) << VMXNET3_TX_LEN_S) |
		    ((gen & VMXNET3_TX_GEN_M) << VMXNET3_TX_GEN_S));
		txd->tx_word3 = 0;
		ring->head++;
		if (ring->head == NTXDESC) {
			ring->head = 0;
			ring->gen ^= 1;
		}
		gen = ring->gen;
	}
	if (txd != NULL)
		txd->tx_word3 |= htole32(VMXNET3_TX_EOP | VMXNET3_TX_COMPREQ);

	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m)) != NULL) {
		sop->tx_word3 |= htole32(VMXNET3_TX_VTAG_MODE);
		sop->tx_word3 |= htole32((VLAN_TAG_VALUE(mtag) &
		    VMXNET3_TX_VLANTAG_M) << VMXNET3_TX_VLANTAG_S);
	}
	if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4|M_CSUM_TCPv4)) {
		sop->tx_word2 |= htole32(((hlen + csum_off) &
		    VMXNET3_TX_OP_M) << VMXNET3_TX_OP_S);
		sop->tx_word3 |= htole32(((hlen & VMXNET3_TX_HLEN_M) <<
		    VMXNET3_TX_HLEN_S) | (VMXNET3_OM_CSUM << VMXNET3_TX_OM_S));
	}

	/* Change the ownership by flipping the "generation" bit */
	sop->tx_word2 ^= htole32(VMXNET3_TX_GEN_M << VMXNET3_TX_GEN_S);

	return (0);
}

void
vmxnet3_watchdog(struct ifnet *ifp)
{
	int s;

	printf("%s: device timeout\n", ifp->if_xname);
	s = splnet();
	vmxnet3_stop(ifp, 1);
	vmxnet3_init(ifp);
	splx(s);
}

void
vmxnet3_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vmxnet3_softc *sc = ifp->if_softc;

	vmxnet3_link_state(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (ifp->if_link_state != LINK_STATE_UP)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (ifp->if_baudrate >= IF_Gbps(10ULL))
		ifmr->ifm_active |= IFM_10G_T;
}

int
vmxnet3_media_change(struct ifnet *ifp)
{
	return 0;
}

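/*
 * Allocate "size" bytes of zeroed, physically contiguous DMA-safe memory
 * and return its kernel virtual address, with the bus address in *pa.
 * The temporary map is only used to learn the bus address and is torn
 * down again; the memory itself stays allocated for the life of the
 * driver (nothing allocated here is ever freed).
 */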
void *
vmxnet3_dma_allocmem(struct vmxnet3_softc *sc, u_int size, u_int align, bus_addr_t *pa)
{
	bus_dma_tag_t t = sc->sc_dmat;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	void *va;
	int n;

	if (bus_dmamem_alloc(t, size, align, 0, segs, 1, &n, BUS_DMA_NOWAIT))
		return NULL;
	if (bus_dmamem_map(t, segs, 1, size, &va, BUS_DMA_NOWAIT))
		return NULL;
	if (bus_dmamap_create(t, size, 1, size, 0, BUS_DMA_NOWAIT, &map))
		return NULL;
	if (bus_dmamap_load(t, map, va, size, NULL, BUS_DMA_NOWAIT))
		return NULL;
	memset(va, 0, size);
	*pa = DMAADDR(map);
	bus_dmamap_unload(t, map);
	bus_dmamap_destroy(t, map);
	return va;
}