[b8e0c66] | 1 | #include <machine/rtems-bsd-kernel-space.h> |
---|
| 2 | |
---|
| 3 | /*- |
---|
| 4 | * Copyright (c) 2012-2014 Thomas Skibo <thomasskibo@yahoo.com> |
---|
| 5 | * All rights reserved. |
---|
| 6 | * |
---|
| 7 | * Redistribution and use in source and binary forms, with or without |
---|
| 8 | * modification, are permitted provided that the following conditions |
---|
| 9 | * are met: |
---|
| 10 | * 1. Redistributions of source code must retain the above copyright |
---|
| 11 | * notice, this list of conditions and the following disclaimer. |
---|
| 12 | * 2. Redistributions in binary form must reproduce the above copyright |
---|
| 13 | * notice, this list of conditions and the following disclaimer in the |
---|
| 14 | * documentation and/or other materials provided with the distribution. |
---|
| 15 | * |
---|
| 16 | * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
---|
| 17 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
---|
| 18 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
---|
| 19 | * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
---|
| 20 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
---|
| 21 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
---|
| 22 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
---|
| 23 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
---|
| 24 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
---|
| 25 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
---|
| 26 | * SUCH DAMAGE. |
---|
| 27 | */ |
---|
| 28 | |
---|
| 29 | /* |
---|
| 30 | * A network interface driver for Cadence GEM Gigabit Ethernet |
---|
| 31 | * interface such as the one used in Xilinx Zynq-7000 SoC. |
---|
| 32 | * |
---|
| 33 | * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual. |
---|
| 34 | * (v1.4) November 16, 2012. Xilinx doc UG585. GEM is covered in Ch. 16 |
---|
| 35 | * and register definitions are in appendix B.18. |
---|
| 36 | */ |
---|
| 37 | |
---|
| 38 | #include <sys/cdefs.h> |
---|
| 39 | __FBSDID("$FreeBSD$"); |
---|
| 40 | |
---|
| 41 | #include <rtems/bsd/sys/param.h> |
---|
| 42 | #include <sys/systm.h> |
---|
| 43 | #include <sys/bus.h> |
---|
| 44 | #include <sys/kernel.h> |
---|
| 45 | #include <sys/malloc.h> |
---|
| 46 | #include <sys/mbuf.h> |
---|
| 47 | #include <sys/module.h> |
---|
| 48 | #include <sys/rman.h> |
---|
| 49 | #include <sys/socket.h> |
---|
| 50 | #include <sys/sockio.h> |
---|
| 51 | #include <sys/sysctl.h> |
---|
| 52 | |
---|
| 53 | #include <machine/bus.h> |
---|
| 54 | |
---|
| 55 | #include <net/ethernet.h> |
---|
| 56 | #include <net/if.h> |
---|
| 57 | #include <net/if_var.h> |
---|
| 58 | #include <net/if_arp.h> |
---|
| 59 | #include <net/if_dl.h> |
---|
| 60 | #include <net/if_media.h> |
---|
| 61 | #include <net/if_mib.h> |
---|
| 62 | #include <net/if_types.h> |
---|
| 63 | |
---|
| 64 | #ifdef INET |
---|
| 65 | #include <netinet/in.h> |
---|
| 66 | #include <netinet/in_systm.h> |
---|
| 67 | #include <netinet/in_var.h> |
---|
| 68 | #include <netinet/ip.h> |
---|
| 69 | #endif |
---|
| 70 | |
---|
| 71 | #include <net/bpf.h> |
---|
| 72 | #include <net/bpfdesc.h> |
---|
| 73 | |
---|
[14ecf75d] | 74 | #ifndef __rtems__ |
---|
[b8e0c66] | 75 | #include <dev/fdt/fdt_common.h> |
---|
| 76 | #include <dev/ofw/ofw_bus.h> |
---|
| 77 | #include <dev/ofw/ofw_bus_subr.h> |
---|
[14ecf75d] | 78 | #endif /* __rtems__ */ |
---|
[b8e0c66] | 79 | |
---|
| 80 | #include <dev/mii/mii.h> |
---|
| 81 | #include <dev/mii/miivar.h> |
---|
| 82 | |
---|
| 83 | #include <dev/cadence/if_cgem_hw.h> |
---|
| 84 | |
---|
| 85 | #include <rtems/bsd/local/miibus_if.h> |
---|
[8c3823e] | 86 | #ifdef __rtems__ |
---|
| 87 | #pragma GCC diagnostic ignored "-Wpointer-sign" |
---|
| 88 | #endif /* __rtems__ */ |
---|
[b8e0c66] | 89 | |
---|
#define IF_CGEM_NAME "cgem"

#define CGEM_NUM_RX_DESCS 512	/* size of receive descriptor ring */
#define CGEM_NUM_TX_DESCS 512	/* size of transmit descriptor ring */

/* Both descriptor rings are carved from one DMA tag sized for the
 * larger of the two (see cgem_setup_descs()).
 */
#define MAX_DESC_RING_SIZE (MAX(CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),\
				CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc)))


/* Default for sysctl rxbufs. Must be < CGEM_NUM_RX_DESCS of course. */
#define DEFAULT_NUM_RX_BUFS	256	/* number of receive bufs to queue. */

#define TX_MAX_DMA_SEGS		8	/* maximum segs in a tx mbuf dma */

/* Checksum work the hardware can offload on transmit. */
#define CGEM_CKSUM_ASSIST	(CSUM_IP | CSUM_TCP | CSUM_UDP | \
				 CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
---|
| 106 | |
---|
/*
 * Per-controller driver state.  Ring indices and counters are
 * manipulated under sc_mtx (see CGEM_ASSERT_LOCKED in the ring
 * routines below).
 */
struct cgem_softc {
	struct ifnet		*ifp;		/* network interface */
	struct mtx		sc_mtx;		/* driver lock */
	device_t		dev;
	device_t		miibus;		/* attached MII bus */
	u_int			mii_media_active;	/* last active media */
	int			if_old_flags;	/* saved if_flags — presumably
						 * to detect flag changes in
						 * ioctl; confirm */
	struct resource		*mem_res;	/* register window (RD4/WR4) */
	struct resource		*irq_res;
	void			*intrhand;	/* interrupt handler cookie */
	struct callout		tick_ch;	/* periodic timer */
	uint32_t		net_ctl_shadow;	/* soft copy of NET_CTRL, ORed
						 * into control writes */
	int			ref_clk_num;	/* unit for cgem_set_ref_clk();
						 * not used in this chunk */
	u_char			eaddr[6];	/* MAC address */

	bus_dma_tag_t		desc_dma_tag;	/* tag for descriptor rings */
	bus_dma_tag_t		mbuf_dma_tag;	/* tag for rx/tx mbuf buffers */

	/* receive descriptor ring */
	struct cgem_rx_desc	*rxring;
	bus_addr_t		rxring_physaddr;
	struct mbuf		*rxring_m[CGEM_NUM_RX_DESCS];
	bus_dmamap_t		rxring_m_dmamap[CGEM_NUM_RX_DESCS];
	int			rxring_hd_ptr;	/* where to put rcv bufs */
	int			rxring_tl_ptr;	/* where to get receives */
	int			rxring_queued;	/* how many rcv bufs queued */
	bus_dmamap_t		rxring_dma_map;
	int			rxbufs;		/* tunable number rcv bufs */
	int			rxhangwar;	/* rx hang work-around */
	u_int			rxoverruns;	/* rx overruns */
	u_int			rxnobufs;	/* rx buf ring empty events */
	u_int			rxdmamapfails;	/* rx dmamap failures */
	uint32_t		rx_frames_prev;

	/* transmit descriptor ring */
	struct cgem_tx_desc	*txring;
	bus_addr_t		txring_physaddr;
	struct mbuf		*txring_m[CGEM_NUM_TX_DESCS];
	bus_dmamap_t		txring_m_dmamap[CGEM_NUM_TX_DESCS];
	int			txring_hd_ptr;	/* where to put next xmits */
	int			txring_tl_ptr;	/* next xmit mbuf to free */
	int			txring_queued;	/* num xmits segs queued */
	bus_dmamap_t		txring_dma_map;
	u_int			txfull;		/* tx ring full events */
	u_int			txdefrags;	/* tx calls to m_defrag() */
	u_int			txdefragfails;	/* tx m_defrag() failures */
	u_int			txdmamapfails;	/* tx dmamap failures */

	/* hardware provided statistics (accumulated from the GEM's
	 * clear-on-read statistics registers)
	 */
	struct cgem_hw_stats {
		uint64_t		tx_bytes;
		uint32_t		tx_frames;
		uint32_t		tx_frames_bcast;
		uint32_t		tx_frames_multi;
		uint32_t		tx_frames_pause;
		uint32_t		tx_frames_64b;
		uint32_t		tx_frames_65to127b;
		uint32_t		tx_frames_128to255b;
		uint32_t		tx_frames_256to511b;
		uint32_t		tx_frames_512to1023b;
		uint32_t		tx_frames_1024to1536b;
		uint32_t		tx_under_runs;
		uint32_t		tx_single_collisn;
		uint32_t		tx_multi_collisn;
		uint32_t		tx_excsv_collisn;
		uint32_t		tx_late_collisn;
		uint32_t		tx_deferred_frames;
		uint32_t		tx_carrier_sense_errs;

		uint64_t		rx_bytes;
		uint32_t		rx_frames;
		uint32_t		rx_frames_bcast;
		uint32_t		rx_frames_multi;
		uint32_t		rx_frames_pause;
		uint32_t		rx_frames_64b;
		uint32_t		rx_frames_65to127b;
		uint32_t		rx_frames_128to255b;
		uint32_t		rx_frames_256to511b;
		uint32_t		rx_frames_512to1023b;
		uint32_t		rx_frames_1024to1536b;
		uint32_t		rx_frames_undersize;
		uint32_t		rx_frames_oversize;
		uint32_t		rx_frames_jabber;
		uint32_t		rx_frames_fcs_errs;
		uint32_t		rx_frames_length_errs;
		uint32_t		rx_symbol_errs;
		uint32_t		rx_align_errs;
		uint32_t		rx_resource_errs;
		uint32_t		rx_overrun_errs;
		uint32_t		rx_ip_hdr_csum_errs;
		uint32_t		rx_tcp_csum_errs;
		uint32_t		rx_udp_csum_errs;
	} stats;
};
---|
| 201 | |
---|
/* Register access helpers. */
#define RD4(sc, off)		(bus_read_4((sc)->mem_res, (off)))
#define WR4(sc, off, val)	(bus_write_4((sc)->mem_res, (off), (val)))
/* Fixed: the expansion was missing its closing parenthesis, which would
 * fail to compile at any use site (it only survived because the macro
 * is currently unused).
 */
#define BARRIER(sc, off, len, flags) \
	(bus_barrier((sc)->mem_res, (off), (len), (flags)))

/* Driver lock helpers. */
#define CGEM_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
#define CGEM_UNLOCK(sc)		mtx_unlock(&(sc)->sc_mtx)
#define CGEM_LOCK_INIT(sc)	\
	mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \
		 MTX_NETWORK_LOCK, MTX_DEF)
#define CGEM_LOCK_DESTROY(sc)	mtx_destroy(&(sc)->sc_mtx)
#define CGEM_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)
---|
| 214 | |
---|
/* Allow platforms to optionally provide a way to set the reference clock. */
int cgem_set_ref_clk(int unit, int frequency);

static devclass_t cgem_devclass;

/* Forward declarations of driver/newbus entry points defined below. */
static int cgem_probe(device_t dev);
static int cgem_attach(device_t dev);
static int cgem_detach(device_t dev);
static void cgem_tick(void *);
static void cgem_intr(void *);

static void cgem_mediachange(struct cgem_softc *, struct mii_data *);
---|
| 227 | |
---|
| 228 | static void |
---|
| 229 | cgem_get_mac(struct cgem_softc *sc, u_char eaddr[]) |
---|
| 230 | { |
---|
| 231 | int i; |
---|
| 232 | uint32_t rnd; |
---|
| 233 | |
---|
| 234 | /* See if boot loader gave us a MAC address already. */ |
---|
| 235 | for (i = 0; i < 4; i++) { |
---|
| 236 | uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i)); |
---|
| 237 | uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff; |
---|
| 238 | if (low != 0 || high != 0) { |
---|
| 239 | eaddr[0] = low & 0xff; |
---|
| 240 | eaddr[1] = (low >> 8) & 0xff; |
---|
| 241 | eaddr[2] = (low >> 16) & 0xff; |
---|
| 242 | eaddr[3] = (low >> 24) & 0xff; |
---|
| 243 | eaddr[4] = high & 0xff; |
---|
| 244 | eaddr[5] = (high >> 8) & 0xff; |
---|
| 245 | break; |
---|
| 246 | } |
---|
| 247 | } |
---|
| 248 | |
---|
| 249 | /* No MAC from boot loader? Assign a random one. */ |
---|
| 250 | if (i == 4) { |
---|
| 251 | rnd = arc4random(); |
---|
| 252 | |
---|
| 253 | eaddr[0] = 'b'; |
---|
| 254 | eaddr[1] = 's'; |
---|
| 255 | eaddr[2] = 'd'; |
---|
| 256 | eaddr[3] = (rnd >> 16) & 0xff; |
---|
| 257 | eaddr[4] = (rnd >> 8) & 0xff; |
---|
| 258 | eaddr[5] = rnd & 0xff; |
---|
| 259 | |
---|
| 260 | device_printf(sc->dev, "no mac address found, assigning " |
---|
| 261 | "random: %02x:%02x:%02x:%02x:%02x:%02x\n", |
---|
| 262 | eaddr[0], eaddr[1], eaddr[2], |
---|
| 263 | eaddr[3], eaddr[4], eaddr[5]); |
---|
| 264 | } |
---|
| 265 | |
---|
| 266 | /* Move address to first slot and zero out the rest. */ |
---|
| 267 | WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) | |
---|
| 268 | (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]); |
---|
| 269 | WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]); |
---|
| 270 | |
---|
| 271 | for (i = 1; i < 4; i++) { |
---|
| 272 | WR4(sc, CGEM_SPEC_ADDR_LOW(i), 0); |
---|
| 273 | WR4(sc, CGEM_SPEC_ADDR_HI(i), 0); |
---|
| 274 | } |
---|
| 275 | } |
---|
| 276 | |
---|
/* cgem_mac_hash(): map a 48-bit MAC address to a 6-bit hash value.
 * The 6-bit hash corresponds to a bit in the 64-bit hash register
 * (CGEM_HASH_TOP/CGEM_HASH_BOT).  Setting that bit in the hash register
 * enables reception of all frames with a destination address that
 * hashes to that 6-bit value.
 *
 * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech
 * Reference Manual.  Bits 0-5 in the hash are the exclusive-or of
 * every sixth bit in the destination address, starting at that bit.
 *
 * Improvements over the previous revision: the address parameter is
 * const-qualified (it is only read; `unsigned char` is the same type
 * as BSD's u_char typedef), and the return follows the file's
 * parenthesized-return convention.
 */
static int
cgem_mac_hash(const unsigned char eaddr[])
{
	int hash;
	int i, j;

	hash = 0;
	for (i = 0; i < 6; i++)
		for (j = i; j < 48; j += 6)
			if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
				hash ^= (1 << i);

	return (hash);
}
---|
| 301 | |
---|
| 302 | /* After any change in rx flags or multi-cast addresses, set up |
---|
| 303 | * hash registers and net config register bits. |
---|
| 304 | */ |
---|
| 305 | static void |
---|
| 306 | cgem_rx_filter(struct cgem_softc *sc) |
---|
| 307 | { |
---|
| 308 | struct ifnet *ifp = sc->ifp; |
---|
| 309 | struct ifmultiaddr *ifma; |
---|
| 310 | int index; |
---|
| 311 | uint32_t hash_hi, hash_lo; |
---|
| 312 | uint32_t net_cfg; |
---|
| 313 | |
---|
| 314 | hash_hi = 0; |
---|
| 315 | hash_lo = 0; |
---|
| 316 | |
---|
| 317 | net_cfg = RD4(sc, CGEM_NET_CFG); |
---|
| 318 | |
---|
| 319 | net_cfg &= ~(CGEM_NET_CFG_MULTI_HASH_EN | |
---|
| 320 | CGEM_NET_CFG_NO_BCAST | |
---|
| 321 | CGEM_NET_CFG_COPY_ALL); |
---|
| 322 | |
---|
| 323 | if ((ifp->if_flags & IFF_PROMISC) != 0) |
---|
| 324 | net_cfg |= CGEM_NET_CFG_COPY_ALL; |
---|
| 325 | else { |
---|
| 326 | if ((ifp->if_flags & IFF_BROADCAST) == 0) |
---|
| 327 | net_cfg |= CGEM_NET_CFG_NO_BCAST; |
---|
| 328 | if ((ifp->if_flags & IFF_ALLMULTI) != 0) { |
---|
| 329 | hash_hi = 0xffffffff; |
---|
| 330 | hash_lo = 0xffffffff; |
---|
| 331 | } else { |
---|
| 332 | if_maddr_rlock(ifp); |
---|
| 333 | TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { |
---|
| 334 | if (ifma->ifma_addr->sa_family != AF_LINK) |
---|
| 335 | continue; |
---|
| 336 | index = cgem_mac_hash( |
---|
| 337 | LLADDR((struct sockaddr_dl *) |
---|
| 338 | ifma->ifma_addr)); |
---|
| 339 | if (index > 31) |
---|
| 340 | hash_hi |= (1<<(index-32)); |
---|
| 341 | else |
---|
| 342 | hash_lo |= (1<<index); |
---|
| 343 | } |
---|
| 344 | if_maddr_runlock(ifp); |
---|
| 345 | } |
---|
| 346 | |
---|
| 347 | if (hash_hi != 0 || hash_lo != 0) |
---|
| 348 | net_cfg |= CGEM_NET_CFG_MULTI_HASH_EN; |
---|
| 349 | } |
---|
| 350 | |
---|
| 351 | WR4(sc, CGEM_HASH_TOP, hash_hi); |
---|
| 352 | WR4(sc, CGEM_HASH_BOT, hash_lo); |
---|
| 353 | WR4(sc, CGEM_NET_CFG, net_cfg); |
---|
| 354 | } |
---|
| 355 | |
---|
| 356 | /* For bus_dmamap_load() callback. */ |
---|
| 357 | static void |
---|
| 358 | cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) |
---|
| 359 | { |
---|
| 360 | |
---|
| 361 | if (nsegs != 1 || error != 0) |
---|
| 362 | return; |
---|
| 363 | *(bus_addr_t *)arg = segs[0].ds_addr; |
---|
| 364 | } |
---|
| 365 | |
---|
/* Create DMA'able descriptor rings.
 * Returns 0 on success or a bus_dma error code.
 * NOTE(review): on failure, tags/maps created so far are not released
 * here — presumably the caller tears them down via detach; confirm.
 */
static int
cgem_setup_descs(struct cgem_softc *sc)
{
	int i, err;

	sc->txring = NULL;
	sc->rxring = NULL;

	/* Allocate non-cached DMA space for RX and TX descriptors.
	 * One tag sized for the larger ring covers both; single
	 * segment, 32-bit addressable.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
				 BUS_SPACE_MAXADDR_32BIT,
				 BUS_SPACE_MAXADDR,
				 NULL, NULL,
				 MAX_DESC_RING_SIZE,
				 1,
				 MAX_DESC_RING_SIZE,
				 0,
				 busdma_lock_mutex,
				 &sc->sc_mtx,
				 &sc->desc_dma_tag);
	if (err)
		return (err);

	/* Set up a bus_dma_tag for mbufs: cluster-sized buffers in up
	 * to TX_MAX_DMA_SEGS segments, 32-bit addressable.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
				 BUS_SPACE_MAXADDR_32BIT,
				 BUS_SPACE_MAXADDR,
				 NULL, NULL,
				 MCLBYTES,
				 TX_MAX_DMA_SEGS,
				 MCLBYTES,
				 0,
				 busdma_lock_mutex,
				 &sc->sc_mtx,
				 &sc->mbuf_dma_tag);
	if (err)
		return (err);

	/* Allocate DMA memory in non-cacheable space. */
	err = bus_dmamem_alloc(sc->desc_dma_tag,
			       (void **)&sc->rxring,
			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
			       &sc->rxring_dma_map);
	if (err)
		return (err);

	/* Load descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
			      (void *)sc->rxring,
			      CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),
			      cgem_getaddr, &sc->rxring_physaddr,
			      BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize RX descriptors: OWN set (software owns them, no
	 * buffer attached yet) until cgem_fill_rqueue() supplies mbufs.
	 */
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		sc->rxring_m[i] = NULL;
		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
					&sc->rxring_m_dmamap[i]);
		if (err)
			return (err);
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	/* Allocate DMA memory for TX descriptors in non-cacheable space. */
	err = bus_dmamem_alloc(sc->desc_dma_tag,
			       (void **)&sc->txring,
			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
			       &sc->txring_dma_map);
	if (err)
		return (err);

	/* Load TX descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->txring_dma_map,
			      (void *)sc->txring,
			      CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc),
			      cgem_getaddr, &sc->txring_physaddr,
			      BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize TX descriptor ring: every descriptor marked USED
	 * (i.e. free for software to fill in cgem_start_locked()).
	 */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].addr = 0;
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring_m[i] = NULL;
		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
					&sc->txring_m_dmamap[i]);
		if (err)
			return (err);
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	return (0);
}
---|
| 474 | |
---|
/* Fill receive descriptor ring with mbufs, up to the sysctl-tunable
 * fill level sc->rxbufs.  Called with the driver lock held.
 */
static void
cgem_fill_rqueue(struct cgem_softc *sc)
{
	struct mbuf *m = NULL;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	int nsegs;

	CGEM_ASSERT_LOCKED(sc);

	while (sc->rxring_queued < sc->rxbufs) {
		/* Get a cluster mbuf. */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			break;

		m->m_len = MCLBYTES;
		m->m_pkthdr.len = MCLBYTES;
		m->m_pkthdr.rcvif = sc->ifp;

		/* Load map and plug in physical address. */
		if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
			      sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
			      segs, &nsegs, BUS_DMA_NOWAIT)) {
			/* Load failed: count it, free the mbuf, give up. */
			sc->rxdmamapfails++;
			m_free(m);
			break;
		}
		sc->rxring_m[sc->rxring_hd_ptr] = m;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->rxring_m_dmamap[sc->rxring_hd_ptr],
				BUS_DMASYNC_PREREAD);

		/* Write rx descriptor and increment head pointer.  The
		 * ctl word is cleared first; writing the addr word (with
		 * CGEM_RXDESC_OWN clear) is what hands the descriptor to
		 * the controller (cgem_recv() waits for OWN to be set
		 * again), so this ordering matters.
		 */
		sc->rxring[sc->rxring_hd_ptr].ctl = 0;
		if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
			sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
				CGEM_RXDESC_WRAP;
			sc->rxring_hd_ptr = 0;
		} else
			sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;

		sc->rxring_queued++;
	}
}
---|
| 522 | |
---|
/* Pull received packets off of receive descriptor ring.  Completed
 * packets are collected on a local list (chained through m_next) so
 * they can be handed to the stack with the driver lock dropped.
 */
static void
cgem_recv(struct cgem_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct mbuf *m, *m_hd, **m_tl;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* Pick up all packets in which the OWN bit is set (the
	 * controller sets OWN when it has filled a descriptor).
	 */
	m_hd = NULL;
	m_tl = &m_hd;
	while (sc->rxring_queued > 0 &&
	       (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {

		ctl = sc->rxring[sc->rxring_tl_ptr].ctl;

		/* Grab filled mbuf. */
		m = sc->rxring_m[sc->rxring_tl_ptr];
		sc->rxring_m[sc->rxring_tl_ptr] = NULL;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->rxring_m_dmamap[sc->rxring_tl_ptr],
				BUS_DMASYNC_POSTREAD);

		/* Unload dmamap. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
			  sc->rxring_m_dmamap[sc->rxring_tl_ptr]);

		/* Increment tail pointer. */
		if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
			sc->rxring_tl_ptr = 0;
		sc->rxring_queued--;

		/* Check FCS and make sure entire packet landed in one mbuf
		 * cluster (which is much bigger than the largest ethernet
		 * packet).
		 */
		if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
		    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
			   (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
			/* discard. */
			m_free(m);
#ifndef __rtems__
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
#else /* __rtems__ */
			ifp->if_ierrors++;
#endif /* __rtems__ */
			continue;
		}

		/* Ready it to hand off to upper layers. */
		m->m_data += ETHER_ALIGN;
		m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len;

		/* Are we using hardware checksumming?  Check the
		 * status in the receive descriptor.
		 */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* TCP or UDP checks out, IP checks out too. */
			if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
			    (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
				m->m_pkthdr.csum_flags |=
					CSUM_IP_CHECKED | CSUM_IP_VALID |
					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			} else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
				   CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
				/* Only IP checks out. */
				m->m_pkthdr.csum_flags |=
					CSUM_IP_CHECKED | CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Queue it up for delivery below. */
		*m_tl = m;
		m_tl = &m->m_next;
	}

	/* Replenish receive buffers. */
	cgem_fill_rqueue(sc);

	/* Unlock and send up packets.  The lock is dropped around
	 * if_input() so the driver lock is not held while the stack
	 * processes each packet.
	 */
	CGEM_UNLOCK(sc);
	while (m_hd != NULL) {
		m = m_hd;
		m_hd = m_hd->m_next;
		m->m_next = NULL;
#ifndef __rtems__
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
#else /* __rtems__ */
		ifp->if_ipackets++;
#endif /* __rtems__ */
		(*ifp->if_input)(ifp, m);
	}
	CGEM_LOCK(sc);
}
---|
| 627 | |
---|
/* Find completed transmits and free their mbufs.  Walks the ring from
 * the tail pointer for as long as the first descriptor of each frame
 * has CGEM_TXDESC_USED set (the controller's completion marker).
 * Called with the driver lock held.
 */
static void
cgem_clean_tx(struct cgem_softc *sc)
{
	struct mbuf *m;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* free up finished transmits. */
	while (sc->txring_queued > 0 &&
	       ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
		CGEM_TXDESC_USED) != 0) {

		/* Sync cache.  nop? */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->txring_m_dmamap[sc->txring_tl_ptr],
				BUS_DMASYNC_POSTWRITE);

		/* Unload DMA map. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
				  sc->txring_m_dmamap[sc->txring_tl_ptr]);

		/* Free up the mbuf (only the frame's first descriptor
		 * carries the mbuf pointer — see cgem_start_locked()).
		 */
		m = sc->txring_m[sc->txring_tl_ptr];
		sc->txring_m[sc->txring_tl_ptr] = NULL;
		m_freem(m);

		/* Check the status. */
		if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
			/* Serious bus error. log to console. */
			device_printf(sc->dev, "cgem_clean_tx: Whoa! "
				      "AHB error, addr=0x%x\n",
				      sc->txring[sc->txring_tl_ptr].addr);
		} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
				   CGEM_TXDESC_LATE_COLL)) != 0) {
#ifndef __rtems__
			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
#else /* __rtems__ */
			sc->ifp->if_oerrors++;
#endif /* __rtems__ */
		} else
#ifndef __rtems__
			if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
#else /* __rtems__ */
			sc->ifp->if_opackets++;
#endif /* __rtems__ */

		/* If the packet spanned more than one tx descriptor,
		 * skip descriptors until we find the end so that only
		 * start-of-frame descriptors are processed.  Each
		 * continuation descriptor gets its USED bit set so it
		 * is not mistaken for a completed frame on a later pass.
		 */
		while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
			if ((ctl & CGEM_TXDESC_WRAP) != 0)
				sc->txring_tl_ptr = 0;
			else
				sc->txring_tl_ptr++;
			sc->txring_queued--;

			ctl = sc->txring[sc->txring_tl_ptr].ctl;

			sc->txring[sc->txring_tl_ptr].ctl =
				ctl | CGEM_TXDESC_USED;
		}

		/* Next descriptor. */
		if ((ctl & CGEM_TXDESC_WRAP) != 0)
			sc->txring_tl_ptr = 0;
		else
			sc->txring_tl_ptr++;
		sc->txring_queued--;

		/* Room was freed; let cgem_start_locked() queue again. */
		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}
}
---|
| 703 | |
---|
| 704 | /* Start transmits. */ |
---|
| 705 | static void |
---|
| 706 | cgem_start_locked(struct ifnet *ifp) |
---|
| 707 | { |
---|
| 708 | struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc; |
---|
| 709 | struct mbuf *m; |
---|
| 710 | bus_dma_segment_t segs[TX_MAX_DMA_SEGS]; |
---|
| 711 | uint32_t ctl; |
---|
| 712 | int i, nsegs, wrap, err; |
---|
| 713 | |
---|
| 714 | CGEM_ASSERT_LOCKED(sc); |
---|
| 715 | |
---|
| 716 | if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0) |
---|
| 717 | return; |
---|
| 718 | |
---|
| 719 | for (;;) { |
---|
| 720 | /* Check that there is room in the descriptor ring. */ |
---|
| 721 | if (sc->txring_queued >= |
---|
| 722 | CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) { |
---|
| 723 | |
---|
| 724 | /* Try to make room. */ |
---|
| 725 | cgem_clean_tx(sc); |
---|
| 726 | |
---|
| 727 | /* Still no room? */ |
---|
| 728 | if (sc->txring_queued >= |
---|
| 729 | CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) { |
---|
| 730 | ifp->if_drv_flags |= IFF_DRV_OACTIVE; |
---|
| 731 | sc->txfull++; |
---|
| 732 | break; |
---|
| 733 | } |
---|
| 734 | } |
---|
| 735 | |
---|
| 736 | /* Grab next transmit packet. */ |
---|
| 737 | IFQ_DRV_DEQUEUE(&ifp->if_snd, m); |
---|
| 738 | if (m == NULL) |
---|
| 739 | break; |
---|
| 740 | |
---|
| 741 | /* Load DMA map. */ |
---|
| 742 | err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag, |
---|
| 743 | sc->txring_m_dmamap[sc->txring_hd_ptr], |
---|
| 744 | m, segs, &nsegs, BUS_DMA_NOWAIT); |
---|
| 745 | if (err == EFBIG) { |
---|
| 746 | /* Too many segments! defrag and try again. */ |
---|
| 747 | struct mbuf *m2 = m_defrag(m, M_NOWAIT); |
---|
| 748 | |
---|
| 749 | if (m2 == NULL) { |
---|
| 750 | sc->txdefragfails++; |
---|
| 751 | m_freem(m); |
---|
| 752 | continue; |
---|
| 753 | } |
---|
| 754 | m = m2; |
---|
| 755 | err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag, |
---|
| 756 | sc->txring_m_dmamap[sc->txring_hd_ptr], |
---|
| 757 | m, segs, &nsegs, BUS_DMA_NOWAIT); |
---|
| 758 | sc->txdefrags++; |
---|
| 759 | } |
---|
| 760 | if (err) { |
---|
| 761 | /* Give up. */ |
---|
| 762 | m_freem(m); |
---|
| 763 | sc->txdmamapfails++; |
---|
| 764 | continue; |
---|
| 765 | } |
---|
| 766 | sc->txring_m[sc->txring_hd_ptr] = m; |
---|
| 767 | |
---|
| 768 | /* Sync tx buffer with cache. */ |
---|
| 769 | bus_dmamap_sync(sc->mbuf_dma_tag, |
---|
| 770 | sc->txring_m_dmamap[sc->txring_hd_ptr], |
---|
| 771 | BUS_DMASYNC_PREWRITE); |
---|
| 772 | |
---|
| 773 | /* Set wrap flag if next packet might run off end of ring. */ |
---|
| 774 | wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >= |
---|
| 775 | CGEM_NUM_TX_DESCS; |
---|
| 776 | |
---|
| 777 | /* Fill in the TX descriptors back to front so that USED |
---|
| 778 | * bit in first descriptor is cleared last. |
---|
| 779 | */ |
---|
| 780 | for (i = nsegs - 1; i >= 0; i--) { |
---|
| 781 | /* Descriptor address. */ |
---|
| 782 | sc->txring[sc->txring_hd_ptr + i].addr = |
---|
| 783 | segs[i].ds_addr; |
---|
| 784 | |
---|
| 785 | /* Descriptor control word. */ |
---|
| 786 | ctl = segs[i].ds_len; |
---|
| 787 | if (i == nsegs - 1) { |
---|
| 788 | ctl |= CGEM_TXDESC_LAST_BUF; |
---|
| 789 | if (wrap) |
---|
| 790 | ctl |= CGEM_TXDESC_WRAP; |
---|
| 791 | } |
---|
| 792 | sc->txring[sc->txring_hd_ptr + i].ctl = ctl; |
---|
| 793 | |
---|
| 794 | if (i != 0) |
---|
| 795 | sc->txring_m[sc->txring_hd_ptr + i] = NULL; |
---|
| 796 | } |
---|
| 797 | |
---|
| 798 | if (wrap) |
---|
| 799 | sc->txring_hd_ptr = 0; |
---|
| 800 | else |
---|
| 801 | sc->txring_hd_ptr += nsegs; |
---|
| 802 | sc->txring_queued += nsegs; |
---|
| 803 | |
---|
| 804 | /* Kick the transmitter. */ |
---|
| 805 | WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow | |
---|
| 806 | CGEM_NET_CTRL_START_TX); |
---|
| 807 | |
---|
| 808 | /* If there is a BPF listener, bounce a copy to him. */
---|
| 809 | ETHER_BPF_MTAP(ifp, m); |
---|
| 810 | } |
---|
| 811 | } |
---|
| 812 | |
---|
| 813 | static void |
---|
| 814 | cgem_start(struct ifnet *ifp) |
---|
| 815 | { |
---|
| 816 | struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc; |
---|
| 817 | |
---|
| 818 | CGEM_LOCK(sc); |
---|
| 819 | cgem_start_locked(ifp); |
---|
| 820 | CGEM_UNLOCK(sc); |
---|
| 821 | } |
---|
| 822 | |
---|
/*
 * Accumulate the hardware statistics registers into the software
 * counters in the softc.  The GEM statistics registers are
 * clear-on-read, so each read value is simply added to the running
 * total.  Collision counts are additionally folded into the interface
 * collision counter.  Must be called with the driver lock held.
 */
static void
cgem_poll_hw_stats(struct cgem_softc *sc)
{
	uint32_t n;

	CGEM_ASSERT_LOCKED(sc);

	/* Transmit octet count is split across two 32-bit registers. */
	sc->stats.tx_bytes += RD4(sc, CGEM_OCTETS_TX_BOT);
	sc->stats.tx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32;

	sc->stats.tx_frames += RD4(sc, CGEM_FRAMES_TX);
	sc->stats.tx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_TX);
	sc->stats.tx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_TX);
	sc->stats.tx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_TX);
	sc->stats.tx_frames_64b += RD4(sc, CGEM_FRAMES_64B_TX);
	sc->stats.tx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_TX);
	sc->stats.tx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_TX);
	sc->stats.tx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_TX);
	sc->stats.tx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_TX);
	sc->stats.tx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_TX);
	sc->stats.tx_under_runs += RD4(sc, CGEM_TX_UNDERRUNS);

	/*
	 * Each collision category is counted separately and also summed
	 * into the interface-level collision statistic.
	 */
	n = RD4(sc, CGEM_SINGLE_COLL_FRAMES);
	sc->stats.tx_single_collisn += n;
#ifndef __rtems__
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
#else /* __rtems__ */
	sc->ifp->if_collisions += n;
#endif /* __rtems__ */
	n = RD4(sc, CGEM_MULTI_COLL_FRAMES);
	sc->stats.tx_multi_collisn += n;
#ifndef __rtems__
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
#else /* __rtems__ */
	sc->ifp->if_collisions += n;
#endif /* __rtems__ */
	n = RD4(sc, CGEM_EXCESSIVE_COLL_FRAMES);
	sc->stats.tx_excsv_collisn += n;
#ifndef __rtems__
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
#else /* __rtems__ */
	sc->ifp->if_collisions += n;
#endif /* __rtems__ */
	n = RD4(sc, CGEM_LATE_COLL);
	sc->stats.tx_late_collisn += n;
#ifndef __rtems__
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
#else /* __rtems__ */
	sc->ifp->if_collisions += n;
#endif /* __rtems__ */

	sc->stats.tx_deferred_frames += RD4(sc, CGEM_DEFERRED_TX_FRAMES);
	sc->stats.tx_carrier_sense_errs += RD4(sc, CGEM_CARRIER_SENSE_ERRS);

	/* Receive octet count is also split bottom/top. */
	sc->stats.rx_bytes += RD4(sc, CGEM_OCTETS_RX_BOT);
	sc->stats.rx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_RX_TOP) << 32;

	sc->stats.rx_frames += RD4(sc, CGEM_FRAMES_RX);
	sc->stats.rx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_RX);
	sc->stats.rx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_RX);
	sc->stats.rx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_RX);
	sc->stats.rx_frames_64b += RD4(sc, CGEM_FRAMES_64B_RX);
	sc->stats.rx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_RX);
	sc->stats.rx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_RX);
	sc->stats.rx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_RX);
	sc->stats.rx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_RX);
	sc->stats.rx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_RX);
	sc->stats.rx_frames_undersize += RD4(sc, CGEM_UNDERSZ_RX);
	sc->stats.rx_frames_oversize += RD4(sc, CGEM_OVERSZ_RX);
	sc->stats.rx_frames_jabber += RD4(sc, CGEM_JABBERS_RX);
	sc->stats.rx_frames_fcs_errs += RD4(sc, CGEM_FCS_ERRS);
	sc->stats.rx_frames_length_errs += RD4(sc, CGEM_LENGTH_FIELD_ERRS);
	sc->stats.rx_symbol_errs += RD4(sc, CGEM_RX_SYMBOL_ERRS);
	sc->stats.rx_align_errs += RD4(sc, CGEM_ALIGN_ERRS);
	sc->stats.rx_resource_errs += RD4(sc, CGEM_RX_RESOURCE_ERRS);
	sc->stats.rx_overrun_errs += RD4(sc, CGEM_RX_OVERRUN_ERRS);
	sc->stats.rx_ip_hdr_csum_errs += RD4(sc, CGEM_IP_HDR_CKSUM_ERRS);
	sc->stats.rx_tcp_csum_errs += RD4(sc, CGEM_TCP_CKSUM_ERRS);
	sc->stats.rx_udp_csum_errs += RD4(sc, CGEM_UDP_CKSUM_ERRS);
}
---|
| 903 | |
---|
/*
 * Once-per-second callout: poll the PHY and hardware statistics, and
 * apply the receiver-hang work-around if enabled.  Runs with the
 * driver lock held (asserted below) and re-arms itself.
 */
static void
cgem_tick(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	/* Poll the phy. */
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		mii_tick(mii);
	}

	/* Poll statistics registers. */
	cgem_poll_hw_stats(sc);

	/*
	 * Check for receiver hang: if no frames have been received since
	 * the last tick, assume the receiver is wedged and reset it.
	 */
	if (sc->rxhangwar && sc->rx_frames_prev == sc->stats.rx_frames) {
		/*
		 * Reset receiver logic by toggling RX_EN bit.  1usec
		 * delay is necessary especially when operating at 100mbps
		 * and 10mbps speeds.
		 */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow &
		    ~CGEM_NET_CTRL_RX_EN);
		DELAY(1);
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
	}
	sc->rx_frames_prev = sc->stats.rx_frames;

	/* Next callout in one second. */
	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}
---|
| 938 | |
---|
/* Interrupt handler. */
static void
cgem_intr(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	uint32_t istatus;

	CGEM_LOCK(sc);

	/* Ignore interrupts that arrive while the interface is down. */
	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		CGEM_UNLOCK(sc);
		return;
	}

	/* Read interrupt status and immediately clear the bits. */
	istatus = RD4(sc, CGEM_INTR_STAT);
	WR4(sc, CGEM_INTR_STAT, istatus);

	/* Packets received. */
	if ((istatus & CGEM_INTR_RX_COMPLETE) != 0)
		cgem_recv(sc);

	/* Free up any completed transmit buffers. */
	cgem_clean_tx(sc);

	/* Hresp not ok.  Something is very bad with DMA.  Try to clear. */
	if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
		device_printf(sc->dev, "cgem_intr: hresp not okay! "
		    "rx_status=0x%x\n", RD4(sc, CGEM_RX_STAT));
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
	}

	/* Receiver overrun. */
	if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
		/* Clear status bit. */
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_OVERRUN);
		sc->rxoverruns++;
	}

	/*
	 * Receiver ran out of bufs: flush the packet buffer DPRAM and
	 * refill the receive descriptor ring.
	 */
	if ((istatus & CGEM_INTR_RX_USED_READ) != 0) {
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_FLUSH_DPRAM_PKT);
		cgem_fill_rqueue(sc);
		sc->rxnobufs++;
	}

	/* Restart transmitter if needed. */
	if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
		cgem_start_locked(sc->ifp);

	CGEM_UNLOCK(sc);
}
---|
| 992 | |
---|
/*
 * Reset hardware.  Disables the controller, clears statistics,
 * interrupt status, hash filters, and queue base registers, then
 * re-enables only the management (MDIO) port so PHY access keeps
 * working while the interface is down.  Called with the lock held.
 */
static void
cgem_reset(struct cgem_softc *sc)
{

	CGEM_ASSERT_LOCKED(sc);

	WR4(sc, CGEM_NET_CTRL, 0);
	WR4(sc, CGEM_NET_CFG, 0);
	WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
	WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
	WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
	WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
	WR4(sc, CGEM_HASH_BOT, 0);
	WR4(sc, CGEM_HASH_TOP, 0);
	WR4(sc, CGEM_TX_QBAR, 0);	/* manual says do this. */
	WR4(sc, CGEM_RX_QBAR, 0);

	/* Get management port running even if interface is down. */
	WR4(sc, CGEM_NET_CFG,
	    CGEM_NET_CFG_DBUS_WIDTH_32 |
	    CGEM_NET_CFG_MDC_CLK_DIV_64);

	sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
}
---|
| 1019 | |
---|
/*
 * Bring up the hardware: program the network and DMA configuration
 * registers, point the controller at the descriptor rings, enable
 * rx/tx, set the station address, and unmask interrupts.  Called with
 * the driver lock held.
 */
static void
cgem_config(struct cgem_softc *sc)
{
	uint32_t net_cfg;
	uint32_t dma_cfg;
	u_char *eaddr = IF_LLADDR(sc->ifp);

	CGEM_ASSERT_LOCKED(sc);

	/*
	 * Program Net Config Register.  Speed/duplex bits here are an
	 * initial setting; cgem_mediachange() reprograms them to match
	 * whatever the PHY negotiates.
	 */
	net_cfg = CGEM_NET_CFG_DBUS_WIDTH_32 |
	    CGEM_NET_CFG_MDC_CLK_DIV_64 |
	    CGEM_NET_CFG_FCS_REMOVE |
	    CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
	    CGEM_NET_CFG_GIGE_EN |
	    CGEM_NET_CFG_1536RXEN |
	    CGEM_NET_CFG_FULL_DUPLEX |
	    CGEM_NET_CFG_SPEED100;

	/* Enable receive checksum offloading? */
	if ((sc->ifp->if_capenable & IFCAP_RXCSUM) != 0)
		net_cfg |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;

	WR4(sc, CGEM_NET_CFG, net_cfg);

	/* Program DMA Config Register. */
	dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
	    CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
	    CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
	    CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 |
	    CGEM_DMA_CFG_DISC_WHEN_NO_AHB;

	/* Enable transmit checksum offloading? */
	if ((sc->ifp->if_capenable & IFCAP_TXCSUM) != 0)
		dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;

	WR4(sc, CGEM_DMA_CFG, dma_cfg);

	/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
	WR4(sc, CGEM_RX_QBAR, (uint32_t) sc->rxring_physaddr);
	WR4(sc, CGEM_TX_QBAR, (uint32_t) sc->txring_physaddr);

	/* Enable rx and tx. */
	sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);

	/*
	 * Set receive address in case it changed.  The MAC address is
	 * stored little-endian across the low/high register pair.
	 */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

	/* Set up interrupts. */
	WR4(sc, CGEM_INTR_EN,
	    CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN |
	    CGEM_INTR_TX_USED_READ | CGEM_INTR_RX_USED_READ |
	    CGEM_INTR_HRESP_NOT_OK);
}
---|
| 1078 | |
---|
/*
 * Turn on interface and load up receive ring with buffers.  No-op if
 * the interface is already running.  Called with the driver lock held.
 */
static void
cgem_init_locked(struct cgem_softc *sc)
{
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/* Program the hardware and stock the receive ring. */
	cgem_config(sc);
	cgem_fill_rqueue(sc);

	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Renegotiate media and start the periodic tick. */
	mii = device_get_softc(sc->miibus);
	mii_mediachg(mii);

	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}
---|
| 1101 | |
---|
/* if_init entry point: bring the interface up under the driver lock. */
static void
cgem_init(void *arg)
{
	struct cgem_softc *sc;

	sc = (struct cgem_softc *)arg;
	CGEM_LOCK(sc);
	cgem_init_locked(sc);
	CGEM_UNLOCK(sc);
}
---|
| 1111 | |
---|
| 1112 | /* Turn off interface. Free up any buffers in transmit or receive queues. */ |
---|
| 1113 | static void |
---|
| 1114 | cgem_stop(struct cgem_softc *sc) |
---|
| 1115 | { |
---|
| 1116 | int i; |
---|
| 1117 | |
---|
| 1118 | CGEM_ASSERT_LOCKED(sc); |
---|
| 1119 | |
---|
| 1120 | callout_stop(&sc->tick_ch); |
---|
| 1121 | |
---|
| 1122 | /* Shut down hardware. */ |
---|
| 1123 | cgem_reset(sc); |
---|
| 1124 | |
---|
| 1125 | /* Clear out transmit queue. */ |
---|
| 1126 | for (i = 0; i < CGEM_NUM_TX_DESCS; i++) { |
---|
| 1127 | sc->txring[i].ctl = CGEM_TXDESC_USED; |
---|
| 1128 | sc->txring[i].addr = 0; |
---|
| 1129 | if (sc->txring_m[i]) { |
---|
| 1130 | bus_dmamap_unload(sc->mbuf_dma_tag, |
---|
| 1131 | sc->txring_m_dmamap[i]); |
---|
| 1132 | m_freem(sc->txring_m[i]); |
---|
| 1133 | sc->txring_m[i] = NULL; |
---|
| 1134 | } |
---|
| 1135 | } |
---|
| 1136 | sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP; |
---|
| 1137 | |
---|
| 1138 | sc->txring_hd_ptr = 0; |
---|
| 1139 | sc->txring_tl_ptr = 0; |
---|
| 1140 | sc->txring_queued = 0; |
---|
| 1141 | |
---|
| 1142 | /* Clear out receive queue. */ |
---|
| 1143 | for (i = 0; i < CGEM_NUM_RX_DESCS; i++) { |
---|
| 1144 | sc->rxring[i].addr = CGEM_RXDESC_OWN; |
---|
| 1145 | sc->rxring[i].ctl = 0; |
---|
| 1146 | if (sc->rxring_m[i]) { |
---|
| 1147 | /* Unload dmamap. */ |
---|
| 1148 | bus_dmamap_unload(sc->mbuf_dma_tag, |
---|
| 1149 | sc->rxring_m_dmamap[sc->rxring_tl_ptr]); |
---|
| 1150 | |
---|
| 1151 | m_freem(sc->rxring_m[i]); |
---|
| 1152 | sc->rxring_m[i] = NULL; |
---|
| 1153 | } |
---|
| 1154 | } |
---|
| 1155 | sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP; |
---|
| 1156 | |
---|
| 1157 | sc->rxring_hd_ptr = 0; |
---|
| 1158 | sc->rxring_tl_ptr = 0; |
---|
| 1159 | sc->rxring_queued = 0; |
---|
| 1160 | |
---|
| 1161 | /* Force next statchg or linkchg to program net config register. */ |
---|
| 1162 | sc->mii_media_active = 0; |
---|
| 1163 | } |
---|
| 1164 | |
---|
| 1165 | |
---|
/*
 * Interface ioctl handler.
 *
 * Handles up/down transitions and promiscuous/allmulti changes
 * (SIOCSIFFLAGS), multicast filter updates (SIOCADDMULTI/SIOCDELMULTI),
 * media selection (SIOCSIFMEDIA/SIOCGIFMEDIA), and checksum-offload
 * capability toggles (SIOCSIFCAP).  Everything else is passed through
 * to ether_ioctl().  Returns 0 or an errno value.
 */
static int
cgem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct cgem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFFLAGS:
		CGEM_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				/*
				 * Already running: only reprogram the rx
				 * filter if PROMISC/ALLMULTI changed.
				 */
				if (((ifp->if_flags ^ sc->if_old_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					cgem_rx_filter(sc);
				}
			} else {
				cgem_init_locked(sc);
			}
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			/* Interface going down: stop the hardware. */
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			cgem_stop(sc);
		}
		/* Remember flags so the next call can detect changes. */
		sc->if_old_flags = ifp->if_flags;
		CGEM_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Set up multi-cast filters. */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			CGEM_LOCK(sc);
			cgem_rx_filter(sc);
			CGEM_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Delegate media ioctls to the MII layer. */
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		CGEM_LOCK(sc);
		/* mask holds the capability bits being toggled. */
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;

		if ((mask & IFCAP_TXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
				/* Turn on TX checksumming. */
				ifp->if_capenable |= (IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6);
				ifp->if_hwassist |= CGEM_CKSUM_ASSIST;

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) |
				    CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			} else {
				/* Turn off TX checksumming. */
				ifp->if_capenable &= ~(IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6);
				ifp->if_hwassist &= ~CGEM_CKSUM_ASSIST;

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) &
				    ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
				/* Turn on RX checksumming. */
				ifp->if_capenable |= (IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6);
				WR4(sc, CGEM_NET_CFG,
				    RD4(sc, CGEM_NET_CFG) |
				    CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
			} else {
				/* Turn off RX checksumming. */
				ifp->if_capenable &= ~(IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6);
				WR4(sc, CGEM_NET_CFG,
				    RD4(sc, CGEM_NET_CFG) &
				    ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
			}
		}
		/* VLAN hw csum follows only when both rx and tx are on. */
		if ((ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_TXCSUM)) ==
		    (IFCAP_RXCSUM | IFCAP_TXCSUM))
			ifp->if_capenable |= IFCAP_VLAN_HWCSUM;
		else
			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;

		CGEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
---|
| 1267 | |
---|
| 1268 | /* MII bus support routines. |
---|
| 1269 | */ |
---|
| 1270 | static void |
---|
| 1271 | cgem_child_detached(device_t dev, device_t child) |
---|
| 1272 | { |
---|
| 1273 | struct cgem_softc *sc = device_get_softc(dev); |
---|
| 1274 | |
---|
| 1275 | if (child == sc->miibus) |
---|
| 1276 | sc->miibus = NULL; |
---|
| 1277 | } |
---|
| 1278 | |
---|
/*
 * ifmedia change callback: reset all PHYs and renegotiate media, but
 * only if the interface is marked up.  Returns 0 or an errno from
 * mii_mediachg().
 */
static int
cgem_ifmedia_upd(struct ifnet *ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error = 0;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) != 0) {
		/* Reset every attached PHY before renegotiating. */
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			PHY_RESET(miisc);
		error = mii_mediachg(mii);
	}
	CGEM_UNLOCK(sc);

	return (error);
}
---|
| 1298 | |
---|
| 1299 | static void |
---|
| 1300 | cgem_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) |
---|
| 1301 | { |
---|
| 1302 | struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc; |
---|
| 1303 | struct mii_data *mii; |
---|
| 1304 | |
---|
| 1305 | mii = device_get_softc(sc->miibus); |
---|
| 1306 | CGEM_LOCK(sc); |
---|
| 1307 | mii_pollstat(mii); |
---|
| 1308 | ifmr->ifm_active = mii->mii_media_active; |
---|
| 1309 | ifmr->ifm_status = mii->mii_media_status; |
---|
| 1310 | CGEM_UNLOCK(sc); |
---|
| 1311 | } |
---|
| 1312 | |
---|
/*
 * Read a PHY register over the MDIO (PHY maintenance) interface.
 * Busy-waits up to ~1ms (200 x 5us) for the controller to go idle.
 * Returns the 16-bit register value, or -1 on timeout.
 */
static int
cgem_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries, val;

	/* Issue a clause-22 read for (phy, reg). */
	WR4(sc, CGEM_PHY_MAINT,
	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
	    CGEM_PHY_MAINT_OP_READ |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));

	/* Wait for completion. */
	tries=0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy read timeout: %d\n", reg);
			return (-1);
		}
	}

	val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;

	if (reg == MII_EXTSR)
		/*
		 * MAC does not support half-duplex at gig speeds.
		 * Let mii(4) exclude the capability.
		 */
		val &= ~(EXTSR_1000XHDX | EXTSR_1000THDX);

	return (val);
}
---|
| 1346 | |
---|
| 1347 | static int |
---|
| 1348 | cgem_miibus_writereg(device_t dev, int phy, int reg, int data) |
---|
| 1349 | { |
---|
| 1350 | struct cgem_softc *sc = device_get_softc(dev); |
---|
| 1351 | int tries; |
---|
| 1352 | |
---|
| 1353 | WR4(sc, CGEM_PHY_MAINT, |
---|
| 1354 | CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 | |
---|
| 1355 | CGEM_PHY_MAINT_OP_WRITE | |
---|
| 1356 | (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) | |
---|
| 1357 | (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) | |
---|
| 1358 | (data & CGEM_PHY_MAINT_DATA_MASK)); |
---|
| 1359 | |
---|
| 1360 | /* Wait for completion. */ |
---|
| 1361 | tries = 0; |
---|
| 1362 | while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) { |
---|
| 1363 | DELAY(5); |
---|
| 1364 | if (++tries > 200) { |
---|
| 1365 | device_printf(dev, "phy write timeout: %d\n", reg); |
---|
| 1366 | return (-1); |
---|
| 1367 | } |
---|
| 1368 | } |
---|
| 1369 | |
---|
| 1370 | return (0); |
---|
| 1371 | } |
---|
| 1372 | |
---|
| 1373 | static void |
---|
| 1374 | cgem_miibus_statchg(device_t dev) |
---|
| 1375 | { |
---|
| 1376 | struct cgem_softc *sc = device_get_softc(dev); |
---|
| 1377 | struct mii_data *mii = device_get_softc(sc->miibus); |
---|
| 1378 | |
---|
| 1379 | CGEM_ASSERT_LOCKED(sc); |
---|
| 1380 | |
---|
| 1381 | if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == |
---|
| 1382 | (IFM_ACTIVE | IFM_AVALID) && |
---|
| 1383 | sc->mii_media_active != mii->mii_media_active) |
---|
| 1384 | cgem_mediachange(sc, mii); |
---|
| 1385 | } |
---|
| 1386 | |
---|
| 1387 | static void |
---|
| 1388 | cgem_miibus_linkchg(device_t dev) |
---|
| 1389 | { |
---|
| 1390 | struct cgem_softc *sc = device_get_softc(dev); |
---|
| 1391 | struct mii_data *mii = device_get_softc(sc->miibus); |
---|
| 1392 | |
---|
| 1393 | CGEM_ASSERT_LOCKED(sc); |
---|
| 1394 | |
---|
| 1395 | if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == |
---|
| 1396 | (IFM_ACTIVE | IFM_AVALID) && |
---|
| 1397 | sc->mii_media_active != mii->mii_media_active) |
---|
| 1398 | cgem_mediachange(sc, mii); |
---|
| 1399 | } |
---|
| 1400 | |
---|
| 1401 | /* |
---|
| 1402 | * Overridable weak symbol cgem_set_ref_clk(). This allows platforms to |
---|
| 1403 | * provide a function to set the cgem's reference clock. |
---|
| 1404 | */ |
---|
| 1405 | static int __used |
---|
| 1406 | cgem_default_set_ref_clk(int unit, int frequency) |
---|
| 1407 | { |
---|
| 1408 | |
---|
| 1409 | return 0; |
---|
| 1410 | } |
---|
| 1411 | __weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk); |
---|
| 1412 | |
---|
/*
 * Call to set reference clock and network config bits according to
 * media.  Programs GIGE/SPEED100/FULL_DUPLEX in the net config
 * register and selects the reference clock frequency for the
 * negotiated speed (125MHz for 1000Mb, 25MHz for 100Mb, 2.5MHz
 * otherwise).  Called with the driver lock held.
 */
static void
cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii)
{
	uint32_t net_cfg;
	int ref_clk_freq;

	CGEM_ASSERT_LOCKED(sc);

	/* Update hardware to reflect media. */
	net_cfg = RD4(sc, CGEM_NET_CFG);
	/* Clear the speed/duplex bits before setting the new ones. */
	net_cfg &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
	    CGEM_NET_CFG_FULL_DUPLEX);

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		net_cfg |= (CGEM_NET_CFG_SPEED100 |
		    CGEM_NET_CFG_GIGE_EN);
		ref_clk_freq = 125000000;
		break;
	case IFM_100_TX:
		net_cfg |= CGEM_NET_CFG_SPEED100;
		ref_clk_freq = 25000000;
		break;
	default:
		/* 10Mb (or anything else): all speed bits stay clear. */
		ref_clk_freq = 2500000;
	}

	if ((mii->mii_media_active & IFM_FDX) != 0)
		net_cfg |= CGEM_NET_CFG_FULL_DUPLEX;

	WR4(sc, CGEM_NET_CFG, net_cfg);

	/* Set the reference clock if necessary (weak platform hook). */
	if (cgem_set_ref_clk(sc->ref_clk_num, ref_clk_freq))
		device_printf(sc->dev, "cgem_mediachange: "
		    "could not set ref clk%d to %d.\n",
		    sc->ref_clk_num, ref_clk_freq);

	/* Remember the media so statchg/linkchg can detect changes. */
	sc->mii_media_active = mii->mii_media_active;
}
---|
| 1454 | |
---|
| 1455 | static void |
---|
| 1456 | cgem_add_sysctls(device_t dev) |
---|
| 1457 | { |
---|
[4ff97c8] | 1458 | #ifndef __rtems__ |
---|
[b8e0c66] | 1459 | struct cgem_softc *sc = device_get_softc(dev); |
---|
| 1460 | struct sysctl_ctx_list *ctx; |
---|
| 1461 | struct sysctl_oid_list *child; |
---|
| 1462 | struct sysctl_oid *tree; |
---|
| 1463 | |
---|
| 1464 | ctx = device_get_sysctl_ctx(dev); |
---|
| 1465 | child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); |
---|
| 1466 | |
---|
| 1467 | SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxbufs", CTLFLAG_RW, |
---|
| 1468 | &sc->rxbufs, 0, |
---|
| 1469 | "Number receive buffers to provide"); |
---|
| 1470 | |
---|
| 1471 | SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxhangwar", CTLFLAG_RW, |
---|
| 1472 | &sc->rxhangwar, 0, |
---|
| 1473 | "Enable receive hang work-around"); |
---|
| 1474 | |
---|
| 1475 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxoverruns", CTLFLAG_RD, |
---|
| 1476 | &sc->rxoverruns, 0, |
---|
| 1477 | "Receive overrun events"); |
---|
| 1478 | |
---|
| 1479 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxnobufs", CTLFLAG_RD, |
---|
| 1480 | &sc->rxnobufs, 0, |
---|
| 1481 | "Receive buf queue empty events"); |
---|
| 1482 | |
---|
| 1483 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxdmamapfails", CTLFLAG_RD, |
---|
| 1484 | &sc->rxdmamapfails, 0, |
---|
| 1485 | "Receive DMA map failures"); |
---|
| 1486 | |
---|
| 1487 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txfull", CTLFLAG_RD, |
---|
| 1488 | &sc->txfull, 0, |
---|
| 1489 | "Transmit ring full events"); |
---|
| 1490 | |
---|
| 1491 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdmamapfails", CTLFLAG_RD, |
---|
| 1492 | &sc->txdmamapfails, 0, |
---|
| 1493 | "Transmit DMA map failures"); |
---|
| 1494 | |
---|
| 1495 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefrags", CTLFLAG_RD, |
---|
| 1496 | &sc->txdefrags, 0, |
---|
| 1497 | "Transmit m_defrag() calls"); |
---|
| 1498 | |
---|
| 1499 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefragfails", CTLFLAG_RD, |
---|
| 1500 | &sc->txdefragfails, 0, |
---|
| 1501 | "Transmit m_defrag() failures"); |
---|
| 1502 | |
---|
| 1503 | tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, |
---|
| 1504 | NULL, "GEM statistics"); |
---|
| 1505 | child = SYSCTL_CHILDREN(tree); |
---|
| 1506 | |
---|
| 1507 | SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_bytes", CTLFLAG_RD, |
---|
| 1508 | &sc->stats.tx_bytes, "Total bytes transmitted"); |
---|
| 1509 | |
---|
| 1510 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames", CTLFLAG_RD, |
---|
| 1511 | &sc->stats.tx_frames, 0, "Total frames transmitted"); |
---|
| 1512 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_bcast", CTLFLAG_RD, |
---|
| 1513 | &sc->stats.tx_frames_bcast, 0, |
---|
| 1514 | "Number broadcast frames transmitted"); |
---|
| 1515 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_multi", CTLFLAG_RD, |
---|
| 1516 | &sc->stats.tx_frames_multi, 0, |
---|
| 1517 | "Number multicast frames transmitted"); |
---|
| 1518 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_pause", |
---|
| 1519 | CTLFLAG_RD, &sc->stats.tx_frames_pause, 0, |
---|
| 1520 | "Number pause frames transmitted"); |
---|
| 1521 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_64b", CTLFLAG_RD, |
---|
| 1522 | &sc->stats.tx_frames_64b, 0, |
---|
| 1523 | "Number frames transmitted of size 64 bytes or less"); |
---|
| 1524 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_65to127b", CTLFLAG_RD, |
---|
| 1525 | &sc->stats.tx_frames_65to127b, 0, |
---|
| 1526 | "Number frames transmitted of size 65-127 bytes"); |
---|
| 1527 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_128to255b", |
---|
| 1528 | CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0, |
---|
| 1529 | "Number frames transmitted of size 128-255 bytes"); |
---|
| 1530 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_256to511b", |
---|
| 1531 | CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0, |
---|
| 1532 | "Number frames transmitted of size 256-511 bytes"); |
---|
| 1533 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_512to1023b", |
---|
| 1534 | CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0, |
---|
| 1535 | "Number frames transmitted of size 512-1023 bytes"); |
---|
| 1536 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_1024to1536b", |
---|
| 1537 | CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0, |
---|
| 1538 | "Number frames transmitted of size 1024-1536 bytes"); |
---|
| 1539 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_under_runs", |
---|
| 1540 | CTLFLAG_RD, &sc->stats.tx_under_runs, 0, |
---|
| 1541 | "Number transmit under-run events"); |
---|
| 1542 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_single_collisn", |
---|
| 1543 | CTLFLAG_RD, &sc->stats.tx_single_collisn, 0, |
---|
| 1544 | "Number single-collision transmit frames"); |
---|
| 1545 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_multi_collisn", |
---|
| 1546 | CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0, |
---|
| 1547 | "Number multi-collision transmit frames"); |
---|
| 1548 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_excsv_collisn", |
---|
| 1549 | CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0, |
---|
| 1550 | "Number excessive collision transmit frames"); |
---|
| 1551 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_late_collisn", |
---|
| 1552 | CTLFLAG_RD, &sc->stats.tx_late_collisn, 0, |
---|
| 1553 | "Number late-collision transmit frames"); |
---|
| 1554 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_deferred_frames", |
---|
| 1555 | CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0, |
---|
| 1556 | "Number deferred transmit frames"); |
---|
| 1557 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_carrier_sense_errs", |
---|
| 1558 | CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0, |
---|
| 1559 | "Number carrier sense errors on transmit"); |
---|
| 1560 | |
---|
| 1561 | SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_bytes", CTLFLAG_RD, |
---|
| 1562 | &sc->stats.rx_bytes, "Total bytes received"); |
---|
| 1563 | |
---|
| 1564 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames", CTLFLAG_RD, |
---|
| 1565 | &sc->stats.rx_frames, 0, "Total frames received"); |
---|
| 1566 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_bcast", |
---|
| 1567 | CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0, |
---|
| 1568 | "Number broadcast frames received"); |
---|
| 1569 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_multi", |
---|
| 1570 | CTLFLAG_RD, &sc->stats.rx_frames_multi, 0, |
---|
| 1571 | "Number multicast frames received"); |
---|
| 1572 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_pause", |
---|
| 1573 | CTLFLAG_RD, &sc->stats.rx_frames_pause, 0, |
---|
| 1574 | "Number pause frames received"); |
---|
| 1575 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_64b", |
---|
| 1576 | CTLFLAG_RD, &sc->stats.rx_frames_64b, 0, |
---|
| 1577 | "Number frames received of size 64 bytes or less"); |
---|
| 1578 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_65to127b", |
---|
| 1579 | CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0, |
---|
| 1580 | "Number frames received of size 65-127 bytes"); |
---|
| 1581 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_128to255b", |
---|
| 1582 | CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0, |
---|
| 1583 | "Number frames received of size 128-255 bytes"); |
---|
| 1584 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_256to511b", |
---|
| 1585 | CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0, |
---|
| 1586 | "Number frames received of size 256-511 bytes"); |
---|
| 1587 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_512to1023b", |
---|
| 1588 | CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0, |
---|
| 1589 | "Number frames received of size 512-1023 bytes"); |
---|
| 1590 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_1024to1536b", |
---|
| 1591 | CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0, |
---|
| 1592 | "Number frames received of size 1024-1536 bytes"); |
---|
| 1593 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_undersize", |
---|
| 1594 | CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0, |
---|
| 1595 | "Number undersize frames received"); |
---|
| 1596 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_oversize", |
---|
| 1597 | CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0, |
---|
| 1598 | "Number oversize frames received"); |
---|
| 1599 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_jabber", |
---|
| 1600 | CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0, |
---|
| 1601 | "Number jabber frames received"); |
---|
| 1602 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_fcs_errs", |
---|
| 1603 | CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0, |
---|
| 1604 | "Number frames received with FCS errors"); |
---|
| 1605 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_length_errs", |
---|
| 1606 | CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0, |
---|
| 1607 | "Number frames received with length errors"); |
---|
| 1608 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_symbol_errs", |
---|
| 1609 | CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0, |
---|
| 1610 | "Number receive symbol errors"); |
---|
| 1611 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_align_errs", |
---|
| 1612 | CTLFLAG_RD, &sc->stats.rx_align_errs, 0, |
---|
| 1613 | "Number receive alignment errors"); |
---|
| 1614 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_resource_errs", |
---|
| 1615 | CTLFLAG_RD, &sc->stats.rx_resource_errs, 0, |
---|
| 1616 | "Number frames received when no rx buffer available"); |
---|
| 1617 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overrun_errs", |
---|
| 1618 | CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0, |
---|
| 1619 | "Number frames received but not copied due to " |
---|
| 1620 | "receive overrun"); |
---|
| 1621 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_ip_hdr_csum_errs", |
---|
| 1622 | CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0, |
---|
| 1623 | "Number frames received with IP header checksum " |
---|
| 1624 | "errors"); |
---|
| 1625 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_tcp_csum_errs", |
---|
| 1626 | CTLFLAG_RD, &sc->stats.rx_tcp_csum_errs, 0, |
---|
| 1627 | "Number frames received with TCP checksum errors"); |
---|
| 1628 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_udp_csum_errs", |
---|
| 1629 | CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0, |
---|
| 1630 | "Number frames received with UDP checksum errors"); |
---|
[4ff97c8] | 1631 | #endif /* __rtems__ */ |
---|
[b8e0c66] | 1632 | } |
---|
| 1633 | |
---|
| 1634 | |
---|
| 1635 | static int |
---|
| 1636 | cgem_probe(device_t dev) |
---|
| 1637 | { |
---|
| 1638 | |
---|
[14ecf75d] | 1639 | #ifndef __rtems__ |
---|
[b8e0c66] | 1640 | if (!ofw_bus_is_compatible(dev, "cadence,gem")) |
---|
| 1641 | return (ENXIO); |
---|
[14ecf75d] | 1642 | #endif /* __rtems__ */ |
---|
[b8e0c66] | 1643 | |
---|
| 1644 | device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface"); |
---|
| 1645 | return (0); |
---|
| 1646 | } |
---|
| 1647 | |
---|
/*
 * Attach: bring up one GEM instance.
 *
 * Allocates the register-memory and IRQ bus resources, sets up the
 * ifnet, resets the hardware, attaches the PHY via miibus, allocates
 * the TX/RX descriptor DMA areas, registers with the network stack
 * (ether_ifattach) and finally installs the interrupt handler.
 *
 * Returns 0 on success or an errno.  On any failure after the first
 * resource allocation, cgem_detach() is called to release whatever
 * was already acquired (cgem_detach NULL-checks each resource, so a
 * partial attach is safe to tear down).
 */
static int
cgem_attach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = NULL;
#ifndef __rtems__
	phandle_t node;
	pcell_t cell;
#endif /* __rtems__ */
	int rid, err;
	u_char eaddr[ETHER_ADDR_LEN];

	sc->dev = dev;
	CGEM_LOCK_INIT(sc);

#ifndef __rtems__
	/* Get reference clock number and base divider from fdt. */
	node = ofw_bus_get_node(dev);
	sc->ref_clk_num = 0;
	if (OF_getprop(node, "ref-clock-num", &cell, sizeof(cell)) > 0)
		sc->ref_clk_num = fdt32_to_cpu(cell);
#else /* __rtems__ */
	/* No FDT on RTEMS; use the unit number as the reference clock id. */
	sc->ref_clk_num = device_get_unit(dev);
#endif /* __rtems__ */

	/* Get memory resource. */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		/* Nothing else acquired yet, so no cgem_detach() needed. */
		return (ENOMEM);
	}

	/* Get IRQ resource. */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resource.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Set up ifnet structure. */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not allocate ifnet structure\n");
		cgem_detach(dev);
		return (ENOMEM);
	}
	ifp->if_softc = sc;
	if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = cgem_start;
	ifp->if_ioctl = cgem_ioctl;
	ifp->if_init = cgem_init;
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
	/* Disable hardware checksumming by default. */
	ifp->if_hwassist = 0;
	ifp->if_capenable = ifp->if_capabilities &
	    ~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM);
	ifp->if_snd.ifq_drv_maxlen = CGEM_NUM_TX_DESCS;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	/* Remember initial flags so ioctl can detect flag transitions. */
	sc->if_old_flags = ifp->if_flags;
	sc->rxbufs = DEFAULT_NUM_RX_BUFS;
	/* Enable the receive-hang workaround by default (tunable). */
	sc->rxhangwar = 1;

	/* Reset hardware. */
	CGEM_LOCK(sc);
	cgem_reset(sc);
	CGEM_UNLOCK(sc);

	/* Attach phy to mii bus. */
	err = mii_attach(dev, &sc->miibus, ifp,
	    cgem_ifmedia_upd, cgem_ifmedia_sts,
	    BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (err) {
		device_printf(dev, "attaching PHYs failed\n");
		cgem_detach(dev);
		return (err);
	}

	/* Set up TX and RX descriptor area. */
	err = cgem_setup_descs(sc);
	if (err) {
		device_printf(dev, "could not set up dma mem for descs.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Get a MAC address. */
	cgem_get_mac(sc, eaddr);

	/* Start ticks. */
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	ether_ifattach(ifp, eaddr);

	/*
	 * Install the interrupt handler last so it cannot fire before the
	 * interface is fully registered with the stack.
	 */
	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
	    INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
	if (err) {
		device_printf(dev, "could not set interrupt handler.\n");
		/*
		 * ether_ifdetach() is needed here explicitly: the device is
		 * not marked attached yet, so cgem_detach() will skip it.
		 */
		ether_ifdetach(ifp);
		cgem_detach(dev);
		return (err);
	}

	cgem_add_sysctls(dev);

	return (0);
}
---|
| 1763 | |
---|
/*
 * Detach: tear down one GEM instance.
 *
 * Also used as the common error-unwind path by cgem_attach(): every
 * resource pointer is NULL-checked before release and cleared after,
 * so this function is safe to call on a partially attached device and
 * is idempotent.  Returns 0, or ENODEV if there is no softc.
 */
static int
cgem_detach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int i;

	if (sc == NULL)
		return (ENODEV);

	/*
	 * Only stop the hardware and unhook from the network stack if the
	 * attach fully completed; a failed attach handles these itself.
	 */
	if (device_is_attached(dev)) {
		CGEM_LOCK(sc);
		cgem_stop(sc);
		CGEM_UNLOCK(sc);
		/* Wait for any in-flight tick callout to finish. */
		callout_drain(&sc->tick_ch);
		sc->ifp->if_flags &= ~IFF_UP;
		ether_ifdetach(sc->ifp);
	}

	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}

	/* Release resources. */
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem_res), sc->mem_res);
		sc->mem_res = NULL;
	}
	if (sc->irq_res != NULL) {
		/* Tear down the handler before releasing the IRQ itself. */
		if (sc->intrhand)
			bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res), sc->irq_res);
		sc->irq_res = NULL;
	}

	/* Release DMA resources. */
	if (sc->rxring != NULL) {
		/* A zero physaddr means the map was never loaded. */
		if (sc->rxring_physaddr != 0) {
			bus_dmamap_unload(sc->desc_dma_tag, sc->rxring_dma_map);
			sc->rxring_physaddr = 0;
		}
		bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
		    sc->rxring_dma_map);
		sc->rxring = NULL;
		/* Destroy the per-descriptor mbuf maps. */
		for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
			if (sc->rxring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->rxring_m_dmamap[i]);
				sc->rxring_m_dmamap[i] = NULL;
			}
	}
	if (sc->txring != NULL) {
		if (sc->txring_physaddr != 0) {
			bus_dmamap_unload(sc->desc_dma_tag, sc->txring_dma_map);
			sc->txring_physaddr = 0;
		}
		bus_dmamem_free(sc->desc_dma_tag, sc->txring,
		    sc->txring_dma_map);
		sc->txring = NULL;
		for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
			if (sc->txring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->txring_m_dmamap[i]);
				sc->txring_m_dmamap[i] = NULL;
			}
	}
	/* Destroy tags only after all maps created from them are gone. */
	if (sc->desc_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->desc_dma_tag);
		sc->desc_dma_tag = NULL;
	}
	if (sc->mbuf_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->mbuf_dma_tag);
		sc->mbuf_dma_tag = NULL;
	}

	bus_generic_detach(dev);

	CGEM_LOCK_DESTROY(sc);

	return (0);
}
---|
| 1847 | |
---|
/*
 * Newbus method table: maps the device, bus and miibus interface
 * entry points onto this driver's handlers.
 */
static device_method_t cgem_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cgem_probe),
	DEVMETHOD(device_attach,	cgem_attach),
	DEVMETHOD(device_detach,	cgem_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	cgem_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	cgem_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cgem_miibus_writereg),
	DEVMETHOD(miibus_statchg,	cgem_miibus_statchg),
	DEVMETHOD(miibus_linkchg,	cgem_miibus_linkchg),

	DEVMETHOD_END
};
---|
| 1865 | |
---|
/* Driver declaration: name, method table, and per-instance softc size. */
static driver_t cgem_driver = {
	"cgem",
	cgem_methods,
	sizeof(struct cgem_softc),
};
---|
| 1871 | |
---|
#ifndef __rtems__
/* On FreeBSD the GEM instance is a child of the FDT simplebus. */
DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL);
#else /* __rtems__ */
/* RTEMS has no simplebus; attach directly below the nexus root bus. */
DRIVER_MODULE(cgem, nexus, cgem_driver, cgem_devclass, NULL, NULL);
#endif /* __rtems__ */
/* miibus instances (PHY management) attach as children of cgem. */
DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(cgem, miibus, 1, 1, 1);
MODULE_DEPEND(cgem, ether, 1, 1, 1);
---|