#include <machine/rtems-bsd-kernel-space.h>

#include <rtems/bsd/local/opt_dpaa.h>

/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/io.h>
#ifndef __rtems__
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/icmp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
#include <linux/sort.h>
#else /* __rtems__ */
#include <soc/fsl/dpaa.h>
#endif /* __rtems__ */
#include <soc/fsl/bman.h>
#include <soc/fsl/qman.h>

#include "fman.h"
#include "fman_port.h"
#include "mac.h"
#include "dpaa_eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files
 * using trace events only need to #include <trace/events/sched.h>
 */
#define CREATE_TRACE_POINTS
#include "dpaa_eth_trace.h"

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)");

static u16 tx_timeout = 1000;
module_param(tx_timeout, ushort, 0444);
MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");

#define FM_FD_STAT_RX_ERRORS \
	(FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
	 FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
	 FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \
	 FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
	 FM_FD_ERR_PRS_HDR_ERR)

#define FM_FD_STAT_TX_ERRORS \
	(FM_FD_ERR_UNSUPPORTED_FORMAT | \
	 FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)

#define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
			  NETIF_MSG_LINK | NETIF_MSG_IFUP | \
			  NETIF_MSG_IFDOWN)

#define DPAA_INGRESS_CS_THRESHOLD 0x10000000
/* Ingress congestion threshold on FMan ports
 * The size in bytes of the ingress tail-drop threshold on FMan ports.
 * Traffic piling up above this value will be rejected by QMan and discarded
 * by FMan.
 */

/* Size in bytes of the FQ taildrop threshold */
#define DPAA_FQ_TD 0x200000

#define DPAA_CS_THRESHOLD_1G 0x06000000
/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
 * The size in bytes of the egress Congestion State notification threshold on
 * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
 * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
 * and the larger the frame size, the more acute the problem.
 * So we have to find a balance between these factors:
 * - avoiding the device staying congested for a prolonged time (risking
 *   that the netdev watchdog fires - see also the tx_timeout module param);
 * - affecting performance of protocols such as TCP, which otherwise
 *   behave well under the congestion notification mechanism;
 * - preventing the Tx cores from tightly-looping (as if the congestion
 *   threshold was too low to be effective);
 * - running out of memory if the CS threshold is set too high.
 */

#define DPAA_CS_THRESHOLD_10G 0x10000000
/* The size in bytes of the egress Congestion State notification threshold on
 * 10G ports, range 0x1000 .. 0x10000000
 */

/* Largest value that the FQD's OAL field can hold */
#define FSL_QMAN_MAX_OAL 127

/* Default alignment for start of data in an Rx FD */
#define DPAA_FD_DATA_ALIGNMENT 16

/* Values for the L3R field of the FM Parse Results
 */
/* L3 Type field: First IP Present IPv4 */
#define FM_L3_PARSE_RESULT_IPV4 0x8000
/* L3 Type field: First IP Present IPv6 */
#define FM_L3_PARSE_RESULT_IPV6 0x4000
/* Values for the L4R field of the FM Parse Results */
/* L4 Type field: UDP */
#define FM_L4_PARSE_RESULT_UDP 0x40
/* L4 Type field: TCP */
#define FM_L4_PARSE_RESULT_TCP 0x20

/* FD status field indicating whether the FM Parser has attempted to validate
 * the L4 csum of the frame.
 * Note that having this bit set doesn't necessarily imply that the checksum
 * is valid. One would have to check the parse results to find that out.
 */
#define FM_FD_STAT_L4CV 0x00000004

#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */

#define FSL_DPAA_BPID_INV 0xff
#define FSL_DPAA_ETH_MAX_BUF_COUNT 128
#define FSL_DPAA_ETH_REFILL_THRESHOLD 80

#define DPAA_TX_PRIV_DATA_SIZE 16
#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
#define DPAA_TIME_STAMP_SIZE 8
#define DPAA_HASH_RESULTS_SIZE 8
#define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \
				     dpaa_rx_extra_headroom)

#define DPAA_ETH_RX_QUEUES 128

#define DPAA_ENQUEUE_RETRIES 100000

enum port_type {RX, TX};

struct fm_port_fqs {
	struct dpaa_fq *tx_defq;
	struct dpaa_fq *tx_errq;
	struct dpaa_fq *rx_defq;
	struct dpaa_fq *rx_errq;
};

/* All the dpa bps in use at any moment */
static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];

/* The raw buffer size must be cacheline aligned */
#ifndef __rtems__
#define DPAA_BP_RAW_SIZE 4096
#else /* __rtems__ */
/*
 * FIXME: Support multiple buffer pools.
 */
#define DPAA_BP_RAW_SIZE 2048

/*
 * FIXME: 4 bytes would be enough for the mbuf pointer. However, jumbo receive
 * frames overwrite this area if < 64 bytes.
 */
#define DPAA_OUT_OF_BAND_SIZE 64

#define DPAA_MBUF_POINTER_OFFSET (DPAA_BP_RAW_SIZE - DPAA_OUT_OF_BAND_SIZE)
#endif /* __rtems__ */
/* When using more than one buffer pool, the raw sizes are as follows:
 * 1 bp: 4KB
 * 2 bp: 2KB, 4KB
 * 3 bp: 1KB, 2KB, 4KB
 * 4 bp: 1KB, 2KB, 4KB, 8KB
 */
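/* Worked example of bpool_buffer_raw_size() below: with cnt = 1 and index = 0
 * the loop runs for i = 1, 2, so the result is (4096 / 4) * 2 * 2 = 4096 bytes
 * ("1 bp: 4KB" above); with cnt = 3 the pools come out as 1KB, 2KB and 4KB by
 * the same doubling rule.
 */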
static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
{
	size_t res = DPAA_BP_RAW_SIZE / 4;
	u8 i;

	for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
		res *= 2;
	return res;
}

/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
 * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
 * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
 * half-page-aligned buffers, so we reserve some more space for start-of-buffer
 * alignment.
 */
#ifndef __rtems__
#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
#else /* __rtems__ */
#define dpaa_bp_size(raw_size) DPAA_MBUF_POINTER_OFFSET
#endif /* __rtems__ */

#ifndef __rtems__
static int dpaa_max_frm;
#endif /* __rtems__ */

#ifndef __rtems__
static int dpaa_rx_extra_headroom;
#else /* __rtems__ */
#define dpaa_rx_extra_headroom fman_get_rx_extra_headroom()
#endif /* __rtems__ */

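/* For instance, with a typical 1522-byte maximum frame length the MTU comes
 * out as 1522 - (VLAN_ETH_HLEN + ETH_FCS_LEN) = 1522 - 22 = 1500.
 */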
#define dpaa_get_max_mtu()	\
	(dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))

#ifndef __rtems__
static int dpaa_netdev_init(struct net_device *net_dev,
			    const struct net_device_ops *dpaa_ops,
			    u16 tx_timeout)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	struct dpaa_percpu_priv *percpu_priv;
	const u8 *mac_addr;
	int i, err;

	/* Although we access another CPU's private data here
	 * we do it at initialization so it is safe
	 */
	for_each_possible_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		percpu_priv->net_dev = net_dev;
	}

	net_dev->netdev_ops = dpaa_ops;
	mac_addr = priv->mac_dev->addr;

	net_dev->mem_start = priv->mac_dev->res->start;
	net_dev->mem_end = priv->mac_dev->res->end;

	net_dev->min_mtu = ETH_MIN_MTU;
	net_dev->max_mtu = dpaa_get_max_mtu();

	net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				 NETIF_F_LLTX);

	net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
	/* The kernel enables GSO automatically if we declare NETIF_F_SG.
	 * For conformity, we'll still declare GSO explicitly.
	 */
	net_dev->features |= NETIF_F_GSO;
	net_dev->features |= NETIF_F_RXCSUM;

	net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	/* we do not want shared skbs on TX */
	net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;

	net_dev->features |= net_dev->hw_features;
	net_dev->vlan_features = net_dev->features;

	memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
	memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);

	net_dev->ethtool_ops = &dpaa_ethtool_ops;

	net_dev->needed_headroom = priv->tx_headroom;
	net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);

	/* start without the RUNNING flag, phylib controls it later */
	netif_carrier_off(net_dev);

	err = register_netdev(net_dev);
	if (err < 0) {
		dev_err(dev, "register_netdev() = %d\n", err);
		return err;
	}

	return 0;
}
#endif /* __rtems__ */

static int dpaa_stop(struct net_device *net_dev)
{
	struct mac_device *mac_dev;
	struct dpaa_priv *priv;
	int i, err, error;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

#ifndef __rtems__
	netif_tx_stop_all_queues(net_dev);
#endif /* __rtems__ */
	/* Allow the Fman (Tx) port to process in-flight frames before we
	 * try switching it off.
	 */
	usleep_range(5000, 10000);

	err = mac_dev->stop(mac_dev);
	if (err < 0)
		netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
			  err);

	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
		error = fman_port_disable(mac_dev->port[i]);
		if (error)
			err = error;
	}

#ifndef __rtems__
	if (net_dev->phydev)
		phy_disconnect(net_dev->phydev);
	net_dev->phydev = NULL;
#endif /* __rtems__ */

	return err;
}

#ifndef __rtems__
static void dpaa_tx_timeout(struct net_device *net_dev)
{
	struct dpaa_percpu_priv *percpu_priv;
	const struct dpaa_priv *priv;

	priv = netdev_priv(net_dev);
	percpu_priv = this_cpu_ptr(priv->percpu_priv);

	netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
		   jiffies_to_msecs(jiffies - dev_trans_start(net_dev)));

	percpu_priv->stats.tx_errors++;
}

/* Calculates the statistics for the given device by adding the statistics
 * collected by each CPU.
 */
static void dpaa_get_stats64(struct net_device *net_dev,
			     struct rtnl_link_stats64 *s)
{
	int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct dpaa_percpu_priv *percpu_priv;
	u64 *netstats = (u64 *)s;
	u64 *cpustats;
	int i, j;

	for_each_possible_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

		cpustats = (u64 *)&percpu_priv->stats;

		/* add stats from all CPUs */
		for (j = 0; j < numstats; j++)
			netstats[j] += cpustats[j];
	}
}

static int dpaa_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
			 struct tc_to_netdev *tc)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	u8 num_tc;
	int i;

	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_tc = tc->mqprio->num_tc;

	if (num_tc == priv->num_tc)
		return 0;

	if (!num_tc) {
		netdev_reset_tc(net_dev);
		goto out;
	}

	if (num_tc > DPAA_TC_NUM) {
		netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
			   DPAA_TC_NUM);
		return -EINVAL;
	}

	netdev_set_num_tc(net_dev, num_tc);

	for (i = 0; i < num_tc; i++)
		netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
				    i * DPAA_TC_TXQ_NUM);

out:
	priv->num_tc = num_tc ? : 1;
	netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
	return 0;
}

static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
{
	struct platform_device *of_dev;
	struct dpaa_eth_data *eth_data;
	struct device *dpaa_dev, *dev;
	struct device_node *mac_node;
	struct mac_device *mac_dev;

	dpaa_dev = &pdev->dev;
	eth_data = dpaa_dev->platform_data;
	if (!eth_data)
		return ERR_PTR(-ENODEV);

	mac_node = eth_data->mac_node;

	of_dev = of_find_device_by_node(mac_node);
	if (!of_dev) {
		dev_err(dpaa_dev, "of_find_device_by_node(%s) failed\n",
			mac_node->full_name);
		of_node_put(mac_node);
		return ERR_PTR(-EINVAL);
	}
	of_node_put(mac_node);

	dev = &of_dev->dev;

	mac_dev = dev_get_drvdata(dev);
	if (!mac_dev) {
		dev_err(dpaa_dev, "dev_get_drvdata(%s) failed\n",
			dev_name(dev));
		return ERR_PTR(-EINVAL);
	}

	return mac_dev;
}

static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
{
	const struct dpaa_priv *priv;
	struct mac_device *mac_dev;
	struct sockaddr old_addr;
	int err;

	priv = netdev_priv(net_dev);

	memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN);

	err = eth_mac_addr(net_dev, addr);
	if (err < 0) {
		netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
		return err;
	}

	mac_dev = priv->mac_dev;

	err = mac_dev->change_addr(mac_dev->fman_mac,
				   (enet_addr_t *)net_dev->dev_addr);
	if (err < 0) {
		netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
			  err);
		/* reverting to previous address */
		eth_mac_addr(net_dev, &old_addr);

		return err;
	}

	return 0;
}

static void dpaa_set_rx_mode(struct net_device *net_dev)
{
	const struct dpaa_priv *priv;
	int err;

	priv = netdev_priv(net_dev);

	if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
		priv->mac_dev->promisc = !priv->mac_dev->promisc;
		err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
						 priv->mac_dev->promisc);
		if (err < 0)
			netif_err(priv, drv, net_dev,
				  "mac_dev->set_promisc() = %d\n",
				  err);
	}

	err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
	if (err < 0)
		netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
			  err);
}
#endif /* __rtems__ */

static struct dpaa_bp *dpaa_bpid2pool(int bpid)
{
	if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS))
		return NULL;

	return dpaa_bp_array[bpid];
}

/* checks if this bpool is already allocated */
static bool dpaa_bpid2pool_use(int bpid)
{
	if (dpaa_bpid2pool(bpid)) {
		atomic_inc(&dpaa_bp_array[bpid]->refs);
		return true;
	}

	return false;
}

/* called only once per bpid by dpaa_bp_alloc_pool() */
static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
{
	dpaa_bp_array[bpid] = dpaa_bp;
	atomic_set(&dpaa_bp->refs, 1);
}

static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
{
	int err;

	if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) {
		pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n",
		       __func__);
		return -EINVAL;
	}

	/* If the pool is already specified, we only create one per bpid */
	if (dpaa_bp->bpid != FSL_DPAA_BPID_INV &&
	    dpaa_bpid2pool_use(dpaa_bp->bpid))
		return 0;

	if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) {
		dpaa_bp->pool = bman_new_pool();
		if (!dpaa_bp->pool) {
			pr_err("%s: bman_new_pool() failed\n",
			       __func__);
			return -ENODEV;
		}

		dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool);
	}

	if (dpaa_bp->seed_cb) {
		err = dpaa_bp->seed_cb(dpaa_bp);
		if (err)
			goto pool_seed_failed;
	}

	dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp);

	return 0;

pool_seed_failed:
	pr_err("%s: pool seeding failed\n", __func__);
	bman_free_pool(dpaa_bp->pool);

	return err;
}

/* remove and free all the buffers from the given buffer pool */
static void dpaa_bp_drain(struct dpaa_bp *bp)
{
	u8 num = 8;
	int ret;

	do {
		struct bm_buffer bmb[8];
		int i;

		ret = bman_acquire(bp->pool, bmb, num);
		if (ret < 0) {
			if (num == 8) {
				/* we have less than 8 buffers left;
				 * drain them one by one
				 */
				num = 1;
				ret = 1;
				continue;
			} else {
				/* Pool is fully drained */
				break;
			}
		}

		if (bp->free_buf_cb)
			for (i = 0; i < num; i++)
				bp->free_buf_cb(bp, &bmb[i]);
	} while (ret > 0);
}

static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
{
	struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid);

	/* the mapping between bpid and dpaa_bp is done very late in the
	 * allocation procedure; if something failed before the mapping, the bp
	 * was not configured, therefore we don't need the below instructions
	 */
	if (!bp)
		return;

	if (!atomic_dec_and_test(&bp->refs))
		return;

	if (bp->free_buf_cb)
		dpaa_bp_drain(bp);

	dpaa_bp_array[bp->bpid] = NULL;
	bman_free_pool(bp->pool);
}

static void dpaa_bps_free(struct dpaa_priv *priv)
{
	int i;

	for (i = 0; i < DPAA_BPS_NUM; i++)
		dpaa_bp_free(priv->dpaa_bps[i]);
}

/* Use multiple WQs for FQ assignment:
 * - Tx Confirmation queues go to WQ1.
 * - Rx Error and Tx Error queues go to WQ5 (giving them a better chance
 *   to be scheduled, in case there are many more FQs in WQ6).
 * - Rx Default goes to WQ6.
 * - Tx queues go to different WQs depending on their priority. Equal
 *   chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
 *   WQ0 (highest priority).
 * This ensures that Tx-confirmed buffers are timely released. In particular,
 * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
 * are greatly outnumbered by other FQs in the system, while
 * dequeue scheduling is round-robin.
 */
static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
{
	switch (fq->fq_type) {
	case FQ_TYPE_TX_CONFIRM:
	case FQ_TYPE_TX_CONF_MQ:
		fq->wq = 1;
		break;
	case FQ_TYPE_RX_ERROR:
	case FQ_TYPE_TX_ERROR:
		fq->wq = 5;
		break;
	case FQ_TYPE_RX_DEFAULT:
		fq->wq = 6;
		break;
	case FQ_TYPE_TX:
		switch (idx / DPAA_TC_TXQ_NUM) {
		case 0:
			/* Low priority (best effort) */
			fq->wq = 6;
			break;
		case 1:
			/* Medium priority */
			fq->wq = 2;
			break;
		case 2:
			/* High priority */
			fq->wq = 1;
			break;
		case 3:
			/* Very high priority */
			fq->wq = 0;
			break;
		default:
			WARN(1, "Too many TX FQs: more than %d!\n",
			     DPAA_ETH_TXQ_NUM);
		}
		break;
#ifdef __rtems__
	case FQ_TYPE_RX_PCD:
		fq->wq = 5;
		break;
#endif /* __rtems__ */
	default:
		WARN(1, "Invalid FQ type %d for FQID %d!\n",
		     fq->fq_type, fq->fqid);
	}
}

static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
				     u32 start, u32 count,
				     struct list_head *list,
				     enum dpaa_fq_type fq_type)
{
	struct dpaa_fq *dpaa_fq;
	int i;

	dpaa_fq = devm_kzalloc(dev, sizeof(*dpaa_fq) * count,
			       GFP_KERNEL);
	if (!dpaa_fq)
		return NULL;

	for (i = 0; i < count; i++) {
		dpaa_fq[i].fq_type = fq_type;
		dpaa_fq[i].fqid = start ? start + i : 0;
		list_add_tail(&dpaa_fq[i].list, list);
	}

	for (i = 0; i < count; i++)
		dpaa_assign_wq(dpaa_fq + i, i);

	return dpaa_fq;
}

static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
			      struct fm_port_fqs *port_fqs)
{
	struct dpaa_fq *dpaa_fq;

	dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
	if (!dpaa_fq)
		goto fq_alloc_failed;

	port_fqs->rx_errq = &dpaa_fq[0];

	dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT);
	if (!dpaa_fq)
		goto fq_alloc_failed;

	port_fqs->rx_defq = &dpaa_fq[0];

	if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
		goto fq_alloc_failed;

	dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
	if (!dpaa_fq)
		goto fq_alloc_failed;

	port_fqs->tx_errq = &dpaa_fq[0];

	dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM);
	if (!dpaa_fq)
		goto fq_alloc_failed;

	port_fqs->tx_defq = &dpaa_fq[0];

	if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
		goto fq_alloc_failed;

	return 0;

fq_alloc_failed:
	dev_err(dev, "dpaa_fq_alloc() failed\n");
	return -ENOMEM;
}

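/* A single QMan pool channel is allocated lazily below and is then shared by
 * all DPAA interfaces for their ingress FQs.
 */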
static u32 rx_pool_channel;
static DEFINE_SPINLOCK(rx_pool_channel_init);

static int dpaa_get_channel(void)
{
	spin_lock(&rx_pool_channel_init);
	if (!rx_pool_channel) {
		u32 pool;
		int ret;

		ret = qman_alloc_pool(&pool);

		if (!ret)
			rx_pool_channel = pool;
	}
	spin_unlock(&rx_pool_channel_init);
	if (!rx_pool_channel)
		return -ENOMEM;
	return rx_pool_channel;
}

#ifndef __rtems__
static void dpaa_release_channel(void)
{
	qman_release_pool(rx_pool_channel);
}
#endif /* __rtems__ */

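/* Subscribe every affine (per-CPU) QMan portal to the pool channel that
 * carries this interface's ingress FQs, so that any core may dequeue Rx
 * traffic.
 */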
static void dpaa_eth_add_channel(u16 channel)
{
	u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
#ifndef __rtems__
	const cpumask_t *cpus = qman_affine_cpus();
#endif /* __rtems__ */
	struct qman_portal *portal;
	int cpu;

	for_each_cpu(cpu, cpus) {
		portal = qman_get_affine_portal(cpu);
		qman_p_static_dequeue_add(portal, pool);
	}
}

/* Congestion group state change notification callback.
 * Stops the device's egress queues while they are congested and
 * wakes them upon exiting congested state.
 * Also updates some CGR-related stats.
 */
static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
			   int congested)
{
	struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr,
		struct dpaa_priv, cgr_data.cgr);

	if (congested) {
		priv->cgr_data.congestion_start_jiffies = jiffies;
#ifndef __rtems__
		netif_tx_stop_all_queues(priv->net_dev);
#endif /* __rtems__ */
		priv->cgr_data.cgr_congested_count++;
	} else {
		priv->cgr_data.congested_jiffies +=
			(jiffies - priv->cgr_data.congestion_start_jiffies);
#ifndef __rtems__
		netif_tx_wake_all_queues(priv->net_dev);
#endif /* __rtems__ */
	}
}

static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
{
	struct qm_mcc_initcgr initcgr;
	u32 cs_th;
	int err;

	err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
	if (err < 0) {
		if (netif_msg_drv(priv))
			pr_err("%s: Error %d allocating CGR ID\n",
			       __func__, err);
		goto out_error;
	}
	priv->cgr_data.cgr.cb = dpaa_eth_cgscn;

	/* Enable Congestion State Change Notifications and CS taildrop */
	memset(&initcgr, 0, sizeof(initcgr));
	initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
	initcgr.cgr.cscn_en = QM_CGR_EN;

	/* Set different thresholds based on the MAC speed.
	 * This may turn out to be suboptimal if the MAC is reconfigured at a
	 * speed lower than its max, e.g. if a dTSEC later negotiates a
	 * 100Mbps link. In such cases, we ought to reconfigure the
	 * threshold, too.
	 */
	if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
		cs_th = DPAA_CS_THRESHOLD_10G;
	else
		cs_th = DPAA_CS_THRESHOLD_1G;
	qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);

	initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
	initcgr.cgr.cstd_en = QM_CGR_EN;

	err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
			      &initcgr);
	if (err < 0) {
		if (netif_msg_drv(priv))
			pr_err("%s: Error %d creating CGR with ID %d\n",
			       __func__, err, priv->cgr_data.cgr.cgrid);
		qman_release_cgrid(priv->cgr_data.cgr.cgrid);
		goto out_error;
	}
	if (netif_msg_drv(priv))
		pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
			 priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
			 priv->cgr_data.cgr.chan);

out_error:
	return err;
}

static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
				      struct dpaa_fq *fq,
				      const struct qman_fq *template)
{
	fq->fq_base = *template;
	fq->net_dev = priv->net_dev;

	fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
	fq->channel = priv->channel;
}

static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
				     struct dpaa_fq *fq,
				     struct fman_port *port,
				     const struct qman_fq *template)
{
	fq->fq_base = *template;
	fq->net_dev = priv->net_dev;

	if (port) {
		fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
		fq->channel = (u16)fman_port_get_qman_channel_id(port);
	} else {
		fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
	}
}

static void dpaa_fq_setup(struct dpaa_priv *priv,
			  const struct dpaa_fq_cbs *fq_cbs,
			  struct fman_port *tx_port)
{
#ifndef __rtems__
	int egress_cnt = 0, conf_cnt = 0, num_portals = 0, cpu;
	const cpumask_t *affine_cpus = qman_affine_cpus();
	u16 portals[NR_CPUS];
#else /* __rtems__ */
	int egress_cnt = 0, conf_cnt = 0;
	struct qman_portal *p;
	int cpu;
#endif /* __rtems__ */
	struct dpaa_fq *fq;

#ifndef __rtems__
	for_each_cpu(cpu, affine_cpus)
		portals[num_portals++] = qman_affine_channel(cpu);
	if (num_portals == 0)
		dev_err(priv->net_dev->dev.parent,
			"No Qman software (affine) channels found");
#else /* __rtems__ */
	cpu = 0;
#endif /* __rtems__ */

	/* Initialize each FQ in the list */
	list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
		switch (fq->fq_type) {
		case FQ_TYPE_RX_DEFAULT:
			dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
			break;
		case FQ_TYPE_RX_ERROR:
			dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
			break;
#ifdef __rtems__
		case FQ_TYPE_RX_PCD:
			/* For MACless we can't have dynamic Rx queues */
			BUG_ON(priv->mac_dev != NULL || fq->fqid == 0);
			dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
			p = qman_get_affine_portal(cpu);
			fq->channel = qman_portal_get_channel(p);
			cpu = (cpu + 1) % (int)rtems_get_processor_count();
			break;
#endif /* __rtems__ */
		case FQ_TYPE_TX:
			dpaa_setup_egress(priv, fq, tx_port,
					  &fq_cbs->egress_ern);
			/* If we have more Tx queues than the number of cores,
			 * just ignore the extra ones.
			 */
			if (egress_cnt < DPAA_ETH_TXQ_NUM)
				priv->egress_fqs[egress_cnt++] = &fq->fq_base;
			break;
		case FQ_TYPE_TX_CONF_MQ:
			priv->conf_fqs[conf_cnt++] = &fq->fq_base;
			/* fall through */
		case FQ_TYPE_TX_CONFIRM:
			dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
			break;
		case FQ_TYPE_TX_ERROR:
			dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
			break;
		default:
#ifndef __rtems__
			dev_warn(priv->net_dev->dev.parent,
				 "Unknown FQ type detected!\n");
#else /* __rtems__ */
			BSD_ASSERT(0);
#endif /* __rtems__ */
			break;
		}
	}

	/* Make sure all CPUs receive a corresponding Tx queue. */
	while (egress_cnt < DPAA_ETH_TXQ_NUM) {
		list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
			if (fq->fq_type != FQ_TYPE_TX)
				continue;
			priv->egress_fqs[egress_cnt++] = &fq->fq_base;
			if (egress_cnt == DPAA_ETH_TXQ_NUM)
				break;
		}
	}
}

static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
				   struct qman_fq *tx_fq)
{
	int i;

	for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
		if (priv->egress_fqs[i] == tx_fq)
			return i;

	return -EINVAL;
}

static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
{
	const struct dpaa_priv *priv;
	struct qman_fq *confq = NULL;
	struct qm_mcc_initfq initfq;
#ifndef __rtems__
	struct device *dev;
#endif /* __rtems__ */
	struct qman_fq *fq;
	int queue_id;
	int err;

	priv = netdev_priv(dpaa_fq->net_dev);
#ifndef __rtems__
	dev = dpaa_fq->net_dev->dev.parent;
#endif /* __rtems__ */

	if (dpaa_fq->fqid == 0)
		dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;

	dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);

	err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base);
	if (err) {
#ifndef __rtems__
		dev_err(dev, "qman_create_fq() failed\n");
#else /* __rtems__ */
		BSD_ASSERT(0);
#endif /* __rtems__ */
		return err;
	}
	fq = &dpaa_fq->fq_base;

	if (dpaa_fq->init) {
		memset(&initfq, 0, sizeof(initfq));

		initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
		/* Note: we may get to keep an empty FQ in cache */
		initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);

		/* Try to reduce the number of portal interrupts for
		 * Tx Confirmation FQs.
		 */
		if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
			initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK);

		/* FQ placement */
		initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);

		qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);

		/* Put all egress queues in a congestion group of their own.
		 * Sensu stricto, the Tx confirmation queues are Rx FQs,
		 * rather than Tx - but they nonetheless account for the
		 * memory footprint on behalf of egress traffic. We therefore
		 * place them in the netdev's CGR, along with the Tx FQs.
		 */
		if (dpaa_fq->fq_type == FQ_TYPE_TX ||
		    dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
		    dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
			initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
			initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
			initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
			/* Set a fixed overhead accounting, in an attempt to
			 * reduce the impact of fixed-size skb shells and the
			 * driver's needed headroom on system memory. This is
			 * especially the case when the egress traffic is
			 * composed of small datagrams.
			 * Unfortunately, QMan's OAL value is capped to an
			 * insufficient value, but even that is better than
			 * no overhead accounting at all.
			 */
			initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
			qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
			qm_fqd_set_oal(&initfq.fqd,
#ifndef __rtems__
				       min(sizeof(struct sk_buff) +
#else /* __rtems__ */
				       min(
#endif /* __rtems__ */
				       priv->tx_headroom,
				       (size_t)FSL_QMAN_MAX_OAL));
		}

		if (td_enable) {
			initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
			qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
			initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
		}

		if (dpaa_fq->fq_type == FQ_TYPE_TX) {
			queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base);
			if (queue_id >= 0)
				confq = priv->conf_fqs[queue_id];
			if (confq) {
				initfq.we_mask |=
					cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
				/* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
				 *	     A2V=1 (contextA A2 field is valid)
				 *	     A0V=1 (contextA A0 field is valid)
				 *	     B0V=1 (contextB field is valid)
				 * ContextA A2: EBD=1 (deallocate buffers inside FMan)
				 * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
				 */
				qm_fqd_context_a_set64(&initfq.fqd,
						       0x1e00000080000000ULL);
			}
		}

		/* Put all the ingress queues in our "ingress CGR". */
		if (priv->use_ingress_cgr &&
		    (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
#ifdef __rtems__
		     dpaa_fq->fq_type == FQ_TYPE_RX_PCD ||
#endif /* __rtems__ */
		     dpaa_fq->fq_type == FQ_TYPE_RX_ERROR)) {
			initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
			initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
			initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
			/* Set a fixed overhead accounting, just like for the
			 * egress CGR.
			 */
			initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
			qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
			qm_fqd_set_oal(&initfq.fqd,
#ifndef __rtems__
				       min(sizeof(struct sk_buff) +
#else /* __rtems__ */
				       min(
#endif /* __rtems__ */
				       priv->tx_headroom,
				       (size_t)FSL_QMAN_MAX_OAL));
		}

		/* Initialization common to all ingress queues */
		if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
			initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
			initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
						QM_FQCTRL_CTXASTASHING);
			initfq.fqd.context_a.stashing.exclusive =
				QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
				QM_STASHING_EXCL_ANNOTATION;
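			/* Stash one cache line of annotation and two of frame
			 * data, plus enough context cache lines to cover the
			 * qman_fq structure.
			 */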
			qm_fqd_set_stashing(&initfq.fqd, 1, 2,
					    DIV_ROUND_UP(sizeof(struct qman_fq),
							 64));
		}

		err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
		if (err < 0) {
#ifndef __rtems__
			dev_err(dev, "qman_init_fq(%u) = %d\n",
				qman_fq_fqid(fq), err);
#else /* __rtems__ */
			BSD_ASSERT(0);
#endif /* __rtems__ */
			qman_destroy_fq(fq);
			return err;
		}
	}

	dpaa_fq->fqid = qman_fq_fqid(fq);

	return 0;
}

#ifndef __rtems__
static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
{
#ifndef __rtems__
	const struct dpaa_priv *priv;
#endif /* __rtems__ */
	struct dpaa_fq *dpaa_fq;
	int err, error;

	err = 0;

	dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
#ifndef __rtems__
	priv = netdev_priv(dpaa_fq->net_dev);
#endif /* __rtems__ */

	if (dpaa_fq->init) {
		err = qman_retire_fq(fq, NULL);
		if (err < 0 && netif_msg_drv(priv))
			dev_err(dev, "qman_retire_fq(%u) = %d\n",
				qman_fq_fqid(fq), err);

		error = qman_oos_fq(fq);
		if (error < 0 && netif_msg_drv(priv)) {
			dev_err(dev, "qman_oos_fq(%u) = %d\n",
				qman_fq_fqid(fq), error);
			if (err >= 0)
				err = error;
		}
	}

	qman_destroy_fq(fq);
	list_del(&dpaa_fq->list);

	return err;
}

static int dpaa_fq_free(struct device *dev, struct list_head *list)
{
	struct dpaa_fq *dpaa_fq, *tmp;
	int err, error;

	err = 0;
	list_for_each_entry_safe(dpaa_fq, tmp, list, list) {
		error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq);
		if (error < 0 && err >= 0)
			err = error;
	}

	return err;
}
#endif /* __rtems__ */

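/* Configure and initialize the Tx FMan port: program the buffer prefix
 * layout and the default/error FQ IDs, then bring the port up.
 */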
static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
				 struct dpaa_fq *defq,
				 struct dpaa_buffer_layout *buf_layout)
{
	struct fman_buffer_prefix_content buf_prefix_content;
	struct fman_port_params params;
	int err;

	memset(&params, 0, sizeof(params));
	memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));

	buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
	buf_prefix_content.pass_prs_result = true;
	buf_prefix_content.pass_hash_result = true;
	buf_prefix_content.pass_time_stamp = false;
	buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;

	params.specific_params.non_rx_params.err_fqid = errq->fqid;
	params.specific_params.non_rx_params.dflt_fqid = defq->fqid;

	err = fman_port_config(port, &params);
	if (err) {
		pr_err("%s: fman_port_config failed\n", __func__);
		return err;
	}

	err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
	if (err) {
		pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
		       __func__);
		return err;
	}

	err = fman_port_init(port);
	if (err)
		pr_err("%s: fm_port_init failed\n", __func__);

	return err;
}

static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
				 size_t count, struct dpaa_fq *errq,
				 struct dpaa_fq *defq,
				 struct dpaa_buffer_layout *buf_layout)
{
	struct fman_buffer_prefix_content buf_prefix_content;
	struct fman_port_rx_params *rx_p;
	struct fman_port_params params;
	int i, err;

	memset(&params, 0, sizeof(params));
	memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));

	buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
	buf_prefix_content.pass_prs_result = true;
	buf_prefix_content.pass_hash_result = true;
	buf_prefix_content.pass_time_stamp = false;
	buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;

	rx_p = &params.specific_params.rx_params;
	rx_p->err_fqid = errq->fqid;
	rx_p->dflt_fqid = defq->fqid;

	count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
	rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
	for (i = 0; i < count; i++) {
		rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid;
		rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
	}

	err = fman_port_config(port, &params);
	if (err) {
		pr_err("%s: fman_port_config failed\n", __func__);
		return err;
	}

	err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
	if (err) {
		pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
		       __func__);
		return err;
	}

	err = fman_port_init(port);
	if (err)
		pr_err("%s: fm_port_init failed\n", __func__);

	return err;
}

static int dpaa_eth_init_ports(struct mac_device *mac_dev,
			       struct dpaa_bp **bps, size_t count,
			       struct fm_port_fqs *port_fqs,
			       struct dpaa_buffer_layout *buf_layout,
			       struct device *dev)
{
	struct fman_port *rxport = mac_dev->port[RX];
	struct fman_port *txport = mac_dev->port[TX];
	int err;

	err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
				    port_fqs->tx_defq, &buf_layout[TX]);
	if (err)
		return err;

	err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
				    port_fqs->rx_defq, &buf_layout[RX]);

	return err;
}

static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
			     struct bm_buffer *bmb, int cnt)
{
	int err;

	err = bman_release(dpaa_bp->pool, bmb, cnt);
	/* Should never occur, address anyway to avoid leaking the buffers */
	if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb)
		while (cnt-- > 0)
			dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);

	return cnt;
}

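/* Release all buffers referenced by a scatter/gather table back to their BMan
 * pools, batching up to DPAA_BUFF_RELEASE_MAX buffers per bman_release() call
 * as long as consecutive entries share the same bpid.
 */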
static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
{
	struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX];
	struct dpaa_bp *dpaa_bp;
	int i = 0, j;

	memset(bmb, 0, sizeof(bmb));

	do {
		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
		if (!dpaa_bp)
			return;

		j = 0;
		do {
			WARN_ON(qm_sg_entry_is_ext(&sgt[i]));

			bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i]));

			j++; i++;
		} while (j < ARRAY_SIZE(bmb) &&
			 !qm_sg_entry_is_final(&sgt[i - 1]) &&
			 sgt[i - 1].bpid == sgt[i].bpid);

		dpaa_bman_release(dpaa_bp, bmb, j);
	} while (!qm_sg_entry_is_final(&sgt[i - 1]));
}

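/* Return the buffer(s) behind a frame descriptor to BMan: for S/G frames the
 * member buffers are released first, then the buffer that holds the S/G table
 * itself.
 */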
static void dpaa_fd_release(const struct net_device *net_dev,
			    const struct qm_fd *fd)
{
	struct qm_sg_entry *sgt;
	struct dpaa_bp *dpaa_bp;
	struct bm_buffer bmb;
	dma_addr_t addr;
	void *vaddr;

	bmb.data = 0;
	bm_buffer_set64(&bmb, qm_fd_addr(fd));

	dpaa_bp = dpaa_bpid2pool(fd->bpid);
	if (!dpaa_bp)
		return;

	if (qm_fd_get_format(fd) == qm_fd_sg) {
		vaddr = phys_to_virt(qm_fd_addr(fd));
		sgt = vaddr + qm_fd_get_offset(fd);

#ifndef __rtems__
		dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
				 DMA_FROM_DEVICE);
#endif /* __rtems__ */

		dpaa_release_sgt_members(sgt);

#ifndef __rtems__
		addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
				      DMA_FROM_DEVICE);
		if (dma_mapping_error(dpaa_bp->dev, addr)) {
			dev_err(dpaa_bp->dev, "DMA mapping failed");
			return;
		}
#else /* __rtems__ */
		addr = (dma_addr_t)vaddr;
#endif /* __rtems__ */
		bm_buffer_set64(&bmb, addr);
	}

	dpaa_bman_release(dpaa_bp, &bmb, 1);
}

static void count_ern(struct dpaa_percpu_priv *percpu_priv,
		      const union qm_mr_entry *msg)
{
	switch (msg->ern.rc & QM_MR_RC_MASK) {
	case QM_MR_RC_CGR_TAILDROP:
		percpu_priv->ern_cnt.cg_tdrop++;
		break;
	case QM_MR_RC_WRED:
		percpu_priv->ern_cnt.wred++;
		break;
	case QM_MR_RC_ERROR:
		percpu_priv->ern_cnt.err_cond++;
		break;
	case QM_MR_RC_ORPWINDOW_EARLY:
		percpu_priv->ern_cnt.early_window++;
		break;
	case QM_MR_RC_ORPWINDOW_LATE:
		percpu_priv->ern_cnt.late_window++;
		break;
	case QM_MR_RC_FQ_TAILDROP:
		percpu_priv->ern_cnt.fq_tdrop++;
		break;
	case QM_MR_RC_ORPWINDOW_RETIRED:
		percpu_priv->ern_cnt.fq_retired++;
		break;
	case QM_MR_RC_ORP_ZERO:
		percpu_priv->ern_cnt.orp_zero++;
		break;
	}
}

#ifndef __rtems__
/* Turn on HW checksum computation for this outgoing frame.
 * If the current protocol is not something we support in this regard
 * (or if the stack has already computed the SW checksum), we do nothing.
 *
 * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
 * otherwise.
 *
 * Note that this function may modify the fd->cmd field and the skb data buffer
 * (the Parse Results area).
 */
static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
			       struct sk_buff *skb,
			       struct qm_fd *fd,
			       char *parse_results)
{
	struct fman_prs_result *parse_result;
	u16 ethertype = ntohs(skb->protocol);
	struct ipv6hdr *ipv6h = NULL;
	struct iphdr *iph;
	int retval = 0;
	u8 l4_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* Note: L3 csum seems to be already computed in sw, but we can't choose
	 * L4 alone from the FM configuration anyway.
	 */

	/* Fill in some fields of the Parse Results array, so the FMan
	 * can find them as if they came from the FMan Parser.
	 */
	parse_result = (struct fman_prs_result *)parse_results;

	/* If we're dealing with VLAN, get the real Ethernet type */
	if (ethertype == ETH_P_8021Q) {
		/* We can't always assume the MAC header is set correctly
		 * by the stack, so reset to beginning of skb->data
		 */
		skb_reset_mac_header(skb);
		ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
	}

	/* Fill in the relevant L3 parse result fields
	 * and read the L4 protocol type
	 */
	switch (ethertype) {
	case ETH_P_IP:
		parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
		iph = ip_hdr(skb);
		WARN_ON(!iph);
		l4_proto = iph->protocol;
		break;
	case ETH_P_IPV6:
		parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
		ipv6h = ipv6_hdr(skb);
		WARN_ON(!ipv6h);
		l4_proto = ipv6h->nexthdr;
		break;
	default:
		/* We shouldn't even be here */
		if (net_ratelimit())
			netif_alert(priv, tx_err, priv->net_dev,
				    "Can't compute HW csum for L3 proto 0x%x\n",
				    ntohs(skb->protocol));
		retval = -EIO;
		goto return_error;
	}

	/* Fill in the relevant L4 parse result fields */
	switch (l4_proto) {
	case IPPROTO_UDP:
		parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
		break;
	case IPPROTO_TCP:
		parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
		break;
	default:
		if (net_ratelimit())
			netif_alert(priv, tx_err, priv->net_dev,
				    "Can't compute HW csum for L4 proto 0x%x\n",
				    l4_proto);
		retval = -EIO;
		goto return_error;
	}

	/* At index 0 is IPOffset_1 as defined in the Parse Results */
	parse_result->ip_off[0] = (u8)skb_network_offset(skb);
	parse_result->l4_off = (u8)skb_transport_offset(skb);

	/* Enable L3 (and L4, if TCP or UDP) HW checksum. */
	fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);

	/* On P1023 and similar platforms fd->cmd interpretation could
	 * be disabled by setting CONTEXT_A bit ICMD; currently this bit
	 * is not set so we do not need to check; in the future, if/when
---|
1561 | * using context_a we need to check this bit |
---|
1562 | */ |
---|
1563 | |
---|
1564 | return_error: |
---|
1565 | return retval; |
---|
1566 | } |
---|
1567 | #endif /* __rtems__ */ |
---|
1568 | |
---|
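| /* Add eight Rx buffers (page fragments on Linux, mbuf clusters on RTEMS) |
---|
|  * to the Bman pool in one release. Returns the number of buffers actually |
---|
|  * added, which may be less than eight if allocation or mapping fails. |
---|
|  */ |
---|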
1569 | static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp) |
---|
1570 | { |
---|
1571 | #ifndef __rtems__ |
---|
1572 | struct device *dev = dpaa_bp->dev; |
---|
1573 | #endif /* __rtems__ */ |
---|
1574 | struct bm_buffer bmb[8]; |
---|
1575 | dma_addr_t addr; |
---|
1576 | #ifndef __rtems__ |
---|
1577 | void *new_buf; |
---|
1578 | #endif /* __rtems__ */ |
---|
1579 | u8 i; |
---|
1580 | |
---|
1581 | for (i = 0; i < 8; i++) { |
---|
1582 | #ifndef __rtems__ |
---|
1583 | new_buf = netdev_alloc_frag(dpaa_bp->raw_size); |
---|
1584 | if (unlikely(!new_buf)) { |
---|
1585 | dev_err(dev, "netdev_alloc_frag() failed, size %zu\n", |
---|
1586 | dpaa_bp->raw_size); |
---|
1587 | goto release_previous_buffs; |
---|
1588 | } |
---|
1589 | new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES); |
---|
1590 | |
---|
1591 | addr = dma_map_single(dev, new_buf, |
---|
1592 | dpaa_bp->size, DMA_FROM_DEVICE); |
---|
1593 | if (unlikely(dma_mapping_error(dev, addr))) { |
---|
1594 | dev_err(dpaa_bp->dev, "DMA map failed"); |
---|
1595 | goto release_previous_buffs; |
---|
1596 | } |
---|
1597 | #else /* __rtems__ */ |
---|
1598 | struct mbuf *m; |
---|
1599 | |
---|
1600 | m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); |
---|
1601 | if (unlikely(m == NULL)) { |
---|
1602 | goto release_previous_buffs; |
---|
1603 | } |
---|
1604 | |
---|
1605 | RTEMS_STATIC_ASSERT(DPAA_BP_RAW_SIZE == MCLBYTES, |
---|
1606 | DPAA_BP_RAW_SIZE); |
---|
1607 | *(struct mbuf **)(mtod(m, char *) + DPAA_MBUF_POINTER_OFFSET) = |
---|
1608 | m; |
---|
1609 | addr = mtod(m, dma_addr_t); |
---|
1610 | #endif /* __rtems__ */ |
---|
1611 | |
---|
1612 | bmb[i].data = 0; |
---|
1613 | bm_buffer_set64(&bmb[i], addr); |
---|
1614 | } |
---|
1615 | |
---|
1616 | release_bufs: |
---|
1617 | return dpaa_bman_release(dpaa_bp, bmb, i); |
---|
1618 | |
---|
1619 | release_previous_buffs: |
---|
1620 | #ifndef __rtems__ |
---|
1621 | WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n"); |
---|
1622 | #endif /* __rtems__ */ |
---|
1623 | |
---|
1624 | bm_buffer_set64(&bmb[i], 0); |
---|
1625 | /* Avoid releasing a completely null buffer; bman_release() requires |
---|
1626 | * at least one buffer. |
---|
1627 | */ |
---|
1628 | if (likely(i)) |
---|
1629 | goto release_bufs; |
---|
1630 | |
---|
1631 | return 0; |
---|
1632 | } |
---|
1633 | #ifdef __rtems__ |
---|
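| /* RTEMS-only fast path: put a received mbuf cluster straight back into the |
---|
|  * Rx buffer pool. Releases are batched in the caller-provided recycle |
---|
|  * context and pushed to Bman once the local buffer array is full. |
---|
|  */ |
---|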
1634 | void |
---|
1635 | dpaa_recycle_mcluster(struct dpaa_priv *dpaa_priv, |
---|
1636 | dpaa_buffer_recycle_context *rc, struct mbuf *m) |
---|
1637 | { |
---|
1638 | size_t i; |
---|
1639 | dma_addr_t addr; |
---|
1640 | |
---|
1641 | i = rc->count; |
---|
1642 | m->m_data = m->m_ext.ext_buf; |
---|
1643 | *(struct mbuf **)(mtod(m, char *) + DPAA_MBUF_POINTER_OFFSET) = m; |
---|
1644 | addr = mtod(m, dma_addr_t); |
---|
1645 | rc->bmb[i].data = 0; |
---|
1646 | bm_buffer_set64(&rc->bmb[i], addr); |
---|
1647 | |
---|
1648 | if (i < ARRAY_SIZE(rc->bmb) - 1) { |
---|
1649 | rc->count = i + 1; |
---|
1650 | } else { |
---|
1651 | struct dpaa_bp *dpaa_bp; |
---|
1652 | int *countptr; |
---|
1653 | |
---|
1654 | rc->count = 0; |
---|
1655 | dpaa_bp = dpaa_priv->dpaa_bps[0]; |
---|
1656 | countptr = this_cpu_ptr(dpaa_bp->percpu_count); |
---|
1657 | *countptr += dpaa_bman_release(dpaa_bp, rc->bmb, |
---|
1658 | ARRAY_SIZE(rc->bmb)); |
---|
1659 | } |
---|
1660 | } |
---|
1661 | #endif /* __rtems__ */ |
---|
1662 | |
---|
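| /* Initial seeding of the buffer pool: each possible CPU gets its |
---|
|  * "config_count" worth of buffers, added in batches of eight. |
---|
|  */ |
---|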
1663 | static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp) |
---|
1664 | { |
---|
1665 | int i; |
---|
1666 | |
---|
1667 | /* Give each CPU an allotment of "config_count" buffers */ |
---|
1668 | for_each_possible_cpu(i) { |
---|
1669 | int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i); |
---|
1670 | int j; |
---|
1671 | |
---|
1672 | /* Although we access another CPU's counters here |
---|
1673 | * we do it at boot time so it is safe |
---|
1674 | */ |
---|
1675 | for (j = 0; j < dpaa_bp->config_count; j += 8) |
---|
1676 | *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp); |
---|
1677 | } |
---|
1678 | return 0; |
---|
1679 | } |
---|
1680 | |
---|
1681 | /* Add buffers/(pages) for Rx processing whenever bpool count falls below |
---|
1682 | * REFILL_THRESHOLD. |
---|
1683 | */ |
---|
1684 | static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr) |
---|
1685 | { |
---|
1686 | int count = *countptr; |
---|
1687 | int new_bufs; |
---|
1688 | |
---|
1689 | if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) { |
---|
1690 | do { |
---|
1691 | new_bufs = dpaa_bp_add_8_bufs(dpaa_bp); |
---|
1692 | if (unlikely(!new_bufs)) { |
---|
1693 | /* Avoid looping forever if we've temporarily |
---|
1694 | * run out of memory. We'll try again at the |
---|
1695 | * next NAPI cycle. |
---|
1696 | */ |
---|
1697 | break; |
---|
1698 | } |
---|
1699 | count += new_bufs; |
---|
1700 | } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT); |
---|
1701 | |
---|
1702 | *countptr = count; |
---|
1703 | if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT)) |
---|
1704 | return -ENOMEM; |
---|
1705 | } |
---|
1706 | |
---|
1707 | return 0; |
---|
1708 | } |
---|
1709 | |
---|
1710 | static int dpaa_eth_refill_bpools(struct dpaa_priv *priv) |
---|
1711 | { |
---|
1712 | struct dpaa_bp *dpaa_bp; |
---|
1713 | int *countptr; |
---|
1714 | int res, i; |
---|
1715 | |
---|
1716 | for (i = 0; i < DPAA_BPS_NUM; i++) { |
---|
1717 | dpaa_bp = priv->dpaa_bps[i]; |
---|
1718 | if (!dpaa_bp) |
---|
1719 | return -EINVAL; |
---|
1720 | countptr = this_cpu_ptr(dpaa_bp->percpu_count); |
---|
1721 | res = dpaa_eth_refill_bpool(dpaa_bp, countptr); |
---|
1722 | if (res) |
---|
1723 | return res; |
---|
1724 | } |
---|
1725 | return 0; |
---|
1726 | } |
---|
1727 | |
---|
1728 | #ifndef __rtems__ |
---|
1729 | /* Cleanup function for outgoing frame descriptors that were built on Tx path, |
---|
1730 | * either contiguous frames or scatter/gather ones. |
---|
1731 | * Skb freeing is not handled here. |
---|
1732 | * |
---|
1733 | * This function may be called on error paths in the Tx function, so guard |
---|
1734 | * against cases when not all fd relevant fields were filled in. |
---|
1735 | * |
---|
1736 | * Return the skb backpointer, since for S/G frames the buffer containing it |
---|
1737 | * gets freed here. |
---|
1738 | */ |
---|
1739 | static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, |
---|
1740 | const struct qm_fd *fd) |
---|
1741 | { |
---|
1742 | const enum dma_data_direction dma_dir = DMA_TO_DEVICE; |
---|
1743 | struct device *dev = priv->net_dev->dev.parent; |
---|
1744 | dma_addr_t addr = qm_fd_addr(fd); |
---|
1745 | const struct qm_sg_entry *sgt; |
---|
1746 | struct sk_buff **skbh, *skb; |
---|
1747 | int nr_frags, i; |
---|
1748 | |
---|
1749 | skbh = (struct sk_buff **)phys_to_virt(addr); |
---|
1750 | skb = *skbh; |
---|
1751 | |
---|
1752 | if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { |
---|
1753 | nr_frags = skb_shinfo(skb)->nr_frags; |
---|
1754 | dma_unmap_single(dev, addr, qm_fd_get_offset(fd) + |
---|
1755 | sizeof(struct qm_sg_entry) * (1 + nr_frags), |
---|
1756 | dma_dir); |
---|
1757 | |
---|
1758 | /* The sgt buffer has been allocated with netdev_alloc_frag(), |
---|
1759 | * it's from lowmem. |
---|
1760 | */ |
---|
1761 | sgt = phys_to_virt(addr + qm_fd_get_offset(fd)); |
---|
1762 | |
---|
1763 | /* sgt[0] is from lowmem, was dma_map_single()-ed */ |
---|
1764 | dma_unmap_single(dev, qm_sg_addr(&sgt[0]), |
---|
1765 | qm_sg_entry_get_len(&sgt[0]), dma_dir); |
---|
1766 | |
---|
1767 | /* remaining pages were mapped with skb_frag_dma_map() */ |
---|
1768 | for (i = 1; i < nr_frags; i++) { |
---|
1769 | WARN_ON(qm_sg_entry_is_ext(&sgt[i])); |
---|
1770 | |
---|
1771 | dma_unmap_page(dev, qm_sg_addr(&sgt[i]), |
---|
1772 | qm_sg_entry_get_len(&sgt[i]), dma_dir); |
---|
1773 | } |
---|
1774 | |
---|
1775 | /* Free the page frag that we allocated on Tx */ |
---|
1776 | skb_free_frag(phys_to_virt(addr)); |
---|
1777 | } else { |
---|
1778 | dma_unmap_single(dev, addr, |
---|
1779 | skb_tail_pointer(skb) - (u8 *)skbh, dma_dir); |
---|
1780 | } |
---|
1781 | |
---|
1782 | return skb; |
---|
1783 | } |
---|
1784 | |
---|
1785 | static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd) |
---|
1786 | { |
---|
1787 | /* The parser has run and performed L4 checksum validation. |
---|
1788 | * We know there were no parser errors (and implicitly no |
---|
1789 | * L4 csum error), otherwise we wouldn't be here. |
---|
1790 | */ |
---|
1791 | if ((priv->net_dev->features & NETIF_F_RXCSUM) && |
---|
1792 | (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV)) |
---|
1793 | return CHECKSUM_UNNECESSARY; |
---|
1794 | |
---|
1795 | /* We're here because either the parser didn't run or the L4 checksum |
---|
1796 | * was not verified. This may include the case of a UDP frame with |
---|
1797 | * checksum zero or an L4 proto other than TCP/UDP |
---|
1798 | */ |
---|
1799 | return CHECKSUM_NONE; |
---|
1800 | } |
---|
1801 | |
---|
1802 | /* Build a linear skb around the received buffer. |
---|
1803 | * We are guaranteed there is enough room at the end of the data buffer to |
---|
1804 | * accommodate the shared info area of the skb. |
---|
1805 | */ |
---|
1806 | static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv, |
---|
1807 | const struct qm_fd *fd) |
---|
1808 | { |
---|
1809 | ssize_t fd_off = qm_fd_get_offset(fd); |
---|
1810 | dma_addr_t addr = qm_fd_addr(fd); |
---|
1811 | struct dpaa_bp *dpaa_bp; |
---|
1812 | struct sk_buff *skb; |
---|
1813 | void *vaddr; |
---|
1814 | |
---|
1815 | vaddr = phys_to_virt(addr); |
---|
1816 | WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); |
---|
1817 | |
---|
1818 | dpaa_bp = dpaa_bpid2pool(fd->bpid); |
---|
1819 | if (!dpaa_bp) |
---|
1820 | goto free_buffer; |
---|
1821 | |
---|
1822 | skb = build_skb(vaddr, dpaa_bp->size + |
---|
1823 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); |
---|
1824 | if (unlikely(!skb)) { |
---|
1825 | WARN_ONCE(1, "Build skb failure on Rx\n"); |
---|
1826 | goto free_buffer; |
---|
1827 | } |
---|
1828 | WARN_ON(fd_off != priv->rx_headroom); |
---|
1829 | skb_reserve(skb, fd_off); |
---|
1830 | skb_put(skb, qm_fd_get_length(fd)); |
---|
1831 | |
---|
1832 | skb->ip_summed = rx_csum_offload(priv, fd); |
---|
1833 | |
---|
1834 | return skb; |
---|
1835 | |
---|
1836 | free_buffer: |
---|
1837 | skb_free_frag(vaddr); |
---|
1838 | return NULL; |
---|
1839 | } |
---|
1840 | |
---|
1841 | /* Build an skb with the data of the first S/G entry in the linear portion and |
---|
1842 | * the rest of the frame as skb fragments. |
---|
1843 | * |
---|
1844 | * The page fragment holding the S/G Table is recycled here. |
---|
1845 | */ |
---|
1846 | static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, |
---|
1847 | const struct qm_fd *fd) |
---|
1848 | { |
---|
1849 | ssize_t fd_off = qm_fd_get_offset(fd); |
---|
1850 | dma_addr_t addr = qm_fd_addr(fd); |
---|
1851 | const struct qm_sg_entry *sgt; |
---|
1852 | struct page *page, *head_page; |
---|
1853 | struct dpaa_bp *dpaa_bp; |
---|
1854 | void *vaddr, *sg_vaddr; |
---|
1855 | int frag_off, frag_len; |
---|
1856 | struct sk_buff *skb; |
---|
1857 | dma_addr_t sg_addr; |
---|
1858 | int page_offset; |
---|
1859 | unsigned int sz; |
---|
1860 | int *count_ptr; |
---|
1861 | int i; |
---|
1862 | |
---|
1863 | vaddr = phys_to_virt(addr); |
---|
1864 | WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); |
---|
1865 | |
---|
1866 | /* Iterate through the SGT entries and add data buffers to the skb */ |
---|
1867 | sgt = vaddr + fd_off; |
---|
1868 | for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) { |
---|
1869 | /* Extension bit is not supported */ |
---|
1870 | WARN_ON(qm_sg_entry_is_ext(&sgt[i])); |
---|
1871 | |
---|
1872 | sg_addr = qm_sg_addr(&sgt[i]); |
---|
1873 | sg_vaddr = phys_to_virt(sg_addr); |
---|
1874 | WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr, |
---|
1875 | SMP_CACHE_BYTES)); |
---|
1876 | |
---|
1877 | /* We may use multiple Rx pools */ |
---|
1878 | dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); |
---|
1879 | if (!dpaa_bp) |
---|
1880 | goto free_buffers; |
---|
1881 | |
---|
1882 | count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); |
---|
1883 | dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size, |
---|
1884 | DMA_FROM_DEVICE); |
---|
1885 | if (i == 0) { |
---|
1886 | sz = dpaa_bp->size + |
---|
1887 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
---|
1888 | skb = build_skb(sg_vaddr, sz); |
---|
1889 | if (WARN_ON(unlikely(!skb))) |
---|
1890 | goto free_buffers; |
---|
1891 | |
---|
1892 | skb->ip_summed = rx_csum_offload(priv, fd); |
---|
1893 | |
---|
1894 | /* Make sure forwarded skbs will have enough space |
---|
1895 | * on Tx, if extra headers are added. |
---|
1896 | */ |
---|
1897 | WARN_ON(fd_off != priv->rx_headroom); |
---|
1898 | skb_reserve(skb, fd_off); |
---|
1899 | skb_put(skb, qm_sg_entry_get_len(&sgt[i])); |
---|
1900 | } else { |
---|
1901 | /* Not the first S/G entry; all data from buffer will |
---|
1902 | * be added in an skb fragment; fragment index is offset |
---|
1903 | * by one since first S/G entry was incorporated in the |
---|
1904 | * linear part of the skb. |
---|
1905 | * |
---|
1906 | * Caution: 'page' may be a tail page. |
---|
1907 | */ |
---|
1908 | page = virt_to_page(sg_vaddr); |
---|
1909 | head_page = virt_to_head_page(sg_vaddr); |
---|
1910 | |
---|
1911 | /* Compute offset in (possibly tail) page */ |
---|
1912 | page_offset = ((unsigned long)sg_vaddr & |
---|
1913 | (PAGE_SIZE - 1)) + |
---|
1914 | (page_address(page) - page_address(head_page)); |
---|
1915 | /* page_offset only refers to the beginning of sgt[i]; |
---|
1916 | * but the buffer itself may have an internal offset. |
---|
1917 | */ |
---|
1918 | frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset; |
---|
1919 | frag_len = qm_sg_entry_get_len(&sgt[i]); |
---|
1920 | /* skb_add_rx_frag() does no checking on the page; if |
---|
1921 | * we pass it a tail page, we'll end up with |
---|
1922 | * bad page accounting and eventually with segfaults. |
---|
1923 | */ |
---|
1924 | skb_add_rx_frag(skb, i - 1, head_page, frag_off, |
---|
1925 | frag_len, dpaa_bp->size); |
---|
1926 | } |
---|
1927 | /* Update the pool count for the current {cpu x bpool} */ |
---|
1928 | (*count_ptr)--; |
---|
1929 | |
---|
1930 | if (qm_sg_entry_is_final(&sgt[i])) |
---|
1931 | break; |
---|
1932 | } |
---|
1933 | WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n"); |
---|
1934 | |
---|
1935 | /* free the SG table buffer */ |
---|
1936 | skb_free_frag(vaddr); |
---|
1937 | |
---|
1938 | return skb; |
---|
1939 | |
---|
1940 | free_buffers: |
---|
1941 | /* compensate sw bpool counter changes */ |
---|
1942 | for (i--; i >= 0; i--) { |
---|
1943 | dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); |
---|
1944 | if (dpaa_bp) { |
---|
1945 | count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); |
---|
1946 | (*count_ptr)++; |
---|
1947 | } |
---|
1948 | } |
---|
1949 | /* free all the SG entries */ |
---|
1950 | for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) { |
---|
1951 | sg_addr = qm_sg_addr(&sgt[i]); |
---|
1952 | sg_vaddr = phys_to_virt(sg_addr); |
---|
1953 | skb_free_frag(sg_vaddr); |
---|
1954 | dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); |
---|
1955 | if (dpaa_bp) { |
---|
1956 | count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); |
---|
1957 | (*count_ptr)--; |
---|
1958 | } |
---|
1959 | |
---|
1960 | if (qm_sg_entry_is_final(&sgt[i])) |
---|
1961 | break; |
---|
1962 | } |
---|
1963 | /* free the SGT fragment */ |
---|
1964 | skb_free_frag(vaddr); |
---|
1965 | |
---|
1966 | return NULL; |
---|
1967 | } |
---|
1968 | |
---|
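| /* Build a contiguous frame descriptor from a linear skb: store the skb |
---|
|  * backpointer at the start of the tx_headroom area, fill in the parse |
---|
|  * results for HW checksum offload, then DMA-map the buffer and record its |
---|
|  * address in the FD. |
---|
|  */ |
---|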
1969 | static int skb_to_contig_fd(struct dpaa_priv *priv, |
---|
1970 | struct sk_buff *skb, struct qm_fd *fd, |
---|
1971 | int *offset) |
---|
1972 | { |
---|
1973 | struct net_device *net_dev = priv->net_dev; |
---|
1974 | struct device *dev = net_dev->dev.parent; |
---|
1975 | enum dma_data_direction dma_dir; |
---|
1976 | unsigned char *buffer_start; |
---|
1977 | struct sk_buff **skbh; |
---|
1978 | dma_addr_t addr; |
---|
1979 | int err; |
---|
1980 | |
---|
1981 | /* We are guaranteed to have at least tx_headroom bytes |
---|
1982 | * available, so just use that for offset. |
---|
1983 | */ |
---|
1984 | fd->bpid = FSL_DPAA_BPID_INV; |
---|
1985 | buffer_start = skb->data - priv->tx_headroom; |
---|
1986 | dma_dir = DMA_TO_DEVICE; |
---|
1987 | |
---|
1988 | skbh = (struct sk_buff **)buffer_start; |
---|
1989 | *skbh = skb; |
---|
1990 | |
---|
1991 | /* Enable L3/L4 hardware checksum computation. |
---|
1992 | * |
---|
1993 | * We must do this before dma_map_single(DMA_TO_DEVICE), because we may |
---|
1994 | * need to write into the skb. |
---|
1995 | */ |
---|
1996 | err = dpaa_enable_tx_csum(priv, skb, fd, |
---|
1997 | ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE); |
---|
1998 | if (unlikely(err < 0)) { |
---|
1999 | if (net_ratelimit()) |
---|
2000 | netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", |
---|
2001 | err); |
---|
2002 | return err; |
---|
2003 | } |
---|
2004 | |
---|
2005 | /* Fill in the rest of the FD fields */ |
---|
2006 | qm_fd_set_contig(fd, priv->tx_headroom, skb->len); |
---|
2007 | fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO); |
---|
2008 | |
---|
2009 | /* Map the entire buffer size that may be seen by FMan, but no more */ |
---|
2010 | addr = dma_map_single(dev, skbh, |
---|
2011 | skb_tail_pointer(skb) - buffer_start, dma_dir); |
---|
2012 | if (unlikely(dma_mapping_error(dev, addr))) { |
---|
2013 | if (net_ratelimit()) |
---|
2014 | netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n"); |
---|
2015 | return -EINVAL; |
---|
2016 | } |
---|
2017 | qm_fd_addr_set64(fd, addr); |
---|
2018 | |
---|
2019 | return 0; |
---|
2020 | } |
---|
2021 | |
---|
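| /* Build a scatter/gather frame descriptor from a nonlinear skb: a page |
---|
|  * fragment holds the SGT (plus tx_headroom), entry 0 maps the linear part |
---|
|  * of the skb and the remaining entries map its page fragments. |
---|
|  */ |
---|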
2022 | static int skb_to_sg_fd(struct dpaa_priv *priv, |
---|
2023 | struct sk_buff *skb, struct qm_fd *fd) |
---|
2024 | { |
---|
2025 | const enum dma_data_direction dma_dir = DMA_TO_DEVICE; |
---|
2026 | const int nr_frags = skb_shinfo(skb)->nr_frags; |
---|
2027 | struct net_device *net_dev = priv->net_dev; |
---|
2028 | struct device *dev = net_dev->dev.parent; |
---|
2029 | struct qm_sg_entry *sgt; |
---|
2030 | struct sk_buff **skbh; |
---|
2031 | int i, j, err, sz; |
---|
2032 | void *buffer_start; |
---|
2033 | skb_frag_t *frag; |
---|
2034 | dma_addr_t addr; |
---|
2035 | size_t frag_len; |
---|
2036 | void *sgt_buf; |
---|
2037 | |
---|
2038 | /* get a page frag to store the SGTable */ |
---|
2039 | sz = SKB_DATA_ALIGN(priv->tx_headroom + |
---|
2040 | sizeof(struct qm_sg_entry) * (1 + nr_frags)); |
---|
2041 | sgt_buf = netdev_alloc_frag(sz); |
---|
2042 | if (unlikely(!sgt_buf)) { |
---|
2043 | netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n", |
---|
2044 | sz); |
---|
2045 | return -ENOMEM; |
---|
2046 | } |
---|
2047 | |
---|
2048 | /* Enable L3/L4 hardware checksum computation. |
---|
2049 | * |
---|
2050 | * We must do this before dma_map_single(DMA_TO_DEVICE), because we may |
---|
2051 | * need to write into the skb. |
---|
2052 | */ |
---|
2053 | err = dpaa_enable_tx_csum(priv, skb, fd, |
---|
2054 | sgt_buf + DPAA_TX_PRIV_DATA_SIZE); |
---|
2055 | if (unlikely(err < 0)) { |
---|
2056 | if (net_ratelimit()) |
---|
2057 | netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", |
---|
2058 | err); |
---|
2059 | goto csum_failed; |
---|
2060 | } |
---|
2061 | |
---|
2062 | sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom); |
---|
2063 | qm_sg_entry_set_len(&sgt[0], skb_headlen(skb)); |
---|
2064 | sgt[0].bpid = FSL_DPAA_BPID_INV; |
---|
2065 | sgt[0].offset = 0; |
---|
2066 | addr = dma_map_single(dev, skb->data, |
---|
2067 | skb_headlen(skb), dma_dir); |
---|
2068 | if (unlikely(dma_mapping_error(dev, addr))) { |
---|
2069 | dev_err(dev, "DMA mapping failed"); |
---|
2070 | err = -EINVAL; |
---|
2071 | goto sg0_map_failed; |
---|
2072 | } |
---|
2073 | qm_sg_entry_set64(&sgt[0], addr); |
---|
2074 | |
---|
2075 | /* populate the rest of SGT entries */ |
---|
2076 | frag = &skb_shinfo(skb)->frags[0]; |
---|
2077 | frag_len = frag->size; |
---|
2078 | for (i = 1; i <= nr_frags; i++, frag++) { |
---|
2079 | WARN_ON(!skb_frag_page(frag)); |
---|
2080 | addr = skb_frag_dma_map(dev, frag, 0, |
---|
2081 | frag_len, dma_dir); |
---|
2082 | if (unlikely(dma_mapping_error(dev, addr))) { |
---|
2083 | dev_err(dev, "DMA mapping failed"); |
---|
2084 | err = -EINVAL; |
---|
2085 | goto sg_map_failed; |
---|
2086 | } |
---|
2087 | |
---|
2088 | qm_sg_entry_set_len(&sgt[i], frag_len); |
---|
2089 | sgt[i].bpid = FSL_DPAA_BPID_INV; |
---|
2090 | sgt[i].offset = 0; |
---|
2091 | |
---|
2092 | /* keep the offset in the address */ |
---|
2093 | qm_sg_entry_set64(&sgt[i], addr); |
---|
2094 | frag_len = frag->size; |
---|
2095 | } |
---|
2096 | qm_sg_entry_set_f(&sgt[i - 1], frag_len); |
---|
2097 | |
---|
2098 | qm_fd_set_sg(fd, priv->tx_headroom, skb->len); |
---|
2099 | |
---|
2100 | /* DMA map the SGT page */ |
---|
2101 | buffer_start = (void *)sgt - priv->tx_headroom; |
---|
2102 | skbh = (struct sk_buff **)buffer_start; |
---|
2103 | *skbh = skb; |
---|
2104 | |
---|
2105 | addr = dma_map_single(dev, buffer_start, priv->tx_headroom + |
---|
2106 | sizeof(struct qm_sg_entry) * (1 + nr_frags), |
---|
2107 | dma_dir); |
---|
2108 | if (unlikely(dma_mapping_error(dev, addr))) { |
---|
2109 | dev_err(dev, "DMA mapping failed"); |
---|
2110 | err = -EINVAL; |
---|
2111 | goto sgt_map_failed; |
---|
2112 | } |
---|
2113 | |
---|
2114 | fd->bpid = FSL_DPAA_BPID_INV; |
---|
2115 | fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO); |
---|
2116 | qm_fd_addr_set64(fd, addr); |
---|
2117 | |
---|
2118 | return 0; |
---|
2119 | |
---|
2120 | sgt_map_failed: |
---|
2121 | sg_map_failed: |
---|
2122 | for (j = 0; j < i; j++) |
---|
2123 | dma_unmap_page(dev, qm_sg_addr(&sgt[j]), |
---|
2124 | qm_sg_entry_get_len(&sgt[j]), dma_dir); |
---|
2125 | sg0_map_failed: |
---|
2126 | csum_failed: |
---|
2127 | skb_free_frag(sgt_buf); |
---|
2128 | |
---|
2129 | return err; |
---|
2130 | } |
---|
2131 | |
---|
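| /* Enqueue a Tx frame descriptor on the egress frame queue selected by |
---|
|  * 'queue', retrying a bounded number of times while the enqueue ring is |
---|
|  * busy, and update the per-CPU Tx statistics accordingly. |
---|
|  */ |
---|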
2132 | static inline int dpaa_xmit(struct dpaa_priv *priv, |
---|
2133 | struct rtnl_link_stats64 *percpu_stats, |
---|
2134 | int queue, |
---|
2135 | struct qm_fd *fd) |
---|
2136 | { |
---|
2137 | struct qman_fq *egress_fq; |
---|
2138 | int err, i; |
---|
2139 | |
---|
2140 | egress_fq = priv->egress_fqs[queue]; |
---|
2141 | if (fd->bpid == FSL_DPAA_BPID_INV) |
---|
2142 | fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue])); |
---|
2143 | |
---|
2144 | /* Trace this Tx fd */ |
---|
2145 | trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd); |
---|
2146 | |
---|
2147 | for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) { |
---|
2148 | err = qman_enqueue(egress_fq, fd); |
---|
2149 | if (err != -EBUSY) |
---|
2150 | break; |
---|
2151 | } |
---|
2152 | |
---|
2153 | if (unlikely(err < 0)) { |
---|
2154 | percpu_stats->tx_errors++; |
---|
2155 | percpu_stats->tx_fifo_errors++; |
---|
2156 | return err; |
---|
2157 | } |
---|
2158 | |
---|
2159 | percpu_stats->tx_packets++; |
---|
2160 | percpu_stats->tx_bytes += qm_fd_get_length(fd); |
---|
2161 | |
---|
2162 | return 0; |
---|
2163 | } |
---|
2164 | |
---|
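| /* ndo_start_xmit: turn the skb into either a contiguous or an S/G frame |
---|
|  * descriptor (linearizing it if it has more fragments than the SGT can |
---|
|  * hold) and hand it to dpaa_xmit(). On error the skb is dropped and |
---|
|  * counted; NETDEV_TX_OK is always returned. |
---|
|  */ |
---|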
2165 | static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) |
---|
2166 | { |
---|
2167 | const int queue_mapping = skb_get_queue_mapping(skb); |
---|
2168 | bool nonlinear = skb_is_nonlinear(skb); |
---|
2169 | struct rtnl_link_stats64 *percpu_stats; |
---|
2170 | struct dpaa_percpu_priv *percpu_priv; |
---|
2171 | struct dpaa_priv *priv; |
---|
2172 | struct qm_fd fd; |
---|
2173 | int offset = 0; |
---|
2174 | int err = 0; |
---|
2175 | |
---|
2176 | priv = netdev_priv(net_dev); |
---|
2177 | percpu_priv = this_cpu_ptr(priv->percpu_priv); |
---|
2178 | percpu_stats = &percpu_priv->stats; |
---|
2179 | |
---|
2180 | qm_fd_clear_fd(&fd); |
---|
2181 | |
---|
2182 | if (!nonlinear) { |
---|
2183 | /* We're going to store the skb backpointer at the beginning |
---|
2184 | * of the data buffer, so we need a privately owned skb |
---|
2185 | * |
---|
2186 | * We've made sure the skb is not shared in dev->priv_flags, |
---|
2187 | * but we still need to verify that the skb head is not cloned. |
---|
2188 | */ |
---|
2189 | if (skb_cow_head(skb, priv->tx_headroom)) |
---|
2190 | goto enomem; |
---|
2191 | |
---|
2192 | WARN_ON(skb_is_nonlinear(skb)); |
---|
2193 | } |
---|
2194 | |
---|
2195 | /* MAX_SKB_FRAGS is equal to or larger than our DPAA_SGT_MAX_ENTRIES; |
---|
2196 | * make sure we don't feed FMan with more fragments than it supports. |
---|
2197 | */ |
---|
2198 | if (nonlinear && |
---|
2199 | likely(skb_shinfo(skb)->nr_frags < DPAA_SGT_MAX_ENTRIES)) { |
---|
2200 | /* Just create a S/G fd based on the skb */ |
---|
2201 | err = skb_to_sg_fd(priv, skb, &fd); |
---|
2202 | percpu_priv->tx_frag_skbuffs++; |
---|
2203 | } else { |
---|
2204 | /* If the egress skb contains more fragments than we support |
---|
2205 | * we have no choice but to linearize it ourselves. |
---|
2206 | */ |
---|
2207 | if (unlikely(nonlinear) && __skb_linearize(skb)) |
---|
2208 | goto enomem; |
---|
2209 | |
---|
2210 | /* Finally, create a contig FD from this skb */ |
---|
2211 | err = skb_to_contig_fd(priv, skb, &fd, &offset); |
---|
2212 | } |
---|
2213 | if (unlikely(err < 0)) |
---|
2214 | goto skb_to_fd_failed; |
---|
2215 | |
---|
2216 | if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0)) |
---|
2217 | return NETDEV_TX_OK; |
---|
2218 | |
---|
2219 | dpaa_cleanup_tx_fd(priv, &fd); |
---|
2220 | skb_to_fd_failed: |
---|
2221 | enomem: |
---|
2222 | percpu_stats->tx_errors++; |
---|
2223 | dev_kfree_skb(skb); |
---|
2224 | return NETDEV_TX_OK; |
---|
2225 | } |
---|
2226 | #endif /* __rtems__ */ |
---|
2227 | |
---|
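| /* Handle a frame received on the Rx error FQ: bump the per-CPU error |
---|
|  * counters that match the FD status bits and return the buffer to its |
---|
|  * pool. |
---|
|  */ |
---|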
2228 | static void dpaa_rx_error(struct net_device *net_dev, |
---|
2229 | const struct dpaa_priv *priv, |
---|
2230 | struct dpaa_percpu_priv *percpu_priv, |
---|
2231 | const struct qm_fd *fd, |
---|
2232 | u32 fqid) |
---|
2233 | { |
---|
2234 | #ifndef __rtems__ |
---|
2235 | if (net_ratelimit()) |
---|
2236 | netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n", |
---|
2237 | be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS); |
---|
2238 | |
---|
2239 | percpu_priv->stats.rx_errors++; |
---|
2240 | #endif /* __rtems__ */ |
---|
2241 | |
---|
2242 | if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA) |
---|
2243 | percpu_priv->rx_errors.dme++; |
---|
2244 | if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL) |
---|
2245 | percpu_priv->rx_errors.fpe++; |
---|
2246 | if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE) |
---|
2247 | percpu_priv->rx_errors.fse++; |
---|
2248 | if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR) |
---|
2249 | percpu_priv->rx_errors.phe++; |
---|
2250 | |
---|
2251 | dpaa_fd_release(net_dev, fd); |
---|
2252 | } |
---|
2253 | |
---|
2254 | static void dpaa_tx_error(struct net_device *net_dev, |
---|
2255 | const struct dpaa_priv *priv, |
---|
2256 | struct dpaa_percpu_priv *percpu_priv, |
---|
2257 | const struct qm_fd *fd, |
---|
2258 | u32 fqid) |
---|
2259 | { |
---|
2260 | #ifndef __rtems__ |
---|
2261 | struct sk_buff *skb; |
---|
2262 | |
---|
2263 | if (net_ratelimit()) |
---|
2264 | netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", |
---|
2265 | be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS); |
---|
2266 | |
---|
2267 | percpu_priv->stats.tx_errors++; |
---|
2268 | #else /* __rtems__ */ |
---|
2269 | struct ifnet *ifp = net_dev->ifp; |
---|
2270 | |
---|
2271 | if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); |
---|
2272 | #endif /* __rtems__ */ |
---|
2273 | |
---|
2274 | #ifndef __rtems__ |
---|
2275 | skb = dpaa_cleanup_tx_fd(priv, fd); |
---|
2276 | dev_kfree_skb(skb); |
---|
2277 | #else /* __rtems__ */ |
---|
2278 | dpaa_cleanup_tx_fd(ifp, fd); |
---|
2279 | #endif /* __rtems__ */ |
---|
2280 | } |
---|
2281 | |
---|
2282 | #ifndef __rtems__ |
---|
2283 | static int dpaa_eth_poll(struct napi_struct *napi, int budget) |
---|
2284 | { |
---|
2285 | struct dpaa_napi_portal *np = |
---|
2286 | container_of(napi, struct dpaa_napi_portal, napi); |
---|
2287 | |
---|
2288 | int cleaned = qman_p_poll_dqrr(np->p, budget); |
---|
2289 | |
---|
2290 | if (cleaned < budget) { |
---|
2291 | napi_complete_done(napi, cleaned); |
---|
2292 | qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); |
---|
2293 | |
---|
2294 | } else if (np->down) { |
---|
2295 | qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); |
---|
2296 | } |
---|
2297 | |
---|
2298 | return cleaned; |
---|
2299 | } |
---|
2300 | #endif /* __rtems__ */ |
---|
2301 | |
---|
2302 | static void dpaa_tx_conf(struct net_device *net_dev, |
---|
2303 | const struct dpaa_priv *priv, |
---|
2304 | struct dpaa_percpu_priv *percpu_priv, |
---|
2305 | const struct qm_fd *fd, |
---|
2306 | u32 fqid) |
---|
2307 | { |
---|
2308 | #ifndef __rtems__ |
---|
2309 | struct sk_buff *skb; |
---|
2310 | |
---|
2311 | if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) { |
---|
2312 | if (net_ratelimit()) |
---|
2313 | netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", |
---|
2314 | be32_to_cpu(fd->status) & |
---|
2315 | FM_FD_STAT_TX_ERRORS); |
---|
2316 | |
---|
2317 | percpu_priv->stats.tx_errors++; |
---|
2318 | } |
---|
2319 | |
---|
2320 | percpu_priv->tx_confirm++; |
---|
2321 | |
---|
2322 | skb = dpaa_cleanup_tx_fd(priv, fd); |
---|
2323 | |
---|
2324 | consume_skb(skb); |
---|
2325 | #else /* __rtems__ */ |
---|
2326 | struct ifnet *ifp = net_dev->ifp; |
---|
2327 | |
---|
2328 | if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS) != 0) { |
---|
2329 | if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); |
---|
2330 | } |
---|
2331 | |
---|
2332 | dpaa_cleanup_tx_fd(ifp, fd); |
---|
2333 | #endif /* __rtems__ */ |
---|
2334 | } |
---|
2335 | |
---|
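| /* On Linux: if the DQRR callback is not already running in softirq (NAPI) |
---|
|  * context, disable the portal's dequeue interrupt, schedule NAPI and |
---|
|  * return nonzero so the caller stops dequeueing. On RTEMS this is a no-op |
---|
|  * and always returns 0. |
---|
|  */ |
---|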
2336 | static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv, |
---|
2337 | struct qman_portal *portal) |
---|
2338 | { |
---|
2339 | #ifndef __rtems__ |
---|
2340 | if (unlikely(in_irq() || !in_serving_softirq())) { |
---|
2341 | /* Disable QMan IRQ and invoke NAPI */ |
---|
2342 | qman_p_irqsource_remove(portal, QM_PIRQ_DQRI); |
---|
2343 | |
---|
2344 | percpu_priv->np.p = portal; |
---|
2345 | napi_schedule(&percpu_priv->np.napi); |
---|
2346 | percpu_priv->in_interrupt++; |
---|
2347 | return 1; |
---|
2348 | } |
---|
2349 | #endif /* __rtems__ */ |
---|
2350 | return 0; |
---|
2351 | } |
---|
2352 | |
---|
2353 | static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal, |
---|
2354 | struct qman_fq *fq, |
---|
2355 | const struct qm_dqrr_entry *dq) |
---|
2356 | { |
---|
2357 | struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base); |
---|
2358 | struct dpaa_percpu_priv *percpu_priv; |
---|
2359 | struct net_device *net_dev; |
---|
2360 | struct dpaa_bp *dpaa_bp; |
---|
2361 | struct dpaa_priv *priv; |
---|
2362 | |
---|
2363 | net_dev = dpaa_fq->net_dev; |
---|
2364 | priv = netdev_priv(net_dev); |
---|
2365 | dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); |
---|
2366 | if (!dpaa_bp) |
---|
2367 | return qman_cb_dqrr_consume; |
---|
2368 | |
---|
2369 | percpu_priv = this_cpu_ptr(priv->percpu_priv); |
---|
2370 | |
---|
2371 | if (dpaa_eth_napi_schedule(percpu_priv, portal)) |
---|
2372 | return qman_cb_dqrr_stop; |
---|
2373 | |
---|
2374 | if (dpaa_eth_refill_bpools(priv)) |
---|
2375 | /* Unable to refill the buffer pool due to insufficient |
---|
2376 | * system memory. Just release the frame back into the pool, |
---|
2377 | * otherwise we'll soon end up with an empty buffer pool. |
---|
2378 | */ |
---|
2379 | dpaa_fd_release(net_dev, &dq->fd); |
---|
2380 | else |
---|
2381 | dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); |
---|
2382 | |
---|
2383 | return qman_cb_dqrr_consume; |
---|
2384 | } |
---|
2385 | |
---|
2386 | #ifdef __rtems__ |
---|
2387 | static struct mbuf * |
---|
2388 | dpaa_bp_addr_to_mbuf(dma_addr_t addr) |
---|
2389 | { |
---|
2390 | void *vaddr = phys_to_virt(addr); |
---|
2391 | |
---|
2392 | return (*(struct mbuf **)(vaddr + DPAA_MBUF_POINTER_OFFSET)); |
---|
2393 | } |
---|
2394 | |
---|
2395 | static struct mbuf * |
---|
2396 | contig_fd_to_mbuf(const struct qm_fd *fd, struct ifnet *ifp) |
---|
2397 | { |
---|
2398 | struct mbuf *m; |
---|
2399 | ssize_t fd_off = qm_fd_get_offset(fd); |
---|
2400 | dma_addr_t addr = qm_fd_addr(fd); |
---|
2401 | |
---|
2402 | m = dpaa_bp_addr_to_mbuf(addr); |
---|
2403 | m->m_pkthdr.rcvif = ifp; |
---|
2404 | m->m_pkthdr.len = m->m_len = qm_fd_get_length(fd); |
---|
2405 | m->m_data = mtod(m, char *) + fd_off; |
---|
2406 | |
---|
2407 | return (m); |
---|
2408 | } |
---|
2409 | |
---|
2410 | static void |
---|
2411 | dpaa_bp_recycle_frag(struct dpaa_bp *dpaa_bp, dma_addr_t addr, int *count_ptr) |
---|
2412 | { |
---|
2413 | struct bm_buffer bmb; |
---|
2414 | |
---|
2415 | bm_buffer_set64(&bmb, addr); |
---|
2416 | |
---|
2417 | while (bman_release(dpaa_bp->pool, &bmb, 1)) |
---|
2418 | cpu_relax(); |
---|
2419 | |
---|
2420 | ++(*count_ptr); |
---|
2421 | } |
---|
2422 | |
---|
2423 | static struct mbuf * |
---|
2424 | sg_fd_to_mbuf(struct dpaa_bp *dpaa_bp, const struct qm_fd *fd, |
---|
2425 | struct ifnet *ifp, int *count_ptr) |
---|
2426 | { |
---|
2427 | ssize_t fd_off = qm_fd_get_offset(fd); |
---|
2428 | dma_addr_t addr = qm_fd_addr(fd); |
---|
2429 | const struct qm_sg_entry *sgt; |
---|
2430 | int i; |
---|
2431 | int len; |
---|
2432 | struct mbuf *m; |
---|
2433 | struct mbuf *last; |
---|
2434 | |
---|
2435 | sgt = (const struct qm_sg_entry *)((char *)phys_to_virt(addr) + fd_off); |
---|
2436 | len = 0; |
---|
2437 | |
---|
2438 | for (i = 0; i < DPAA_SGT_MAX_ENTRIES; ++i) { |
---|
2439 | dma_addr_t sg_addr; |
---|
2440 | int sg_len; |
---|
2441 | struct mbuf *n; |
---|
2442 | |
---|
2443 | BSD_ASSERT(!qm_sg_entry_is_ext(&sgt[i])); |
---|
2444 | BSD_ASSERT(dpaa_bp == dpaa_bpid2pool(sgt[i].bpid)); |
---|
2445 | |
---|
2446 | sg_addr = qm_sg_addr(&sgt[i]); |
---|
2447 | n = dpaa_bp_addr_to_mbuf(sg_addr); |
---|
2448 | |
---|
2449 | sg_len = qm_sg_entry_get_len(&sgt[i]); |
---|
2450 | len += sg_len; |
---|
2451 | |
---|
2452 | if (i == 0) { |
---|
2453 | m = n; |
---|
2454 | } else { |
---|
2455 | last->m_next = n; |
---|
2456 | } |
---|
2457 | |
---|
2458 | n->m_len = sg_len; |
---|
2459 | n->m_data = mtod(n, char *) + sgt[i].offset; |
---|
2460 | last = n; |
---|
2461 | |
---|
2462 | --(*count_ptr); |
---|
2463 | |
---|
2464 | if (qm_sg_entry_is_final(&sgt[i])) { |
---|
2465 | break; |
---|
2466 | } |
---|
2467 | } |
---|
2468 | |
---|
2469 | m->m_pkthdr.rcvif = ifp; |
---|
2470 | m->m_pkthdr.len = len; |
---|
2471 | |
---|
2472 | dpaa_bp_recycle_frag(dpaa_bp, addr, count_ptr); |
---|
2473 | |
---|
2474 | return (m); |
---|
2475 | } |
---|
2476 | |
---|
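| /* RTEMS Rx path: convert the frame descriptor into an mbuf (chain), set |
---|
|  * the checksum flags when the parser validated L4, account for the buffer |
---|
|  * taken from the pool and pass the packet to the stack via if_input. |
---|
|  */ |
---|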
2477 | static void |
---|
2478 | dpaa_rx(struct net_device *net_dev, struct qman_portal *portal, |
---|
2479 | const struct dpaa_priv *priv, struct dpaa_percpu_priv *percpu_priv, |
---|
2480 | const struct qm_fd *fd, u32 fqid, int *count_ptr) |
---|
2481 | { |
---|
2482 | struct dpaa_bp *dpaa_bp; |
---|
2483 | u32 fd_status; |
---|
2484 | enum qm_fd_format fd_format; |
---|
2485 | struct mbuf *m; |
---|
2486 | struct ifnet *ifp; |
---|
2487 | |
---|
2488 | fd_status = be32_to_cpu(fd->status); |
---|
2489 | ifp = net_dev->ifp; |
---|
2490 | |
---|
2491 | if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) { |
---|
2492 | if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); |
---|
2493 | dpaa_fd_release(net_dev, fd); |
---|
2494 | return; |
---|
2495 | } |
---|
2496 | |
---|
2497 | dpaa_bp = dpaa_bpid2pool(fd->bpid); |
---|
2498 | fd_format = qm_fd_get_format(fd); |
---|
2499 | |
---|
2500 | if (likely(fd_format == qm_fd_contig)) { |
---|
2501 | m = contig_fd_to_mbuf(fd, ifp); |
---|
2502 | } else { |
---|
2503 | BSD_ASSERT(fd_format == qm_fd_sg); |
---|
2504 | m = sg_fd_to_mbuf(dpaa_bp, fd, ifp, count_ptr); |
---|
2505 | } |
---|
2506 | |
---|
2507 | if ((be32_to_cpu(fd->status) & FM_FD_STAT_L4CV) != 0) { |
---|
2508 | m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID | |
---|
2509 | CSUM_DATA_VALID | CSUM_PSEUDO_HDR; |
---|
2510 | m->m_pkthdr.csum_data = 0xffff; |
---|
2511 | } |
---|
2512 | |
---|
2513 | /* Account for either the contig buffer or the SGT buffer (depending on |
---|
2514 | * which case we were in) having been removed from the pool. |
---|
2515 | */ |
---|
2516 | (*count_ptr)--; |
---|
2517 | |
---|
2518 | if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); |
---|
2519 | (*ifp->if_input)(ifp, m); |
---|
2520 | } |
---|
2521 | #endif /* __rtems__ */ |
---|
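| /* Default Rx DQRR callback: refill the buffer pools if needed, drop |
---|
|  * errored frames, convert good frames into skbs (Linux) or mbufs (RTEMS) |
---|
|  * and deliver them to the network stack. |
---|
|  */ |
---|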
2522 | static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, |
---|
2523 | struct qman_fq *fq, |
---|
2524 | const struct qm_dqrr_entry *dq) |
---|
2525 | { |
---|
2526 | #ifndef __rtems__ |
---|
2527 | struct rtnl_link_stats64 *percpu_stats; |
---|
2528 | #endif /* __rtems__ */ |
---|
2529 | struct dpaa_percpu_priv *percpu_priv; |
---|
2530 | #ifndef __rtems__ |
---|
2531 | const struct qm_fd *fd = &dq->fd; |
---|
2532 | dma_addr_t addr = qm_fd_addr(fd); |
---|
2533 | enum qm_fd_format fd_format; |
---|
2534 | #endif /* __rtems__ */ |
---|
2535 | struct net_device *net_dev; |
---|
2536 | #ifndef __rtems__ |
---|
2537 | u32 fd_status; |
---|
2538 | #endif /* __rtems__ */ |
---|
2539 | struct dpaa_bp *dpaa_bp; |
---|
2540 | struct dpaa_priv *priv; |
---|
2541 | #ifndef __rtems__ |
---|
2542 | unsigned int skb_len; |
---|
2543 | struct sk_buff *skb; |
---|
2544 | #endif /* __rtems__ */ |
---|
2545 | int *count_ptr; |
---|
2546 | |
---|
2547 | #ifndef __rtems__ |
---|
2548 | fd_status = be32_to_cpu(fd->status); |
---|
2549 | fd_format = qm_fd_get_format(fd); |
---|
2550 | #endif /* __rtems__ */ |
---|
2551 | net_dev = ((struct dpaa_fq *)fq)->net_dev; |
---|
2552 | priv = netdev_priv(net_dev); |
---|
2553 | dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); |
---|
2554 | if (!dpaa_bp) |
---|
2555 | return qman_cb_dqrr_consume; |
---|
2556 | |
---|
2557 | #ifndef __rtems__ |
---|
2558 | /* Trace the Rx fd */ |
---|
2559 | trace_dpaa_rx_fd(net_dev, fq, &dq->fd); |
---|
2560 | #endif /* __rtems__ */ |
---|
2561 | |
---|
2562 | percpu_priv = this_cpu_ptr(priv->percpu_priv); |
---|
2563 | #ifndef __rtems__ |
---|
2564 | percpu_stats = &percpu_priv->stats; |
---|
2565 | #endif /* __rtems__ */ |
---|
2566 | |
---|
2567 | if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal))) |
---|
2568 | return qman_cb_dqrr_stop; |
---|
2569 | |
---|
2570 | /* Make sure we didn't run out of buffers */ |
---|
2571 | if (unlikely(dpaa_eth_refill_bpools(priv))) { |
---|
2572 | #ifdef __rtems__ |
---|
2573 | struct ifnet *ifp = net_dev->ifp; |
---|
2574 | if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); |
---|
2575 | #endif /* __rtems__ */ |
---|
2576 | dpaa_fd_release(net_dev, &dq->fd); |
---|
2577 | return qman_cb_dqrr_consume; |
---|
2578 | } |
---|
2579 | |
---|
2580 | #ifndef __rtems__ |
---|
2581 | if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) { |
---|
2582 | if (net_ratelimit()) |
---|
2583 | netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", |
---|
2584 | fd_status & FM_FD_STAT_RX_ERRORS); |
---|
2585 | |
---|
2586 | percpu_stats->rx_errors++; |
---|
2587 | dpaa_fd_release(net_dev, fd); |
---|
2588 | return qman_cb_dqrr_consume; |
---|
2589 | } |
---|
2590 | |
---|
2591 | dpaa_bp = dpaa_bpid2pool(fd->bpid); |
---|
2592 | if (!dpaa_bp) |
---|
2593 | return qman_cb_dqrr_consume; |
---|
2594 | |
---|
2595 | dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE); |
---|
2596 | |
---|
2597 | /* prefetch the first 64 bytes of the frame or the SGT start */ |
---|
2598 | prefetch(phys_to_virt(addr) + qm_fd_get_offset(fd)); |
---|
2599 | |
---|
2600 | fd_format = qm_fd_get_format(fd); |
---|
2601 | /* The only FD types that we may receive are contig and S/G */ |
---|
2602 | WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg)); |
---|
2603 | |
---|
2604 | /* Account for either the contig buffer or the SGT buffer (depending on |
---|
2605 | * which case we were in) having been removed from the pool. |
---|
2606 | */ |
---|
2607 | count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); |
---|
2608 | (*count_ptr)--; |
---|
2609 | |
---|
2610 | if (likely(fd_format == qm_fd_contig)) |
---|
2611 | skb = contig_fd_to_skb(priv, fd); |
---|
2613 | else |
---|
2614 | skb = sg_fd_to_skb(priv, fd); |
---|
2615 | if (!skb) |
---|
2616 | return qman_cb_dqrr_consume; |
---|
2617 | |
---|
2618 | skb->protocol = eth_type_trans(skb, net_dev); |
---|
2619 | |
---|
2620 | skb_len = skb->len; |
---|
2621 | |
---|
2622 | if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) |
---|
2623 | return qman_cb_dqrr_consume; |
---|
2624 | |
---|
2625 | percpu_stats->rx_packets++; |
---|
2626 | percpu_stats->rx_bytes += skb_len; |
---|
2627 | #else /* __rtems__ */ |
---|
2628 | count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); |
---|
2629 | dpaa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid, |
---|
2630 | count_ptr); |
---|
2631 | #endif /* __rtems__ */ |
---|
2632 | |
---|
2633 | return qman_cb_dqrr_consume; |
---|
2634 | } |
---|
2635 | |
---|
2636 | static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal, |
---|
2637 | struct qman_fq *fq, |
---|
2638 | const struct qm_dqrr_entry *dq) |
---|
2639 | { |
---|
2640 | struct dpaa_percpu_priv *percpu_priv; |
---|
2641 | struct net_device *net_dev; |
---|
2642 | struct dpaa_priv *priv; |
---|
2643 | |
---|
2644 | net_dev = ((struct dpaa_fq *)fq)->net_dev; |
---|
2645 | priv = netdev_priv(net_dev); |
---|
2646 | |
---|
2647 | percpu_priv = this_cpu_ptr(priv->percpu_priv); |
---|
2648 | |
---|
2649 | if (dpaa_eth_napi_schedule(percpu_priv, portal)) |
---|
2650 | return qman_cb_dqrr_stop; |
---|
2651 | |
---|
2652 | dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); |
---|
2653 | |
---|
2654 | return qman_cb_dqrr_consume; |
---|
2655 | } |
---|
2656 | |
---|
2657 | static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal, |
---|
2658 | struct qman_fq *fq, |
---|
2659 | const struct qm_dqrr_entry *dq) |
---|
2660 | { |
---|
2661 | struct dpaa_percpu_priv *percpu_priv; |
---|
2662 | struct net_device *net_dev; |
---|
2663 | struct dpaa_priv *priv; |
---|
2664 | |
---|
2665 | net_dev = ((struct dpaa_fq *)fq)->net_dev; |
---|
2666 | priv = netdev_priv(net_dev); |
---|
2667 | |
---|
2668 | #ifndef __rtems__ |
---|
2669 | /* Trace the fd */ |
---|
2670 | trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd); |
---|
2671 | #endif /* __rtems__ */ |
---|
2672 | |
---|
2673 | percpu_priv = this_cpu_ptr(priv->percpu_priv); |
---|
2674 | |
---|
2675 | if (dpaa_eth_napi_schedule(percpu_priv, portal)) |
---|
2676 | return qman_cb_dqrr_stop; |
---|
2677 | |
---|
2678 | dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); |
---|
2679 | |
---|
2680 | return qman_cb_dqrr_consume; |
---|
2681 | } |
---|
2682 | |
---|
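| /* Enqueue rejection (ERN) callback for egress FQs: count the rejection, |
---|
|  * update the drop statistics and free the frame that could not be sent. |
---|
|  */ |
---|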
2683 | static void egress_ern(struct qman_portal *portal, |
---|
2684 | struct qman_fq *fq, |
---|
2685 | const union qm_mr_entry *msg) |
---|
2686 | { |
---|
2687 | const struct qm_fd *fd = &msg->ern.fd; |
---|
2688 | struct dpaa_percpu_priv *percpu_priv; |
---|
2689 | const struct dpaa_priv *priv; |
---|
2690 | struct net_device *net_dev; |
---|
2691 | #ifndef __rtems__ |
---|
2692 | struct sk_buff *skb; |
---|
2693 | #else /* __rtems__ */ |
---|
2694 | struct ifnet *ifp; |
---|
2695 | #endif /* __rtems__ */ |
---|
2696 | |
---|
2697 | net_dev = ((struct dpaa_fq *)fq)->net_dev; |
---|
2698 | priv = netdev_priv(net_dev); |
---|
2699 | percpu_priv = this_cpu_ptr(priv->percpu_priv); |
---|
2700 | |
---|
2701 | #ifndef __rtems__ |
---|
2702 | percpu_priv->stats.tx_dropped++; |
---|
2703 | percpu_priv->stats.tx_fifo_errors++; |
---|
2704 | #else /* __rtems__ */ |
---|
2705 | ifp = net_dev->ifp; |
---|
2706 | if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); |
---|
2707 | #endif /* __rtems__ */ |
---|
2708 | count_ern(percpu_priv, msg); |
---|
2709 | |
---|
2710 | #ifndef __rtems__ |
---|
2711 | skb = dpaa_cleanup_tx_fd(priv, fd); |
---|
2712 | dev_kfree_skb_any(skb); |
---|
2713 | #else /* __rtems__ */ |
---|
2714 | dpaa_cleanup_tx_fd(ifp, fd); |
---|
2715 | #endif /* __rtems__ */ |
---|
2716 | } |
---|
2717 | |
---|
2718 | static const struct dpaa_fq_cbs dpaa_fq_cbs = { |
---|
2719 | .rx_defq = { .cb = { .dqrr = rx_default_dqrr } }, |
---|
2720 | .tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } }, |
---|
2721 | .rx_errq = { .cb = { .dqrr = rx_error_dqrr } }, |
---|
2722 | .tx_errq = { .cb = { .dqrr = conf_error_dqrr } }, |
---|
2723 | .egress_ern = { .cb = { .ern = egress_ern } } |
---|
2724 | }; |
---|
2725 | |
---|
2726 | static void dpaa_eth_napi_enable(struct dpaa_priv *priv) |
---|
2727 | { |
---|
2728 | #ifndef __rtems__ |
---|
2729 | struct dpaa_percpu_priv *percpu_priv; |
---|
2730 | int i; |
---|
2731 | |
---|
2732 | for_each_possible_cpu(i) { |
---|
2733 | percpu_priv = per_cpu_ptr(priv->percpu_priv, i); |
---|
2734 | |
---|
2735 | percpu_priv->np.down = 0; |
---|
2736 | napi_enable(&percpu_priv->np.napi); |
---|
2737 | } |
---|
2738 | #endif /* __rtems__ */ |
---|
2739 | } |
---|
2740 | |
---|
2741 | static void dpaa_eth_napi_disable(struct dpaa_priv *priv) |
---|
2742 | { |
---|
2743 | #ifndef __rtems__ |
---|
2744 | struct dpaa_percpu_priv *percpu_priv; |
---|
2745 | int i; |
---|
2746 | |
---|
2747 | for_each_possible_cpu(i) { |
---|
2748 | percpu_priv = per_cpu_ptr(priv->percpu_priv, i); |
---|
2749 | |
---|
2750 | percpu_priv->np.down = 1; |
---|
2751 | napi_disable(&percpu_priv->np.napi); |
---|
2752 | } |
---|
2753 | #endif /* __rtems__ */ |
---|
2754 | } |
---|
2755 | |
---|
2756 | #ifndef __rtems__ |
---|
2757 | static int dpaa_open(struct net_device *net_dev) |
---|
2758 | #else /* __rtems__ */ |
---|
2759 | int dpa_eth_priv_start(struct net_device *net_dev) |
---|
2760 | #endif /* __rtems__ */ |
---|
2761 | { |
---|
2762 | struct mac_device *mac_dev; |
---|
2763 | struct dpaa_priv *priv; |
---|
2764 | int err, i; |
---|
2765 | |
---|
2766 | priv = netdev_priv(net_dev); |
---|
2767 | mac_dev = priv->mac_dev; |
---|
2768 | dpaa_eth_napi_enable(priv); |
---|
2769 | |
---|
2770 | #ifndef __rtems__ |
---|
2771 | net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev); |
---|
2772 | if (!net_dev->phydev) { |
---|
2773 | netif_err(priv, ifup, net_dev, "init_phy() failed\n"); |
---|
2774 | err = -ENODEV; |
---|
2775 | goto phy_init_failed; |
---|
2776 | } |
---|
2777 | #endif /* __rtems__ */ |
---|
2778 | |
---|
2779 | for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { |
---|
2780 | err = fman_port_enable(mac_dev->port[i]); |
---|
2781 | if (err) |
---|
2782 | goto mac_start_failed; |
---|
2783 | } |
---|
2784 | |
---|
2785 | err = priv->mac_dev->start(mac_dev); |
---|
2786 | if (err < 0) { |
---|
2787 | netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err); |
---|
2788 | goto mac_start_failed; |
---|
2789 | } |
---|
2790 | |
---|
2791 | #ifndef __rtems__ |
---|
2792 | netif_tx_start_all_queues(net_dev); |
---|
2793 | #endif /* __rtems__ */ |
---|
2794 | |
---|
2795 | return 0; |
---|
2796 | |
---|
2797 | mac_start_failed: |
---|
2798 | for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) |
---|
2799 | fman_port_disable(mac_dev->port[i]); |
---|
2800 | |
---|
2801 | #ifndef __rtems__ |
---|
2802 | phy_init_failed: |
---|
2803 | #endif /* __rtems__ */ |
---|
2804 | dpaa_eth_napi_disable(priv); |
---|
2805 | |
---|
2806 | return err; |
---|
2807 | } |
---|
2808 | |
---|
2809 | #ifndef __rtems__ |
---|
2810 | static int dpaa_eth_stop(struct net_device *net_dev) |
---|
2811 | #else /* __rtems__ */ |
---|
2812 | int dpa_eth_priv_stop(struct net_device *net_dev) |
---|
2813 | #endif /* __rtems__ */ |
---|
2814 | { |
---|
2815 | struct dpaa_priv *priv; |
---|
2816 | int err; |
---|
2817 | |
---|
2818 | err = dpaa_stop(net_dev); |
---|
2819 | |
---|
2820 | priv = netdev_priv(net_dev); |
---|
2821 | dpaa_eth_napi_disable(priv); |
---|
2822 | |
---|
2823 | return err; |
---|
2824 | } |
---|
2825 | |
---|
2826 | #ifndef __rtems__ |
---|
2827 | static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) |
---|
2828 | { |
---|
2829 | if (!net_dev->phydev) |
---|
2830 | return -EINVAL; |
---|
2831 | return phy_mii_ioctl(net_dev->phydev, rq, cmd); |
---|
2832 | } |
---|
2833 | |
---|
2834 | static const struct net_device_ops dpaa_ops = { |
---|
2835 | .ndo_open = dpaa_open, |
---|
2836 | .ndo_start_xmit = dpaa_start_xmit, |
---|
2837 | .ndo_stop = dpaa_eth_stop, |
---|
2838 | .ndo_tx_timeout = dpaa_tx_timeout, |
---|
2839 | .ndo_get_stats64 = dpaa_get_stats64, |
---|
2840 | .ndo_set_mac_address = dpaa_set_mac_address, |
---|
2841 | .ndo_validate_addr = eth_validate_addr, |
---|
2842 | .ndo_set_rx_mode = dpaa_set_rx_mode, |
---|
2843 | .ndo_do_ioctl = dpaa_ioctl, |
---|
2844 | .ndo_setup_tc = dpaa_setup_tc, |
---|
2845 | }; |
---|
2846 | |
---|
2847 | static int dpaa_napi_add(struct net_device *net_dev) |
---|
2848 | { |
---|
2849 | struct dpaa_priv *priv = netdev_priv(net_dev); |
---|
2850 | struct dpaa_percpu_priv *percpu_priv; |
---|
2851 | int cpu; |
---|
2852 | |
---|
2853 | for_each_possible_cpu(cpu) { |
---|
2854 | percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); |
---|
2855 | |
---|
2856 | netif_napi_add(net_dev, &percpu_priv->np.napi, |
---|
2857 | dpaa_eth_poll, NAPI_POLL_WEIGHT); |
---|
2858 | } |
---|
2859 | |
---|
2860 | return 0; |
---|
2861 | } |
---|
2862 | #endif /* __rtems__ */ |
---|
2863 | |
---|
2864 | static void dpaa_napi_del(struct net_device *net_dev) |
---|
2865 | { |
---|
2866 | #ifndef __rtems__ |
---|
2867 | struct dpaa_priv *priv = netdev_priv(net_dev); |
---|
2868 | struct dpaa_percpu_priv *percpu_priv; |
---|
2869 | int cpu; |
---|
2870 | |
---|
2871 | for_each_possible_cpu(cpu) { |
---|
2872 | percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); |
---|
2873 | |
---|
2874 | netif_napi_del(&percpu_priv->np.napi); |
---|
2875 | } |
---|
2876 | #endif /* __rtems__ */ |
---|
2877 | } |
---|
2878 | |
---|
2879 | static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp, |
---|
2880 | struct bm_buffer *bmb) |
---|
2881 | { |
---|
2882 | dma_addr_t addr = bm_buf_addr(bmb); |
---|
2883 | |
---|
2884 | #ifndef __rtems__ |
---|
2885 | dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE); |
---|
2886 | |
---|
2887 | skb_free_frag(phys_to_virt(addr)); |
---|
2888 | #else /* __rtems__ */ |
---|
2889 | BSD_ASSERT(0); |
---|
2890 | m_freem(dpaa_bp_addr_to_mbuf(addr)); |
---|
2891 | #endif /* __rtems__ */ |
---|
2892 | } |
---|
2893 | |
---|
2894 | /* Alloc the dpaa_bp struct and configure default values */ |
---|
2895 | static struct dpaa_bp *dpaa_bp_alloc(struct device *dev) |
---|
2896 | { |
---|
2897 | struct dpaa_bp *dpaa_bp; |
---|
2898 | |
---|
2899 | dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL); |
---|
2900 | if (!dpaa_bp) |
---|
2901 | return ERR_PTR(-ENOMEM); |
---|
2902 | |
---|
2903 | dpaa_bp->bpid = FSL_DPAA_BPID_INV; |
---|
2904 | dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count); |
---|
2905 | #ifndef __rtems__ |
---|
2906 | dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT; |
---|
2907 | #else /* __rtems__ */ |
---|
2908 | dpaa_bp->config_count = 8; |
---|
2909 | #endif /* __rtems__ */ |
---|
2910 | |
---|
2911 | dpaa_bp->seed_cb = dpaa_bp_seed; |
---|
2912 | dpaa_bp->free_buf_cb = dpaa_bp_free_pf; |
---|
2913 | |
---|
2914 | return dpaa_bp; |
---|
2915 | } |
---|
2916 | |
---|
/* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR.
 * We won't be sending congestion notifications to FMan; for now, we just use
 * this CGR to generate enqueue rejections to FMan in order to drop the frames
 * before they reach our ingress queues and eat up memory.
 */
static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
{
	struct qm_mcc_initcgr initcgr;
	u32 cs_th;
	int err;

	err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
	if (err < 0) {
		if (netif_msg_drv(priv))
			pr_err("Error %d allocating CGR ID\n", err);
		goto out_error;
	}

	/* Enable CS TD, but disable Congestion State Change Notifications. */
	memset(&initcgr, 0, sizeof(initcgr));
	initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
	initcgr.cgr.cscn_en = QM_CGR_EN;
	cs_th = DPAA_INGRESS_CS_THRESHOLD;
	qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);

	initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
	initcgr.cgr.cstd_en = QM_CGR_EN;

	/* This CGR will be associated with the SWP affined to the current CPU.
	 * However, we'll place all our ingress FQs in it.
	 */
	err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
			      &initcgr);
	if (err < 0) {
		if (netif_msg_drv(priv))
			pr_err("Error %d creating ingress CGR with ID %d\n",
			       err, priv->ingress_cgr.cgrid);
		qman_release_cgrid(priv->ingress_cgr.cgrid);
		goto out_error;
	}
	if (netif_msg_drv(priv))
		pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
			 priv->ingress_cgr.cgrid, priv->mac_dev->addr);

	priv->use_ingress_cgr = true;

out_error:
	return err;
}

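/* The congestion state threshold is not stored as a plain byte count:
 * qm_cgr_cs_thres_set64() converts DPAA_INGRESS_CS_THRESHOLD into QMan's
 * mantissa/exponent representation (roughly TA * 2^Tn), and the final "1"
 * argument asks for the conversion to round up, so the effective tail-drop
 * threshold is never below the requested value. As an illustration only: a
 * requested byte count that falls between two representable values is bumped
 * to the next representable threshold rather than silently lowered.
 */
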
#ifndef __rtems__
static const struct of_device_id dpaa_match[];
#endif /* __rtems__ */

static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
{
	u16 headroom;

	/* The frame headroom must accommodate:
	 * - the driver private data area
	 * - parse results, hash results, timestamp if selected
	 * If either hash results or the timestamp is selected, both will be
	 * copied to/from the frame headroom, as TS is located between PR and
	 * HR in the IC and the IC copy size has a granularity of 16 bytes
	 * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM)
	 *
	 * Also make sure the headroom is a multiple of data_align bytes
	 */
	headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
			 DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);

	return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom,
					      DPAA_FD_DATA_ALIGNMENT) :
					headroom;
}

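/* Worked example of the headroom computation above, with illustrative sizes
 * (the real values come from the DPAA_*_SIZE constants and the buffer layout
 * chosen at probe time): assuming priv_data_size = 16, parse results = 32,
 * timestamp = 8 and hash results = 8, the raw sum is 64 bytes; with a 16-byte
 * DPAA_FD_DATA_ALIGNMENT, ALIGN(64, 16) = 64 and no padding is added. If the
 * sum were 58 instead, ALIGN(58, 16) would round it up to 64. A zero
 * DPAA_FD_DATA_ALIGNMENT skips the rounding entirely and returns the raw sum.
 */
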
#ifndef __rtems__
static int dpaa_eth_probe(struct platform_device *pdev)
#else /* __rtems__ */
int
dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev)
#endif /* __rtems__ */
{
	struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
	struct dpaa_percpu_priv *percpu_priv;
	struct net_device *net_dev = NULL;
	struct dpaa_fq *dpaa_fq, *tmp;
	struct dpaa_priv *priv = NULL;
	struct fm_port_fqs port_fqs;
#ifndef __rtems__
	struct mac_device *mac_dev;
#endif /* __rtems__ */
	int err = 0, i, channel;
	struct device *dev;

	dev = &pdev->dev;

#ifndef __rtems__
	/* Allocate this early, so we can store relevant information in
	 * the private area
	 */
	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
	if (!net_dev) {
		dev_err(dev, "alloc_etherdev_mq() failed\n");
		err = -ENOMEM;
		goto alloc_etherdev_mq_failed;
	}
#else /* __rtems__ */
	net_dev = &mac_dev->net_dev;
	net_dev->priv = malloc(sizeof(*priv), M_KMALLOC, M_WAITOK | M_ZERO);
#endif /* __rtems__ */

	/* Do this here, so we can be verbose early */
#ifndef __rtems__
	SET_NETDEV_DEV(net_dev, dev);
#endif /* __rtems__ */
	dev_set_drvdata(dev, net_dev);

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;

#ifndef __rtems__
	priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);

	mac_dev = dpaa_mac_dev_get(pdev);
	if (IS_ERR(mac_dev)) {
		dev_err(dev, "dpaa_mac_dev_get() failed\n");
		err = PTR_ERR(mac_dev);
		goto mac_probe_failed;
	}

	/* If fsl_fm_max_frm is set to a higher value than the common 1500,
	 * we choose conservatively and let the user explicitly set a higher
	 * MTU via ifconfig. Otherwise, the user may end up with different MTUs
	 * in the same LAN.
	 * If, on the other hand, fsl_fm_max_frm has been chosen below 1500,
	 * start with the maximum allowed.
	 */
	net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN);

	netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
		   net_dev->mtu);
#endif /* __rtems__ */
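	/* ETH_DATA_LEN is 1500, so the initial MTU above is capped at the
	 * conventional Ethernet payload size even when fsl_fm_max_frm allows
	 * jumbo frames; a larger MTU has to be configured explicitly later.
	 */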

	priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */
	priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */

#ifndef __rtems__
	/* device used for DMA mapping */
	set_dma_ops(dev, get_dma_ops(&pdev->dev));
	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
	if (err) {
		dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
		goto dev_mask_failed;
	}
#endif /* __rtems__ */

	/* bp init */
	for (i = 0; i < DPAA_BPS_NUM; i++) {
		dpaa_bps[i] = dpaa_bp_alloc(dev);
		if (IS_ERR(dpaa_bps[i]))
			return PTR_ERR(dpaa_bps[i]);
		/* the raw size of the buffers used for reception */
		dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
		/* avoid runtime computations by keeping the usable size here */
		dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
		dpaa_bps[i]->dev = dev;

		err = dpaa_bp_alloc_pool(dpaa_bps[i]);
		if (err < 0) {
			dpaa_bps_free(priv);
			priv->dpaa_bps[i] = NULL;
			goto bp_create_failed;
		}
		priv->dpaa_bps[i] = dpaa_bps[i];
	}

	INIT_LIST_HEAD(&priv->dpaa_fq_list);

	memset(&port_fqs, 0, sizeof(port_fqs));

	err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs);
	if (err < 0) {
		dev_err(dev, "dpaa_alloc_all_fqs() failed\n");
		goto fq_probe_failed;
	}

	priv->mac_dev = mac_dev;

#ifdef __rtems__
	if (mac_dev->use_dedicated_portal) {
		struct qman_portal *portal;

		portal = qman_get_dedicated_portal(0);
		BSD_ASSERT(portal != NULL);
		mac_dev->portal = portal;
		channel = qman_portal_get_channel(portal);
		priv->channel = (u16)channel;
	} else {
#endif /* __rtems__ */
	channel = dpaa_get_channel();
	if (channel < 0) {
		dev_err(dev, "dpaa_get_channel() failed\n");
		err = channel;
		goto get_channel_failed;
	}

	priv->channel = (u16)channel;

	/* Start a thread that will walk the CPUs with affine portals
	 * and add this pool channel to each CPU's dequeue mask.
	 */
	dpaa_eth_add_channel(priv->channel);
#ifdef __rtems__
	}
#endif /* __rtems__ */
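	/* Two channel-selection paths converge here: on RTEMS, a MAC may own a
	 * dedicated QMan software portal and use that portal's channel, so only
	 * that portal dequeues the ingress traffic; otherwise a pool channel is
	 * allocated and dpaa_eth_add_channel() adds it to the dequeue mask of
	 * every CPU's affine portal, so any CPU may service it. In both cases
	 * the chosen channel is recorded in priv->channel for the queue setup
	 * that follows.
	 */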

	dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);

	/* Create a congestion group for this netdev, with
	 * dynamically-allocated CGR ID.
	 * Must be executed after probing the MAC, but before
	 * assigning the egress FQs to the CGRs.
	 */
	err = dpaa_eth_cgr_init(priv);
	if (err < 0) {
		dev_err(dev, "Error initializing CGR\n");
		goto tx_cgr_init_failed;
	}

	err = dpaa_ingress_cgr_init(priv);
	if (err < 0) {
		dev_err(dev, "Error initializing ingress CGR\n");
		goto rx_cgr_init_failed;
	}

	/* Add the FQs to the interface, and make them active */
	list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) {
		err = dpaa_fq_init(dpaa_fq, false);
		if (err < 0)
			goto fq_alloc_failed;
	}

	priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]);
	priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);

	/* All real interfaces need their ports initialized */
	err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
				  &priv->buf_layout[0], dev);
	if (err)
		goto init_ports_failed;

	priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
	if (!priv->percpu_priv) {
		dev_err(dev, "devm_alloc_percpu() failed\n");
		err = -ENOMEM;
		goto alloc_percpu_failed;
	}
#ifndef __rtems__
	for_each_possible_cpu(i) {
#else /* __rtems__ */
	for (i = 0; i < (int)rtems_get_processor_count(); ++i) {
#endif /* __rtems__ */
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		memset(percpu_priv, 0, sizeof(*percpu_priv));
	}

#ifndef __rtems__
	priv->num_tc = 1;
	netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);

	/* Initialize NAPI */
	err = dpaa_napi_add(net_dev);
	if (err < 0)
		goto napi_add_failed;

	err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout);
	if (err < 0)
		goto netdev_init_failed;

	dpaa_eth_sysfs_init(&net_dev->dev);

	netif_info(priv, probe, net_dev, "Probed interface %s\n",
		   net_dev->name);
#endif /* __rtems__ */

	return 0;

#ifndef __rtems__
netdev_init_failed:
napi_add_failed:
#endif /* __rtems__ */
	dpaa_napi_del(net_dev);
alloc_percpu_failed:
init_ports_failed:
#ifndef __rtems__
	dpaa_fq_free(dev, &priv->dpaa_fq_list);
#endif /* __rtems__ */
fq_alloc_failed:
#ifndef __rtems__
	qman_delete_cgr_safe(&priv->ingress_cgr);
	qman_release_cgrid(priv->ingress_cgr.cgrid);
#endif /* __rtems__ */
rx_cgr_init_failed:
#ifndef __rtems__
	qman_delete_cgr_safe(&priv->cgr_data.cgr);
	qman_release_cgrid(priv->cgr_data.cgr.cgrid);
#endif /* __rtems__ */
tx_cgr_init_failed:
get_channel_failed:
	dpaa_bps_free(priv);
bp_create_failed:
fq_probe_failed:
#ifndef __rtems__
dev_mask_failed:
mac_probe_failed:
#endif /* __rtems__ */
	dev_set_drvdata(dev, NULL);
#ifndef __rtems__
	free_netdev(net_dev);
alloc_etherdev_mq_failed:
	for (i = 0; i < DPAA_BPS_NUM && dpaa_bps[i]; i++) {
		if (atomic_read(&dpaa_bps[i]->refs) == 0)
			devm_kfree(dev, dpaa_bps[i]);
	}
#else /* __rtems__ */
	BSD_ASSERT(0);
#endif /* __rtems__ */
	return err;
}

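/* The long label chain at the end of dpaa_eth_probe() follows the usual
 * goto-unwind convention: a failure at step N jumps to the label that releases
 * only what steps 1..N-1 acquired, then falls through to the common return.
 * A minimal, generic sketch of the pattern (hypothetical helpers, not driver
 * code):
 *
 *	int probe(void)
 *	{
 *		int err;
 *
 *		err = acquire_first();
 *		if (err)
 *			goto out;
 *		err = acquire_second();
 *		if (err)
 *			goto err_first;
 *		return 0;
 *	err_first:
 *		release_first();
 *	out:
 *		return err;
 *	}
 *
 * The interleaved #ifndef __rtems__ guards above simply drop the cleanup
 * steps that the RTEMS build never performed in the first place.
 */
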
#ifndef __rtems__
static int dpaa_remove(struct platform_device *pdev)
{
	struct net_device *net_dev;
	struct dpaa_priv *priv;
	struct device *dev;
	int err;

	dev = &pdev->dev;
	net_dev = dev_get_drvdata(dev);

	priv = netdev_priv(net_dev);

	dpaa_eth_sysfs_remove(dev);

	dev_set_drvdata(dev, NULL);
	unregister_netdev(net_dev);

	err = dpaa_fq_free(dev, &priv->dpaa_fq_list);

	qman_delete_cgr_safe(&priv->ingress_cgr);
	qman_release_cgrid(priv->ingress_cgr.cgrid);
	qman_delete_cgr_safe(&priv->cgr_data.cgr);
	qman_release_cgrid(priv->cgr_data.cgr.cgrid);

	dpaa_napi_del(net_dev);

	dpaa_bps_free(priv);

	free_netdev(net_dev);

	return err;
}
#endif /* __rtems__ */

#ifndef __rtems__
static struct platform_device_id dpaa_devtype[] = {
	{
		.name = "dpaa-ethernet",
		.driver_data = 0,
	}, {
	}
};
MODULE_DEVICE_TABLE(platform, dpaa_devtype);

static struct platform_driver dpaa_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = dpaa_devtype,
	.probe = dpaa_eth_probe,
	.remove = dpaa_remove
};

static int __init dpaa_load(void)
{
	int err;

	pr_debug("FSL DPAA Ethernet driver\n");

	/* initialize dpaa_eth mirror values */
	dpaa_rx_extra_headroom = fman_get_rx_extra_headroom();
	dpaa_max_frm = fman_get_max_frm();

	err = platform_driver_register(&dpaa_driver);
	if (err < 0)
		pr_err("Error, platform_driver_register() = %d\n", err);

	return err;
}
module_init(dpaa_load);

static void __exit dpaa_unload(void)
{
	platform_driver_unregister(&dpaa_driver);

	/* Only one channel is used and needs to be released after all
	 * interfaces are removed
	 */
	dpaa_release_channel();
}
module_exit(dpaa_unload);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL DPAA Ethernet driver");
#endif /* __rtems__ */