#include <machine/rtems-bsd-kernel-space.h>

#include <rtems/bsd/local/opt_dpaa.h>

/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/etherdevice.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#ifndef __rtems__
#include <linux/highmem.h>
#include <linux/sort.h>
#endif /* __rtems__ */
#include <soc/fsl/qman.h>
#ifndef __rtems__
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#endif /* __rtems__ */
#include "dpaa_eth.h"
#include "dpaa_eth_common.h"
#include "mac.h"

/* Size in bytes of the FQ taildrop threshold */
#define DPA_FQ_TD 0x200000

#define DPAA_CS_THRESHOLD_1G 0x06000000
/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
 * The size in bytes of the egress Congestion State notification threshold on
 * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
 * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
 * and the larger the frame size, the more acute the problem.
 * So we have to find a balance between these factors:
 * - avoiding the device staying congested for a prolonged time (risking
 *   that the netdev watchdog fires - see also the tx_timeout module param);
 * - affecting performance of protocols such as TCP, which otherwise
 *   behave well under the congestion notification mechanism;
 * - preventing the Tx cores from tightly-looping (as if the congestion
 *   threshold was too low to be effective);
 * - running out of memory if the CS threshold is set too high.
 */

#define DPAA_CS_THRESHOLD_10G 0x10000000
/* The size in bytes of the egress Congestion State notification threshold on
 * 10G ports, range 0x1000 .. 0x10000000
 */

static struct dpa_bp *dpa_bp_array[64];

#ifndef __rtems__
int dpa_max_frm;

int dpa_rx_extra_headroom;
#endif /* __rtems__ */

static const struct fqid_cell tx_confirm_fqids[] = {
	{0, DPAA_ETH_TX_QUEUES}
};

static const struct fqid_cell default_fqids[][3] = {
	[RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} },
	[TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} }
};
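
/* Each fqid_cell above is a { start, count } pair: "count" frame queues
 * beginning at FQID "start". A start of 0 means the FQIDs are assigned
 * dynamically (see dpa_fq_alloc() and the QMAN_FQ_FLAG_DYNAMIC_FQID
 * handling in dpa_fq_init() below). Per port type, the three ranges are,
 * in order: the error queue, the default queue and the Rx/Tx queue range.
 */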

#ifndef __rtems__
int dpa_netdev_init(struct net_device *net_dev,
		    const u8 *mac_addr,
		    u16 tx_timeout)
{
	int err;
	struct dpa_priv_s *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;

	net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	/* we do not want shared skbs on TX */
	net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;

	net_dev->features |= net_dev->hw_features;
	net_dev->vlan_features = net_dev->features;

	memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
	memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);

	net_dev->ethtool_ops = &dpa_ethtool_ops;

	net_dev->needed_headroom = priv->tx_headroom;
	net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);

	/* start without the RUNNING flag, phylib controls it later */
	netif_carrier_off(net_dev);

	err = register_netdev(net_dev);
	if (err < 0) {
		dev_err(dev, "register_netdev() = %d\n", err);
		return err;
	}

	return 0;
}
#endif /* __rtems__ */

int dpa_start(struct net_device *net_dev)
{
	int err, i;
	struct dpa_priv_s *priv;
	struct mac_device *mac_dev;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

#ifndef __rtems__
	err = mac_dev->init_phy(net_dev, priv->mac_dev);
	if (err < 0) {
		netif_err(priv, ifup, net_dev, "init_phy() = %d\n", err);
		return err;
	}
#endif /* __rtems__ */

	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
		err = fman_port_enable(mac_dev->port[i]);
		if (err)
			goto mac_start_failed;
	}

	err = priv->mac_dev->start(mac_dev);
	if (err < 0) {
		netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
		goto mac_start_failed;
	}

#ifndef __rtems__
	netif_tx_start_all_queues(net_dev);
#endif /* __rtems__ */

	return 0;

mac_start_failed:
	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
		fman_port_disable(mac_dev->port[i]);

	return err;
}

int dpa_stop(struct net_device *net_dev)
{
	int i, err, error;
	struct dpa_priv_s *priv;
	struct mac_device *mac_dev;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

#ifndef __rtems__
	netif_tx_stop_all_queues(net_dev);
#endif /* __rtems__ */
	/* Allow the Fman (Tx) port to process in-flight frames before we
	 * try switching it off.
	 */
	usleep_range(5000, 10000);

	err = mac_dev->stop(mac_dev);
	if (err < 0)
		netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
			  err);

	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
		error = fman_port_disable(mac_dev->port[i]);
		if (error)
			err = error;
	}

#ifndef __rtems__
	if (mac_dev->phy_dev)
		phy_disconnect(mac_dev->phy_dev);
	mac_dev->phy_dev = NULL;
#endif /* __rtems__ */

	return err;
}
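
/* dpa_start() and dpa_stop() are presumably wired up as the net_device's
 * .ndo_open and .ndo_stop callbacks (that hookup lives outside this file).
 * Note the deliberate ordering: on start, the Fman ports are enabled before
 * the MAC is started; on stop, the MAC is stopped first and the ports are
 * disabled last, so frames never hit a half-configured pipeline.
 */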

#ifndef __rtems__
void dpa_timeout(struct net_device *net_dev)
{
	const struct dpa_priv_s *priv;
	struct dpa_percpu_priv_s *percpu_priv;

	priv = netdev_priv(net_dev);
	percpu_priv = raw_cpu_ptr(priv->percpu_priv);

	netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
		   jiffies_to_msecs(jiffies - net_dev->trans_start));

	percpu_priv->stats.tx_errors++;
}

/* Calculates the statistics for the given device by adding the statistics
 * collected by each CPU.
 */
struct rtnl_link_stats64 *dpa_get_stats64(struct net_device *net_dev,
					  struct rtnl_link_stats64 *stats)
{
	struct dpa_priv_s *priv = netdev_priv(net_dev);
	u64 *cpustats;
	u64 *netstats = (u64 *)stats;
	int i, j;
	struct dpa_percpu_priv_s *percpu_priv;
	int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);

	for_each_possible_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

		cpustats = (u64 *)&percpu_priv->stats;

		for (j = 0; j < numstats; j++)
			netstats[j] += cpustats[j];
	}

	return stats;
}
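
/* The summation above treats both structures as flat u64 arrays; this is
 * only safe because struct rtnl_link_stats64 consists exclusively of __u64
 * counters and the per-CPU stats mirror its layout. For illustration only,
 * the equivalent field-wise form would read:
 *
 *	stats->rx_packets += percpu_priv->stats.rx_packets;
 *	stats->tx_packets += percpu_priv->stats.tx_packets;
 *	...and so on for every counter in the structure.
 */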
#endif /* __rtems__ */

int dpa_change_mtu(struct net_device *net_dev, int new_mtu)
{
	const int max_mtu = dpa_get_max_mtu();

	/* Make sure we don't exceed the Ethernet controller's MAXFRM;
	 * 68 is the minimum link MTU required by IPv4 (RFC 791).
	 */
	if (new_mtu < 68 || new_mtu > max_mtu) {
		netdev_err(net_dev, "Invalid L3 mtu %d (must be between %d and %d).\n",
			   new_mtu, 68, max_mtu);
		return -EINVAL;
	}
#ifndef __rtems__
	net_dev->mtu = new_mtu;
#endif /* __rtems__ */

	return 0;
}

#ifndef __rtems__
/* .ndo_init callback */
int dpa_ndo_init(struct net_device *net_dev)
{
	/* If fsl_fm_max_frm is set to a value higher than the common 1500,
	 * we choose conservatively and let the user explicitly set a higher
	 * MTU via ifconfig. Otherwise, the user may end up with different MTUs
	 * in the same LAN.
	 * If on the other hand fsl_fm_max_frm has been chosen below 1500,
	 * start with the maximum allowed.
	 */
	int init_mtu = min(dpa_get_max_mtu(), ETH_DATA_LEN);

	netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
		   init_mtu);
	net_dev->mtu = init_mtu;

	return 0;
}

int dpa_set_features(struct net_device *dev, netdev_features_t features)
{
	/* Not much to do here for now */
	dev->features = features;
	return 0;
}

netdev_features_t dpa_fix_features(struct net_device *dev,
				   netdev_features_t features)
{
	netdev_features_t unsupported_features = 0;

	/* In theory we should never be requested to enable features that
	 * we didn't set in netdev->features and netdev->hw_features at probe
	 * time, but double check just to be on the safe side.
	 * We don't support enabling Rx csum through ethtool yet
	 */
	unsupported_features |= NETIF_F_RXCSUM;

	features &= ~unsupported_features;

	return features;
}

int dpa_remove(struct platform_device *pdev)
{
	int err;
	struct device *dev;
	struct net_device *net_dev;
	struct dpa_priv_s *priv;

	dev = &pdev->dev;
	net_dev = dev_get_drvdata(dev);

	priv = netdev_priv(net_dev);

	dpaa_eth_sysfs_remove(dev);

	dev_set_drvdata(dev, NULL);
	unregister_netdev(net_dev);

	err = dpa_fq_free(dev, &priv->dpa_fq_list);

	qman_delete_cgr_safe(&priv->ingress_cgr);
	qman_release_cgrid(priv->ingress_cgr.cgrid);
	qman_delete_cgr_safe(&priv->cgr_data.cgr);
	qman_release_cgrid(priv->cgr_data.cgr.cgrid);

	dpa_private_napi_del(net_dev);

	dpa_bp_free(priv);

	if (priv->buf_layout)
		devm_kfree(dev, priv->buf_layout);

	free_netdev(net_dev);

	return err;
}

struct mac_device *dpa_mac_dev_get(struct platform_device *pdev)
{
	struct device *dpa_dev, *dev;
	struct device_node *mac_node;
	struct platform_device *of_dev;
	struct mac_device *mac_dev;
	struct dpaa_eth_data *eth_data;

	dpa_dev = &pdev->dev;
	eth_data = dpa_dev->platform_data;
	if (!eth_data)
		return ERR_PTR(-ENODEV);

	mac_node = eth_data->mac_node;

	of_dev = of_find_device_by_node(mac_node);
	if (!of_dev) {
		dev_err(dpa_dev, "of_find_device_by_node(%s) failed\n",
			mac_node->full_name);
		of_node_put(mac_node);
		return ERR_PTR(-EINVAL);
	}
	of_node_put(mac_node);

	dev = &of_dev->dev;

	mac_dev = dev_get_drvdata(dev);
	if (!mac_dev) {
		dev_err(dpa_dev, "dev_get_drvdata(%s) failed\n",
			dev_name(dev));
		return ERR_PTR(-EINVAL);
	}

	return mac_dev;
}

int dpa_mac_hw_index_get(struct platform_device *pdev)
{
	struct device *dpa_dev;
	struct dpaa_eth_data *eth_data;

	dpa_dev = &pdev->dev;
	eth_data = dpa_dev->platform_data;

	return eth_data->mac_hw_id;
}

int dpa_mac_fman_index_get(struct platform_device *pdev)
{
	struct device *dpa_dev;
	struct dpaa_eth_data *eth_data;

	dpa_dev = &pdev->dev;
	eth_data = dpa_dev->platform_data;

	return eth_data->fman_hw_id;
}

int dpa_set_mac_address(struct net_device *net_dev, void *addr)
{
	const struct dpa_priv_s *priv;
	int err;
	struct mac_device *mac_dev;

	priv = netdev_priv(net_dev);

	err = eth_mac_addr(net_dev, addr);
	if (err < 0) {
		netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
		return err;
	}

	mac_dev = priv->mac_dev;

	err = mac_dev->change_addr(mac_dev->fman_mac,
				   (enet_addr_t *)net_dev->dev_addr);
	if (err < 0) {
		netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
			  err);
		return err;
	}

	return 0;
}

void dpa_set_rx_mode(struct net_device *net_dev)
{
	int err;
	const struct dpa_priv_s *priv;

	priv = netdev_priv(net_dev);

	if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
		priv->mac_dev->promisc = !priv->mac_dev->promisc;
		err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
						 priv->mac_dev->promisc);
		if (err < 0)
			netif_err(priv, drv, net_dev,
				  "mac_dev->set_promisc() = %d\n",
				  err);
	}

	err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
	if (err < 0)
		netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
			  err);
}
#endif /* __rtems__ */

void dpa_set_buffers_layout(struct mac_device *mac_dev,
			    struct dpa_buffer_layout_s *layout)
{
	/* Rx */
	layout[RX].priv_data_size = (u16)DPA_RX_PRIV_DATA_SIZE;
	layout[RX].parse_results = true;
	layout[RX].hash_results = true;
	layout[RX].data_align = DPA_FD_DATA_ALIGNMENT;

	/* Tx */
	layout[TX].priv_data_size = DPA_TX_PRIV_DATA_SIZE;
	layout[TX].parse_results = true;
	layout[TX].hash_results = true;
	layout[TX].data_align = DPA_FD_DATA_ALIGNMENT;
}

int dpa_bp_alloc(struct dpa_bp *dpa_bp)
{
	int err;
	struct bman_pool_params bp_params;
#ifndef __rtems__
	struct platform_device *pdev;
#endif /* __rtems__ */

	if (dpa_bp->size == 0 || dpa_bp->config_count == 0) {
		pr_err("Buffer pool is not properly initialized! Missing size or initial number of buffers\n");
		return -EINVAL;
	}

	memset(&bp_params, 0, sizeof(struct bman_pool_params));

	/* If the pool is already specified, we only create one per bpid */
	if (dpa_bpid2pool_use(dpa_bp->bpid))
		return 0;

	if (dpa_bp->bpid == 0)
		bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID;
	else
		bp_params.bpid = dpa_bp->bpid;

	dpa_bp->pool = bman_new_pool(&bp_params);
	if (!dpa_bp->pool) {
		pr_err("bman_new_pool() failed\n");
		return -ENODEV;
	}

	dpa_bp->bpid = (u8)bman_get_params(dpa_bp->pool)->bpid;

#ifndef __rtems__
	pdev = platform_device_register_simple("DPAA_bpool",
					       dpa_bp->bpid, NULL, 0);
	if (IS_ERR(pdev)) {
		err = PTR_ERR(pdev);
		goto pdev_register_failed;
	}

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
	if (err)
		goto pdev_mask_failed;

	dpa_bp->dev = &pdev->dev;
#endif /* __rtems__ */

	if (dpa_bp->seed_cb) {
		err = dpa_bp->seed_cb(dpa_bp);
		if (err)
			goto pool_seed_failed;
	}

	dpa_bpid2pool_map(dpa_bp->bpid, dpa_bp);

	return 0;

pool_seed_failed:
#ifndef __rtems__
pdev_mask_failed:
	platform_device_unregister(pdev);
pdev_register_failed:
#endif /* __rtems__ */
	bman_free_pool(dpa_bp->pool);

	return err;
}
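
/* A minimal usage sketch (hypothetical caller; names and values are purely
 * illustrative). The caller fills in size/config_count and optionally a
 * seed callback before handing the pool over:
 *
 *	struct dpa_bp *bp = &priv->dpa_bp[0];
 *
 *	bp->size = 2048;		// buffer size in bytes
 *	bp->config_count = 64;		// initial number of buffers to seed
 *	bp->seed_cb = my_seed_cb;	// assumed caller-provided callback
 *	err = dpa_bp_alloc(bp);		// shares an existing pool if the
 *					// bpid is already mapped
 *
 * The matching release path is dpa_bp_free(), which drops the reference
 * taken here.
 */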

void dpa_bp_drain(struct dpa_bp *bp)
{
	int ret;
	u8 num = 8;

	do {
		struct bm_buffer bmb[8];
		int i;

		ret = bman_acquire(bp->pool, bmb, num, 0);
		if (ret < 0) {
			if (num == 8) {
				/* we have less than 8 buffers left;
				 * drain them one by one
				 */
				num = 1;
				ret = 1;
				continue;
			} else {
				/* Pool is fully drained */
				break;
			}
		}

		for (i = 0; i < num; i++) {
			dma_addr_t addr = bm_buf_addr(&bmb[i]);

#ifndef __rtems__
			dma_unmap_single(bp->dev, addr, bp->size,
					 DMA_BIDIRECTIONAL);
#endif /* __rtems__ */

			bp->free_buf_cb(phys_to_virt(addr));
		}
	} while (ret > 0);
}

static void _dpa_bp_free(struct dpa_bp *dpa_bp)
{
	struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid);

	/* the mapping between bpid and dpa_bp is done very late in the
	 * allocation procedure; if something failed before the mapping, the
	 * bp was never configured, so there is nothing to clean up here
	 */
	if (!bp)
		return;

	if (!atomic_dec_and_test(&bp->refs))
		return;

	if (bp->free_buf_cb)
		dpa_bp_drain(bp);

	dpa_bp_array[bp->bpid] = NULL;
	bman_free_pool(bp->pool);

#ifndef __rtems__
	if (bp->dev)
		platform_device_unregister(to_platform_device(bp->dev));
#endif /* __rtems__ */
}

void dpa_bp_free(struct dpa_priv_s *priv)
{
	int i;

	for (i = 0; i < priv->bp_count; i++)
		_dpa_bp_free(&priv->dpa_bp[i]);
}

struct dpa_bp *dpa_bpid2pool(int bpid)
{
	return dpa_bp_array[bpid];
}

void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp)
{
	dpa_bp_array[bpid] = dpa_bp;
	atomic_set(&dpa_bp->refs, 1);
}

bool dpa_bpid2pool_use(int bpid)
{
	if (dpa_bpid2pool(bpid)) {
		atomic_inc(&dpa_bp_array[bpid]->refs);
		return true;
	}

	return false;
}
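
/* Reference-count lifecycle of a pool, as implemented above:
 *
 *	dpa_bpid2pool_map()	refs = 1  (first dpa_bp_alloc() for a bpid)
 *	dpa_bpid2pool_use()	refs++	  (each later alloc sharing the bpid)
 *	_dpa_bp_free()		refs--	  (pool torn down when refs hits 0)
 */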

#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
		     void *accel_priv, select_queue_fallback_t fallback)
{
	return dpa_get_queue_mapping(skb);
}
#endif

struct dpa_fq *dpa_fq_alloc(struct device *dev,
			    const struct fqid_cell *fqids,
			    struct list_head *list,
			    enum dpa_fq_type fq_type)
{
	int i;
	struct dpa_fq *dpa_fq;

	dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fqids->count, GFP_KERNEL);
	if (!dpa_fq)
		return NULL;

	for (i = 0; i < fqids->count; i++) {
		dpa_fq[i].fq_type = fq_type;
		dpa_fq[i].fqid = fqids->start ? fqids->start + i : 0;
		list_add_tail(&dpa_fq[i].list, list);
	}

	for (i = 0; i < fqids->count; i++)
		_dpa_assign_wq(dpa_fq + i);

	return dpa_fq;
}
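
/* Example: with tx_confirm_fqids = { {0, DPAA_ETH_TX_QUEUES} }, the call
 *
 *	dpa_fq_alloc(dev, tx_confirm_fqids, list, FQ_TYPE_TX_CONF_MQ);
 *
 * allocates DPAA_ETH_TX_QUEUES dpa_fq structures, each with fqid 0 (i.e.
 * assigned dynamically later, in dpa_fq_init()), appends them to "list"
 * and assigns each one a work queue via _dpa_assign_wq().
 */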

int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
		     struct fm_port_fqs *port_fqs,
		     bool alloc_tx_conf_fqs,
		     enum port_type ptype)
{
	const struct fqid_cell *fqids;
	struct dpa_fq *dpa_fq;
	int num_ranges;
	int i;

	if (ptype == TX && alloc_tx_conf_fqs) {
		if (!dpa_fq_alloc(dev, tx_confirm_fqids, list,
				  FQ_TYPE_TX_CONF_MQ))
			goto fq_alloc_failed;
	}

	fqids = default_fqids[ptype];
	num_ranges = 3;

	for (i = 0; i < num_ranges; i++) {
		switch (i) {
		case 0:
			/* The first queue is the error queue */
			if (fqids[i].count != 1)
				goto invalid_error_queue;

			dpa_fq = dpa_fq_alloc(dev, &fqids[i], list,
					      ptype == RX ?
						FQ_TYPE_RX_ERROR :
						FQ_TYPE_TX_ERROR);
			if (!dpa_fq)
				goto fq_alloc_failed;

			if (ptype == RX)
				port_fqs->rx_errq = &dpa_fq[0];
			else
				port_fqs->tx_errq = &dpa_fq[0];
			break;
		case 1:
			/* The second queue is the default queue */
			if (fqids[i].count != 1)
				goto invalid_default_queue;

			dpa_fq = dpa_fq_alloc(dev, &fqids[i], list,
					      ptype == RX ?
						FQ_TYPE_RX_DEFAULT :
						FQ_TYPE_TX_CONFIRM);
			if (!dpa_fq)
				goto fq_alloc_failed;

			if (ptype == RX)
				port_fqs->rx_defq = &dpa_fq[0];
			else
				port_fqs->tx_defq = &dpa_fq[0];
			break;
		default:
			/* all subsequent queues are Tx */
			if (!dpa_fq_alloc(dev, &fqids[i], list, FQ_TYPE_TX))
				goto fq_alloc_failed;
			break;
		}
	}

	return 0;

fq_alloc_failed:
	dev_err(dev, "dpa_fq_alloc() failed\n");
	return -ENOMEM;

invalid_default_queue:
invalid_error_queue:
	dev_err(dev, "Too many default or error queues\n");
	return -EINVAL;
}

static u32 rx_pool_channel;
static DEFINE_SPINLOCK(rx_pool_channel_init);

int dpa_get_channel(void)
{
	spin_lock(&rx_pool_channel_init);
	if (!rx_pool_channel) {
		u32 pool;
		int ret = qman_alloc_pool(&pool);

		if (!ret)
			rx_pool_channel = pool;
	}
	spin_unlock(&rx_pool_channel_init);
	if (!rx_pool_channel)
		return -ENOMEM;
	return rx_pool_channel;
}
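
/* rx_pool_channel is a lazily-allocated singleton: the first caller grabs a
 * QMan pool channel under the spinlock, and every subsequent caller gets the
 * same channel id back, so all DPAA interfaces share one Rx pool channel.
 */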

void dpa_release_channel(void)
{
	qman_release_pool(rx_pool_channel);
}

int dpaa_eth_add_channel(void *__arg)
{
#ifndef __rtems__
	const cpumask_t *cpus = qman_affine_cpus();
#endif /* __rtems__ */
	u32 pool = QM_SDQCR_CHANNELS_POOL_CONV((u16)(unsigned long)__arg);
	int cpu;
	struct qman_portal *portal;

#ifndef __rtems__
	for_each_cpu(cpu, cpus) {
#else /* __rtems__ */
	for (cpu = 0; cpu < (int)rtems_get_processor_count(); ++cpu) {
#endif /* __rtems__ */

		portal = (struct qman_portal *)qman_get_affine_portal(cpu);
		qman_p_static_dequeue_add(portal, pool);
	}
	return 0;
}
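
/* dpaa_eth_add_channel() adds the given pool channel to the static dequeue
 * command (SDQCR) of every affine software portal, so each CPU's portal will
 * pull frames from the Rx FQs scheduled on that channel. On RTEMS there is
 * no affine cpumask, hence the plain loop over all processors.
 */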

/* Congestion group state change notification callback.
 * Stops the device's egress queues while they are congested and
 * wakes them upon exiting congested state.
 * Also updates some CGR-related stats.
 */
static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
			   int congested)
{
	struct dpa_priv_s *priv = (struct dpa_priv_s *)container_of(cgr,
		struct dpa_priv_s, cgr_data.cgr);

	if (congested) {
		priv->cgr_data.congestion_start_jiffies = jiffies;
#ifndef __rtems__
		netif_tx_stop_all_queues(priv->net_dev);
#else /* __rtems__ */
		BSD_ASSERT(0);
#endif /* __rtems__ */
		priv->cgr_data.cgr_congested_count++;
	} else {
		priv->cgr_data.congested_jiffies +=
			(jiffies - priv->cgr_data.congestion_start_jiffies);
#ifndef __rtems__
		netif_tx_wake_all_queues(priv->net_dev);
#else /* __rtems__ */
		BSD_ASSERT(0);
#endif /* __rtems__ */
	}
}

int dpaa_eth_cgr_init(struct dpa_priv_s *priv)
{
	struct qm_mcc_initcgr initcgr;
	u32 cs_th;
	int err;

	err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
	if (err < 0) {
		pr_err("Error %d allocating CGR ID\n", err);
		goto out_error;
	}
	priv->cgr_data.cgr.cb = dpaa_eth_cgscn;

	/* Enable Congestion State Change Notifications and CS taildrop */
	initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
	initcgr.cgr.cscn_en = QM_CGR_EN;

	/* Set different thresholds based on the MAC speed.
	 * This may become suboptimal if the MAC is reconfigured at a speed
	 * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
	 * In such cases, we ought to reconfigure the threshold, too.
	 */
#ifndef __rtems__
	if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
		cs_th = DPAA_CS_THRESHOLD_10G;
	else
		cs_th = DPAA_CS_THRESHOLD_1G;
#else /* __rtems__ */
	/* FIXME */
	cs_th = DPAA_CS_THRESHOLD_1G;
#endif /* __rtems__ */
	qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);

	initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
	initcgr.cgr.cstd_en = QM_CGR_EN;

	err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
			      &initcgr);
	if (err < 0) {
		pr_err("Error %d creating CGR with ID %d\n", err,
		       priv->cgr_data.cgr.cgrid);
		qman_release_cgrid(priv->cgr_data.cgr.cgrid);
		goto out_error;
	}
	pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
		 priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
		 priv->cgr_data.cgr.chan);

out_error:
	return err;
}
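
/* Note on qm_cgr_cs_thres_set64(): QMan stores congestion thresholds in a
 * mantissa/exponent format rather than as a plain byte count, so the value
 * actually programmed is an approximation of cs_th; the trailing "1" asks
 * for the threshold to be rounded up rather than down. This is our reading
 * of the QMan API - consult the QMan documentation before relying on it.
 */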

static inline void dpa_setup_ingress(const struct dpa_priv_s *priv,
				     struct dpa_fq *fq,
				     const struct qman_fq *template)
{
	fq->fq_base = *template;
	fq->net_dev = priv->net_dev;

	fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
	fq->channel = priv->channel;
}

static inline void dpa_setup_egress(const struct dpa_priv_s *priv,
				    struct dpa_fq *fq,
				    struct fman_port *port,
				    const struct qman_fq *template)
{
	fq->fq_base = *template;
	fq->net_dev = priv->net_dev;

	if (port) {
		fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
		fq->channel = (u16)fman_port_get_qman_channel_id(port);
	} else {
		fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
	}
}

void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
		  struct fman_port *tx_port)
{
	struct dpa_fq *fq;
#ifndef __rtems__
	u16 portals[NR_CPUS];
	int cpu, num_portals = 0;
	const cpumask_t *affine_cpus = qman_affine_cpus();
#endif /* __rtems__ */
	int egress_cnt = 0, conf_cnt = 0;

#ifndef __rtems__
	for_each_cpu(cpu, affine_cpus)
		portals[num_portals++] = qman_affine_channel(cpu);
	if (num_portals == 0)
		dev_err(priv->net_dev->dev.parent,
			"No Qman software (affine) channels found");
#else /* __rtems__ */
	/* FIXME */
#endif /* __rtems__ */

	/* Initialize each FQ in the list */
	list_for_each_entry(fq, &priv->dpa_fq_list, list) {
		switch (fq->fq_type) {
		case FQ_TYPE_RX_DEFAULT:
			DPA_ERR_ON(!priv->mac_dev);
			dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
			break;
		case FQ_TYPE_RX_ERROR:
			DPA_ERR_ON(!priv->mac_dev);
			dpa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
			break;
		case FQ_TYPE_TX:
			dpa_setup_egress(priv, fq, tx_port,
					 &fq_cbs->egress_ern);
			/* If we have more Tx queues than the number of cores,
			 * just ignore the extra ones.
			 */
			if (egress_cnt < DPAA_ETH_TX_QUEUES)
				priv->egress_fqs[egress_cnt++] = &fq->fq_base;
			break;
		case FQ_TYPE_TX_CONFIRM:
			DPA_ERR_ON(!priv->mac_dev);
			dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
			break;
		case FQ_TYPE_TX_CONF_MQ:
			DPA_ERR_ON(!priv->mac_dev);
			dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
			priv->conf_fqs[conf_cnt++] = &fq->fq_base;
			break;
		case FQ_TYPE_TX_ERROR:
			DPA_ERR_ON(!priv->mac_dev);
			dpa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
			break;
		default:
#ifndef __rtems__
			dev_warn(priv->net_dev->dev.parent,
				 "Unknown FQ type detected!\n");
#else /* __rtems__ */
			BSD_ASSERT(0);
#endif /* __rtems__ */
			break;
		}
	}

	/* The number of Tx queues may be smaller than the number of cores, if
	 * the Tx queue range is specified in the device tree instead of being
	 * dynamically allocated.
	 * Make sure all CPUs receive a corresponding Tx queue.
	 */
	while (egress_cnt < DPAA_ETH_TX_QUEUES) {
		list_for_each_entry(fq, &priv->dpa_fq_list, list) {
			if (fq->fq_type != FQ_TYPE_TX)
				continue;
			priv->egress_fqs[egress_cnt++] = &fq->fq_base;
			if (egress_cnt == DPAA_ETH_TX_QUEUES)
				break;
		}
	}
}

int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable)
{
	int err;
	const struct dpa_priv_s *priv;
#ifndef __rtems__
	struct device *dev;
#endif /* __rtems__ */
	struct qman_fq *fq;
	struct qm_mcc_initfq initfq;
	struct qman_fq *confq = NULL;
	int queue_id;

	priv = netdev_priv(dpa_fq->net_dev);
#ifndef __rtems__
	dev = dpa_fq->net_dev->dev.parent;
#endif /* __rtems__ */

	if (dpa_fq->fqid == 0)
		dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;

	dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);

	err = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
	if (err) {
#ifndef __rtems__
		dev_err(dev, "qman_create_fq() failed\n");
#else /* __rtems__ */
		BSD_ASSERT(0);
#endif /* __rtems__ */
		return err;
	}
	fq = &dpa_fq->fq_base;

	if (dpa_fq->init) {
		memset(&initfq, 0, sizeof(initfq));

		initfq.we_mask = QM_INITFQ_WE_FQCTRL;
		/* Note: we may get to keep an empty FQ in cache */
		initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;

		/* Try to reduce the number of portal interrupts for
		 * Tx Confirmation FQs.
		 */
		if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
			initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;

		/* FQ placement */
		initfq.we_mask |= QM_INITFQ_WE_DESTWQ;

		initfq.fqd.dest.channel = dpa_fq->channel;
		initfq.fqd.dest.wq = dpa_fq->wq;

		/* Put all egress queues in a congestion group of their own.
		 * Sensu stricto, the Tx confirmation queues are Rx FQs,
		 * rather than Tx - but they nonetheless account for the
		 * memory footprint on behalf of egress traffic. We therefore
		 * place them in the netdev's CGR, along with the Tx FQs.
		 */
		if (dpa_fq->fq_type == FQ_TYPE_TX ||
		    dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
		    dpa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
			initfq.we_mask |= QM_INITFQ_WE_CGID;
			initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
			initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
			/* Set a fixed overhead accounting, in an attempt to
			 * reduce the impact of fixed-size skb shells and the
			 * driver's needed headroom on system memory. This is
			 * especially the case when the egress traffic is
			 * composed of small datagrams.
			 * Unfortunately, QMan's OAL value is capped to an
			 * insufficient value, but even that is better than
			 * no overhead accounting at all.
			 */
			initfq.we_mask |= QM_INITFQ_WE_OAC;
			initfq.fqd.oac_init.oac = QM_OAC_CG;
#ifndef __rtems__
			initfq.fqd.oac_init.oal =
				(signed char)(min(sizeof(struct sk_buff) +
				priv->tx_headroom,
				(size_t)FSL_QMAN_MAX_OAL));
#else /* __rtems__ */
			/* FIXME */
			initfq.fqd.oac_init.oal = FSL_QMAN_MAX_OAL;
#endif /* __rtems__ */
		}

		if (td_enable) {
			initfq.we_mask |= QM_INITFQ_WE_TDTHRESH;
			qm_fqd_taildrop_set(&initfq.fqd.td,
					    DPA_FQ_TD, 1);
			initfq.fqd.fq_ctrl = QM_FQCTRL_TDE;
		}

		/* Configure the Tx confirmation queue, now that we know
		 * which Tx queue it pairs with.
		 */
		if (dpa_fq->fq_type == FQ_TYPE_TX) {
			queue_id = _dpa_tx_fq_to_id(priv, &dpa_fq->fq_base);
			if (queue_id >= 0)
				confq = priv->conf_fqs[queue_id];
			if (confq) {
				initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
				/* ContextA: OVOM=1 (use contextA2 bits instead of ICAD)
				 *	     A2V=1 (contextA A2 field is valid)
				 *	     A0V=1 (contextA A0 field is valid)
				 *	     B0V=1 (contextB field is valid)
				 * ContextA A2: EBD=1 (deallocate buffers inside FMan)
				 * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
				 */
				initfq.fqd.context_a.hi = 0x1e000000;
				initfq.fqd.context_a.lo = 0x80000000;
			}
		}

		/* Put all *private* ingress queues in our "ingress CGR". */
		if (priv->use_ingress_cgr &&
		    (dpa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
		     dpa_fq->fq_type == FQ_TYPE_RX_ERROR)) {
			initfq.we_mask |= QM_INITFQ_WE_CGID;
			initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
			initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
			/* Set a fixed overhead accounting, just like for the
			 * egress CGR.
			 */
			initfq.we_mask |= QM_INITFQ_WE_OAC;
			initfq.fqd.oac_init.oac = QM_OAC_CG;
#ifndef __rtems__
			initfq.fqd.oac_init.oal =
				(signed char)(min(sizeof(struct sk_buff) +
				priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
#else /* __rtems__ */
			/* FIXME */
			initfq.fqd.oac_init.oal = FSL_QMAN_MAX_OAL;
#endif /* __rtems__ */
		}

		/* Initialization common to all ingress queues */
		if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
			initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
			initfq.fqd.fq_ctrl |=
				QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
			initfq.fqd.context_a.stashing.exclusive =
				QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
				QM_STASHING_EXCL_ANNOTATION;
			initfq.fqd.context_a.stashing.data_cl = 2;
			initfq.fqd.context_a.stashing.annotation_cl = 1;
			initfq.fqd.context_a.stashing.context_cl =
				DIV_ROUND_UP(sizeof(struct qman_fq), 64);
		}

		err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
		if (err < 0) {
#ifndef __rtems__
			dev_err(dev, "qman_init_fq(%u) = %d\n",
				qman_fq_fqid(fq), err);
#endif /* __rtems__ */
			qman_destroy_fq(fq, 0);
			return err;
		}
	}

	dpa_fq->fqid = qman_fq_fqid(fq);

	return 0;
}

#ifndef __rtems__
static int _dpa_fq_free(struct device *dev, struct qman_fq *fq)
{
	int err, error;
	struct dpa_fq *dpa_fq;
	const struct dpa_priv_s *priv;

	err = 0;

	dpa_fq = container_of(fq, struct dpa_fq, fq_base);
	priv = netdev_priv(dpa_fq->net_dev);

	if (dpa_fq->init) {
		err = qman_retire_fq(fq, NULL);
		if (err < 0 && netif_msg_drv(priv))
			dev_err(dev, "qman_retire_fq(%u) = %d\n",
				qman_fq_fqid(fq), err);

		error = qman_oos_fq(fq);
		if (error < 0 && netif_msg_drv(priv)) {
			dev_err(dev, "qman_oos_fq(%u) = %d\n",
				qman_fq_fqid(fq), error);
			if (err >= 0)
				err = error;
		}
	}

	qman_destroy_fq(fq, 0);
	list_del(&dpa_fq->list);

	return err;
}

int dpa_fq_free(struct device *dev, struct list_head *list)
{
	int err, error;
	struct dpa_fq *dpa_fq, *tmp;

	err = 0;
	list_for_each_entry_safe(dpa_fq, tmp, list, list) {
		error = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
		if (error < 0 && err >= 0)
			err = error;
	}

	return err;
}
#endif /* __rtems__ */

static void
dpaa_eth_init_tx_port(struct fman_port *port, struct dpa_fq *errq,
		      struct dpa_fq *defq,
		      struct dpa_buffer_layout_s *buf_layout)
{
	struct fman_port_params params;
	struct fman_buffer_prefix_content buf_prefix_content;
	int err;

	memset(&params, 0, sizeof(params));
	memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));

	buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
	buf_prefix_content.pass_prs_result = buf_layout->parse_results;
	buf_prefix_content.pass_hash_result = buf_layout->hash_results;
	buf_prefix_content.pass_time_stamp = buf_layout->time_stamp;
	buf_prefix_content.data_align = buf_layout->data_align;

	params.specific_params.non_rx_params.err_fqid = errq->fqid;
	params.specific_params.non_rx_params.dflt_fqid = defq->fqid;

	err = fman_port_config(port, &params);
	if (err)
		pr_info("fman_port_config failed\n");

	err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
	if (err)
		pr_info("fman_port_cfg_buf_prefix_content failed\n");

	err = fman_port_init(port);
	if (err)
		pr_err("fman_port_init failed\n");
}

static void
dpaa_eth_init_rx_port(struct fman_port *port, struct dpa_bp *bp,
		      size_t count, struct dpa_fq *errq, struct dpa_fq *defq,
		      struct dpa_buffer_layout_s *buf_layout)
{
	struct fman_port_params params;
	struct fman_buffer_prefix_content buf_prefix_content;
	struct fman_port_rx_params *rx_p;
	int i, err;

	memset(&params, 0, sizeof(params));
	memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));

	buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
	buf_prefix_content.pass_prs_result = buf_layout->parse_results;
	buf_prefix_content.pass_hash_result = buf_layout->hash_results;
	buf_prefix_content.pass_time_stamp = buf_layout->time_stamp;
	buf_prefix_content.data_align = buf_layout->data_align;

	rx_p = &params.specific_params.rx_params;
	rx_p->err_fqid = errq->fqid;
	rx_p->dflt_fqid = defq->fqid;

	count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
	rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
	for (i = 0; i < count; i++) {
		rx_p->ext_buf_pools.ext_buf_pool[i].id = bp[i].bpid;
		rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bp[i].size;
	}

	err = fman_port_config(port, &params);
	if (err)
		pr_info("fman_port_config failed\n");

	err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
	if (err)
		pr_info("fman_port_cfg_buf_prefix_content failed\n");

	err = fman_port_init(port);
	if (err)
		pr_err("fman_port_init failed\n");
}

void dpaa_eth_init_ports(struct mac_device *mac_dev,
			 struct dpa_bp *bp, size_t count,
			 struct fm_port_fqs *port_fqs,
			 struct dpa_buffer_layout_s *buf_layout,
			 struct device *dev)
{
	struct fman_port *rxport = mac_dev->port[RX];
	struct fman_port *txport = mac_dev->port[TX];

	dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
			      port_fqs->tx_defq, &buf_layout[TX]);
	dpaa_eth_init_rx_port(rxport, bp, count, port_fqs->rx_errq,
			      port_fqs->rx_defq, &buf_layout[RX]);
}

void dpa_release_sgt(struct qm_sg_entry *sgt)
{
	struct dpa_bp *dpa_bp;
	struct bm_buffer bmb[DPA_BUFF_RELEASE_MAX];
	u8 i = 0, j;

	memset(bmb, 0, sizeof(bmb));

	do {
		dpa_bp = dpa_bpid2pool(sgt[i].bpid);
		DPA_ERR_ON(!dpa_bp);

		j = 0;
		do {
			DPA_ERR_ON(sgt[i].extension);

			bmb[j].hi = sgt[i].addr_hi;
			bmb[j].lo = be32_to_cpu(sgt[i].addr_lo);

			j++; i++;
		} while (j < ARRAY_SIZE(bmb) &&
			 !sgt[i - 1].final &&
			 sgt[i - 1].bpid == sgt[i].bpid);

		while (bman_release(dpa_bp->pool, bmb, j, 0))
			cpu_relax();
	} while (!sgt[i - 1].final);
}
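
/* The inner loop above batches consecutive S/G entries sharing a bpid into
 * a single bman_release() call (at most DPA_BUFF_RELEASE_MAX buffers per
 * call); the outer loop restarts the batch whenever the buffer pool id
 * changes, until the entry marked "final" has been released.
 */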

void dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd)
{
	struct qm_sg_entry *sgt;
	struct dpa_bp *dpa_bp;
	struct bm_buffer bmb;
	dma_addr_t addr;
	void *vaddr;

	memset(&bmb, 0, sizeof(bmb));
	bm_buffer_set64(&bmb, fd->addr);

	dpa_bp = dpa_bpid2pool(fd->bpid);
	DPA_ERR_ON(!dpa_bp);

	if (fd->format == qm_fd_sg) {
		vaddr = phys_to_virt(fd->addr);
		sgt = vaddr + dpa_fd_offset(fd);

#ifndef __rtems__
		dma_unmap_single(dpa_bp->dev, qm_fd_addr(fd), dpa_bp->size,
				 DMA_BIDIRECTIONAL);
#endif /* __rtems__ */

		dpa_release_sgt(sgt);

#ifndef __rtems__
		addr = dma_map_single(dpa_bp->dev, vaddr, dpa_bp->size,
				      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dpa_bp->dev, addr)) {
			dev_err(dpa_bp->dev, "DMA mapping failed");
			return;
		}
#else /* __rtems__ */
		addr = (dma_addr_t)vaddr;
#endif /* __rtems__ */
		bm_buffer_set64(&bmb, addr);
	}

	while (bman_release(dpa_bp->pool, &bmb, 1, 0))
		cpu_relax();
}

void count_ern(struct dpa_percpu_priv_s *percpu_priv,
	       const struct qm_mr_entry *msg)
{
	switch (msg->ern.rc & QM_MR_RC_MASK) {
	case QM_MR_RC_CGR_TAILDROP:
		percpu_priv->ern_cnt.cg_tdrop++;
		break;
	case QM_MR_RC_WRED:
		percpu_priv->ern_cnt.wred++;
		break;
	case QM_MR_RC_ERROR:
		percpu_priv->ern_cnt.err_cond++;
		break;
	case QM_MR_RC_ORPWINDOW_EARLY:
		percpu_priv->ern_cnt.early_window++;
		break;
	case QM_MR_RC_ORPWINDOW_LATE:
		percpu_priv->ern_cnt.late_window++;
		break;
	case QM_MR_RC_FQ_TAILDROP:
		percpu_priv->ern_cnt.fq_tdrop++;
		break;
	case QM_MR_RC_ORPWINDOW_RETIRED:
		percpu_priv->ern_cnt.fq_retired++;
		break;
	case QM_MR_RC_ORP_ZERO:
		percpu_priv->ern_cnt.orp_zero++;
		break;
	}
}

#ifndef __rtems__
/* Turn on HW checksum computation for this outgoing frame.
 * If the current protocol is not something we support in this regard
 * (or if the stack has already computed the SW checksum), we do nothing.
 *
 * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
 * otherwise.
 *
 * Note that this function may modify the fd->cmd field and the skb data buffer
 * (the Parse Results area).
 */
int dpa_enable_tx_csum(struct dpa_priv_s *priv,
		       struct sk_buff *skb,
		       struct qm_fd *fd,
		       char *parse_results)
{
	struct fman_prs_result *parse_result;
	struct iphdr *iph;
	struct ipv6hdr *ipv6h = NULL;
	u8 l4_proto;
	u16 ethertype = ntohs(skb->protocol);
	int retval = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* Note: L3 csum seems to be already computed in sw, but we can't choose
	 * L4 alone from the FM configuration anyway.
	 */

	/* Fill in some fields of the Parse Results array, so the FMan
	 * can find them as if they came from the FMan Parser.
	 */
	parse_result = (struct fman_prs_result *)parse_results;

	/* If we're dealing with VLAN, get the real Ethernet type */
	if (ethertype == ETH_P_8021Q) {
		/* We can't always assume the MAC header is set correctly
		 * by the stack, so reset to beginning of skb->data
		 */
		skb_reset_mac_header(skb);
		ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
	}

	/* Fill in the relevant L3 parse result fields
	 * and read the L4 protocol type
	 */
	switch (ethertype) {
	case ETH_P_IP:
		parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
		iph = ip_hdr(skb);
		DPA_ERR_ON(!iph);
		l4_proto = iph->protocol;
		break;
	case ETH_P_IPV6:
		parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
		ipv6h = ipv6_hdr(skb);
		DPA_ERR_ON(!ipv6h);
		l4_proto = ipv6h->nexthdr;
		break;
	default:
		/* We shouldn't even be here */
		if (net_ratelimit())
			netif_alert(priv, tx_err, priv->net_dev,
				    "Can't compute HW csum for L3 proto 0x%x\n",
				    ntohs(skb->protocol));
		retval = -EIO;
		goto return_error;
	}

	/* Fill in the relevant L4 parse result fields */
	switch (l4_proto) {
	case IPPROTO_UDP:
		parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
		break;
	case IPPROTO_TCP:
		parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
		break;
	default:
		/* This could just as well be a BUG() */
		if (net_ratelimit())
			netif_alert(priv, tx_err, priv->net_dev,
				    "Can't compute HW csum for L4 proto 0x%x\n",
				    l4_proto);
		retval = -EIO;
		goto return_error;
	}

	/* At index 0 is IPOffset_1 as defined in the Parse Results */
	parse_result->ip_off[0] = (u8)skb_network_offset(skb);
	parse_result->l4_off = (u8)skb_transport_offset(skb);

	/* Enable L3 (and L4, if TCP or UDP) HW checksum. */
	fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;

	/* On P1023 and similar platforms fd->cmd interpretation could
	 * be disabled by setting CONTEXT_A bit ICMD; currently this bit
	 * is not set so we do not need to check; in the future, if/when
	 * using context_a we need to check this bit
	 */

return_error:
	return retval;
}
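
/* In the FD command word set above, our reading of the flags is that
 * FM_FD_CMD_RPD tells the FMan to use the prepended parse results we just
 * filled in rather than re-parsing the frame, while FM_FD_CMD_DTC requests
 * checksum generation; the FMan reference manual is authoritative here.
 */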
#endif /* __rtems__ */