source: rtems/c/src/libchip/network/dwmac-desc-enh.c @ 8dab980

4.115
Last change on this file since 8dab980 was ddf8d12, checked in by Sebastian Huber <sebastian.huber@…>, on 09/05/14 at 07:12:34

libchip/dwmac: Use RTEMS_DEVOLATILE()

  • Property mode set to 100644
File size: 33.8 KB
Line 
1/**
2 * @file
3 *
4 * @brief DWMAC 10/100/1000 Enhanced DMA Descriptor Handling.
5 *
6 * DWMAC 10/100/1000 on-chip Ethernet controllers.
7 * Functions and data for the handling of enhanced DMA descriptors.
8 */
9
10/*
11 * Copyright (c) 2013 embedded brains GmbH.  All rights reserved.
12 *
13 *  embedded brains GmbH
14 *  Dornierstr. 4
15 *  82178 Puchheim
16 *  Germany
17 *  <rtems@embedded-brains.de>
18 *
19 * The license and distribution terms for this file may be
20 * found in the file LICENSE in this distribution or at
21 * http://www.rtems.org/license/LICENSE.
22 */
23
24#include <assert.h>
25#include <stdlib.h>
26#include <stdio.h>
27#include "dwmac-common.h"
28#include "dwmac-desc-com.h"
29#include "dwmac-core.h"
30#include <sys/queue.h>
31
32#undef DWMAC_DESC_ENH_DEBUG
33#ifdef DWMAC_DESC_ENH_DEBUG
34#define DWMAC_DESC_ENH_PRINT_DBG( fmt, args ... )  printk( fmt, ## args )
35#else
36#define DWMAC_DESC_ENH_PRINT_DBG( fmt, args ... )  do { } while ( 0 )
37#endif
38
39typedef enum {
40  DWMAC_IP_PAYLOAD_TYPE_UNKNOWN,
41  DWMAC_IP_PAYLOAD_TYPE_UDP,
42  DWMAC_IP_PAYLOAD_TYPE_TCP,
43  DWMAC_IP_PAYLOAD_TYPE_ICMP
44} dwmac_ip_payload_type;
45
46static void dwmac_desc_enh_rx_set_on_ring_chain(
47  volatile dwmac_desc_ext *p, int end )
48{
49  /* For simplicity reasons we will not use the second buffer.
50   * If we would use it we would have to set the size to MCLBYTES -1 */
51  p->erx.des0_3.des1 = DWMAC_DESC_ERX_DES1_RECEIVE_BUFF_2_SIZE_SET(
52    p->erx.des0_3.des1, 0
53    );
54  p->erx.des0_3.des3 = (uint32_t) NULL;
55
56  if ( end )
57    p->erx.des0_3.des1 |= DWMAC_DESC_ERX_DES1_RECEIVE_END_OF_RING;
58}
59
60static void dwmac_desc_enh_tx_set_on_ring_chain(
61  volatile dwmac_desc_ext *p, const bool end )
62{
63  if ( end )
64    p->etx.des0_3.des0 |= DWMAC_DESC_ETX_DES0_TRANSMIT_END_OF_RING;
65}
66
67static void dwmac_desc_enh_set_tx_desc_len(
68  volatile dwmac_desc_ext *p_enh, size_t len )
69{
70  p_enh->etx.des0_3.des1 = DWMAC_DESC_ETX_DES1_TRANSMIT_BUFFER_1_SIZE_SET(
71    p_enh->etx.des0_3.des1,
72    len
73    );
74}
75
76static bool dwmac_desc_enh_is_giant_frame( const uint32_t des0 )
77{
78  return (
79    ( des0
80      & DWMAC_DESC_ERX_DES0_TIMESTAMP_AVAIL_OR_CHECKSUM_ERROR_OR_GIANT_FRAME
81    ) != 0
82    );
83}
84
85static bool dwmac_desc_enh_is_udp_payload( const uint32_t des4 )
86{
87  return (
88    DWMAC_DESC_EXT_ERX_DES4_IP_PAYLOAD_TYPE_GET( des4 )
89    == DWMAC_IP_PAYLOAD_TYPE_UDP
90    );
91}
92
93static bool dwmac_desc_enh_is_tcp_payload( const uint32_t des4 )
94{
95  return (
96    DWMAC_DESC_EXT_ERX_DES4_IP_PAYLOAD_TYPE_GET( des4 )
97    == DWMAC_IP_PAYLOAD_TYPE_TCP
98    );
99}
100
101static bool dwmac_desc_enh_is_icmp_payload( const uint32_t des4 )
102{
103  return (
104    DWMAC_DESC_EXT_ERX_DES4_IP_PAYLOAD_TYPE_GET( des4 )
105    == DWMAC_IP_PAYLOAD_TYPE_ICMP
106    );
107}
108
/**
 * @brief Evaluates the checksum offload engine (COE) status of a received
 * frame from an enhanced RX descriptor.
 *
 * Reads des0 and the extended status word des4 and maps their bits to a
 * coarse frame status for the upper layers.
 *
 * @param p_enh Enhanced RX descriptor of the received frame.
 * @returns One of the DWMAC_COMMON_RX_FRAME_STATUS_* values
 * (GOOD, LLC_SNAP, CSUM_NONE or DISCARD).
 */
static dwmac_common_rx_frame_status dwmac_desc_enh_coe_status(
  volatile dwmac_desc_ext *p_enh )
{
  dwmac_common_rx_frame_status ret  = DWMAC_COMMON_RX_FRAME_STATUS_GOOD;
  const uint32_t               DES0 = p_enh->erx.des0_3.des0;
  const uint32_t               DES4 = p_enh->erx.des4;


  /* Extended status available: decode des4 directly */
  if ( ( DES0 & DWMAC_DESC_ERX_DES0_EXT_STATUS_AVAIL_OR_RX_MAC_ADDR_STATUS )
       != 0 ) {
    if ( !dwmac_desc_enh_is_giant_frame( DES0 )
         && ( DES0 & DWMAC_DESC_ERX_DES0_FREAME_TYPE ) == 0
         && ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV6_PACKET_RECEIVED ) == 0
         && ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV4_PACKET_RECEIVED ) == 0
         && ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IP_PAYLOAD_ERROR ) == 0
         && ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IP_HEADER_ERROR ) == 0 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "RX Des0 status: IEEE 802.3 Type frame.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_LLC_SNAP;
    } else if ( ( ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV6_PACKET_RECEIVED ) != 0
                  || ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV4_PACKET_RECEIVED )
                  != 0 )
                && dwmac_desc_enh_is_giant_frame( DES0 ) ) {
      DWMAC_DESC_ENH_PRINT_DBG( "RX Des0 status: IPv4/6 No CSUM Error.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_GOOD;
    } else if ( ( ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV6_PACKET_RECEIVED ) != 0
                  || ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV4_PACKET_RECEIVED )
                  != 0 )
                && ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IP_PAYLOAD_ERROR ) != 0 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "RX Des0 status: IPv4/6 Payload Error.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_CSUM_NONE;
    } else if ( ( ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV6_PACKET_RECEIVED ) != 0
                  || ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV4_PACKET_RECEIVED )
                  != 0 )
                && ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IP_HEADER_ERROR ) != 0 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "RX Des0 status: IPv4/6 Header Error.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_CSUM_NONE;
    /* NOTE(review): this arm can never be taken -- any frame with both
     * payload and header error already matched the payload-error arm two
     * branches above; confirm against the intended precedence */
    } else if ( ( ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV6_PACKET_RECEIVED ) != 0
                  || ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV4_PACKET_RECEIVED )
                  != 0 )
                && ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IP_PAYLOAD_ERROR ) != 0
                && ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IP_HEADER_ERROR ) != 0 ) {
      DWMAC_DESC_ENH_PRINT_DBG(
        "RX Des0 status: IPv4/6 Header and Payload Error.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_CSUM_NONE;
    } else if ( ( ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV6_PACKET_RECEIVED ) != 0
                  || ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV4_PACKET_RECEIVED )
                  != 0 )
                && ( !dwmac_desc_enh_is_udp_payload( DES4 ) )
                && ( !dwmac_desc_enh_is_tcp_payload( DES4 ) )
                && ( !dwmac_desc_enh_is_icmp_payload( DES4 ) ) ) {
      DWMAC_DESC_ENH_PRINT_DBG(
        "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_DISCARD;
    } else if ( ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV6_PACKET_RECEIVED ) == 0
                && ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV4_PACKET_RECEIVED )
                == 0 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "RX Des0 status: No IPv4, IPv6 frame.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_DISCARD;
    }
  } else {
    /* No extended status: synthesize the legacy 3-bit status word from
     * des0 bits (see table below) */
    uint32_t status = (
      (uint32_t) ( dwmac_desc_enh_is_giant_frame( DES0 ) << 2U )
      | (uint32_t) ( ( ( DES0 & DWMAC_DESC_ERX_DES0_FREAME_TYPE )
                       != 0 ) << 1U ) )
                      & 0x7U;

    /* bits 5 7 0 | Frame status
     * ----------------------------------------------------------
     *      0 0 0 | IEEE 802.3 Type frame (length < 1536 octects)
     *      1 0 0 | IPv4/6 No CSUM errorS.
     *      1 0 1 | IPv4/6 CSUM PAYLOAD error
     *      1 1 0 | IPv4/6 CSUM IP HR error
     *      1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
     *      0 0 1 | IPv4/6 unsupported IP PAYLOAD
     *      0 1 1 | COE bypassed.. no IPv4/6 frame
     *      0 1 0 | Reserved.
     */
    /* NOTE(review): only bits 2 and 1 of 'status' are ever set above, so
     * the odd-valued cases (0x1, 0x3, 0x5, 0x7) appear unreachable; the
     * comparable Linux stmmac code also folds in a payload-error bit --
     * verify against the DWMAC databook before changing */
    if ( status == 0x0 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "RX Des0 status: IEEE 802.3 Type frame.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_LLC_SNAP;
    } else if ( status == 0x4 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "RX Des0 status: IPv4/6 No CSUM errorS.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_GOOD;
    } else if ( status == 0x5 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "RX Des0 status: IPv4/6 Payload Error.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_CSUM_NONE;
    } else if ( status == 0x6 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "RX Des0 status: IPv4/6 Header Error.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_CSUM_NONE;
    } else if ( status == 0x7 ) {
      DWMAC_DESC_ENH_PRINT_DBG(
        "RX Des0 status: IPv4/6 Header and Payload Error.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_CSUM_NONE;
    } else if ( status == 0x1 ) {
      DWMAC_DESC_ENH_PRINT_DBG(
        "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_DISCARD;
    } else if ( status == 0x3 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "RX Des0 status: No IPv4, IPv6 frame.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_DISCARD;
    }
  }

  return ret;
}
214
215static int dwmac_desc_enh_get_tx_status(
216  dwmac_common_context *self,
217  const unsigned int    idx_tx )
218{
219  int                                 ret    = 0;
220  volatile dwmac_desc_ext            *dma_tx =
221    (volatile dwmac_desc_ext *) self->dma_tx;
222  volatile dwmac_desc_ext            *p_desc = &dma_tx[idx_tx];
223  dwmac_common_desc_status_counts_tx *counts =
224    &self->stats.desc_status_counts_tx;
225
226
227  if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_ERROR_SUMMARY ) != 0 ) {
228    DWMAC_DESC_ENH_PRINT_DBG( "DWMAC TX error... 0x%08x\n", p->des01.etx );
229
230    if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_JABBER_TIMEOUT )
231         != 0 ) {
232      DWMAC_DESC_ENH_PRINT_DBG( "\tjabber_timeout error\n" );
233      ++counts->jabber;
234    }
235
236    if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_FRAME_FLUSHED )
237         != 0 ) {
238      DWMAC_DESC_ENH_PRINT_DBG( "\tframe_flushed error\n" );
239      ++counts->frame_flushed;
240      dwmac_core_dma_flush_tx_fifo( self );
241    }
242
243    if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_LOSS_OF_CARRIER )
244         != 0 ) {
245      DWMAC_DESC_ENH_PRINT_DBG( "\tloss_carrier error\n" );
246      ++counts->losscarrier;
247    }
248
249    if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_NO_CARRIER )
250         != 0 ) {
251      DWMAC_DESC_ENH_PRINT_DBG( "\tno_carrier error\n" );
252      ++counts->no_carrier;
253    }
254
255    if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_EXCESSIVE_COLLISION )
256         != 0 ) {
257      DWMAC_DESC_ENH_PRINT_DBG( "\texcessive_collisions\n" );
258      ++counts->excessive_collisions;
259    }
260
261    if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_EXCESSIVE_DEFERAL )
262         != 0 ) {
263      DWMAC_DESC_ENH_PRINT_DBG( "\texcessive tx_deferral\n" );
264      ++counts->excessive_deferral;
265    }
266
267    if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_UNDERFLOW_ERROR )
268         != 0 ) {
269      DWMAC_DESC_ENH_PRINT_DBG( "\tunderflow error\n" );
270      dwmac_core_dma_flush_tx_fifo( self );
271      ++counts->underflow;
272    }
273
274    if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_IP_HEADER_ERROR )
275         != 0 ) {
276      DWMAC_DESC_ENH_PRINT_DBG( "\tTX IP header csum error\n" );
277      ++counts->ip_header_error;
278    }
279
280    if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_IP_PAYLOAD_ERROR )
281         != 0 ) {
282      DWMAC_DESC_ENH_PRINT_DBG( "\tAddr/Payload csum error\n" );
283      ++counts->payload_error;
284      dwmac_core_dma_flush_tx_fifo( self );
285    }
286
287    ret = -1;
288  }
289
290  if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_DEFERRED_BIT ) != 0 ) {
291    DWMAC_DESC_ENH_PRINT_DBG( "GMAC TX status: tx deferred\n" );
292    ++counts->deferred;
293  }
294
295  if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_VLAN_FRAME ) != 0 ) {
296    DWMAC_DESC_ENH_PRINT_DBG( "GMAC TX status: VLAN frame\n" );
297    ++counts->vlan;
298  }
299
300  return ret;
301}
302
303static dwmac_common_rx_frame_status dwmac_desc_enh_get_rx_status(
304  dwmac_common_context *self,
305  const unsigned int    desc_idx )
306{
307  dwmac_common_desc_status_counts_rx *counts =
308    &self->stats.desc_status_counts_rx;
309  dwmac_common_rx_frame_status        ret    =
310    DWMAC_COMMON_RX_FRAME_STATUS_GOOD;
311  volatile dwmac_desc_ext            *dma_rx =
312    (volatile dwmac_desc_ext *) self->dma_rx;
313  const uint32_t                      DES0   = dma_rx[desc_idx].erx.des0_3.des0;
314
315
316  if ( ( DES0 & DWMAC_DESC_ERX_DES0_ERROR_SUMMARY ) != 0 ) {
317    DWMAC_DESC_ENH_PRINT_DBG( "GMAC RX Error Summary 0x%08x\n",
318                              DES0 );
319
320    if ( ( DES0 & DWMAC_DESC_ERX_DES0_DESCRIPTOR_ERROR ) != 0 ) {
321      DWMAC_DESC_ENH_PRINT_DBG( "\tdescriptor error\n" );
322      ++counts->descriptor_error;
323    }
324
325    if ( ( DES0 & DWMAC_DESC_ERX_DES0_OVERFLOW_ERROR ) != 0 ) {
326      DWMAC_DESC_ENH_PRINT_DBG( "\toverflow error\n" );
327      ++counts->overflow_error;
328    }
329
330    if ( dwmac_desc_enh_is_giant_frame( DES0 ) ) {
331      DWMAC_DESC_ENH_PRINT_DBG( "\tIPC Csum Error/Giant frame\n" );
332      ++counts->giant_frame;
333    }
334
335    if ( ( DES0 & DWMAC_DESC_ERX_DES0_LATE_COLLISION ) != 0 ) {
336      DWMAC_DESC_ENH_PRINT_DBG( "\tlate_collision error\n" );
337      ++counts->late_collision;
338    }
339
340    if ( ( DES0 & DWMAC_DESC_ERX_DES0_RECEIVE_WATCHDOG_TIMEOUT )
341         != 0 ) {
342      DWMAC_DESC_ENH_PRINT_DBG( "\treceive_watchdog error\n" );
343      ++counts->watchdog_timeout;
344    }
345
346    if ( ( DES0 & DWMAC_DESC_ERX_DES0_RECEIVE_ERROR ) != 0 ) {
347      DWMAC_DESC_ENH_PRINT_DBG( "\tReceive Error\n" );
348      ++counts->receive_error;
349    }
350
351    if ( ( DES0 & DWMAC_DESC_ERX_DES0_CRC_ERROR ) != 0 ) {
352      DWMAC_DESC_ENH_PRINT_DBG( "\tCRC error\n" );
353      ++counts->crc_error;
354    }
355
356    ret = DWMAC_COMMON_RX_FRAME_STATUS_DISCARD;
357  }
358
359  if ( ret == DWMAC_COMMON_RX_FRAME_STATUS_GOOD ) {
360    /* After a payload csum error, the ES bit is set.
361     * It doesn't match with the information reported into the databook.
362     * At any rate, we need to understand if the CSUM hw computation is ok
363     * and report this info to the upper layers. */
364    ret = dwmac_desc_enh_coe_status( &dma_rx[desc_idx] );
365  }
366
367  if ( ( DES0 & DWMAC_DESC_ERX_DES0_DRIBBLE_BIT_ERROR ) != 0 ) {
368    DWMAC_DESC_ENH_PRINT_DBG( "GMAC RX: dribbling error\n" );
369    ++counts->dribble_bit_error;
370  }
371
372  if ( ( DES0 & DWMAC_DESC_ERX_DES0_SRC_ADDR_FILTER_FAIL ) != 0 ) {
373    DWMAC_DESC_ENH_PRINT_DBG( "GMAC RX : Source Address filter fail\n" );
374    ++counts->source_addr_fail;
375    ret = DWMAC_COMMON_RX_FRAME_STATUS_DISCARD;
376  }
377
378  if ( ( DES0 & DWMAC_DESC_ERX_DES0_DEST_ADDR_FILTER_FAIL ) != 0 ) {
379    DWMAC_DESC_ENH_PRINT_DBG( "GMAC RX : Dest Address filter fail\n" );
380    ++counts->dest_addr_fail;
381    ret = DWMAC_COMMON_RX_FRAME_STATUS_DISCARD;
382  }
383
384  if ( ( DES0 & DWMAC_DESC_ERX_DES0_LENGTH_ERROR ) != 0 ) {
385    DWMAC_DESC_ENH_PRINT_DBG( "GMAC RX: length_error error\n" );
386    ++counts->length_error;
387    ret = DWMAC_COMMON_RX_FRAME_STATUS_DISCARD;
388  }
389
390  if ( ( DES0 & DWMAC_DESC_ERX_DES0_VLAN_TAG ) != 0 ) {
391    DWMAC_DESC_ENH_PRINT_DBG( "GMAC RX: VLAN frame tagged\n" );
392    ++counts->vlan_tag;
393  }
394
395  return ret;
396}
397
398static void dwmac_desc_enh_print_tx_desc(
399  volatile dwmac_desc *p,
400  const unsigned int   count )
401{
402  volatile dwmac_desc_ext *p_enh = (volatile dwmac_desc_ext *) p;
403  unsigned int             index;
404
405
406  if ( p_enh != NULL ) {
407    for ( index = 0; index < count; ++index ) {
408      printf( "Transmit DMA Descriptor %d\n", index );
409      printf( "des0\n" );
410      printf(
411        " %u own bit\n"
412        " %u IRQ on Completion\n"
413        " %u Last Segment\n"
414        " %u First Segment\n"
415        " %u Disable CRC\n"
416        " %u Disable Pad\n"
417        " %u Transmit Timestamp Enable\n"
418        " %lu Checksum Insertion Control\n"
419        " %u Transmit End of Ring\n"
420        " %u Second Address Chained\n"
421        " %u Transmit Timestamp Status\n"
422        " %u IP Header Error\n"
423        " %u VLAN Frame\n"
424        " %lu Collision Count\n"
425        " %u Deferred Bit\n",
426        ( p_enh[index].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_OWN_BIT ) != 0,
427        ( p_enh[index].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_IRQ_ON_COMPLETION ) != 0,
428        ( p_enh[index].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_LAST_SEGMENT ) != 0,
429        ( p_enh[index].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_FIRST_SEGMENT ) != 0,
430        ( p_enh[index].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_DISABLE_CRC ) != 0,
431        ( p_enh[index].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_DISABLE_PAD ) != 0,
432        ( p_enh[index].etx.des0_3.des0
433          & DWMAC_DESC_ETX_DES0_TRANSMIT_TIMESTAMP_ENABLE ) != 0,
434        DWMAC_DESC_ETX_DES0_CHECKSUM_INSERTION_CONTROL_GET( p_enh[index].etx.
435                                                            des0_3.des0 ),
436        ( p_enh[index].etx.des0_3.des0
437          & DWMAC_DESC_ETX_DES0_TRANSMIT_END_OF_RING ) != 0,
438        ( p_enh[index].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_SECOND_ADDR_CHAINED ) != 0,
439        ( p_enh[index].etx.des0_3.des0
440          & DWMAC_DESC_ETX_DES0_TRANSMIT_TIMESTAMP_STATUS ) != 0,
441        ( p_enh[index].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_IP_HEADER_ERROR ) != 0,
442        ( p_enh[index].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_VLAN_FRAME ) != 0,
443        DWMAC_DESC_ETX_DES0_COLLISION_COUNT_GET( p_enh[index].etx.des0_3.des0 ),
444        ( p_enh[index].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_DEFERRED_BIT ) != 0
445        );
446
447      if ( ( p_enh[index].etx.des0_3.des0
448             & DWMAC_DESC_ETX_DES0_ERROR_SUMMARY ) != 0 ) {
449        printf( " Error Summary:\n" );
450
451        if ( p_enh[index].etx.des0_3.des0
452             & DWMAC_DESC_ETX_DES0_JABBER_TIMEOUT ) {
453          printf( "  Jabber Timeout\n" );
454        }
455
456        if ( ( p_enh[index].etx.des0_3.des0
457               & DWMAC_DESC_ETX_DES0_FRAME_FLUSHED ) != 0 ) {
458          printf( "  Frame Flush\n" );
459        }
460
461        if ( ( p_enh[index].etx.des0_3.des0
462               & DWMAC_DESC_ETX_DES0_IP_PAYLOAD_ERROR ) != 0 ) {
463          printf( "  Payload Error\n" );
464        }
465
466        if ( ( p_enh[index].etx.des0_3.des0
467               & DWMAC_DESC_ETX_DES0_LOSS_OF_CARRIER ) != 0 ) {
468          printf( "  Loss of Carrier\n" );
469        }
470
471        if ( ( p_enh[index].etx.des0_3.des0
472               & DWMAC_DESC_ETX_DES0_NO_CARRIER ) != 0 ) {
473          printf( "  No Carrier\n" );
474        }
475
476        if ( ( p_enh[index].etx.des0_3.des0
477               & DWMAC_DESC_ETX_DES0_EXCESSIVE_COLLISION ) != 0 ) {
478          printf( "  Excessive Collision\n" );
479        }
480
481        if ( ( p_enh[index].etx.des0_3.des0
482               & DWMAC_DESC_ETX_DES0_EXCESSIVE_COLLISION ) != 0 ) {
483          printf( "  Ecessive Deferral\n" );
484        }
485
486        if ( ( p_enh[index].etx.des0_3.des0
487               & DWMAC_DESC_ETX_DES0_UNDERFLOW_ERROR ) != 0 ) {
488          printf( "  Undeflow Error\n" );
489        }
490      }
491
492      printf( "des1\n" );
493      printf(
494        " %lu Transmit Buffer 2 Size\n"
495        " %lu Transmit Buffer 1 Size\n",
496        DWMAC_DESC_ETX_DES1_TRANSMIT_BUFFER_2_SIZE_GET( p_enh[index].etx.des0_3.
497                                                        des1 ),
498        DWMAC_DESC_ETX_DES1_TRANSMIT_BUFFER_1_SIZE_GET( p_enh[index].etx.des0_3.
499                                                        des1 )
500        );
501      printf( "des2\n" );
502      printf( " %p Buffer 1 Address\n", (void *) p_enh[index].etx.des0_3.des2 );
503      printf( "des3\n" );
504      printf( " %p Buffer 2 Address\n", (void *) p_enh[index].etx.des0_3.des3 );
505    }
506  }
507}
508
/**
 * @brief Prints the fields of a range of enhanced RX DMA descriptors.
 *
 * Intended for debugging; dumps des0 through des3 of each descriptor.
 *
 * @param p     First descriptor of the array (may be NULL, then a no-op).
 * @param count Number of descriptors to print.
 */
static void dwmac_desc_enh_print_rx_desc(
  volatile dwmac_desc *p,
  const unsigned int   count )
{
  volatile dwmac_desc_ext *p_enh = (volatile dwmac_desc_ext *) p;
  unsigned int             index;


  if ( p_enh != NULL ) {
    for ( index = 0; index < count; ++index ) {
      printf( "Receive DMA Descriptor %d\n", index );
      printf( "des0\n" );
      printf(
        " %u Own Bit\n"
        " %u Dest. Addr. Filter Fail\n"
        " %lu Frame Length\n"
        " %u Source Addr. Filter Fail\n"
        " %u Length Error\n"
        " %u VLAN Tag\n"
        " %u First Descriptor\n"
        " %u Last Descriptor\n"
        " %u Frame Type\n"
        " %u Dribble Bit Error\n"
        " %u Extended Status Available\n",
        ( p_enh[index].erx.des0_3.des0 & DWMAC_DESC_ERX_DES0_OWN_BIT ) != 0,
        ( p_enh[index].erx.des0_3.des0
          & DWMAC_DESC_ERX_DES0_DEST_ADDR_FILTER_FAIL ) != 0,
        DWMAC_DESC_ERX_DES0_FRAME_LENGTH_GET(
          p_enh[index].erx.des0_3.des0 ),
        ( p_enh[index].erx.des0_3.des0
          & DWMAC_DESC_ERX_DES0_SRC_ADDR_FILTER_FAIL ) != 0,
        ( p_enh[index].erx.des0_3.des0 & DWMAC_DESC_ERX_DES0_LENGTH_ERROR ) != 0,
        ( p_enh[index].erx.des0_3.des0 & DWMAC_DESC_ERX_DES0_VLAN_TAG ) != 0,
        ( p_enh[index].erx.des0_3.des0 & DWMAC_DESC_ERX_DES0_FIRST_DESCRIPTOR ) != 0,
        ( p_enh[index].erx.des0_3.des0 & DWMAC_DESC_ERX_DES0_LAST_DESCRIPTOR ) != 0,
        ( p_enh[index].erx.des0_3.des0 & DWMAC_DESC_ERX_DES0_FREAME_TYPE ) != 0,
        ( p_enh[index].erx.des0_3.des0 & DWMAC_DESC_ERX_DES0_DRIBBLE_BIT_ERROR ) != 0,
        ( p_enh[index].erx.des0_3.des0
          & DWMAC_DESC_ERX_DES0_EXT_STATUS_AVAIL_OR_RX_MAC_ADDR_STATUS ) != 0
        );

      /* Detail the individual error bits only if the error summary is set */
      if ( ( p_enh[index].erx.des0_3.des0
             & DWMAC_DESC_ERX_DES0_ERROR_SUMMARY ) != 0 ) {
        printf( " Error Summary:\n" );

        if ( ( p_enh[index].erx.des0_3.des0
               & DWMAC_DESC_ERX_DES0_DESCRIPTOR_ERROR ) != 0 ) {
          printf( "  Descriptor Error\n" );
        }

        if ( ( p_enh[index].erx.des0_3.des0
               & DWMAC_DESC_ERX_DES0_OVERFLOW_ERROR ) != 0 ) {
          printf( "  Overflow Error\n" );
        }

        if ( ( p_enh[index].erx.des0_3.des0
               &
               DWMAC_DESC_ERX_DES0_TIMESTAMP_AVAIL_OR_CHECKSUM_ERROR_OR_GIANT_FRAME )
             != 0 ) {
          printf( "  Giant Frame\n" );
        }

        if ( ( p_enh[index].erx.des0_3.des0
               & DWMAC_DESC_ERX_DES0_LATE_COLLISION ) != 0 ) {
          printf( "  Late Collision\n" );
        }

        if ( ( p_enh[index].erx.des0_3.des0
               & DWMAC_DESC_ERX_DES0_RECEIVE_WATCHDOG_TIMEOUT ) != 0
             || ( p_enh[index].erx.des0_3.des0
                  & DWMAC_DESC_ERX_DES0_RECEIVE_ERROR ) != 0 ) {
          printf( "  IP Header or IP Payload:\n" );

          if ( ( p_enh[index].erx.des0_3.des0
                 & DWMAC_DESC_ERX_DES0_RECEIVE_WATCHDOG_TIMEOUT ) != 0 ) {
            printf( "   Watchdog Timeout\n" );
          }

          if ( ( p_enh[index].erx.des0_3.des0
                 & DWMAC_DESC_ERX_DES0_RECEIVE_ERROR ) != 0 ) {
            printf( "   Receive Error\n" );
          }
        }

        if ( ( p_enh[index].erx.des0_3.des0 & DWMAC_DESC_ERX_DES0_CRC_ERROR )
             != 0 ) {
          printf( "  CRC Error\n" );
        }
      }

      printf( "des1\n" );
      printf(
        " %u Disable Interrupt on Completion\n"
        " %lu Receive Buffer 2 Size\n"
        " %u Receive End of Ring\n"
        " %u Second Addr. Chained\n"
        " %lu Receive Buffer 1 Size\n",
        ( p_enh[index].erx.des0_3.des1
          & DWMAC_DESC_ERX_DES1_DISABLE_IRQ_ON_COMPLETION ) != 0,
        DWMAC_DESC_ERX_DES1_RECEIVE_BUFF_2_SIZE_GET( p_enh[index].erx.des0_3.
                                                     des1 ),
        ( p_enh[index].erx.des0_3.des1 & DWMAC_DESC_ERX_DES1_RECEIVE_END_OF_RING ) != 0,
        ( p_enh[index].erx.des0_3.des1 & DWMAC_DESC_ERX_DES1_SECOND_ADDR_CHAINED ) != 0,
        DWMAC_DESC_ERX_DES1_RECEIVE_BUFF_1_SIZE_GET( p_enh[index].erx.des0_3.
                                                     des1 )
        );
      printf( "des2\n" );
      printf( " %p Buffer 1 Address\n", (void *) p_enh[index].erx.des0_3.des2 );
      printf( "des3\n" );
      printf( " %p Buffer 2 Address\n", (void *) p_enh[index].erx.des0_3.des3 );
    }
  }
}
622
623static int dwmac_desc_enh_create_rx_desc( dwmac_common_context *self )
624{
625  int          eno        = 0;
626  const size_t NUM_DESCS  = (size_t) self->bsd_config->rbuf_count;
627  const size_t SIZE_DESCS = NUM_DESCS * sizeof( dwmac_desc_ext );
628  void        *desc_mem   = NULL;
629
630
631  assert( NULL == self->dma_rx );
632
633  /* Allocate an array of mbuf pointers */
634  self->mbuf_addr_rx = calloc( NUM_DESCS, sizeof( struct mbuf * ) );
635
636  if ( self->mbuf_addr_rx == NULL ) {
637    eno = ENOMEM;
638  }
639
640  /* Allocate an array of dma descriptors */
641  if ( eno == 0 ) {
642    eno = ( self->CFG->CALLBACK.mem_alloc_nocache )(
643      self->arg,
644      &desc_mem,
645      SIZE_DESCS
646      );
647  }
648
649  if ( eno == 0 ) {
650    if ( desc_mem != NULL ) {
651      memset( desc_mem, 0, SIZE_DESCS );
652      DWMAC_COMMON_DSB();
653    } else {
654      eno = ENOMEM;
655    }
656  }
657
658  if ( eno == 0 ) {
659    self->dma_rx = (volatile dwmac_desc *) desc_mem;
660    DWMAC_COMMON_DSB();
661  }
662
663  return eno;
664}
665
666static void dwmac_desc_enh_init_rx_desc(
667  dwmac_common_context *self,
668  const unsigned int    index )
669{
670  volatile dwmac_desc_ext *p_enh       =
671    (volatile dwmac_desc_ext *) self->dma_rx;
672  const size_t             NUM_DESCS   = (size_t) self->bsd_config->rbuf_count;
673  char                    *clust_start =
674    mtod( self->mbuf_addr_rx[index], char * );
675
676
677  assert( NULL != p_enh );
678
679  DWMAC_COMMON_DSB();
680
681  rtems_cache_invalidate_multiple_data_lines(
682    clust_start,
683    DWMAC_DESC_COM_BUF_SIZE + ETHER_ALIGN
684    );
685
686  if ( self->mbuf_addr_rx[index] != NULL ) {
687    p_enh[index].erx.des0_3.des1 = DWMAC_DESC_ERX_DES1_RECEIVE_BUFF_1_SIZE_SET(
688      p_enh->erx.des0_3.des1,
689      DWMAC_DESC_COM_BUF_SIZE );
690  } else {
691    p_enh[index].erx.des0_3.des1 = DWMAC_DESC_ERX_DES1_RECEIVE_BUFF_1_SIZE_SET(
692      p_enh->erx.des0_3.des1,
693      0 );
694  }
695
696  p_enh[index].erx.des0_3.des2 = (uint32_t) clust_start;
697
698  /* The network controller supports adding a second data buffer to
699   * p_enh->erx.des0_3.des3. For simplicity reasons we will not do this */
700  dwmac_desc_enh_rx_set_on_ring_chain( &p_enh[index],
701                                       ( index == NUM_DESCS - 1 ) );
702  DWMAC_COMMON_DSB();
703  p_enh[index].erx.des0_3.des0 = DWMAC_DESC_ERX_DES0_OWN_BIT;
704}
705
706static int dwmac_desc_enh_destroy_rx_desc( dwmac_common_context *self )
707{
708  int                  eno    = 0;
709  volatile dwmac_desc *dma_rx = self->dma_rx;
710
711
712  if ( self->mbuf_addr_rx != NULL ) {
713    free( self->mbuf_addr_rx, 0 );
714    self->mbuf_addr_rx = NULL;
715  }
716
717  if ( dma_rx != NULL ) {
718    eno = self->CFG->CALLBACK.mem_free_nocache(
719      self->arg,
720      RTEMS_DEVOLATILE( void *, dma_rx )
721    );
722    self->dma_rx = NULL;
723  }
724
725  DWMAC_COMMON_DSB();
726
727  return eno;
728}
729
730static void dwmac_desc_enh_release_rx_bufs( dwmac_common_context *self )
731{
732  volatile dwmac_desc_ext *p_enh     = (volatile dwmac_desc_ext *) self->dma_rx;
733  const size_t             NUM_DESCS = (size_t) self->bsd_config->rbuf_count;
734  unsigned int             i;
735
736
737  assert( p_enh != NULL );
738
739  for ( i = 0; i < NUM_DESCS; ++i ) {
740    if ( p_enh[i].erx.des0_3.des2 != 0 ) {
741      struct mbuf *dummy;
742
743      assert( self->mbuf_addr_rx[i] != NULL );
744
745      MFREE( self->mbuf_addr_rx[i], dummy );
746      (void) dummy;
747      memset(
748        RTEMS_DEVOLATILE( void *, &p_enh[i].erx ),
749        0,
750        sizeof( dwmac_desc_ext )
751      );
752    }
753  }
754
755  self->dma_rx = (volatile dwmac_desc *) p_enh;
756  DWMAC_COMMON_DSB();
757}
758
759static int dwmac_desc_enh_create_tx_desc( dwmac_common_context *self )
760{
761  int          eno        = 0;
762  void        *mem_desc   = NULL;
763  const size_t NUM_DESCS  = (size_t) self->bsd_config->xbuf_count;
764  const size_t SIZE_DESCS = NUM_DESCS * sizeof( dwmac_desc_ext );
765
766
767  assert( self->dma_tx == NULL );
768
769  /* Allocate an array of mbuf pointers */
770  self->mbuf_addr_tx = calloc( NUM_DESCS, sizeof( struct mbuf * ) );
771
772  if ( self->mbuf_addr_tx == NULL ) {
773    eno = ENOMEM;
774  }
775
776  if ( eno == 0 ) {
777    eno = ( self->CFG->CALLBACK.mem_alloc_nocache )(
778      self->arg,
779      &mem_desc,
780      SIZE_DESCS
781      );
782  }
783
784  if ( eno == 0 ) {
785    if ( mem_desc != NULL ) {
786      memset( mem_desc, 0, SIZE_DESCS );
787      DWMAC_COMMON_DSB();
788    } else {
789      eno = ENOMEM;
790    }
791  }
792
793  if ( eno == 0 ) {
794    self->dma_tx = mem_desc;
795    DWMAC_COMMON_DSB();
796  }
797
798  return eno;
799}
800
801static void dwmac_desc_enh_init_tx_desc( dwmac_common_context *self )
802{
803  volatile dwmac_desc_ext *p_enh     = (volatile dwmac_desc_ext *) self->dma_tx;
804  const size_t             NUM_DESCS = (size_t) self->bsd_config->xbuf_count;
805  unsigned int             i;
806
807
808  assert( p_enh != NULL );
809
810  for ( i = 0; i < NUM_DESCS; ++i ) {
811    dwmac_desc_enh_tx_set_on_ring_chain( &p_enh[i], ( i == NUM_DESCS - 1 ) );
812  }
813
814  self->dma_tx = (volatile dwmac_desc *) &p_enh[0];
815  DWMAC_COMMON_DSB();
816}
817
818static int dwmac_desc_enh_destroy_tx_desc( dwmac_common_context *self )
819{
820  int   eno      = 0;
821  void *mem_desc = RTEMS_DEVOLATILE( void *, self->dma_tx );
822
823
824  if ( self->mbuf_addr_tx != NULL ) {
825    free( self->mbuf_addr_tx, 0 );
826    self->mbuf_addr_tx = NULL;
827  }
828
829  if ( mem_desc != NULL ) {
830    eno          = self->CFG->CALLBACK.mem_free_nocache( self->arg, mem_desc );
831    mem_desc     = NULL;
832    self->dma_tx = (volatile dwmac_desc *) mem_desc;
833  }
834
835  DWMAC_COMMON_DSB();
836
837  return eno;
838}
839
840static void dwmac_desc_enh_release_tx_bufs( dwmac_common_context *self )
841{
842  volatile dwmac_desc_ext *p_enh     = (volatile dwmac_desc_ext *) self->dma_tx;
843  const size_t             NUM_DESCS = (size_t) self->bsd_config->xbuf_count;
844  unsigned int             i;
845
846
847  assert( p_enh != NULL );
848
849  for ( i = 0; i < NUM_DESCS; ++i ) {
850    if ( p_enh[i].etx.des0_3.des1 != 0 ) {
851      struct mbuf *dummy;
852
853      assert( self->mbuf_addr_tx[i] != NULL );
854
855      MFREE( self->mbuf_addr_tx[i], dummy );
856      (void) dummy;
857      memset( RTEMS_DEVOLATILE( void *,
858                            &p_enh[i].etx ), 0, sizeof( dwmac_desc_ext ) );
859    }
860  }
861
862  self->dma_tx = (volatile dwmac_desc *) p_enh;
863  DWMAC_COMMON_DSB();
864}
865
866static inline size_t dwmac_desc_enh_get_rx_frame_len(
867  dwmac_common_context *self,
868  const unsigned int    desc_idx )
869{
870  volatile dwmac_desc_ext *p_enh = (volatile dwmac_desc_ext *) self->dma_rx;
871
872
873  /* The type-1 checksum offload engines append the checksum at
874   * the end of frame and the two bytes of checksum are added in
875   * the length.
876   * Adjust for that in the framelen for type-1 checksum offload
877   * engines. */
878  if ( self->dmagrp->hw_feature & DMAGRP_HW_FEATURE_RXTYP1COE ) {
879    return DWMAC_DESC_ERX_DES0_FRAME_LENGTH_GET( p_enh[desc_idx].erx.des0_3.des0 )
880           - 2U;
881  } else {
882    return DWMAC_DESC_ERX_DES0_FRAME_LENGTH_GET( p_enh[desc_idx].erx.des0_3.des0 );
883  }
884}
885
886static bool dwmac_desc_enh_am_i_rx_owner(
887  dwmac_common_context *self,
888  const unsigned int    desc_idx )
889{
890  volatile dwmac_desc_ext *p_enh = (volatile dwmac_desc_ext *) self->dma_rx;
891  bool                     am_i_owner;
892
893
894  DWMAC_COMMON_DSB();
895  am_i_owner =
896    ( p_enh[desc_idx].erx.des0_3.des0 & DWMAC_DESC_ERX_DES0_OWN_BIT ) == 0;
897
898  return am_i_owner;
899}
900
901static bool dwmac_desc_enh_am_i_tx_owner(
902  dwmac_common_context *self,
903  const unsigned int    idx_tx )
904{
905  volatile dwmac_desc_ext *p_enh = (volatile dwmac_desc_ext *) self->dma_tx;
906  bool                     am_i_owner;
907
908
909  DWMAC_COMMON_DSB();
910  am_i_owner =
911    ( p_enh[idx_tx].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_OWN_BIT ) == 0;
912
913  return am_i_owner;
914}
915
916static void dwmac_desc_enh_release_tx_ownership(
917  dwmac_common_context *self,
918  const unsigned int    idx_tx )
919{
920  volatile dwmac_desc_ext *p_enh = (volatile dwmac_desc_ext *) self->dma_tx;
921
922
923  DWMAC_COMMON_DSB();
924  p_enh[idx_tx].erx.des0_3.des0 |= DWMAC_DESC_ETX_DES0_OWN_BIT;
925}
926
927static int dwmac_desc_enh_get_tx_ls(
928  dwmac_common_context *self,
929  const unsigned int    idx_tx )
930{
931  volatile dwmac_desc_ext *p_enh = (volatile dwmac_desc_ext *) self->dma_tx;
932
933
934  return ( ( p_enh[idx_tx].etx.des0_3.des0
935             & DWMAC_DESC_ETX_DES0_LAST_SEGMENT ) != 0 );
936}
937
938static void dwmac_desc_enh_release_tx_desc(
939  dwmac_common_context *self,
940  const unsigned int    idx_tx )
941{
942  volatile dwmac_desc_ext *p_enh = (volatile dwmac_desc_ext *) self->dma_tx;
943
944
945  p_enh[idx_tx].etx.des0_3.des0 =
946    p_enh[idx_tx].etx.des0_3.des0
947    & ( DWMAC_DESC_ETX_DES0_TRANSMIT_END_OF_RING
948        | DWMAC_DESC_ETX_DES0_SECOND_ADDR_CHAINED );
949
950  p_enh[idx_tx].etx.des0_3.des1 = 0;
951}
952
953static void dwmac_desc_enh_prepare_tx_desc(
954  dwmac_common_context *self,
955  const unsigned int    idx,
956  const bool            is_first,
957  const size_t          len,
958  const void           *pdata )
959{
960  volatile dwmac_desc_ext *p_enh = (volatile dwmac_desc_ext *) self->dma_tx;
961
962
963  if ( is_first ) {
964    p_enh[idx].etx.des0_3.des0 |= DWMAC_DESC_ETX_DES0_FIRST_SEGMENT;
965  }
966
967  dwmac_desc_enh_set_tx_desc_len( &p_enh[idx], len );
968
969  p_enh[idx].etx.des0_3.des2 = (uintptr_t) pdata;
970}
971
972static void dwmac_desc_enh_close_tx_desc(
973  dwmac_common_context *self,
974  const unsigned int    idx_tx
975)
976{
977  volatile dwmac_desc_ext *p_enh = (volatile dwmac_desc_ext *) self->dma_tx;
978
979  p_enh[idx_tx].etx.des0_3.des0 |= DWMAC_DESC_ETX_DES0_LAST_SEGMENT;
980  p_enh[idx_tx].etx.des0_3.des0 |= DWMAC_DESC_ETX_DES0_IRQ_ON_COMPLETION;
981}
982
983static bool dwmac_desc_enh_is_first_rx_segment(
984  dwmac_common_context *self,
985  const unsigned int    descriptor_index )
986{
987  volatile dwmac_desc_ext *p_descs = (volatile dwmac_desc_ext *) self->dma_rx;
988
989
990  return ( ( p_descs[descriptor_index].erx.des0_3.des0
991             & DWMAC_DESC_ERX_DES0_FIRST_DESCRIPTOR ) != 0 );
992}
993
994static bool dwmac_desc_enh_is_last_rx_segment(
995  dwmac_common_context *self,
996  const unsigned int    descriptor_index )
997{
998  volatile dwmac_desc_ext *p_descs = (volatile dwmac_desc_ext *) self->dma_rx;
999
1000
1001  return ( ( p_descs[descriptor_index].erx.des0_3.des0
1002             & DWMAC_DESC_ERX_DES0_LAST_DESCRIPTOR ) != 0 );
1003}
1004
1005static int dwmac_desc_enh_validate( dwmac_common_context *self )
1006{
1007  /* Does the hardware support enhanced descriptors? */
1008  if ( ( self->dmagrp->hw_feature & DMAGRP_HW_FEATURE_ENHDESSEL ) != 0 ) {
1009    return 0;
1010  } else {
1011    return EINVAL;
1012  }
1013}
1014
1015static bool dwmac_desc_enh_use_enhanced_descs( dwmac_common_context *self )
1016{
1017  (void) self;
1018
1019  /* Yes, we use enhanced descriptors */
1020  return true;
1021}
1022
/* Handler table for the enhanced DMA descriptor format.  Each entry points
 * to one of the static handlers in this file, except alloc_data_buf which
 * reuses the common mbuf allocator shared with the normal-descriptor code. */
const dwmac_common_desc_ops dwmac_desc_ops_enhanced = {
  .validate             = dwmac_desc_enh_validate,
  .use_enhanced_descs   = dwmac_desc_enh_use_enhanced_descs,
  .tx_status            = dwmac_desc_enh_get_tx_status,
  .rx_status            = dwmac_desc_enh_get_rx_status,
  .create_rx_desc       = dwmac_desc_enh_create_rx_desc,
  .create_tx_desc       = dwmac_desc_enh_create_tx_desc,
  .destroy_rx_desc      = dwmac_desc_enh_destroy_rx_desc,
  .destroy_tx_desc      = dwmac_desc_enh_destroy_tx_desc,
  .init_rx_desc         = dwmac_desc_enh_init_rx_desc,
  .init_tx_desc         = dwmac_desc_enh_init_tx_desc,
  .release_rx_bufs      = dwmac_desc_enh_release_rx_bufs,
  .release_tx_bufs      = dwmac_desc_enh_release_tx_bufs,
  .alloc_data_buf       = dwmac_desc_com_new_mbuf,
  .am_i_tx_owner        = dwmac_desc_enh_am_i_tx_owner,
  .am_i_rx_owner        = dwmac_desc_enh_am_i_rx_owner,
  .release_tx_desc      = dwmac_desc_enh_release_tx_desc,
  .prepare_tx_desc      = dwmac_desc_enh_prepare_tx_desc,
  .close_tx_desc        = dwmac_desc_enh_close_tx_desc,
  .get_tx_ls            = dwmac_desc_enh_get_tx_ls,
  .release_tx_ownership = dwmac_desc_enh_release_tx_ownership,
  .get_rx_frame_len     = dwmac_desc_enh_get_rx_frame_len,
  .is_first_rx_segment  = dwmac_desc_enh_is_first_rx_segment,
  .is_last_rx_segment   = dwmac_desc_enh_is_last_rx_segment,
  .print_tx_desc        = dwmac_desc_enh_print_tx_desc,
  .print_rx_desc        = dwmac_desc_enh_print_rx_desc,
};
1050
/* Public wrapper around the enhanced-descriptor handler table.  Pass this
 * object in the driver configuration to select enhanced DMA descriptors. */
const dwmac_descriptor_ops DWMAC_DESCRIPTOR_OPS_ENHANCED =
  DWMAC_DESCRIPTOR_OPS_INITIALIZER(
    &dwmac_desc_ops_enhanced
    );
Note: See TracBrowser for help on using the repository browser.