source: rtems/c/src/libchip/network/dwmac-desc-enh.c @ f28b8d45

4.115
Last change on this file since f28b8d45 was f28b8d45, checked in by Ralf Kirchner <ralf.kirchner@…>, on Apr 10, 2014 at 3:03:54 PM

libchip: Cleanup

  • Property mode set to 100644
File size: 33.7 KB
Line 
1/**
2 * @file
3 *
4 * @brief DWMAC 10/100/1000 Enhanced DMA Descriptor Handling.
5 *
6 * DWMAC 10/100/1000 on-chip Ethernet controllers.
7 * Functions and data for the handling of enhanced DMA descriptors.
8 */
9
10/*
11 * Copyright (c) 2013 embedded brains GmbH.  All rights reserved.
12 *
13 *  embedded brains GmbH
14 *  Dornierstr. 4
15 *  82178 Puchheim
16 *  Germany
17 *  <rtems@embedded-brains.de>
18 *
19 * The license and distribution terms for this file may be
20 * found in the file LICENSE in this distribution or at
21 * http://www.rtems.org/license/LICENSE.
22 */
23
24#include <assert.h>
25#include <stdlib.h>
26#include <stdio.h>
27#include "dwmac-common.h"
28#include "dwmac-desc-com.h"
29#include "dwmac-core.h"
30#include <sys/queue.h>
31
/* Compile-time debug tracing switch: define DWMAC_DESC_ENH_DEBUG to route
 * the descriptor status messages below to printk(), otherwise the macro
 * expands to an empty statement and the messages cost nothing. */
#undef DWMAC_DESC_ENH_DEBUG
#ifdef DWMAC_DESC_ENH_DEBUG
#define DWMAC_DESC_ENH_PRINT_DBG( fmt, args ... )  printk( fmt, ## args )
#else
#define DWMAC_DESC_ENH_PRINT_DBG( fmt, args ... )  do { } while ( 0 )
#endif
38
/* IP payload type as reported in the extended receive descriptor status
 * word (DES4).  The implicit enumerator values (0..3) must match the
 * hardware field decoded by DWMAC_DESC_EXT_ERX_DES4_IP_PAYLOAD_TYPE_GET()
 * (see the predicates below); do not reorder. */
typedef enum {
  DWMAC_IP_PAYLOAD_TYPE_UNKNOWN, /* 0: unknown or no IP payload */
  DWMAC_IP_PAYLOAD_TYPE_UDP,     /* 1: UDP datagram */
  DWMAC_IP_PAYLOAD_TYPE_TCP,     /* 2: TCP segment */
  DWMAC_IP_PAYLOAD_TYPE_ICMP     /* 3: ICMP message */
} dwmac_ip_payload_type;
45
46static void dwmac_desc_enh_rx_set_on_ring_chain(
47  volatile dwmac_desc_ext *p, int end )
48{
49  /* For simplicity reasons we will not use the second buffer.
50   * If we would use it we would have to set the size to MCLBYTES -1 */
51  p->erx.des0_3.des1 = DWMAC_DESC_ERX_DES1_RECEIVE_BUFF_2_SIZE_SET(
52    p->erx.des0_3.des1, 0
53    );
54  p->erx.des0_3.des3 = (uint32_t) NULL;
55
56  if ( end )
57    p->erx.des0_3.des1 |= DWMAC_DESC_ERX_DES1_RECEIVE_END_OF_RING;
58}
59
60static void dwmac_desc_enh_tx_set_on_ring_chain(
61  volatile dwmac_desc_ext *p, const bool end )
62{
63  if ( end )
64    p->etx.des0_3.des0 |= DWMAC_DESC_ETX_DES0_TRANSMIT_END_OF_RING;
65}
66
67static void dwmac_desc_enh_set_tx_desc_len(
68  volatile dwmac_desc_ext *p_enh, size_t len )
69{
70  p_enh->etx.des0_3.des1 = DWMAC_DESC_ETX_DES1_TRANSMIT_BUFFER_1_SIZE_SET(
71    p_enh->etx.des0_3.des1,
72    len
73    );
74}
75
76static bool dwmac_desc_enh_is_giant_frame( const uint32_t des0 )
77{
78  return (
79    ( des0
80      & DWMAC_DESC_ERX_DES0_TIMESTAMP_AVAIL_OR_CHECKSUM_ERROR_OR_GIANT_FRAME
81    ) != 0
82    );
83}
84
85static bool dwmac_desc_enh_is_udp_payload( const uint32_t des4 )
86{
87  return (
88    DWMAC_DESC_EXT_ERX_DES4_IP_PAYLOAD_TYPE_GET( des4 )
89    == DWMAC_IP_PAYLOAD_TYPE_UDP
90    );
91}
92
93static bool dwmac_desc_enh_is_tcp_payload( const uint32_t des4 )
94{
95  return (
96    DWMAC_DESC_EXT_ERX_DES4_IP_PAYLOAD_TYPE_GET( des4 )
97    == DWMAC_IP_PAYLOAD_TYPE_TCP
98    );
99}
100
101static bool dwmac_desc_enh_is_icmp_payload( const uint32_t des4 )
102{
103  return (
104    DWMAC_DESC_EXT_ERX_DES4_IP_PAYLOAD_TYPE_GET( des4 )
105    == DWMAC_IP_PAYLOAD_TYPE_ICMP
106    );
107}
108
/**
 * @brief Evaluates the checksum offload engine (COE) status of a received
 * frame.
 *
 * Inspects DES0 and the extended status word DES4 of the receive
 * descriptor and maps the hardware checksum result to a driver-level
 * frame status.  Two decoding schemes are used depending on whether the
 * extended status is available (DES0 bit tested below).
 *
 * @param p_enh Receive descriptor to evaluate.
 * @retval DWMAC_COMMON_RX_FRAME_STATUS_GOOD      Checksum OK (or no error).
 * @retval DWMAC_COMMON_RX_FRAME_STATUS_LLC_SNAP  IEEE 802.3 type frame.
 * @retval DWMAC_COMMON_RX_FRAME_STATUS_CSUM_NONE Checksum error; let the
 *                                                stack verify in software.
 * @retval DWMAC_COMMON_RX_FRAME_STATUS_DISCARD   Frame should be dropped.
 */
static dwmac_common_rx_frame_status dwmac_desc_enh_coe_status(
  volatile dwmac_desc_ext *p_enh )
{
  dwmac_common_rx_frame_status ret  = DWMAC_COMMON_RX_FRAME_STATUS_GOOD;
  const uint32_t               DES0 = p_enh->erx.des0_3.des0;
  const uint32_t               DES4 = p_enh->erx.des4;


  /* Extended status available: decode individual DES4 bits */
  if ( ( DES0 & DWMAC_DESC_ERX_DES0_EXT_STATUS_AVAIL_OR_RX_MAC_ADDR_STATUS )
       != 0 ) {
    if ( !dwmac_desc_enh_is_giant_frame( DES0 )
         && ( DES0 & DWMAC_DESC_ERX_DES0_FREAME_TYPE ) == 0
         && ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV6_PACKET_RECEIVED ) == 0
         && ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV4_PACKET_RECEIVED ) == 0
         && ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IP_PAYLOAD_ERROR ) == 0
         && ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IP_HEADER_ERROR ) == 0 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "RX Des0 status: IEEE 802.3 Type frame.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_LLC_SNAP;
    } else if ( ( ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV6_PACKET_RECEIVED ) != 0
                  || ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV4_PACKET_RECEIVED )
                  != 0 )
                && dwmac_desc_enh_is_giant_frame( DES0 ) ) {
      DWMAC_DESC_ENH_PRINT_DBG( "RX Des0 status: IPv4/6 No CSUM Error.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_GOOD;
    } else if ( ( ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV6_PACKET_RECEIVED ) != 0
                  || ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV4_PACKET_RECEIVED )
                  != 0 )
                && ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IP_PAYLOAD_ERROR ) != 0 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "RX Des0 status: IPv4/6 Payload Error.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_CSUM_NONE;
    } else if ( ( ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV6_PACKET_RECEIVED ) != 0
                  || ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV4_PACKET_RECEIVED )
                  != 0 )
                && ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IP_HEADER_ERROR ) != 0 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "RX Des0 status: IPv4/6 Header Error.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_CSUM_NONE;
    /* NOTE(review): this combined header-and-payload-error branch is
     * unreachable, because the payload-error-only branch above already
     * matches whenever the payload error bit is set.  Harmless in
     * practice since the resulting status (CSUM_NONE) is identical;
     * only the debug message differs. */
    } else if ( ( ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV6_PACKET_RECEIVED ) != 0
                  || ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV4_PACKET_RECEIVED )
                  != 0 )
                && ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IP_PAYLOAD_ERROR ) != 0
                && ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IP_HEADER_ERROR ) != 0 ) {
      DWMAC_DESC_ENH_PRINT_DBG(
        "RX Des0 status: IPv4/6 Header and Payload Error.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_CSUM_NONE;
    } else if ( ( ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV6_PACKET_RECEIVED ) != 0
                  || ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV4_PACKET_RECEIVED )
                  != 0 )
                && ( !dwmac_desc_enh_is_udp_payload( DES4 ) )
                && ( !dwmac_desc_enh_is_tcp_payload( DES4 ) )
                && ( !dwmac_desc_enh_is_icmp_payload( DES4 ) ) ) {
      DWMAC_DESC_ENH_PRINT_DBG(
        "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_DISCARD;
    } else if ( ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV6_PACKET_RECEIVED ) == 0
                && ( DES4 & DWMAC_DESC_EXT_ERX_DES4_IPV4_PACKET_RECEIVED )
                == 0 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "RX Des0 status: No IPv4, IPv6 frame.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_DISCARD;
    }
  } else {
    /* No extended status: fold the DES0 giant-frame and frame-type bits
     * into a 3-bit code and decode it per the table below. */
    uint32_t status = (
      (uint32_t) ( dwmac_desc_enh_is_giant_frame( DES0 ) << 2U )
      | (uint32_t) ( ( ( DES0 & DWMAC_DESC_ERX_DES0_FREAME_TYPE )
                       != 0 ) << 1U ) )
                      & 0x7U;

    /* bits 5 7 0 | Frame status
     * ----------------------------------------------------------
     *      0 0 0 | IEEE 802.3 Type frame (length < 1536 octects)
     *      1 0 0 | IPv4/6 No CSUM errorS.
     *      1 0 1 | IPv4/6 CSUM PAYLOAD error
     *      1 1 0 | IPv4/6 CSUM IP HR error
     *      1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
     *      0 0 1 | IPv4/6 unsupported IP PAYLOAD
     *      0 1 1 | COE bypassed.. no IPv4/6 frame
     *      0 1 0 | Reserved.
     */
    if ( status == 0x0 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "RX Des0 status: IEEE 802.3 Type frame.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_LLC_SNAP;
    } else if ( status == 0x4 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "RX Des0 status: IPv4/6 No CSUM errorS.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_GOOD;
    } else if ( status == 0x5 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "RX Des0 status: IPv4/6 Payload Error.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_CSUM_NONE;
    } else if ( status == 0x6 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "RX Des0 status: IPv4/6 Header Error.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_CSUM_NONE;
    } else if ( status == 0x7 ) {
      DWMAC_DESC_ENH_PRINT_DBG(
        "RX Des0 status: IPv4/6 Header and Payload Error.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_CSUM_NONE;
    } else if ( status == 0x1 ) {
      DWMAC_DESC_ENH_PRINT_DBG(
        "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_DISCARD;
    } else if ( status == 0x3 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "RX Des0 status: No IPv4, IPv6 frame.\n" );
      ret = DWMAC_COMMON_RX_FRAME_STATUS_DISCARD;
    }
  }

  return ret;
}
214
215static int dwmac_desc_enh_get_tx_status(
216  dwmac_common_context *self,
217  const unsigned int    idx_tx )
218{
219  int                                 ret    = 0;
220  volatile dwmac_desc_ext            *dma_tx =
221    (volatile dwmac_desc_ext *) self->dma_tx;
222  volatile dwmac_desc_ext            *p_desc = &dma_tx[idx_tx];
223  dwmac_common_desc_status_counts_tx *counts =
224    &self->stats.desc_status_counts_tx;
225
226
227  if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_ERROR_SUMMARY ) != 0 ) {
228    DWMAC_DESC_ENH_PRINT_DBG( "DWMAC TX error... 0x%08x\n", p->des01.etx );
229
230    if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_JABBER_TIMEOUT )
231         != 0 ) {
232      DWMAC_DESC_ENH_PRINT_DBG( "\tjabber_timeout error\n" );
233      ++counts->jabber;
234    }
235
236    if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_FRAME_FLUSHED )
237         != 0 ) {
238      DWMAC_DESC_ENH_PRINT_DBG( "\tframe_flushed error\n" );
239      ++counts->frame_flushed;
240      dwmac_core_dma_flush_tx_fifo( self );
241    }
242
243    if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_LOSS_OF_CARRIER )
244         != 0 ) {
245      DWMAC_DESC_ENH_PRINT_DBG( "\tloss_carrier error\n" );
246      ++counts->losscarrier;
247    }
248
249    if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_NO_CARRIER )
250         != 0 ) {
251      DWMAC_DESC_ENH_PRINT_DBG( "\tno_carrier error\n" );
252      ++counts->no_carrier;
253    }
254
255    if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_EXCESSIVE_COLLISION )
256         != 0 ) {
257      DWMAC_DESC_ENH_PRINT_DBG( "\texcessive_collisions\n" );
258      ++counts->excessive_collisions;
259    }
260
261    if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_EXCESSIVE_DEFERAL )
262         != 0 ) {
263      DWMAC_DESC_ENH_PRINT_DBG( "\texcessive tx_deferral\n" );
264      ++counts->excessive_deferral;
265    }
266
267    if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_UNDERFLOW_ERROR )
268         != 0 ) {
269      DWMAC_DESC_ENH_PRINT_DBG( "\tunderflow error\n" );
270      dwmac_core_dma_flush_tx_fifo( self );
271      ++counts->underflow;
272    }
273
274    if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_IP_HEADER_ERROR )
275         != 0 ) {
276      DWMAC_DESC_ENH_PRINT_DBG( "\tTX IP header csum error\n" );
277      ++counts->ip_header_error;
278    }
279
280    if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_IP_PAYLOAD_ERROR )
281         != 0 ) {
282      DWMAC_DESC_ENH_PRINT_DBG( "\tAddr/Payload csum error\n" );
283      ++counts->payload_error;
284      dwmac_core_dma_flush_tx_fifo( self );
285    }
286
287    ret = -1;
288  }
289
290  if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_DEFERRED_BIT ) != 0 ) {
291    DWMAC_DESC_ENH_PRINT_DBG( "GMAC TX status: tx deferred\n" );
292    ++counts->deferred;
293  }
294
295  if ( ( p_desc->etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_VLAN_FRAME ) != 0 ) {
296    DWMAC_DESC_ENH_PRINT_DBG( "GMAC TX status: VLAN frame\n" );
297    ++counts->vlan;
298  }
299
300  return ret;
301}
302
/**
 * @brief Evaluates the receive status of a descriptor.
 *
 * Decodes the DES0 error and filter bits, increments the matching
 * statistics counters and, for good frames, delegates to
 * dwmac_desc_enh_coe_status() for the checksum offload result.
 *
 * @param self     Driver context (provides the RX descriptor ring and the
 *                 statistics counters).
 * @param desc_idx Index of the descriptor to evaluate.
 * @return Frame status; DWMAC_COMMON_RX_FRAME_STATUS_DISCARD if the frame
 *         must be dropped.
 */
static dwmac_common_rx_frame_status dwmac_desc_enh_get_rx_status(
  dwmac_common_context *self,
  const unsigned int    desc_idx )
{
  dwmac_common_desc_status_counts_rx *counts =
    &self->stats.desc_status_counts_rx;
  dwmac_common_rx_frame_status        ret    =
    DWMAC_COMMON_RX_FRAME_STATUS_GOOD;
  volatile dwmac_desc_ext            *dma_rx =
    (volatile dwmac_desc_ext *) self->dma_rx;
  const uint32_t                      DES0   = dma_rx[desc_idx].erx.des0_3.des0;


  if ( ( DES0 & DWMAC_DESC_ERX_DES0_ERROR_SUMMARY ) != 0 ) {
    DWMAC_DESC_ENH_PRINT_DBG( "GMAC RX Error Summary 0x%08x\n",
                              DES0 );

    if ( ( DES0 & DWMAC_DESC_ERX_DES0_DESCRIPTOR_ERROR ) != 0 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "\tdescriptor error\n" );
      ++counts->descriptor_error;
    }

    if ( ( DES0 & DWMAC_DESC_ERX_DES0_OVERFLOW_ERROR ) != 0 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "\toverflow error\n" );
      ++counts->overflow_error;
    }

    if ( dwmac_desc_enh_is_giant_frame( DES0 ) ) {
      DWMAC_DESC_ENH_PRINT_DBG( "\tIPC Csum Error/Giant frame\n" );
      ++counts->giant_frame;
    }

    if ( ( DES0 & DWMAC_DESC_ERX_DES0_LATE_COLLISION ) != 0 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "\tlate_collision error\n" );
      ++counts->late_collision;
    }

    if ( ( DES0 & DWMAC_DESC_ERX_DES0_RECEIVE_WATCHDOG_TIMEOUT )
         != 0 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "\treceive_watchdog error\n" );
      ++counts->watchdog_timeout;
    }

    if ( ( DES0 & DWMAC_DESC_ERX_DES0_RECEIVE_ERROR ) != 0 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "\tReceive Error\n" );
      ++counts->receive_error;
    }

    if ( ( DES0 & DWMAC_DESC_ERX_DES0_CRC_ERROR ) != 0 ) {
      DWMAC_DESC_ENH_PRINT_DBG( "\tCRC error\n" );
      ++counts->crc_error;
    }

    ret = DWMAC_COMMON_RX_FRAME_STATUS_DISCARD;
  }

  if ( ret == DWMAC_COMMON_RX_FRAME_STATUS_GOOD ) {
    /* After a payload csum error, the ES bit is set.
     * It doesn't match with the information reported into the databook.
     * At any rate, we need to understand if the CSUM hw computation is ok
     * and report this info to the upper layers. */
    ret = dwmac_desc_enh_coe_status( &dma_rx[desc_idx] );
  }

  /* Dribble bit errors are counted but do not discard the frame */
  if ( ( DES0 & DWMAC_DESC_ERX_DES0_DRIBBLE_BIT_ERROR ) != 0 ) {
    DWMAC_DESC_ENH_PRINT_DBG( "GMAC RX: dribbling error\n" );
    ++counts->dribble_bit_error;
  }

  if ( ( DES0 & DWMAC_DESC_ERX_DES0_SRC_ADDR_FILTER_FAIL ) != 0 ) {
    DWMAC_DESC_ENH_PRINT_DBG( "GMAC RX : Source Address filter fail\n" );
    ++counts->source_addr_fail;
    ret = DWMAC_COMMON_RX_FRAME_STATUS_DISCARD;
  }

  if ( ( DES0 & DWMAC_DESC_ERX_DES0_DEST_ADDR_FILTER_FAIL ) != 0 ) {
    DWMAC_DESC_ENH_PRINT_DBG( "GMAC RX : Dest Address filter fail\n" );
    ++counts->dest_addr_fail;
    ret = DWMAC_COMMON_RX_FRAME_STATUS_DISCARD;
  }

  if ( ( DES0 & DWMAC_DESC_ERX_DES0_LENGTH_ERROR ) != 0 ) {
    DWMAC_DESC_ENH_PRINT_DBG( "GMAC RX: length_error error\n" );
    ++counts->length_error;
    ret = DWMAC_COMMON_RX_FRAME_STATUS_DISCARD;
  }

  if ( ( DES0 & DWMAC_DESC_ERX_DES0_VLAN_TAG ) != 0 ) {
    DWMAC_DESC_ENH_PRINT_DBG( "GMAC RX: VLAN frame tagged\n" );
    ++counts->vlan_tag;
  }

  return ret;
}
397
398static void dwmac_desc_enh_print_tx_desc(
399  volatile dwmac_desc *p,
400  const unsigned int   count )
401{
402  volatile dwmac_desc_ext *p_enh = (volatile dwmac_desc_ext *) p;
403  unsigned int             index;
404
405
406  if ( p_enh != NULL ) {
407    for ( index = 0; index < count; ++index ) {
408      printf( "Transmit DMA Descriptor %d\n", index );
409      printf( "des0\n" );
410      printf(
411        " %u own bit\n"
412        " %u IRQ on Completion\n"
413        " %u Last Segment\n"
414        " %u First Segment\n"
415        " %u Disable CRC\n"
416        " %u Disable Pad\n"
417        " %u Transmit Timestamp Enable\n"
418        " %lu Checksum Insertion Control\n"
419        " %u Transmit End of Ring\n"
420        " %u Second Address Chained\n"
421        " %u Transmit Timestamp Status\n"
422        " %u IP Header Error\n"
423        " %u VLAN Frame\n"
424        " %lu Collision Count\n"
425        " %u Deferred Bit\n",
426        ( p_enh[index].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_OWN_BIT ) != 0,
427        ( p_enh[index].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_IRQ_ON_COMPLETION ) != 0,
428        ( p_enh[index].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_LAST_SEGMENT ) != 0,
429        ( p_enh[index].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_FIRST_SEGMENT ) != 0,
430        ( p_enh[index].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_DISABLE_CRC ) != 0,
431        ( p_enh[index].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_DISABLE_PAD ) != 0,
432        ( p_enh[index].etx.des0_3.des0
433          & DWMAC_DESC_ETX_DES0_TRANSMIT_TIMESTAMP_ENABLE ) != 0,
434        DWMAC_DESC_ETX_DES0_CHECKSUM_INSERTION_CONTROL_GET( p_enh[index].etx.
435                                                            des0_3.des0 ),
436        ( p_enh[index].etx.des0_3.des0
437          & DWMAC_DESC_ETX_DES0_TRANSMIT_END_OF_RING ) != 0,
438        ( p_enh[index].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_SECOND_ADDR_CHAINED ) != 0,
439        ( p_enh[index].etx.des0_3.des0
440          & DWMAC_DESC_ETX_DES0_TRANSMIT_TIMESTAMP_STATUS ) != 0,
441        ( p_enh[index].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_IP_HEADER_ERROR ) != 0,
442        ( p_enh[index].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_VLAN_FRAME ) != 0,
443        DWMAC_DESC_ETX_DES0_COLLISION_COUNT_GET( p_enh[index].etx.des0_3.des0 ),
444        ( p_enh[index].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_DEFERRED_BIT ) != 0
445        );
446
447      if ( ( p_enh[index].etx.des0_3.des0
448             & DWMAC_DESC_ETX_DES0_ERROR_SUMMARY ) != 0 ) {
449        printf( " Error Summary:\n" );
450
451        if ( p_enh[index].etx.des0_3.des0
452             & DWMAC_DESC_ETX_DES0_JABBER_TIMEOUT ) {
453          printf( "  Jabber Timeout\n" );
454        }
455
456        if ( ( p_enh[index].etx.des0_3.des0
457               & DWMAC_DESC_ETX_DES0_FRAME_FLUSHED ) != 0 ) {
458          printf( "  Frame Flush\n" );
459        }
460
461        if ( ( p_enh[index].etx.des0_3.des0
462               & DWMAC_DESC_ETX_DES0_IP_PAYLOAD_ERROR ) != 0 ) {
463          printf( "  Payload Error\n" );
464        }
465
466        if ( ( p_enh[index].etx.des0_3.des0
467               & DWMAC_DESC_ETX_DES0_LOSS_OF_CARRIER ) != 0 ) {
468          printf( "  Loss of Carrier\n" );
469        }
470
471        if ( ( p_enh[index].etx.des0_3.des0
472               & DWMAC_DESC_ETX_DES0_NO_CARRIER ) != 0 ) {
473          printf( "  No Carrier\n" );
474        }
475
476        if ( ( p_enh[index].etx.des0_3.des0
477               & DWMAC_DESC_ETX_DES0_EXCESSIVE_COLLISION ) != 0 ) {
478          printf( "  Excessive Collision\n" );
479        }
480
481        if ( ( p_enh[index].etx.des0_3.des0
482               & DWMAC_DESC_ETX_DES0_EXCESSIVE_COLLISION ) != 0 ) {
483          printf( "  Ecessive Deferral\n" );
484        }
485
486        if ( ( p_enh[index].etx.des0_3.des0
487               & DWMAC_DESC_ETX_DES0_UNDERFLOW_ERROR ) != 0 ) {
488          printf( "  Undeflow Error\n" );
489        }
490      }
491
492      printf( "des1\n" );
493      printf(
494        " %lu Transmit Buffer 2 Size\n"
495        " %lu Transmit Buffer 1 Size\n",
496        DWMAC_DESC_ETX_DES1_TRANSMIT_BUFFER_2_SIZE_GET( p_enh[index].etx.des0_3.
497                                                        des1 ),
498        DWMAC_DESC_ETX_DES1_TRANSMIT_BUFFER_1_SIZE_GET( p_enh[index].etx.des0_3.
499                                                        des1 )
500        );
501      printf( "des2\n" );
502      printf( " %p Buffer 1 Address\n", (void *) p_enh[index].etx.des0_3.des2 );
503      printf( "des3\n" );
504      printf( " %p Buffer 2 Address\n", (void *) p_enh[index].etx.des0_3.des3 );
505    }
506  }
507}
508
509static void dwmac_desc_enh_print_rx_desc(
510  volatile dwmac_desc *p,
511  const unsigned int   count )
512{
513  volatile dwmac_desc_ext *p_enh = (volatile dwmac_desc_ext *) p;
514  unsigned int             index;
515
516
517  if ( p_enh != NULL ) {
518    for ( index = 0; index < count; ++index ) {
519      printf( "Receive DMA Descriptor %d\n", index );
520      printf( "des0\n" );
521      printf(
522        " %u Own Bit\n"
523        " %u Dest. Addr. Filter Fail\n"
524        " %lu Frame Length\n"
525        " %u Source Addr. Filter Fail\n"
526        " %u Length Error\n"
527        " %u VLAN Tag\n"
528        " %u First Descriptor\n"
529        " %u Last Descriptor\n"
530        " %u Frame Type\n"
531        " %u Dribble Bit Error\n"
532        " %u Extended Status Available\n",
533        ( p_enh[index].erx.des0_3.des0 & DWMAC_DESC_ERX_DES0_OWN_BIT ) != 0,
534        ( p_enh[index].erx.des0_3.des0
535          & DWMAC_DESC_ERX_DES0_DEST_ADDR_FILTER_FAIL ) != 0,
536        DWMAC_DESC_ERX_DES0_FRAME_LENGTH_GET(
537          p_enh[index].erx.des0_3.des0 ),
538        ( p_enh[index].erx.des0_3.des0
539          & DWMAC_DESC_ERX_DES0_SRC_ADDR_FILTER_FAIL ) != 0,
540        ( p_enh[index].erx.des0_3.des0 & DWMAC_DESC_ERX_DES0_LENGTH_ERROR ) != 0,
541        ( p_enh[index].erx.des0_3.des0 & DWMAC_DESC_ERX_DES0_VLAN_TAG ) != 0,
542        ( p_enh[index].erx.des0_3.des0 & DWMAC_DESC_ERX_DES0_FIRST_DESCRIPTOR ) != 0,
543        ( p_enh[index].erx.des0_3.des0 & DWMAC_DESC_ERX_DES0_LAST_DESCRIPTOR ) != 0,
544        ( p_enh[index].erx.des0_3.des0 & DWMAC_DESC_ERX_DES0_FREAME_TYPE ) != 0,
545        ( p_enh[index].erx.des0_3.des0 & DWMAC_DESC_ERX_DES0_DRIBBLE_BIT_ERROR ) != 0,
546        ( p_enh[index].erx.des0_3.des0
547          & DWMAC_DESC_ERX_DES0_EXT_STATUS_AVAIL_OR_RX_MAC_ADDR_STATUS ) != 0
548        );
549
550      if ( ( p_enh[index].erx.des0_3.des0
551             & DWMAC_DESC_ERX_DES0_ERROR_SUMMARY ) != 0 ) {
552        printf( " Error Summary:\n" );
553
554        if ( ( p_enh[index].erx.des0_3.des0
555               & DWMAC_DESC_ERX_DES0_DESCRIPTOR_ERROR ) != 0 ) {
556          printf( "  Descriptor Error\n" );
557        }
558
559        if ( ( p_enh[index].erx.des0_3.des0
560               & DWMAC_DESC_ERX_DES0_OVERFLOW_ERROR ) != 0 ) {
561          printf( "  Overflow Error\n" );
562        }
563
564        if ( ( p_enh[index].erx.des0_3.des0
565               &
566               DWMAC_DESC_ERX_DES0_TIMESTAMP_AVAIL_OR_CHECKSUM_ERROR_OR_GIANT_FRAME )
567             != 0 ) {
568          printf( "  Giant Frame\n" );
569        }
570
571        if ( ( p_enh[index].erx.des0_3.des0
572               & DWMAC_DESC_ERX_DES0_LATE_COLLISION ) != 0 ) {
573          printf( "  Late Collision\n" );
574        }
575
576        if ( ( p_enh[index].erx.des0_3.des0
577               & DWMAC_DESC_ERX_DES0_RECEIVE_WATCHDOG_TIMEOUT ) != 0
578             || ( p_enh[index].erx.des0_3.des0
579                  & DWMAC_DESC_ERX_DES0_RECEIVE_ERROR ) != 0 ) {
580          printf( "  IP Header or IP Payload:\n" );
581
582          if ( ( p_enh[index].erx.des0_3.des0
583                 & DWMAC_DESC_ERX_DES0_RECEIVE_WATCHDOG_TIMEOUT ) != 0 ) {
584            printf( "   Watchdog Timeout\n" );
585          }
586
587          if ( ( p_enh[index].erx.des0_3.des0
588                 & DWMAC_DESC_ERX_DES0_RECEIVE_ERROR ) != 0 ) {
589            printf( "   Receive Error\n" );
590          }
591        }
592
593        if ( ( p_enh[index].erx.des0_3.des0 & DWMAC_DESC_ERX_DES0_CRC_ERROR )
594             != 0 ) {
595          printf( "  CRC Error\n" );
596        }
597      }
598
599      printf( "des1\n" );
600      printf(
601        " %u Disable Interrupt on Completion\n"
602        " %lu Receive Buffer 2 Size\n"
603        " %u Receive End of Ring\n"
604        " %u Second Addr. Chained\n"
605        " %lu Receive Buffer 1 Size\n",
606        ( p_enh[index].erx.des0_3.des1
607          & DWMAC_DESC_ERX_DES1_DISABLE_IRQ_ON_COMPLETION ) != 0,
608        DWMAC_DESC_ERX_DES1_RECEIVE_BUFF_2_SIZE_GET( p_enh[index].erx.des0_3.
609                                                     des1 ),
610        ( p_enh[index].erx.des0_3.des1 & DWMAC_DESC_ERX_DES1_RECEIVE_END_OF_RING ) != 0,
611        ( p_enh[index].erx.des0_3.des1 & DWMAC_DESC_ERX_DES1_SECOND_ADDR_CHAINED ) != 0,
612        DWMAC_DESC_ERX_DES1_RECEIVE_BUFF_1_SIZE_GET( p_enh[index].erx.des0_3.
613                                                     des1 )
614        );
615      printf( "des2\n" );
616      printf( " %p Buffer 1 Address\n", (void *) p_enh[index].erx.des0_3.des2 );
617      printf( "des3\n" );
618      printf( " %p Buffer 2 Address\n", (void *) p_enh[index].erx.des0_3.des3 );
619    }
620  }
621}
622
623static int dwmac_desc_enh_create_rx_desc( dwmac_common_context *self )
624{
625  int          eno        = 0;
626  const size_t NUM_DESCS  = (size_t) self->bsd_config->rbuf_count;
627  const size_t SIZE_DESCS = NUM_DESCS * sizeof( dwmac_desc_ext );
628  void        *desc_mem   = NULL;
629
630
631  assert( NULL == self->dma_rx );
632
633  /* Allocate an array of mbuf pointers */
634  self->mbuf_addr_rx = calloc( NUM_DESCS, sizeof( struct mbuf * ) );
635
636  if ( self->mbuf_addr_rx == NULL ) {
637    eno = ENOMEM;
638  }
639
640  /* Allocate an array of dma descriptors */
641  if ( eno == 0 ) {
642    eno = ( self->CFG->CALLBACK.mem_alloc_nocache )(
643      self->arg,
644      &desc_mem,
645      SIZE_DESCS
646      );
647  }
648
649  if ( eno == 0 ) {
650    if ( desc_mem != NULL ) {
651      memset( desc_mem, 0, SIZE_DESCS );
652      DWMAC_COMMON_DSB();
653    } else {
654      eno = ENOMEM;
655    }
656  }
657
658  if ( eno == 0 ) {
659    self->dma_rx = (volatile dwmac_desc *) desc_mem;
660    DWMAC_COMMON_DSB();
661  }
662
663  return eno;
664}
665
666static void dwmac_desc_enh_init_rx_desc(
667  dwmac_common_context *self,
668  const unsigned int    index )
669{
670  volatile dwmac_desc_ext *p_enh       =
671    (volatile dwmac_desc_ext *) self->dma_rx;
672  const size_t             NUM_DESCS   = (size_t) self->bsd_config->rbuf_count;
673  char                    *clust_start =
674    mtod( self->mbuf_addr_rx[index], char * );
675
676
677  assert( NULL != p_enh );
678
679  DWMAC_COMMON_DSB();
680
681  rtems_cache_invalidate_multiple_data_lines(
682    clust_start,
683    DWMAC_DESC_COM_BUF_SIZE + ETHER_ALIGN
684    );
685
686  if ( self->mbuf_addr_rx[index] != NULL ) {
687    p_enh[index].erx.des0_3.des1 = DWMAC_DESC_ERX_DES1_RECEIVE_BUFF_1_SIZE_SET(
688      p_enh->erx.des0_3.des1,
689      DWMAC_DESC_COM_BUF_SIZE );
690  } else {
691    p_enh[index].erx.des0_3.des1 = DWMAC_DESC_ERX_DES1_RECEIVE_BUFF_1_SIZE_SET(
692      p_enh->erx.des0_3.des1,
693      0 );
694  }
695
696  p_enh[index].erx.des0_3.des2 = (uint32_t) clust_start;
697
698  /* The network controller supports adding a second data buffer to
699   * p_enh->erx.des0_3.des3. For simplicity reasons we will not do this */
700  dwmac_desc_enh_rx_set_on_ring_chain( &p_enh[index],
701                                       ( index == NUM_DESCS - 1 ) );
702  DWMAC_COMMON_DSB();
703  p_enh[index].erx.des0_3.des0 = DWMAC_DESC_ERX_DES0_OWN_BIT;
704}
705
706static int dwmac_desc_enh_destroy_rx_desc( dwmac_common_context *self )
707{
708  int                  eno    = 0;
709  volatile dwmac_desc *dma_rx = self->dma_rx;
710
711
712  if ( self->mbuf_addr_rx != NULL ) {
713    free( self->mbuf_addr_rx, 0 );
714    self->mbuf_addr_rx = NULL;
715  }
716
717  if ( dma_rx != NULL ) {
718    eno          = self->CFG->CALLBACK.mem_free_nocache( self->arg, dma_rx );
719    self->dma_rx = NULL;
720  }
721
722  DWMAC_COMMON_DSB();
723
724  return eno;
725}
726
/**
 * @brief Releases all mbuf clusters referenced by the receive descriptor
 * ring and clears the corresponding descriptors.
 *
 * A descriptor is considered in use if its buffer 1 address (des2) is
 * non-zero.
 *
 * NOTE(review): the memset zeroes sizeof( dwmac_desc_ext ) bytes starting
 * at &p_enh[i].erx — this assumes erx is the first member of the
 * descriptor union so the whole extended descriptor is cleared; confirm
 * against the dwmac_desc_ext declaration.  Also self->mbuf_addr_rx[i] is
 * left pointing at the freed mbuf — callers must not reuse it before
 * re-initialization.
 *
 * @param self Driver context.
 */
static void dwmac_desc_enh_release_rx_bufs( dwmac_common_context *self )
{
  volatile dwmac_desc_ext *p_enh     = (volatile dwmac_desc_ext *) self->dma_rx;
  const size_t             NUM_DESCS = (size_t) self->bsd_config->rbuf_count;
  unsigned int             i;


  assert( p_enh != NULL );

  for ( i = 0; i < NUM_DESCS; ++i ) {
    if ( p_enh[i].erx.des0_3.des2 != 0 ) {
      struct mbuf *dummy;

      assert( self->mbuf_addr_rx[i] != NULL );

      MFREE( self->mbuf_addr_rx[i], dummy );
      (void) dummy;
      memset(&p_enh[i].erx, 0, sizeof( dwmac_desc_ext ) );
    }
  }

  self->dma_rx = (volatile dwmac_desc *) p_enh;
  DWMAC_COMMON_DSB();
}
751
752static int dwmac_desc_enh_create_tx_desc( dwmac_common_context *self )
753{
754  int          eno        = 0;
755  void        *mem_desc   = NULL;
756  const size_t NUM_DESCS  = (size_t) self->bsd_config->xbuf_count;
757  const size_t SIZE_DESCS = NUM_DESCS * sizeof( dwmac_desc_ext );
758
759
760  assert( self->dma_tx == NULL );
761
762  /* Allocate an array of mbuf pointers */
763  self->mbuf_addr_tx = calloc( NUM_DESCS, sizeof( struct mbuf * ) );
764
765  if ( self->mbuf_addr_tx == NULL ) {
766    eno = ENOMEM;
767  }
768
769  if ( eno == 0 ) {
770    eno = ( self->CFG->CALLBACK.mem_alloc_nocache )(
771      self->arg,
772      &mem_desc,
773      SIZE_DESCS
774      );
775  }
776
777  if ( eno == 0 ) {
778    if ( mem_desc != NULL ) {
779      memset( mem_desc, 0, SIZE_DESCS );
780      DWMAC_COMMON_DSB();
781    } else {
782      eno = ENOMEM;
783    }
784  }
785
786  if ( eno == 0 ) {
787    self->dma_tx = mem_desc;
788    DWMAC_COMMON_DSB();
789  }
790
791  return eno;
792}
793
794static void dwmac_desc_enh_init_tx_desc( dwmac_common_context *self )
795{
796  volatile dwmac_desc_ext *p_enh     = (volatile dwmac_desc_ext *) self->dma_tx;
797  const size_t             NUM_DESCS = (size_t) self->bsd_config->xbuf_count;
798  unsigned int             i;
799
800
801  assert( p_enh != NULL );
802
803  for ( i = 0; i < NUM_DESCS; ++i ) {
804    dwmac_desc_enh_tx_set_on_ring_chain( &p_enh[i], ( i == NUM_DESCS - 1 ) );
805  }
806
807  self->dma_tx = (volatile dwmac_desc *) &p_enh[0];
808  DWMAC_COMMON_DSB();
809}
810
811static int dwmac_desc_enh_destroy_tx_desc( dwmac_common_context *self )
812{
813  int   eno      = 0;
814  void *mem_desc = __DEVOLATILE( void *, self->dma_tx );
815
816
817  if ( self->mbuf_addr_tx != NULL ) {
818    free( self->mbuf_addr_tx, 0 );
819    self->mbuf_addr_tx = NULL;
820  }
821
822  if ( mem_desc != NULL ) {
823    eno          = self->CFG->CALLBACK.mem_free_nocache( self->arg, mem_desc );
824    mem_desc     = NULL;
825    self->dma_tx = (volatile dwmac_desc *) mem_desc;
826  }
827
828  DWMAC_COMMON_DSB();
829
830  return eno;
831}
832
/**
 * @brief Releases all mbufs referenced by the transmit descriptor ring and
 * clears the corresponding descriptors.
 *
 * A descriptor is considered in use if its des1 word (buffer sizes) is
 * non-zero.
 *
 * NOTE(review): the memset zeroes sizeof( dwmac_desc_ext ) bytes starting
 * at &p_enh[i].etx — this assumes etx is the first member of the
 * descriptor union so the whole extended descriptor is cleared; confirm
 * against the dwmac_desc_ext declaration.  Also self->mbuf_addr_tx[i] is
 * left pointing at the freed mbuf — callers must not reuse it before
 * re-initialization.
 *
 * @param self Driver context.
 */
static void dwmac_desc_enh_release_tx_bufs( dwmac_common_context *self )
{
  volatile dwmac_desc_ext *p_enh     = (volatile dwmac_desc_ext *) self->dma_tx;
  const size_t             NUM_DESCS = (size_t) self->bsd_config->xbuf_count;
  unsigned int             i;


  assert( p_enh != NULL );

  for ( i = 0; i < NUM_DESCS; ++i ) {
    if ( p_enh[i].etx.des0_3.des1 != 0 ) {
      struct mbuf *dummy;

      assert( self->mbuf_addr_tx[i] != NULL );

      MFREE( self->mbuf_addr_tx[i], dummy );
      (void) dummy;
      memset( __DEVOLATILE( void *,
                            &p_enh[i].etx ), 0, sizeof( dwmac_desc_ext ) );
    }
  }

  self->dma_tx = (volatile dwmac_desc *) p_enh;
  DWMAC_COMMON_DSB();
}
858
859static inline size_t dwmac_desc_enh_get_rx_frame_len(
860  dwmac_common_context *self,
861  const unsigned int    desc_idx )
862{
863  volatile dwmac_desc_ext *p_enh = (volatile dwmac_desc_ext *) self->dma_rx;
864
865
866  /* The type-1 checksum offload engines append the checksum at
867   * the end of frame and the two bytes of checksum are added in
868   * the length.
869   * Adjust for that in the framelen for type-1 checksum offload
870   * engines. */
871  if ( self->dmagrp->hw_feature & DMAGRP_HW_FEATURE_RXTYP1COE ) {
872    return DWMAC_DESC_ERX_DES0_FRAME_LENGTH_GET( p_enh[desc_idx].erx.des0_3.des0 )
873           - 2U;
874  } else {
875    return DWMAC_DESC_ERX_DES0_FRAME_LENGTH_GET( p_enh[desc_idx].erx.des0_3.des0 );
876  }
877}
878
879static bool dwmac_desc_enh_am_i_rx_owner(
880  dwmac_common_context *self,
881  const unsigned int    desc_idx )
882{
883  volatile dwmac_desc_ext *p_enh = (volatile dwmac_desc_ext *) self->dma_rx;
884  bool                     am_i_owner;
885
886
887  DWMAC_COMMON_DSB();
888  am_i_owner =
889    ( p_enh[desc_idx].erx.des0_3.des0 & DWMAC_DESC_ERX_DES0_OWN_BIT ) == 0;
890
891  return am_i_owner;
892}
893
894static bool dwmac_desc_enh_am_i_tx_owner(
895  dwmac_common_context *self,
896  const unsigned int    idx_tx )
897{
898  volatile dwmac_desc_ext *p_enh = (volatile dwmac_desc_ext *) self->dma_tx;
899  bool                     am_i_owner;
900
901
902  DWMAC_COMMON_DSB();
903  am_i_owner =
904    ( p_enh[idx_tx].etx.des0_3.des0 & DWMAC_DESC_ETX_DES0_OWN_BIT ) == 0;
905
906  return am_i_owner;
907}
908
909static void dwmac_desc_enh_release_tx_ownership(
910  dwmac_common_context *self,
911  const unsigned int    idx_tx )
912{
913  volatile dwmac_desc_ext *p_enh = (volatile dwmac_desc_ext *) self->dma_tx;
914
915
916  DWMAC_COMMON_DSB();
917  p_enh[idx_tx].erx.des0_3.des0 |= DWMAC_DESC_ETX_DES0_OWN_BIT;
918}
919
920static int dwmac_desc_enh_get_tx_ls(
921  dwmac_common_context *self,
922  const unsigned int    idx_tx )
923{
924  volatile dwmac_desc_ext *p_enh = (volatile dwmac_desc_ext *) self->dma_tx;
925
926
927  return ( ( p_enh[idx_tx].etx.des0_3.des0
928             & DWMAC_DESC_ETX_DES0_LAST_SEGMENT ) != 0 );
929}
930
931static void dwmac_desc_enh_release_tx_desc(
932  dwmac_common_context *self,
933  const unsigned int    idx_tx )
934{
935  volatile dwmac_desc_ext *p_enh = (volatile dwmac_desc_ext *) self->dma_tx;
936
937
938  p_enh[idx_tx].etx.des0_3.des0 =
939    p_enh[idx_tx].etx.des0_3.des0
940    & ( DWMAC_DESC_ETX_DES0_TRANSMIT_END_OF_RING
941        | DWMAC_DESC_ETX_DES0_SECOND_ADDR_CHAINED );
942
943  p_enh[idx_tx].etx.des0_3.des1 = 0;
944}
945
946static void dwmac_desc_enh_prepare_tx_desc(
947  dwmac_common_context *self,
948  const unsigned int    idx,
949  const bool            is_first,
950  const size_t          len,
951  const void           *pdata )
952{
953  volatile dwmac_desc_ext *p_enh = (volatile dwmac_desc_ext *) self->dma_tx;
954
955
956  if ( is_first ) {
957    p_enh[idx].etx.des0_3.des0 |= DWMAC_DESC_ETX_DES0_FIRST_SEGMENT;
958  }
959
960  dwmac_desc_enh_set_tx_desc_len( &p_enh[idx], len );
961
962  p_enh[idx].etx.des0_3.des2 = (uintptr_t) pdata;
963}
964
965static void dwmac_desc_enh_close_tx_desc(
966  dwmac_common_context *self,
967  const unsigned int    idx_tx
968)
969{
970  volatile dwmac_desc_ext *p_enh = (volatile dwmac_desc_ext *) self->dma_tx;
971
972  p_enh[idx_tx].etx.des0_3.des0 |= DWMAC_DESC_ETX_DES0_LAST_SEGMENT;
973  p_enh[idx_tx].etx.des0_3.des0 |= DWMAC_DESC_ETX_DES0_IRQ_ON_COMPLETION;
974}
975
976static bool dwmac_desc_enh_is_first_rx_segment(
977  dwmac_common_context *self,
978  const unsigned int    descriptor_index )
979{
980  volatile dwmac_desc_ext *p_descs = (volatile dwmac_desc_ext *) self->dma_rx;
981
982
983  return ( ( p_descs[descriptor_index].erx.des0_3.des0
984             & DWMAC_DESC_ERX_DES0_FIRST_DESCRIPTOR ) != 0 );
985}
986
987static bool dwmac_desc_enh_is_last_rx_segment(
988  dwmac_common_context *self,
989  const unsigned int    descriptor_index )
990{
991  volatile dwmac_desc_ext *p_descs = (volatile dwmac_desc_ext *) self->dma_rx;
992
993
994  return ( ( p_descs[descriptor_index].erx.des0_3.des0
995             & DWMAC_DESC_ERX_DES0_LAST_DESCRIPTOR ) != 0 );
996}
997
998static int dwmac_desc_enh_validate( dwmac_common_context *self )
999{
1000  /* Does the hardware support enhanced descriptors? */
1001  if ( ( self->dmagrp->hw_feature & DMAGRP_HW_FEATURE_ENHDESSEL ) != 0 ) {
1002    return 0;
1003  } else {
1004    return EINVAL;
1005  }
1006}
1007
1008static bool dwmac_desc_enh_use_enhanced_descs( dwmac_common_context *self )
1009{
1010  (void) self;
1011
1012  /* Yes, we use enhanced descriptors */
1013  return true;
1014}
1015
/* Operations table binding the enhanced-descriptor implementations to
 * the generic DWMAC driver core.  Every hook is backed by a function in
 * this file except alloc_data_buf, which reuses the common mbuf
 * allocator shared with the normal-descriptor variant. */
const dwmac_common_desc_ops dwmac_desc_ops_enhanced = {
  .validate             = dwmac_desc_enh_validate,
  .use_enhanced_descs   = dwmac_desc_enh_use_enhanced_descs,
  .tx_status            = dwmac_desc_enh_get_tx_status,
  .rx_status            = dwmac_desc_enh_get_rx_status,
  .create_rx_desc       = dwmac_desc_enh_create_rx_desc,
  .create_tx_desc       = dwmac_desc_enh_create_tx_desc,
  .destroy_rx_desc      = dwmac_desc_enh_destroy_rx_desc,
  .destroy_tx_desc      = dwmac_desc_enh_destroy_tx_desc,
  .init_rx_desc         = dwmac_desc_enh_init_rx_desc,
  .init_tx_desc         = dwmac_desc_enh_init_tx_desc,
  .release_rx_bufs      = dwmac_desc_enh_release_rx_bufs,
  .release_tx_bufs      = dwmac_desc_enh_release_tx_bufs,
  .alloc_data_buf       = dwmac_desc_com_new_mbuf,
  .am_i_tx_owner        = dwmac_desc_enh_am_i_tx_owner,
  .am_i_rx_owner        = dwmac_desc_enh_am_i_rx_owner,
  .release_tx_desc      = dwmac_desc_enh_release_tx_desc,
  .prepare_tx_desc      = dwmac_desc_enh_prepare_tx_desc,
  .close_tx_desc        = dwmac_desc_enh_close_tx_desc,
  .get_tx_ls            = dwmac_desc_enh_get_tx_ls,
  .release_tx_ownership = dwmac_desc_enh_release_tx_ownership,
  .get_rx_frame_len     = dwmac_desc_enh_get_rx_frame_len,
  .is_first_rx_segment  = dwmac_desc_enh_is_first_rx_segment,
  .is_last_rx_segment   = dwmac_desc_enh_is_last_rx_segment,
  .print_tx_desc        = dwmac_desc_enh_print_tx_desc,
  .print_rx_desc        = dwmac_desc_enh_print_rx_desc,
};
1043
1044/* This wrapped function pointer struct can be passed into the
1045 * configuration initializer for the driver */
/* Public wrapper around the enhanced-descriptor operations table.
 * Board support packages pass this object to the driver configuration
 * initializer to select enhanced descriptors. */
const dwmac_descriptor_ops DWMAC_DESCRIPTOR_OPS_ENHANCED =
  DWMAC_DESCRIPTOR_OPS_INITIALIZER(
    &dwmac_desc_ops_enhanced
    );