Changeset 1613a01b in rtems


Timestamp:
Apr 10, 2014, 3:06:14 PM
Author:
Ralf Kirchner <ralf.kirchner@…>
Branches:
4.11, master
Children:
a38d4a37
Parents:
f28b8d45
git-author:
Ralf Kirchner <ralf.kirchner@…> (04/10/14 15:06:14)
git-committer:
Sebastian Huber <sebastian.huber@…> (04/17/14 11:24:07)
Message:

libchip: Reduce tx interrupts

Reduce the number of packet-transmitted interrupts by using the interrupt mechanism only
if we run out of DMA descriptors.
Under normal conditions, reclaiming DMA descriptors, mbufs and clusters is handled
via a counter.
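
The scheme in a nutshell, as a minimal sketch rather than the driver's actual API:
descriptors are normally reclaimed by polling their ownership flags from the transmit
task, and the transmit-complete interrupt is enabled only once the descriptor ring runs
dry. All names below (tx_ring, tx_ring_reclaim, irq_tx_enable, RING_SIZE) are
hypothetical placeholders, not identifiers from the dwmac driver.

  #include <stdbool.h>

  #define RING_SIZE 64u

  struct tx_ring {
    unsigned head;              /* next descriptor to hand to the DMA */
    unsigned tail;              /* oldest descriptor still in flight  */
    bool     owned[RING_SIZE];  /* true while the DMA owns the slot   */
  };

  /* Reclaim descriptors the DMA has given back; no interrupt required. */
  static unsigned tx_ring_reclaim( struct tx_ring *r )
  {
    unsigned n = 0;

    while ( r->tail != r->head && !r->owned[ r->tail ] ) {
      r->tail = ( r->tail + 1u ) % RING_SIZE;
      ++n;
    }

    return n;
  }

  /* Queue one fragment; fall back to the TX interrupt only when the ring
   * is exhausted and nothing could be reclaimed right now. */
  bool tx_ring_enqueue( struct tx_ring *r, void ( *irq_tx_enable )( void ) )
  {
    bool full = ( ( r->head + 1u ) % RING_SIZE ) == r->tail;

    if ( full && tx_ring_reclaim( r ) == 0 ) {
      /* Out of descriptors: wait for the transmit-complete interrupt */
      ( *irq_tx_enable )();
      return false;
    }

    r->owned[ r->head ] = true;
    r->head             = ( r->head + 1u ) % RING_SIZE;
    return true;
  }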

Location:
c/src/libchip/network
Files:
4 edited

  • c/src/libchip/network/dwmac-1000-dma.c

    rf28b8d45 r1613a01b  
    148148    /* Mask interrupts by writing to CSR7 */
    149149    dwmac_core_enable_dma_irq_rx( self );
    150     dwmac_core_enable_dma_irq_tx( self );
     150    dwmac_core_enable_dma_irq_tx_default( self );
    151151
    152152    /* The base address of the RX/TX descriptor lists must be written into
  • c/src/libchip/network/dwmac-core.c

    rf28b8d45 r1613a01b  
    4242  ( \
    4343    DMAGRP_INTERRUPT_ENABLE_NIE \
    44     | DMAGRP_INTERRUPT_ENABLE_TIE \
    4544    | DMAGRP_INTERRUPT_ENABLE_FBE \
    4645    | DMAGRP_INTERRUPT_ENABLE_UNE \
    4746    | DMAGRP_INTERRUPT_ENABLE_AIE \
     47  )
     48
     49#define DWMAC_CORE_INTR_ENABLE_ALL_MASK_TX \
     50  ( \
     51    DWMAC_CORE_INTR_ENABLE_DEFAULT_MASK_TX \
     52    | DMAGRP_INTERRUPT_ENABLE_TIE \
    4853  )
    4954
     
    6873}
    6974
    70 void dwmac_core_enable_dma_irq_tx( dwmac_common_context *self )
     75void dwmac_core_enable_dma_irq_tx_default( dwmac_common_context *self )
    7176{
    7277  self->dmagrp->interrupt_enable |= DWMAC_CORE_INTR_ENABLE_DEFAULT_MASK_TX;
    7378}
    7479
     80void dwmac_core_enable_dma_irq_tx_transmitted( dwmac_common_context *self )
     81{
     82  self->dmagrp->interrupt_enable |= DMAGRP_INTERRUPT_ENABLE_TIE;
     83}
     84
    7585void dwmac_core_enable_dma_irq_rx( dwmac_common_context *self )
    7686{
     
    7888}
    7989
    80 void dwmac_core_disable_dma_irq_tx( dwmac_common_context *self )
    81 {
    82   self->dmagrp->interrupt_enable &= ~DWMAC_CORE_INTR_ENABLE_DEFAULT_MASK_TX;
     90void dwmac_core_disable_dma_irq_tx_all( dwmac_common_context *self )
     91{
     92  self->dmagrp->interrupt_enable &= ~DWMAC_CORE_INTR_ENABLE_ALL_MASK_TX;
     93}
     94
     95void dwmac_core_disable_dma_irq_tx_transmitted( dwmac_common_context *self )
     96{
     97  self->dmagrp->interrupt_enable &= ~DMAGRP_INTERRUPT_ENABLE_TIE;
    8398}
    8499
  • c/src/libchip/network/dwmac-core.h

    rf28b8d45 r1613a01b  
    5656void dwmac_core_enable_dma_irq_rx( dwmac_common_context *self );
    5757
    58 void dwmac_core_enable_dma_irq_tx( dwmac_common_context *self );
     58void dwmac_core_enable_dma_irq_tx_default( dwmac_common_context *self );
    5959
    60 void dwmac_core_disable_dma_irq_tx( dwmac_common_context *self );
     60void dwmac_core_enable_dma_irq_tx_transmitted( dwmac_common_context *self );
     61
     62void dwmac_core_disable_dma_irq_tx_all( dwmac_common_context *self );
     63
     64void dwmac_core_disable_dma_irq_tx_transmitted( dwmac_common_context *self );
    6165
    6266void dwmac_core_disable_dma_irq_rx( dwmac_common_context *self );
  • c/src/libchip/network/dwmac.c

    rf28b8d45 r1613a01b  
    263263}
    264264
    265 static inline void dwmac_enable_irq_tx( dwmac_common_context *self )
    266 {
    267   dwmac_core_enable_dma_irq_tx( self );
     265static inline void dwmac_enable_irq_tx_default( dwmac_common_context *self )
     266{
     267  dwmac_core_enable_dma_irq_tx_default( self );
     268}
     269
     270static inline void dwmac_enable_irq_tx_transmitted( dwmac_common_context *self )
     271{
     272  dwmac_core_enable_dma_irq_tx_transmitted( self );
    268273}
    269274
     
    273278}
    274279
    275 static inline void dwmac_diable_irq_tx( dwmac_common_context *self )
    276 {
    277   dwmac_core_disable_dma_irq_tx( self );
     280static inline void dwmac_disable_irq_tx_all( dwmac_common_context *self )
     281{
     282  dwmac_core_disable_dma_irq_tx_all( self );
     283}
     284
     285static inline void dwmac_disable_irq_tx_transmitted ( dwmac_common_context *self )
     286{
     287  dwmac_core_disable_dma_irq_tx_transmitted( self );
    278288}
    279289
     
    281291{
    282292  rtems_status_code sc = rtems_event_transient_send( self->task_id_control );
    283 
    284293
    285294  assert( sc == RTEMS_SUCCESSFUL );
     
    13431352    assert( sc == RTEMS_SUCCESSFUL );
    13441353
    1345     /* Handle a status change of the ethernet PHY */
    1346     if ( ( events & DWMAC_COMMON_EVENT_TX_PHY_STATUS_CHANGE ) != 0 ) {
    1347       dwmac_common_phy_status_counts *counts     =
    1348         &self->stats.phy_status_counts;
    1349       dwmac_phy_event                 phy_events = 0;
    1350       int                             eno;
    1351 
    1352       /* Get tripped PHY events */
    1353       eno = CALLBACK->phy_events_get(
    1354         self->arg,
    1355         &phy_events
    1356         );
    1357 
    1358       if ( eno == 0 ) {
    1359         /* Clear the PHY events */
    1360         eno = CALLBACK->phy_event_clear( self->arg );
     1354    while( events != 0 ) {
     1355      /* Handle a status change of the ethernet PHY */
     1356      if ( ( events & DWMAC_COMMON_EVENT_TX_PHY_STATUS_CHANGE ) != 0 ) {
     1357        events &= ~DWMAC_COMMON_EVENT_TX_PHY_STATUS_CHANGE;
     1358        dwmac_common_phy_status_counts *counts     =
     1359          &self->stats.phy_status_counts;
     1360        dwmac_phy_event                 phy_events = 0;
     1361        int                             eno;
     1362
     1363        /* Get tripped PHY events */
     1364        eno = CALLBACK->phy_events_get(
     1365          self->arg,
     1366          &phy_events
     1367          );
     1368
     1369        if ( eno == 0 ) {
     1370          /* Clear the PHY events */
     1371          eno = CALLBACK->phy_event_clear( self->arg );
     1372        }
     1373
     1374        if ( eno == 0 ) {
     1375          if ( ( phy_events & PHY_EVENT_LINK_DOWN ) != 0 ) {
     1376            ++counts->link_down;
     1377          }
     1378
     1379          if ( ( phy_events & PHY_EVENT_LINK_UP ) != 0 ) {
     1380            ++counts->link_up;
     1381
     1382            /* A link up events means that we have a new connection.
     1383            * Thus the autonegotiation paremeters must get updated */
     1384            (void) dwmac_update_autonegotiation_params( self );
     1385          }
     1386        }
     1387
     1388        assert( eno == 0 );
    13611389      }
    13621390
    1363       if ( eno == 0 ) {
    1364         if ( ( phy_events & PHY_EVENT_LINK_DOWN ) != 0 ) {
    1365           ++counts->link_down;
    1366         }
    1367 
    1368         if ( ( phy_events & PHY_EVENT_LINK_UP ) != 0 ) {
    1369           ++counts->link_up;
    1370 
    1371           /* A link up events means that we have a new connection.
    1372            * Thus the autonegotiation paremeters must get updated */
    1373           (void) dwmac_update_autonegotiation_params( self );
     1391      /* Stop the task */
     1392      if ( ( events & DWMAC_COMMON_EVENT_TASK_STOP ) != 0 ) {
     1393        dwmac_core_dma_stop_tx( self );
     1394        dwmac_disable_irq_tx_all( self );
     1395
     1396        /* Release all tx mbufs at the risk of data loss */
     1397        ( DESC_OPS->release_tx_bufs )( self );
     1398
     1399        dwmac_control_request_complete( self );
     1400
     1401        /* Return to events reception without re-enabling the interrupts
     1402        * The task needs a re-initialization to to resume work */
     1403        events = 0;
     1404        continue;
     1405      }
     1406
     1407      /* Ininitialize / Re-initialize transmission handling */
     1408      if ( ( events & DWMAC_COMMON_EVENT_TASK_INIT ) != 0 ) {
     1409        events &= ~DWMAC_COMMON_EVENT_TASK_INIT;
     1410        (void) dwmac_update_autonegotiation_params( self );
     1411        dwmac_core_dma_stop_tx( self );
     1412        ( DESC_OPS->release_tx_bufs )( self );
     1413        idx_transmit       = 0;
     1414        idx_transmit_first = 0;
     1415        idx_transmitted    = 0;
     1416        idx_release        = 0;
     1417        p_m                = NULL;
     1418        is_first           = false;
     1419        is_last            = false;
     1420        size               = 0;
     1421        ( DESC_OPS->init_tx_desc )( self );
     1422        dwmac_core_dma_start_tx( self );
     1423        dwmac_core_dma_restart_tx( self );
     1424
     1425        /* Clear our interrupt statuses */
     1426        dwmac_core_reset_dma_irq_status_tx( self );
     1427        dwmac_enable_irq_tx_default( self );
     1428
     1429        dwmac_control_request_complete( self );
     1430      }
     1431
     1432      /* Try to bump up the dma threshold due to a failure */
     1433      if ( ( events & DWMAC_COMMON_EVENT_TX_BUMP_UP_DMA_THRESHOLD ) != 0 ) {
     1434        events &= ~DWMAC_COMMON_EVENT_TX_BUMP_UP_DMA_THRESHOLD;
     1435        if ( self->dma_threshold_control
     1436            != DWMAC_COMMON_DMA_MODE_STORE_AND_FORWARD
     1437            && self->dma_threshold_control <= 256 ) {
     1438          self->dma_threshold_control += 64;
     1439          ( DMA_OPS->dma_mode )(
     1440            self,
     1441            self->dma_threshold_control,
     1442            DWMAC_COMMON_DMA_MODE_STORE_AND_FORWARD
     1443            );
    13741444        }
    13751445      }
    13761446
    1377       assert( eno == 0 );
    1378     }
    1379 
    1380     /* Stop the task */
    1381     if ( ( events & DWMAC_COMMON_EVENT_TASK_STOP ) != 0 ) {
    1382       dwmac_core_dma_stop_tx( self );
    1383       dwmac_diable_irq_tx( self );
    1384 
    1385       /* Release all tx mbufs at the risk of data loss */
    1386       ( DESC_OPS->release_tx_bufs )( self );
    1387 
    1388       dwmac_control_request_complete( self );
    1389 
    1390       /* Return to events reception without re-enabling the interrupts
    1391        * The task needs a re-initialization to to resume work */
    1392       continue;
    1393     }
    1394 
    1395     /* Ininitialize / Re-initialize transmission handling */
    1396     if ( ( events & DWMAC_COMMON_EVENT_TASK_INIT ) != 0 ) {
    1397       (void) dwmac_update_autonegotiation_params( self );
    1398       dwmac_core_dma_stop_tx( self );
    1399       ( DESC_OPS->release_tx_bufs )( self );
    1400       idx_transmit       = 0;
    1401       idx_transmit_first = 0;
    1402       idx_transmitted    = 0;
    1403       idx_release        = 0;
    1404       p_m                = NULL;
    1405       is_first           = false;
    1406       is_last            = false;
    1407       size               = 0;
    1408       ( DESC_OPS->init_tx_desc )( self );
    1409       dwmac_core_dma_start_tx( self );
    1410       dwmac_core_dma_restart_tx( self );
    1411 
    1412       /* Clear our interrupt statuses */
    1413       dwmac_core_reset_dma_irq_status_tx( self );
    1414 
    1415       dwmac_control_request_complete( self );
    1416     }
    1417 
    1418     /* Try to bump up the dma threshold due to a failure */
    1419     if ( ( events & DWMAC_COMMON_EVENT_TX_BUMP_UP_DMA_THRESHOLD ) != 0 ) {
    1420       if ( self->dma_threshold_control
    1421            != DWMAC_COMMON_DMA_MODE_STORE_AND_FORWARD
    1422            && self->dma_threshold_control <= 256 ) {
    1423         self->dma_threshold_control += 64;
    1424         ( DMA_OPS->dma_mode )(
    1425           self,
    1426           self->dma_threshold_control,
    1427           DWMAC_COMMON_DMA_MODE_STORE_AND_FORWARD
    1428           );
     1447      /* Handle one or more transmitted frames */
     1448      if ( ( events & DWMAC_COMMON_EVENT_TX_FRAME_TRANSMITTED ) != 0 ) {
     1449        events &= ~DWMAC_COMMON_EVENT_TX_FRAME_TRANSMITTED;
     1450        dwmac_common_tx_frame_counts *counts = &self->stats.frame_counts_tx;
     1451        dwmac_disable_irq_tx_transmitted( self );
     1452
     1453        /* Next index to be transmitted */
     1454        unsigned int idx_transmitted_next = dwmac_increment(
     1455          idx_transmitted, INDEX_MAX );
     1456
     1457        /* Free consumed fragments */
     1458        if( idx_release != idx_transmitted_next
     1459            && ( DESC_OPS->am_i_tx_owner )( self, idx_release ) ) {
     1460          while ( idx_release != idx_transmitted_next
     1461                  && ( DESC_OPS->am_i_tx_owner )( self, idx_release ) ) {
     1462            /* Status handling per packet */
     1463            if ( ( DESC_OPS->get_tx_ls )( self, idx_release ) ) {
     1464              int status = ( DESC_OPS->tx_status )(
     1465                self, idx_release
     1466                );
     1467
     1468              if ( status == 0 ) {
     1469                ++counts->packets_tranmitted_by_DMA;
     1470              } else {
     1471                ++counts->packet_errors;
     1472              }
     1473            }
     1474
     1475            DWMAC_PRINT_DBG(
     1476              "tx: release %u\n",
     1477              idx_release
     1478              );
     1479
     1480            /* Release the DMA descriptor */
     1481            ( DESC_OPS->release_tx_desc )( self, idx_release );
     1482
     1483            /* Release mbuf */
     1484            m_free( self->mbuf_addr_tx[idx_release] );
     1485            self->mbuf_addr_tx[idx_release] = NULL;
     1486
     1487            /* Next release index */
     1488            idx_release = dwmac_increment(
     1489              idx_release, INDEX_MAX );
     1490          }
     1491          if ( ( self->arpcom.ac_if.if_flags & IFF_OACTIVE ) != 0 ) {
     1492            /* The last tranmission has been incomplete
     1493            * (for example due to lack of DMA descriptors).
     1494            * Continue it now! */
     1495            events |= DWMAC_COMMON_EVENT_TX_TRANSMIT_FRAME;
     1496          }
     1497        } else {
     1498          /* Clear transmit interrupt status */
     1499          self->dmagrp->status = DMAGRP_STATUS_TI;
     1500          /* Get re-activated by the next interrupt */
     1501          dwmac_enable_irq_tx_transmitted( self );
     1502        }
    14291503      }
    1430     }
    1431 
    1432     /* Handle one or more transmitted frames */
    1433     if ( ( events & DWMAC_COMMON_EVENT_TX_FRAME_TRANSMITTED ) != 0 ) {
    1434       dwmac_common_tx_frame_counts *counts = &self->stats.frame_counts_tx;
    1435 
    1436       /* Next index to be transmitted */
    1437       unsigned int idx_transmitted_next = dwmac_increment(
    1438         idx_transmitted, INDEX_MAX );
    1439 
    1440       /* Free consumed fragments */
    1441       while ( idx_release != idx_transmitted_next
    1442               && ( DESC_OPS->am_i_tx_owner )( self, idx_release ) ) {
    1443         /* Status handling per packet */
    1444         if ( ( DESC_OPS->get_tx_ls )( self, idx_release ) ) {
    1445           int status = ( DESC_OPS->tx_status )(
    1446             self, idx_release
     1504
     1505      /* There are one or more frames to be transmitted. */
     1506      if ( ( events & DWMAC_COMMON_EVENT_TX_TRANSMIT_FRAME ) != 0 ) {
     1507        events &= ~DWMAC_COMMON_EVENT_TX_TRANSMIT_FRAME;
     1508        dwmac_common_tx_frame_counts *counts = &self->stats.frame_counts_tx;
     1509
     1510        if ( p_m != NULL ) {
     1511          /* This frame will get re-counted */
     1512          --counts->frames_from_stack;
     1513        }
     1514
     1515        while ( true ) {
     1516          unsigned int idx = dwmac_increment(
     1517            idx_transmit, INDEX_MAX );
     1518
     1519          p_m = dwmac_next_fragment(
     1520            &self->arpcom.ac_if,
     1521            p_m,
     1522            &is_first,
     1523            &is_last,
     1524            &size
    14471525            );
    14481526
    1449           if ( status == 0 ) {
    1450             ++counts->packets_tranmitted_by_DMA;
     1527          /* New fragment? */
     1528          if ( p_m != NULL ) {
     1529            ++counts->frames_from_stack;
     1530
     1531            /* Queue full? */
     1532            if ( idx == idx_release ) {
     1533              DWMAC_PRINT_DBG( "tx: full queue: 0x%08x\n", p_m );
     1534
     1535              /* The queue is full, wait for transmitted interrupt */
     1536              break;
     1537            }
     1538
     1539            /* Set the transfer data */
     1540            rtems_cache_flush_multiple_data_lines(
     1541              mtod( p_m, const void * ),
     1542              size
     1543              );
     1544
     1545            ( DESC_OPS->prepare_tx_desc )(
     1546              self,
     1547              idx_transmit,
     1548              is_first,
     1549              size,
     1550              mtod( p_m, const void * )
     1551              );
     1552            self->mbuf_addr_tx[idx_transmit] = p_m;
     1553
     1554            ++counts->frames_to_dma;
     1555            counts->bytes_to_dma += size;
     1556            DWMAC_PRINT_DBG(
     1557              "tx: %02" PRIu32 ": %u %s%s\n",
     1558              idx_transmit, size,
     1559              ( is_first != false ) ? ( "F" ) : ( "" ),
     1560              ( is_last != false ) ? ( "L" ) : ( "" )
     1561
     1562              );
     1563
     1564            if ( is_first ) {
     1565              idx_transmit_first = idx_transmit;
     1566              is_first           = false;
     1567            } else {
     1568              /* To avoid race condition */
     1569              ( DESC_OPS->release_tx_ownership )( self, idx_transmit );
     1570            }
     1571
     1572            if ( is_last ) {
     1573              /* Interrupt on completition only for the latest fragment */
     1574              ( DESC_OPS->close_tx_desc )( self, idx_transmit );
     1575
     1576              /* To avoid race condition */
     1577              ( DESC_OPS->release_tx_ownership )( self, idx_transmit_first );
     1578              idx_transmitted = idx_transmit;
     1579
     1580              if ( ( self->dmagrp->status & DMAGRP_STATUS_TU ) != 0 ) {
     1581                /* Re-enable the tranmit DMA */
     1582                dwmac_core_dma_restart_tx( self );
     1583                DWMAC_PRINT_DBG(
     1584                  "tx DMA restart: %02u\n",
     1585                  idx_transmit_first
     1586                  );
     1587              }
     1588            }
     1589
     1590            /* Next transmit index */
     1591            idx_transmit = idx;
     1592
     1593            if ( is_last ) {
     1594              ++counts->packets_to_dma;
     1595            }
     1596
     1597            /* Next fragment of the frame */
     1598            p_m = p_m->m_next;
    14511599          } else {
    1452             ++counts->packet_errors;
     1600            /* Nothing to transmit */
     1601            break;
    14531602          }
    14541603        }
    14551604
    1456         DWMAC_PRINT_DBG(
    1457           "tx: release %u\n",
    1458           idx_release
    1459           );
    1460 
    1461         /* Release the DMA descriptor */
    1462         ( DESC_OPS->release_tx_desc )( self, idx_release );
    1463 
    1464         /* Release mbuf */
    1465         m_free( self->mbuf_addr_tx[idx_release] );
    1466         self->mbuf_addr_tx[idx_release] = NULL;
    1467 
    1468         /* Next release index */
    1469         idx_release = dwmac_increment(
    1470           idx_release, INDEX_MAX );
     1605        /* No more packets and fragments? */
     1606        if ( p_m == NULL ) {
     1607          /* Interface is now inactive */
     1608          self->arpcom.ac_if.if_flags &= ~IFF_OACTIVE;
     1609        } else {
     1610          /* There are more packets pending to be sent,
     1611          * but we have run out of DMA descriptors.
     1612          * We will continue sending once descriptors
     1613          * have been freed due to a transmitted interupt */
     1614          DWMAC_PRINT_DBG( "tx: transmission incomplete\n" );
     1615          events |= DWMAC_COMMON_EVENT_TX_FRAME_TRANSMITTED;
     1616        }
     1617
     1618        /* TODO: Add handling */
    14711619      }
    14721620
    1473       /* Clear transmit interrupt status */
    1474       self->dmagrp->status = DMAGRP_STATUS_TI;
    1475 
    1476       if ( ( self->arpcom.ac_if.if_flags & IFF_OACTIVE ) != 0 ) {
    1477         /* The last tranmission has been incomplete
    1478          * (for example due to lack of DMA descriptors).
    1479          * Continue it now! */
    1480         events |= DWMAC_COMMON_EVENT_TX_TRANSMIT_FRAME;
    1481       }
    1482     }
    1483 
    1484     /* There are one or more frames to be transmitted. */
    1485     if ( ( events & DWMAC_COMMON_EVENT_TX_TRANSMIT_FRAME ) != 0 ) {
    1486       dwmac_common_tx_frame_counts *counts = &self->stats.frame_counts_tx;
    1487 
    1488       if ( p_m != NULL ) {
    1489         /* This frame will get re-counted */
    1490         --counts->frames_from_stack;
    1491       }
    1492 
    1493       while ( true ) {
    1494         unsigned int idx = dwmac_increment(
    1495           idx_transmit, INDEX_MAX );
    1496 
    1497         p_m = dwmac_next_fragment(
    1498           &self->arpcom.ac_if,
    1499           p_m,
    1500           &is_first,
    1501           &is_last,
    1502           &size
    1503           );
    1504 
    1505         /* New fragment? */
    1506         if ( p_m != NULL ) {
    1507           ++counts->frames_from_stack;
    1508 
    1509           /* Queue full? */
    1510           if ( idx == idx_release ) {
    1511             DWMAC_PRINT_DBG( "tx: full queue: 0x%08x\n", p_m );
    1512 
    1513             /* The queue is full, wait for transmitted interrupt */
    1514             break;
    1515           }
    1516 
    1517           /* Set the transfer data */
    1518           rtems_cache_flush_multiple_data_lines(
    1519             mtod( p_m, const void * ),
    1520             size
    1521             );
    1522 
    1523           ( DESC_OPS->prepare_tx_desc )(
    1524             self,
    1525             idx_transmit,
    1526             is_first,
    1527             size,
    1528             mtod( p_m, const void * )
    1529             );
    1530           self->mbuf_addr_tx[idx_transmit] = p_m;
    1531 
    1532           ++counts->frames_to_dma;
    1533           counts->bytes_to_dma += size;
    1534           DWMAC_PRINT_DBG(
    1535             "tx: %02" PRIu32 ": %u %s%s\n",
    1536             idx_transmit, size,
    1537             ( is_first != false ) ? ( "F" ) : ( "" ),
    1538             ( is_last != false ) ? ( "L" ) : ( "" )
    1539 
    1540             );
    1541 
    1542           if ( is_first ) {
    1543             idx_transmit_first = idx_transmit;
    1544             is_first           = false;
    1545           } else {
    1546             /* To avoid race condition */
    1547             ( DESC_OPS->release_tx_ownership )( self, idx_transmit );
    1548           }
    1549 
    1550           if ( is_last ) {
    1551             /* Interrupt on completition only for the latest fragment */
    1552             ( DESC_OPS->close_tx_desc )( self, idx_transmit );
    1553 
    1554             /* To avoid race condition */
    1555             ( DESC_OPS->release_tx_ownership )( self, idx_transmit_first );
    1556             idx_transmitted = idx_transmit;
    1557 
    1558             if ( ( self->dmagrp->status & DMAGRP_STATUS_TU ) != 0 ) {
    1559               /* Re-enable the tranmit DMA */
    1560               dwmac_core_dma_restart_tx( self );
    1561               DWMAC_PRINT_DBG(
    1562                 "tx DMA restart: %02u\n",
    1563                 idx_transmit_first
    1564                 );
    1565             }
    1566           }
    1567 
    1568           /* Next transmit index */
    1569           idx_transmit = idx;
    1570 
    1571           if ( is_last ) {
    1572             ++counts->packets_to_dma;
    1573           }
    1574 
    1575           /* Next fragment of the frame */
    1576           p_m = p_m->m_next;
    1577         } else {
    1578           /* Nothing to transmit */
    1579           break;
    1580         }
    1581       }
    1582 
    1583       /* No more packets and fragments? */
    1584       if ( p_m == NULL ) {
    1585         /* Interface is now inactive */
    1586         self->arpcom.ac_if.if_flags &= ~IFF_OACTIVE;
    1587       } else {
    1588         /* There are more packets pending to be sent,
    1589          * but we have run out of DMA descriptors.
    1590          * We will continue sending once descriptors
    1591          * have been freed due to a transmitted interupt */
    1592         DWMAC_PRINT_DBG( "tx: transmission incomplete\n" );
    1593       }
    1594 
    1595       /* TODO: Add handling */
    1596     }
    1597 
    1598     DWMAC_PRINT_DBG( "tx: enable transmit interrupts\n" );
    1599 
    1600     /* Re-enable transmit interrupts */
    1601     dwmac_enable_irq_tx( self );
     1621      DWMAC_PRINT_DBG( "tx: enable transmit interrupts\n" );
     1622    }
    16021623  }
    16031624}
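
The dwmac.c hunk above also restructures the transmit task to loop over the received
event set, clearing each event bit as it is handled, so that chained work (a
transmitted-frames event resuming a stalled transmission, or a stalled transmission
re-arming the transmitted-frames path) completes before the task blocks again. A hedged
sketch of that pattern, with simplified placeholder names and stub hooks instead of the
driver's DWMAC_COMMON_EVENT_* constants and descriptor operations:

  #include <stdbool.h>
  #include <stdint.h>

  #define EVT_PHY_STATUS  ( 1u << 0 )
  #define EVT_TRANSMITTED ( 1u << 1 )
  #define EVT_TRANSMIT    ( 1u << 2 )

  /* Placeholder hooks; the real driver talks to the DMA descriptor ring
   * and the interrupt-enable register here. */
  static bool reclaim_descriptors( void ) { return false; }
  static void enable_irq_tx_transmitted( void ) { }
  static bool queue_pending_frames( void ) { return true; } /* true: all sent */

  void tx_task_handle_events( uint32_t events )
  {
    while ( events != 0 ) {
      if ( ( events & EVT_PHY_STATUS ) != 0 ) {
        events &= ~EVT_PHY_STATUS;
        /* Handle a PHY status change */
      }

      if ( ( events & EVT_TRANSMITTED ) != 0 ) {
        events &= ~EVT_TRANSMITTED;

        if ( reclaim_descriptors() ) {
          /* Descriptors came back: resume a stalled transmission */
          events |= EVT_TRANSMIT;
        } else {
          /* Nothing reclaimable yet: re-arm the TX interrupt and wait */
          enable_irq_tx_transmitted();
        }
      }

      if ( ( events & EVT_TRANSMIT ) != 0 ) {
        events &= ~EVT_TRANSMIT;

        if ( !queue_pending_frames() ) {
          /* Ring exhausted: revisit once frames have been transmitted */
          events |= EVT_TRANSMITTED;
        }
      }
    }
  }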