source: rtems/c/src/lib/libbsp/sparc/shared/spw/grspw_pkt.c @ 8acfa94

Last change on this file since 8acfa94 was 8acfa94, checked in by Daniel Hellstrom <daniel@…>, on 01/22/17 at 14:43:17

leon, grpsw_pkt: set unique work-task name

/*
 * Cobham Gaisler GRSPW/GRSPW2 SpaceWire Kernel Library Interface for RTEMS.
 *
 * This driver can be used to implement a standard I/O system "char"-driver
 * or used directly. NOTE SMP support has not been tested.
 *
 * COPYRIGHT (c) 2011
 * Cobham Gaisler AB
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#include <rtems.h>
#include <bsp.h>
#include <rtems/libio.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <ctype.h>
#include <malloc.h>
#include <rtems/bspIo.h>

#include <drvmgr/drvmgr.h>
#include <ambapp.h>
#include <drvmgr/ambapp_bus.h>
#include <bsp/grspw_pkt.h>

/* This driver has been prepared for SMP operation, however it has never been
 * tested on an SMP system - use at your own risk.
 */
#ifdef RTEMS_HAS_SMP

#include <rtems/score/smplock.h> /* spin-lock */

/* SPIN_LOCK() and SPIN_UNLOCK() NOT_IMPLEMENTED_BY_RTEMS. Use _IRQ version
 * to implement.
 */
#define SPIN_DECLARE(name) SMP_lock_spinlock_simple_Control name
#define SPIN_INIT(lock) _SMP_lock_spinlock_simple_Initialize(lock)
#define SPIN_LOCK(lock, level) SPIN_LOCK_IRQ(lock, level)
#define SPIN_LOCK_IRQ(lock, level) (level) = _SMP_lock_spinlock_simple_Obtain(lock)
#define SPIN_UNLOCK(lock, level) SPIN_UNLOCK_IRQ(lock, level)
#define SPIN_UNLOCK_IRQ(lock, level) _SMP_lock_spinlock_simple_Release(lock, level)
#define IRQFLAGS_TYPE ISR_Level

#else

#define SPIN_DECLARE(name)
#define SPIN_INIT(lock)
#define SPIN_LOCK(lock, level)
#define SPIN_LOCK_IRQ(lock, level) rtems_interrupt_disable(level)
#define SPIN_UNLOCK(lock, level)
#define SPIN_UNLOCK_IRQ(lock, level) rtems_interrupt_enable(level)
#define IRQFLAGS_TYPE rtems_interrupt_level

#endif
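
/* Illustrative sketch (not part of the driver): the ISR-protection pattern
 * these macros are used for throughout this file. On non-SMP builds it
 * reduces to plain interrupt disable/enable around the critical section.
 *
 *   IRQFLAGS_TYPE irqflags;
 *
 *   SPIN_LOCK_IRQ(&priv->devlock, irqflags);
 *   ...access CTRL/DMACTRL registers shared with the ISR...
 *   SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
 */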

/*#define STATIC*/
#define STATIC static

/*#define GRSPW_DBG(args...) printk(args)*/
#define GRSPW_DBG(args...)

struct grspw_dma_regs {
        volatile unsigned int ctrl;     /* DMA Channel Control */
        volatile unsigned int rxmax;    /* RX Max Packet Length */
        volatile unsigned int txdesc;   /* TX Descriptor Base/Current */
        volatile unsigned int rxdesc;   /* RX Descriptor Base/Current */
        volatile unsigned int addr;     /* Address Register */
        volatile unsigned int resv[3];
};

struct grspw_regs {
        volatile unsigned int ctrl;
        volatile unsigned int status;
        volatile unsigned int nodeaddr;
        volatile unsigned int clkdiv;
        volatile unsigned int destkey;
        volatile unsigned int time;
        volatile unsigned int timer;    /* Used only in GRSPW1 */
        volatile unsigned int resv1;

        /* DMA Registers, ctrl.NCH determines number of DMA channels,
         * up to 4 channels are supported
         */
        struct grspw_dma_regs dma[4];

        volatile unsigned int icctrl;
        volatile unsigned int icrx;
        volatile unsigned int icack;
        volatile unsigned int ictimeout;
        volatile unsigned int ictickomask;
        volatile unsigned int icaamask;
        volatile unsigned int icrlpresc;
        volatile unsigned int icrlisr;
        volatile unsigned int icrlintack;
        volatile unsigned int resv2;
        volatile unsigned int icisr;
        volatile unsigned int resv3;
};

/* GRSPW - Control Register - 0x00 */
#define GRSPW_CTRL_RA_BIT       31
#define GRSPW_CTRL_RX_BIT       30
#define GRSPW_CTRL_RC_BIT       29
#define GRSPW_CTRL_NCH_BIT      27
#define GRSPW_CTRL_PO_BIT       26
#define GRSPW_CTRL_ID_BIT       24
#define GRSPW_CTRL_LE_BIT       22
#define GRSPW_CTRL_PS_BIT       21
#define GRSPW_CTRL_NP_BIT       20
#define GRSPW_CTRL_RD_BIT       17
#define GRSPW_CTRL_RE_BIT       16
#define GRSPW_CTRL_TF_BIT       12
#define GRSPW_CTRL_TR_BIT       11
#define GRSPW_CTRL_TT_BIT       10
#define GRSPW_CTRL_LI_BIT       9
#define GRSPW_CTRL_TQ_BIT       8
#define GRSPW_CTRL_RS_BIT       6
#define GRSPW_CTRL_PM_BIT       5
#define GRSPW_CTRL_TI_BIT       4
#define GRSPW_CTRL_IE_BIT       3
#define GRSPW_CTRL_AS_BIT       2
#define GRSPW_CTRL_LS_BIT       1
#define GRSPW_CTRL_LD_BIT       0

#define GRSPW_CTRL_RA   (1<<GRSPW_CTRL_RA_BIT)
#define GRSPW_CTRL_RX   (1<<GRSPW_CTRL_RX_BIT)
#define GRSPW_CTRL_RC   (1<<GRSPW_CTRL_RC_BIT)
#define GRSPW_CTRL_NCH  (0x3<<GRSPW_CTRL_NCH_BIT)
#define GRSPW_CTRL_PO   (1<<GRSPW_CTRL_PO_BIT)
#define GRSPW_CTRL_ID   (1<<GRSPW_CTRL_ID_BIT)
#define GRSPW_CTRL_LE   (1<<GRSPW_CTRL_LE_BIT)
#define GRSPW_CTRL_PS   (1<<GRSPW_CTRL_PS_BIT)
#define GRSPW_CTRL_NP   (1<<GRSPW_CTRL_NP_BIT)
#define GRSPW_CTRL_RD   (1<<GRSPW_CTRL_RD_BIT)
#define GRSPW_CTRL_RE   (1<<GRSPW_CTRL_RE_BIT)
#define GRSPW_CTRL_TF   (1<<GRSPW_CTRL_TF_BIT)
#define GRSPW_CTRL_TR   (1<<GRSPW_CTRL_TR_BIT)
#define GRSPW_CTRL_TT   (1<<GRSPW_CTRL_TT_BIT)
#define GRSPW_CTRL_LI   (1<<GRSPW_CTRL_LI_BIT)
#define GRSPW_CTRL_TQ   (1<<GRSPW_CTRL_TQ_BIT)
#define GRSPW_CTRL_RS   (1<<GRSPW_CTRL_RS_BIT)
#define GRSPW_CTRL_PM   (1<<GRSPW_CTRL_PM_BIT)
#define GRSPW_CTRL_TI   (1<<GRSPW_CTRL_TI_BIT)
#define GRSPW_CTRL_IE   (1<<GRSPW_CTRL_IE_BIT)
#define GRSPW_CTRL_AS   (1<<GRSPW_CTRL_AS_BIT)
#define GRSPW_CTRL_LS   (1<<GRSPW_CTRL_LS_BIT)
#define GRSPW_CTRL_LD   (1<<GRSPW_CTRL_LD_BIT)

#define GRSPW_CTRL_IRQSRC_MASK \
        (GRSPW_CTRL_LI | GRSPW_CTRL_TQ)
#define GRSPW_ICCTRL_IRQSRC_MASK \
        (GRSPW_ICCTRL_TQ | GRSPW_ICCTRL_AQ | GRSPW_ICCTRL_IQ)


/* GRSPW - Status Register - 0x04 */
#define GRSPW_STS_LS_BIT        21
#define GRSPW_STS_AP_BIT        9
#define GRSPW_STS_EE_BIT        8
#define GRSPW_STS_IA_BIT        7
#define GRSPW_STS_WE_BIT        6       /* GRSPW1 */
#define GRSPW_STS_PE_BIT        4
#define GRSPW_STS_DE_BIT        3
#define GRSPW_STS_ER_BIT        2
#define GRSPW_STS_CE_BIT        1
#define GRSPW_STS_TO_BIT        0

#define GRSPW_STS_LS    (0x7<<GRSPW_STS_LS_BIT)
#define GRSPW_STS_AP    (1<<GRSPW_STS_AP_BIT)
#define GRSPW_STS_EE    (1<<GRSPW_STS_EE_BIT)
#define GRSPW_STS_IA    (1<<GRSPW_STS_IA_BIT)
#define GRSPW_STS_WE    (1<<GRSPW_STS_WE_BIT)   /* GRSPW1 */
#define GRSPW_STS_PE    (1<<GRSPW_STS_PE_BIT)
#define GRSPW_STS_DE    (1<<GRSPW_STS_DE_BIT)
#define GRSPW_STS_ER    (1<<GRSPW_STS_ER_BIT)
#define GRSPW_STS_CE    (1<<GRSPW_STS_CE_BIT)
#define GRSPW_STS_TO    (1<<GRSPW_STS_TO_BIT)

/* GRSPW - Default Address Register - 0x08 */
#define GRSPW_DEF_ADDR_BIT      0
#define GRSPW_DEF_MASK_BIT      8
#define GRSPW_DEF_ADDR  (0xff<<GRSPW_DEF_ADDR_BIT)
#define GRSPW_DEF_MASK  (0xff<<GRSPW_DEF_MASK_BIT)

/* GRSPW - Clock Divisor Register - 0x0C */
#define GRSPW_CLKDIV_START_BIT  8
#define GRSPW_CLKDIV_RUN_BIT    0
#define GRSPW_CLKDIV_START      (0xff<<GRSPW_CLKDIV_START_BIT)
#define GRSPW_CLKDIV_RUN        (0xff<<GRSPW_CLKDIV_RUN_BIT)
#define GRSPW_CLKDIV_MASK       (GRSPW_CLKDIV_START|GRSPW_CLKDIV_RUN)

/* GRSPW - Destination key Register - 0x10 */
#define GRSPW_DK_DESTKEY_BIT    0
#define GRSPW_DK_DESTKEY        (0xff<<GRSPW_DK_DESTKEY_BIT)

/* GRSPW - Time Register - 0x14 */
#define GRSPW_TIME_CTRL_BIT     6
#define GRSPW_TIME_CNT_BIT      0
#define GRSPW_TIME_CTRL         (0x3<<GRSPW_TIME_CTRL_BIT)
#define GRSPW_TIME_TCNT         (0x3f<<GRSPW_TIME_CNT_BIT)

/* GRSPW - DMA Control Register - 0x20*N */
#define GRSPW_DMACTRL_LE_BIT    16
#define GRSPW_DMACTRL_SP_BIT    15
#define GRSPW_DMACTRL_SA_BIT    14
#define GRSPW_DMACTRL_EN_BIT    13
#define GRSPW_DMACTRL_NS_BIT    12
#define GRSPW_DMACTRL_RD_BIT    11
#define GRSPW_DMACTRL_RX_BIT    10
#define GRSPW_DMACTRL_AT_BIT    9
#define GRSPW_DMACTRL_RA_BIT    8
#define GRSPW_DMACTRL_TA_BIT    7
#define GRSPW_DMACTRL_PR_BIT    6
#define GRSPW_DMACTRL_PS_BIT    5
#define GRSPW_DMACTRL_AI_BIT    4
#define GRSPW_DMACTRL_RI_BIT    3
#define GRSPW_DMACTRL_TI_BIT    2
#define GRSPW_DMACTRL_RE_BIT    1
#define GRSPW_DMACTRL_TE_BIT    0

#define GRSPW_DMACTRL_LE        (1<<GRSPW_DMACTRL_LE_BIT)
#define GRSPW_DMACTRL_SP        (1<<GRSPW_DMACTRL_SP_BIT)
#define GRSPW_DMACTRL_SA        (1<<GRSPW_DMACTRL_SA_BIT)
#define GRSPW_DMACTRL_EN        (1<<GRSPW_DMACTRL_EN_BIT)
#define GRSPW_DMACTRL_NS        (1<<GRSPW_DMACTRL_NS_BIT)
#define GRSPW_DMACTRL_RD        (1<<GRSPW_DMACTRL_RD_BIT)
#define GRSPW_DMACTRL_RX        (1<<GRSPW_DMACTRL_RX_BIT)
#define GRSPW_DMACTRL_AT        (1<<GRSPW_DMACTRL_AT_BIT)
#define GRSPW_DMACTRL_RA        (1<<GRSPW_DMACTRL_RA_BIT)
#define GRSPW_DMACTRL_TA        (1<<GRSPW_DMACTRL_TA_BIT)
#define GRSPW_DMACTRL_PR        (1<<GRSPW_DMACTRL_PR_BIT)
#define GRSPW_DMACTRL_PS        (1<<GRSPW_DMACTRL_PS_BIT)
#define GRSPW_DMACTRL_AI        (1<<GRSPW_DMACTRL_AI_BIT)
#define GRSPW_DMACTRL_RI        (1<<GRSPW_DMACTRL_RI_BIT)
#define GRSPW_DMACTRL_TI        (1<<GRSPW_DMACTRL_TI_BIT)
#define GRSPW_DMACTRL_RE        (1<<GRSPW_DMACTRL_RE_BIT)
#define GRSPW_DMACTRL_TE        (1<<GRSPW_DMACTRL_TE_BIT)

/* GRSPW - DMA Channel Max Packet Length Register - (0x20*N + 0x04) */
#define GRSPW_DMARXLEN_MAX_BIT  0
#define GRSPW_DMARXLEN_MAX      (0xffffff<<GRSPW_DMARXLEN_MAX_BIT)

/* GRSPW - DMA Channel Address Register - (0x20*N + 0x10) */
#define GRSPW_DMAADR_ADDR_BIT   0
#define GRSPW_DMAADR_MASK_BIT   8
#define GRSPW_DMAADR_ADDR       (0xff<<GRSPW_DMAADR_ADDR_BIT)
#define GRSPW_DMAADR_MASK       (0xff<<GRSPW_DMAADR_MASK_BIT)

/* GRSPW - Interrupt code receive register - 0xa4 */
#define GRSPW_ICCTRL_INUM_BIT   27
#define GRSPW_ICCTRL_IA_BIT     24
#define GRSPW_ICCTRL_LE_BIT     23
#define GRSPW_ICCTRL_PR_BIT     22
#define GRSPW_ICCTRL_DQ_BIT     21 /* never used */
#define GRSPW_ICCTRL_TQ_BIT     20
#define GRSPW_ICCTRL_AQ_BIT     19
#define GRSPW_ICCTRL_IQ_BIT     18
#define GRSPW_ICCTRL_IR_BIT     17
#define GRSPW_ICCTRL_IT_BIT     16
#define GRSPW_ICCTRL_NUMI_BIT   13
#define GRSPW_ICCTRL_BIRQ_BIT   8
#define GRSPW_ICCTRL_ID_BIT     7
#define GRSPW_ICCTRL_II_BIT     6
#define GRSPW_ICCTRL_TXIRQ_BIT  0
#define GRSPW_ICCTRL_INUM       (0x1f << GRSPW_ICCTRL_INUM_BIT)
#define GRSPW_ICCTRL_IA         (1 << GRSPW_ICCTRL_IA_BIT)
#define GRSPW_ICCTRL_LE         (1 << GRSPW_ICCTRL_LE_BIT)
#define GRSPW_ICCTRL_PR         (1 << GRSPW_ICCTRL_PR_BIT)
#define GRSPW_ICCTRL_DQ         (1 << GRSPW_ICCTRL_DQ_BIT)
#define GRSPW_ICCTRL_TQ         (1 << GRSPW_ICCTRL_TQ_BIT)
#define GRSPW_ICCTRL_AQ         (1 << GRSPW_ICCTRL_AQ_BIT)
#define GRSPW_ICCTRL_IQ         (1 << GRSPW_ICCTRL_IQ_BIT)
#define GRSPW_ICCTRL_IR         (1 << GRSPW_ICCTRL_IR_BIT)
#define GRSPW_ICCTRL_IT         (1 << GRSPW_ICCTRL_IT_BIT)
#define GRSPW_ICCTRL_NUMI       (0x7 << GRSPW_ICCTRL_NUMI_BIT)
#define GRSPW_ICCTRL_BIRQ       (0x1f << GRSPW_ICCTRL_BIRQ_BIT)
#define GRSPW_ICCTRL_ID         (1 << GRSPW_ICCTRL_ID_BIT)
#define GRSPW_ICCTRL_II         (1 << GRSPW_ICCTRL_II_BIT)
#define GRSPW_ICCTRL_TXIRQ      (0x3f << GRSPW_ICCTRL_TXIRQ_BIT)

/* RX Buffer Descriptor */
struct grspw_rxbd {
   volatile unsigned int ctrl;
   volatile unsigned int addr;
};

/* TX Buffer Descriptor */
struct grspw_txbd {
   volatile unsigned int ctrl;
   volatile unsigned int haddr;
   volatile unsigned int dlen;
   volatile unsigned int daddr;
};

/* GRSPW - DMA RXBD Ctrl */
#define GRSPW_RXBD_LEN_BIT 0
#define GRSPW_RXBD_LEN  (0x1ffffff<<GRSPW_RXBD_LEN_BIT)
#define GRSPW_RXBD_EN   (1<<25)
#define GRSPW_RXBD_WR   (1<<26)
#define GRSPW_RXBD_IE   (1<<27)
#define GRSPW_RXBD_EP   (1<<28)
#define GRSPW_RXBD_HC   (1<<29)
#define GRSPW_RXBD_DC   (1<<30)
#define GRSPW_RXBD_TR   (1<<31)

#define GRSPW_TXBD_HLEN (0xff<<0)
#define GRSPW_TXBD_NCL  (0xf<<8)
#define GRSPW_TXBD_EN   (1<<12)
#define GRSPW_TXBD_WR   (1<<13)
#define GRSPW_TXBD_IE   (1<<14)
#define GRSPW_TXBD_LE   (1<<15)
#define GRSPW_TXBD_HC   (1<<16)
#define GRSPW_TXBD_DC   (1<<17)

#define GRSPW_DMAADR_MASK_BIT   8
#define GRSPW_DMAADR_ADDR       (0xff<<GRSPW_DMAADR_ADDR_BIT)
#define GRSPW_DMAADR_MASK       (0xff<<GRSPW_DMAADR_MASK_BIT)


/* GRSPW Error Condition */
#define GRSPW_STAT_ERROR        (GRSPW_STS_EE | GRSPW_STS_IA | GRSPW_STS_WE | GRSPW_STS_PE | GRSPW_STS_DE | GRSPW_STS_ER | GRSPW_STS_CE)
#define GRSPW_DMA_STATUS_ERROR  (GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA)
/* GRSPW Link configuration options */
#define GRSPW_LINK_CFG          (GRSPW_CTRL_LI | GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS)
#define GRSPW_LINKSTATE(status) ((status & GRSPW_CTRL_LS) >> GRSPW_CTRL_LS_BIT)

/* Software Defaults */
#define DEFAULT_RXMAX 1024      /* 1 KBytes Max RX Packet Size */

/* GRSPW Constants */
#define GRSPW_TXBD_NR 64        /* Maximum number of TX Descriptors */
#define GRSPW_RXBD_NR 128       /* Maximum number of RX Descriptors */
#define GRSPW_TXBD_SIZE 16      /* Size in bytes of one TX descriptor */
#define GRSPW_RXBD_SIZE 8       /* Size in bytes of one RX descriptor */
#define BDTAB_SIZE 0x400        /* BD Table Size (RX or TX) */
#define BDTAB_ALIGN 0x400       /* BD Table Alignment Requirement */
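
/* Consistency check (illustrative): both rings fit exactly in one table:
 * GRSPW_TXBD_NR * GRSPW_TXBD_SIZE = 64 * 16 = 0x400 = BDTAB_SIZE and
 * GRSPW_RXBD_NR * GRSPW_RXBD_SIZE = 128 * 8 = 0x400 = BDTAB_SIZE.
 */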

/* Memory and HW register access routines. All accesses are 32-bit wide. */
#define BD_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
/*#define BD_READ(addr) (*(volatile unsigned int *)(addr))*/
#define BD_READ(addr) leon_r32_no_cache((unsigned long)(addr))
#define REG_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
#define REG_READ(addr) (*(volatile unsigned int *)(addr))

struct grspw_ring {
        struct grspw_ring *next;        /* Next Descriptor */
        union {
                struct grspw_txbd *tx;  /* Descriptor Address */
                struct grspw_rxbd *rx;  /* Descriptor Address */
        } bd;
        struct grspw_pkt *pkt;          /* Associated packet description. NULL if none */
};

/* An entry in the TX descriptor Ring */
struct grspw_txring {
        struct grspw_txring *next;      /* Next Descriptor */
        struct grspw_txbd *bd;          /* Descriptor Address */
        struct grspw_pkt *pkt;          /* Associated packet description. NULL if none */
};

/* An entry in the RX descriptor Ring */
struct grspw_rxring {
        struct grspw_rxring *next;      /* Next Descriptor */
        struct grspw_rxbd *bd;          /* Descriptor Address */
        struct grspw_pkt *pkt;          /* Associated packet description. NULL if none */
};


struct grspw_dma_priv {
        struct grspw_priv *core;        /* GRSPW Core */
        struct grspw_dma_regs *regs;    /* DMA Channel Registers */
        int index;                      /* DMA Channel Index @ GRSPW core */
        int open;                       /* DMA Channel opened by user */
        int started;                    /* DMA Channel activity (start|stop) */
        rtems_id sem_rxdma;             /* DMA Channel RX Semaphore */
        rtems_id sem_txdma;             /* DMA Channel TX Semaphore */
        struct grspw_dma_stats stats;   /* DMA Channel Statistics */
        struct grspw_dma_config cfg;    /* DMA Channel Configuration */

        /*** RX ***/

        /* RX Descriptor Ring */
        struct grspw_rxbd *rx_bds;              /* Descriptor Address */
        struct grspw_rxbd *rx_bds_hwa;          /* Descriptor HW Address */
        struct grspw_rxring *rx_ring_base;
        struct grspw_rxring *rx_ring_head;      /* Next descriptor to enable */
        struct grspw_rxring *rx_ring_tail;      /* Oldest enabled Descriptor */
        int rx_irq_en_cnt_curr;
        struct {
                int waiting;
                int ready_cnt;
                int op;
                int recv_cnt;
                rtems_id sem_wait;              /* RX Semaphore used to implement RX blocking */
        } rx_wait;

        /* Queue of Packets READY to be scheduled */
        struct grspw_list ready;
        int ready_cnt;

        /* Scheduled RX Packets Queue */
        struct grspw_list rx_sched;
        int rx_sched_cnt;

        /* Queue of Packets that have been RECEIVED */
        struct grspw_list recv;
        int recv_cnt;


        /*** TX ***/

        /* TX Descriptor Ring */
        struct grspw_txbd *tx_bds;              /* Descriptor Address */
        struct grspw_txbd *tx_bds_hwa;          /* Descriptor HW Address */
        struct grspw_txring *tx_ring_base;
        struct grspw_txring *tx_ring_head;
        struct grspw_txring *tx_ring_tail;
        int tx_irq_en_cnt_curr;
        struct {
                int waiting;
                int send_cnt;
                int op;
                int sent_cnt;
                rtems_id sem_wait;              /* TX Semaphore used to implement TX blocking */
        } tx_wait;

        /* Queue of Packets ready to be scheduled for transmission */
        struct grspw_list send;
        int send_cnt;

        /* Scheduled TX Packets Queue */
        struct grspw_list tx_sched;
        int tx_sched_cnt;

        /* Queue of Packets that have been SENT */
        struct grspw_list sent;
        int sent_cnt;
};

struct grspw_priv {
        char devname[8];                /* Device name "grspw%d" */
        struct drvmgr_dev *dev;         /* Device */
        struct grspw_regs *regs;        /* Virtual Address of APB Registers */
        int irq;                        /* AMBA IRQ number of core */
        int index;                      /* Index in order it was probed */
        int core_index;                 /* Core Bus Index */
        int open;                       /* If Device is already opened (=1) or not (=0) */
        void *data;                     /* User private Data for this device instance, set by grspw_initialize_user */

        /* Features supported by Hardware */
        struct grspw_hw_sup hwsup;

        /* Pointer to an array of at most 4 DMA Channels */
        struct grspw_dma_priv *dma;

        /* Spin-lock ISR protection */
        SPIN_DECLARE(devlock);

        /* Descriptor Memory Area for TX & RX and all DMA channels */
        unsigned int bd_mem;
        unsigned int bd_mem_alloced;

        /*** Time Code Handling ***/
        void (*tcisr)(void *data, int timecode);
        void *tcisr_arg;

        /*** Interrupt-code Handling ***/
        spwpkt_ic_isr_t icisr;
        void *icisr_arg;

        /* Bit mask representing events which shall cause link disable. */
        unsigned int dis_link_on_err;

        /* Bit mask for link status bits to clear by ISR */
        unsigned int stscfg;

        /*** Message Queue Handling ***/
        struct grspw_work_config wc;

        /* "Core Global" Statistics gathered, not dependent on DMA channel */
        struct grspw_core_stats stats;
};

int grspw_initialized = 0;
int grspw_count = 0;
rtems_id grspw_sem;
static struct grspw_priv *priv_tab[GRSPW_MAX];

/* callback to upper layer when devices are discovered/removed */
void *(*grspw_dev_add)(int) = NULL;
void (*grspw_dev_del)(int,void*) = NULL;

/* Defaults to do nothing - user can override this function.
 * Called from work-task.
 */
void __attribute__((weak)) grspw_work_event(
        enum grspw_worktask_ev ev,
        unsigned int msg)
{

}

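/* Illustrative sketch (not part of the driver): an application can provide
 * its own strong definition of grspw_work_event() to be notified from the
 * work-task, e.g. for logging:
 *
 *   void grspw_work_event(enum grspw_worktask_ev ev, unsigned int msg)
 *   {
 *           printk("GRSPW work-task event %d (msg 0x%08x)\n", ev, msg);
 *   }
 */
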
/* USER OVERRIDABLE - The work task priority. Set to -1 to disable creating
 * the work-task and work-queue to save space.
 */
int grspw_work_task_priority __attribute__((weak)) = 100;
rtems_id grspw_work_task;
static struct grspw_work_config grspw_wc_def;
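
/* Illustrative sketch (not part of the driver): an application that never
 * uses the work-task can override the weak symbol above to save space:
 *
 *   int grspw_work_task_priority = -1;
 */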

STATIC void grspw_hw_stop(struct grspw_priv *priv);
STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma);
STATIC void grspw_dma_reset(struct grspw_dma_priv *dma);
STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma);
STATIC void grspw_isr(void *data);

void *grspw_open(int dev_no)
{
        struct grspw_priv *priv;
        unsigned int bdtabsize, hwa;
        int i;
        union drvmgr_key_value *value;

        if (grspw_initialized != 1 || (dev_no >= grspw_count))
                return NULL;

        priv = priv_tab[dev_no];

        /* Take GRSPW lock - Wait until we get semaphore */
        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return NULL;

        if (priv->open) {
                priv = NULL;
                goto out;
        }

        /* Initialize Spin-lock for GRSPW Device. This is to protect
         * CTRL and DMACTRL registers from ISR.
         */
        SPIN_INIT(&priv->devlock);

        priv->tcisr = NULL;
        priv->tcisr_arg = NULL;
        priv->icisr = NULL;
        priv->icisr_arg = NULL;
        priv->stscfg = LINKSTS_MASK;

        /* Default to the common work queue and message queue; if they were
         * not created during initialization this stays disabled.
         */
        grspw_work_cfg(priv, &grspw_wc_def);

        grspw_stats_clr(priv);

        /* Allocate TX & RX Descriptor memory area for all DMA
         * channels. Max-size descriptor area is allocated (or user assigned):
         *  - 128 RX descriptors per DMA Channel
         *  - 64 TX descriptors per DMA Channel
         * Specified address must be in CPU RAM.
         */
        bdtabsize = 2 * BDTAB_SIZE * priv->hwsup.ndma_chans;
        value = drvmgr_dev_key_get(priv->dev, "bdDmaArea", DRVMGR_KT_INT);
        if (value) {
                priv->bd_mem = value->i;
                priv->bd_mem_alloced = 0;
                if (priv->bd_mem & (BDTAB_ALIGN-1)) {
                        GRSPW_DBG("GRSPW[%d]: user-def DMA-area not aligned",
                                  priv->index);
                        priv = NULL;
                        goto out;
                }
        } else {
                priv->bd_mem_alloced = (unsigned int)malloc(bdtabsize + BDTAB_ALIGN - 1);
                if (priv->bd_mem_alloced == 0) {
                        priv = NULL;
                        goto out;
                }
                /* Align memory */
                priv->bd_mem = (priv->bd_mem_alloced + (BDTAB_ALIGN - 1)) &
                               ~(BDTAB_ALIGN-1);
        }

        /* Translate into DMA address that HW can use to access DMA
         * descriptors
         */
        drvmgr_translate_check(
                priv->dev,
                CPUMEM_TO_DMA,
                (void *)priv->bd_mem,
                (void **)&hwa,
                bdtabsize);

        GRSPW_DBG("GRSPW%d DMA descriptor table setup: (alloced:%p, bd_mem:%p, size: %d)\n",
                priv->index, priv->bd_mem_alloced, priv->bd_mem, bdtabsize + BDTAB_ALIGN - 1);
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                /* Do DMA Channel Init; other variables etc. are initialized
                 * when the respective DMA channel is opened.
                 *
                 * index & core are initialized by probe function.
                 */
                priv->dma[i].open = 0;
                priv->dma[i].rx_bds = (struct grspw_rxbd *)
                        (priv->bd_mem + i*BDTAB_SIZE*2);
                priv->dma[i].rx_bds_hwa = (struct grspw_rxbd *)
                        (hwa + BDTAB_SIZE*(2*i));
                priv->dma[i].tx_bds = (struct grspw_txbd *)
                        (priv->bd_mem + BDTAB_SIZE*(2*i+1));
                priv->dma[i].tx_bds_hwa = (struct grspw_txbd *)
                        (hwa + BDTAB_SIZE*(2*i+1));
                GRSPW_DBG("  DMA[%i]: RX %p - %p (%p - %p)   TX %p - %p (%p - %p)\n",
                        i,
                        priv->dma[i].rx_bds, (void *)priv->dma[i].rx_bds + BDTAB_SIZE - 1,
                        priv->dma[i].rx_bds_hwa, (void *)priv->dma[i].rx_bds_hwa + BDTAB_SIZE - 1,
                        priv->dma[i].tx_bds, (void *)priv->dma[i].tx_bds + BDTAB_SIZE - 1,
                        priv->dma[i].tx_bds_hwa, (void *)priv->dma[i].tx_bds_hwa + BDTAB_SIZE - 1);
        }

        /* Basic initialization of hardware, clear some registers but
         * keep Link/RMAP/Node-Address registers intact.
         */
        grspw_hw_stop(priv);

        /* Register Interrupt handler and enable IRQ at IRQ ctrl */
        drvmgr_interrupt_register(priv->dev, 0, priv->devname, grspw_isr, priv);

        /* Take the device */
        priv->open = 1;
out:
        rtems_semaphore_release(grspw_sem);
        return priv;
}
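
/* Illustrative sketch (not part of the driver): typical open/close usage.
 * grspw_dev_count() is assumed from this driver's user interface in
 * grspw_pkt.h; error handling is reduced to a minimum.
 *
 *   void *dev;
 *
 *   if (grspw_dev_count() < 1)
 *           return;                 // no GRSPW core probed
 *   dev = grspw_open(0);
 *   if (dev == NULL)
 *           return;                 // already opened or not initialized
 *   ...configure link, open DMA channels, transfer...
 *   grspw_close(dev);
 */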

int grspw_close(void *d)
{
        struct grspw_priv *priv = d;
        int i;

        /* Take GRSPW lock - Wait until we get semaphore */
        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return -1;

        /* Check that user has stopped and closed all DMA channels
         * appropriately. At this point the Hardware shall not be doing DMA
         * or generating Interrupts. We want HW in a "startup-state".
         */
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                if (priv->dma[i].open) {
                        rtems_semaphore_release(grspw_sem);
                        return 1;
                }
        }
        grspw_hw_stop(priv);

        /* Mark not open */
        priv->open = 0;
        rtems_semaphore_release(grspw_sem);
        return 0;
}

void grspw_hw_support(void *d, struct grspw_hw_sup *hw)
{
        struct grspw_priv *priv = d;

        *hw = priv->hwsup;
}

void grspw_addr_ctrl(void *d, struct grspw_addr_config *cfg)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs;
        unsigned int ctrl, nodeaddr;
        IRQFLAGS_TYPE irqflags;
        int i;

        if (!priv || !cfg)
                return;

        regs = priv->regs; /* dereference priv only after the NULL check */
        SPIN_LOCK_IRQ(&priv->devlock, irqflags);

        if (cfg->promiscuous != -1) {
                /* Set Configuration */
                ctrl = REG_READ(&regs->ctrl);
                if (cfg->promiscuous)
                        ctrl |= GRSPW_CTRL_PM;
                else
                        ctrl &= ~GRSPW_CTRL_PM;
                REG_WRITE(&regs->ctrl, ctrl);
                REG_WRITE(&regs->nodeaddr, (cfg->def_mask<<8) | cfg->def_addr);

                for (i=0; i<priv->hwsup.ndma_chans; i++) {
                        ctrl = REG_READ(&regs->dma[i].ctrl);
                        ctrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
                        if (cfg->dma_nacfg[i].node_en) {
                                ctrl |= GRSPW_DMACTRL_EN;
                                REG_WRITE(&regs->dma[i].addr,
                                          (cfg->dma_nacfg[i].node_addr & 0xff) |
                                          ((cfg->dma_nacfg[i].node_mask & 0xff)<<8));
                        } else {
                                ctrl &= ~GRSPW_DMACTRL_EN;
                        }
                        REG_WRITE(&regs->dma[i].ctrl, ctrl);
                }
        }

        /* Read Current Configuration */
        cfg->promiscuous = REG_READ(&regs->ctrl) & GRSPW_CTRL_PM;
        nodeaddr = REG_READ(&regs->nodeaddr);
        cfg->def_addr = (nodeaddr & GRSPW_DEF_ADDR) >> GRSPW_DEF_ADDR_BIT;
        cfg->def_mask = (nodeaddr & GRSPW_DEF_MASK) >> GRSPW_DEF_MASK_BIT;
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                cfg->dma_nacfg[i].node_en = REG_READ(&regs->dma[i].ctrl) &
                                                GRSPW_DMACTRL_EN;
                ctrl = REG_READ(&regs->dma[i].addr);
                cfg->dma_nacfg[i].node_addr = (ctrl & GRSPW_DMAADR_ADDR) >>
                                                GRSPW_DMAADR_ADDR_BIT;
                cfg->dma_nacfg[i].node_mask = (ctrl & GRSPW_DMAADR_MASK) >>
                                                GRSPW_DMAADR_MASK_BIT;
        }
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        for (; i<4; i++) {
                cfg->dma_nacfg[i].node_en = 0;
                cfg->dma_nacfg[i].node_addr = 0;
                cfg->dma_nacfg[i].node_mask = 0;
        }
}
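
/* Illustrative sketch (not part of the driver): read back the current
 * address configuration without changing it by setting promiscuous to -1.
 * struct grspw_addr_config is declared in grspw_pkt.h.
 *
 *   struct grspw_addr_config cfg;
 *
 *   cfg.promiscuous = -1;
 *   grspw_addr_ctrl(dev, &cfg);
 *   printf("node address: %d\n", cfg.def_addr);
 */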

/* Return Current DMA CTRL/Status Register */
unsigned int grspw_dma_ctrlsts(void *c)
{
        struct grspw_dma_priv *dma = c;

        return REG_READ(&dma->regs->ctrl);
}

/* Return Current Status Register */
unsigned int grspw_link_status(void *d)
{
        struct grspw_priv *priv = d;

        return REG_READ(&priv->regs->status);
}

/* Clear Status Register bits */
void grspw_link_status_clr(void *d, unsigned int mask)
{
        struct grspw_priv *priv = d;

        REG_WRITE(&priv->regs->status, mask);
}

/* Return Current Link State */
spw_link_state_t grspw_link_state(void *d)
{
        struct grspw_priv *priv = d;
        unsigned int status = REG_READ(&priv->regs->status);

        return (status & GRSPW_STS_LS) >> GRSPW_STS_LS_BIT;
}
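
/* Illustrative sketch (not part of the driver): poll until the link reaches
 * run-state. SPW_LS_RUN is assumed to come from the spw_link_state_t enum in
 * grspw_pkt.h; a real application would add a timeout.
 *
 *   while (grspw_link_state(dev) != SPW_LS_RUN)
 *           rtems_task_wake_after(1);
 */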

/* Enable Global IRQ only if some irq source is set */
static inline int grspw_is_irqsource_set(unsigned int ctrl, unsigned int icctrl)
{
        return (ctrl & GRSPW_CTRL_IRQSRC_MASK) ||
                (icctrl & GRSPW_ICCTRL_IRQSRC_MASK);
}


/* options and clkdiv [in/out]: set to -1 to only read current config */
void grspw_link_ctrl(void *d, int *options, int *stscfg, int *clkdiv)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        /* Write? */
        if (clkdiv) {
                if (*clkdiv != -1)
                        REG_WRITE(&regs->clkdiv, *clkdiv & GRSPW_CLKDIV_MASK);
                *clkdiv = REG_READ(&regs->clkdiv) & GRSPW_CLKDIV_MASK;
        }
        if (options) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                if (*options != -1) {
                        ctrl = (ctrl & ~GRSPW_LINK_CFG) |
                                (*options & GRSPW_LINK_CFG);

                        /* Enable Global IRQ only if some irq source is set */
                        if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
                                ctrl |= GRSPW_CTRL_IE;
                        else
                                ctrl &= ~GRSPW_CTRL_IE;

                        REG_WRITE(&regs->ctrl, ctrl);
                        /* Store the link disable events for use in
                         * ISR. The LINKOPTS_DIS_ON_* options are actually the
                         * corresponding bits in the status register, shifted
                         * by 16.
                         */
                        priv->dis_link_on_err = *options &
                                (LINKOPTS_MASK_DIS_ON | LINKOPTS_DIS_ONERR);
                }
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
                *options = (ctrl & GRSPW_LINK_CFG) | priv->dis_link_on_err;
        }
        if (stscfg) {
                if (*stscfg != -1) {
                        priv->stscfg = *stscfg & LINKSTS_MASK;
                }
                *stscfg = priv->stscfg;
        }
}
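
/* Illustrative sketch (not part of the driver): start the link with
 * link-start and autostart, leave the clock divisor and status-clear mask
 * unchanged, and read the configuration back. LINKOPTS_ENABLE,
 * LINKOPTS_START and LINKOPTS_AUTOSTART are assumed from grspw_pkt.h.
 *
 *   int options = LINKOPTS_ENABLE | LINKOPTS_START | LINKOPTS_AUTOSTART;
 *   int stscfg = -1;
 *   int clkdiv = -1;
 *
 *   grspw_link_ctrl(dev, &options, &stscfg, &clkdiv);
 */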

/* Generate Tick-In (increment Time Counter, Send Time Code) */
void grspw_tc_tx(void *d)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        IRQFLAGS_TYPE irqflags;

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_TI);
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
}

void grspw_tc_ctrl(void *d, int *options)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        if (options == NULL)
                return;

        /* Write? */
        if (*options != -1) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~(GRSPW_CTRL_TR|GRSPW_CTRL_TT|GRSPW_CTRL_TQ);
                ctrl |= (*options & 0xd) << GRSPW_CTRL_TQ_BIT;

                /* Enable Global IRQ only if some irq source is set */
                if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
                        ctrl |= GRSPW_CTRL_IE;
                else
                        ctrl &= ~GRSPW_CTRL_IE;

                REG_WRITE(&regs->ctrl, ctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        } else
                ctrl = REG_READ(&regs->ctrl);
        *options = (ctrl >> GRSPW_CTRL_TQ_BIT) & 0xd;
}

/* Assign ISR Function to TimeCode RX IRQ */
void grspw_tc_isr(void *d, void (*tcisr)(void *data, int tc), void *data)
{
        struct grspw_priv *priv = d;

        priv->tcisr_arg = data;
        priv->tcisr = tcisr;
}

/* Read/Write TCTRL and TIMECNT. Write if not -1, always read current value
 * TCTRL   = bits 7 and 6
 * TIMECNT = bits 5 to 0
 */
void grspw_tc_time(void *d, int *time)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        if (time == NULL)
                return;
        if (*time != -1)
                REG_WRITE(&regs->time, *time & (GRSPW_TIME_TCNT | GRSPW_TIME_CTRL));
        *time = REG_READ(&regs->time) & (GRSPW_TIME_TCNT | GRSPW_TIME_CTRL);
}
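
/* Illustrative sketch (not part of the driver): register a time-code ISR,
 * enable time-code RX/TX and the tick-out IRQ, then send one Tick-In.
 * TCOPTS_EN_RX, TCOPTS_EN_TX and TCOPTS_EN_RXIRQ are assumed from
 * grspw_pkt.h; my_tc_isr is a hypothetical callback run in ISR context.
 *
 *   void my_tc_isr(void *data, int timecode) { ... }
 *
 *   int tcopts = TCOPTS_EN_RX | TCOPTS_EN_TX | TCOPTS_EN_RXIRQ;
 *
 *   grspw_tc_isr(dev, my_tc_isr, NULL);
 *   grspw_tc_ctrl(dev, &tcopts);
 *   grspw_tc_tx(dev);
 */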

/* Generate Tick-In for the given Interrupt-code and check for generation
 * error.
 *
 * Returns zero on success and non-zero on failure
 */
int grspw_ic_tickin(void *d, int ic)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        IRQFLAGS_TYPE irqflags;
        unsigned int icctrl, mask;

        /* Prepare before turning off IRQ */
        mask = 0x3f << GRSPW_ICCTRL_TXIRQ_BIT;
        ic = ((ic << GRSPW_ICCTRL_TXIRQ_BIT) & mask) |
             GRSPW_ICCTRL_II | GRSPW_ICCTRL_ID;

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        icctrl = REG_READ(&regs->icctrl);
        icctrl &= ~mask;
        icctrl |= ic;
        REG_WRITE(&regs->icctrl, icctrl); /* Generate SpW Interrupt Tick-In */
        /* the ID bit is valid after two clocks, so we do not need to wait here */
        icctrl = REG_READ(&regs->icctrl); /* Check SpW-Int generation error */
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);

        return icctrl & GRSPW_ICCTRL_ID;
}

#define ICOPTS_CTRL_MASK ICOPTS_EN_FLAGFILTER
#define ICOPTS_ICCTRL_MASK                                              \
        (ICOPTS_INTNUM | ICOPTS_EN_SPWIRQ_ON_EE  | ICOPTS_EN_SPWIRQ_ON_IA | \
         ICOPTS_EN_PRIO | ICOPTS_EN_TIMEOUTIRQ | ICOPTS_EN_ACKIRQ | \
         ICOPTS_EN_TICKOUTIRQ | ICOPTS_EN_RX | ICOPTS_EN_TX | \
         ICOPTS_BASEIRQ)

/* Control Interrupt-code settings of core
 * Write if not pointing to -1, always read current value
 *
 * TODO: A lot of code duplication with grspw_tc_ctrl
 */
void grspw_ic_ctrl(void *d, unsigned int *options)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        unsigned int icctrl;
        IRQFLAGS_TYPE irqflags;

        if (options == NULL)
                return;

        if (*options != -1) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);

                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~GRSPW_CTRL_TF; /* Depends on one to one relation between
                                         * irqopts bits and ctrl bits */
                ctrl |= (*options & ICOPTS_CTRL_MASK) <<
                        (GRSPW_CTRL_TF_BIT - 0);

                icctrl = REG_READ(&regs->icctrl);
                icctrl &= ~ICOPTS_ICCTRL_MASK; /* Depends on one to one relation between
                                                * irqopts bits and icctrl bits */
                icctrl |= *options & ICOPTS_ICCTRL_MASK;

                /* Enable Global IRQ only if some irq source is set */
                if (grspw_is_irqsource_set(ctrl, icctrl))
                        ctrl |= GRSPW_CTRL_IE;
                else
                        ctrl &= ~GRSPW_CTRL_IE;

                REG_WRITE(&regs->ctrl, ctrl);
                REG_WRITE(&regs->icctrl, icctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        }
        *options = ((REG_READ(&regs->ctrl) & ICOPTS_CTRL_MASK) |
                    (REG_READ(&regs->icctrl) & ICOPTS_ICCTRL_MASK));
}

void grspw_ic_config(void *d, int rw, struct spwpkt_ic_config *cfg)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        if (!cfg)
                return;

        if (rw & 1) {
                REG_WRITE(&regs->ictickomask, cfg->tomask);
                REG_WRITE(&regs->icaamask, cfg->aamask);
                REG_WRITE(&regs->icrlpresc, cfg->scaler);
                REG_WRITE(&regs->icrlisr, cfg->isr_reload);
                REG_WRITE(&regs->icrlintack, cfg->ack_reload);
        }
        if (rw & 2) {
                cfg->tomask = REG_READ(&regs->ictickomask);
                cfg->aamask = REG_READ(&regs->icaamask);
                cfg->scaler = REG_READ(&regs->icrlpresc);
                cfg->isr_reload = REG_READ(&regs->icrlisr);
                cfg->ack_reload = REG_READ(&regs->icrlintack);
        }
}

/* Read or Write Interrupt-code status registers */
void grspw_ic_sts(void *d, unsigned int *rxirq, unsigned int *rxack, unsigned int *intto)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        /* No locking needed since the status bits are clear-on-write */

        if (rxirq) {
                if (*rxirq != 0)
                        REG_WRITE(&regs->icrx, *rxirq);
                else
                        *rxirq = REG_READ(&regs->icrx);
        }

        if (rxack) {
                if (*rxack != 0)
                        REG_WRITE(&regs->icack, *rxack);
                else
                        *rxack = REG_READ(&regs->icack);
        }

        if (intto) {
                if (*intto != 0)
                        REG_WRITE(&regs->ictimeout, *intto);
                else
                        *intto = REG_READ(&regs->ictimeout);
        }
}

/* Assign handler function to Interrupt-code tick out IRQ */
void grspw_ic_isr(void *d, spwpkt_ic_isr_t handler, void *data)
{
        struct grspw_priv *priv = d;

        priv->icisr_arg = data;
        priv->icisr = handler;
}
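
/* Illustrative sketch (not part of the driver): hook the Interrupt-code
 * tick-out ISR and enable tick-out IRQ generation. The handler signature
 * follows spwpkt_ic_isr_t; my_ic_isr is a hypothetical callback run in ISR
 * context and ICOPTS_EN_TICKOUTIRQ is taken from the mask above.
 *
 *   unsigned int icopts = ICOPTS_EN_RX | ICOPTS_EN_TX | ICOPTS_EN_TICKOUTIRQ;
 *
 *   grspw_ic_isr(dev, my_ic_isr, NULL);
 *   grspw_ic_ctrl(dev, &icopts);
 */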

/* Set (not -1) and/or read RMAP options. */
int grspw_rmap_ctrl(void *d, int *options, int *dstkey)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        if (dstkey) {
                if (*dstkey != -1)
                        REG_WRITE(&regs->destkey, *dstkey & GRSPW_DK_DESTKEY);
                *dstkey = REG_READ(&regs->destkey) & GRSPW_DK_DESTKEY;
        }
        if (options) {
                if (*options != -1) {
                        if ((*options & RMAPOPTS_EN_RMAP) && !priv->hwsup.rmap)
                                return -1;

                        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                        ctrl = REG_READ(&regs->ctrl);
                        ctrl &= ~(GRSPW_CTRL_RE|GRSPW_CTRL_RD);
                        ctrl |= (*options & 0x3) << GRSPW_CTRL_RE_BIT;
                        REG_WRITE(&regs->ctrl, ctrl);
                        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
                }
                *options = (REG_READ(&regs->ctrl) >> GRSPW_CTRL_RE_BIT) & 0x3;
        }

        return 0;
}
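
/* Illustrative sketch (not part of the driver): enable the RMAP target with
 * destination key 0x20. RMAPOPTS_EN_RMAP is the option tested above; the
 * call fails if the core lacks RMAP support.
 *
 *   int opts = RMAPOPTS_EN_RMAP;
 *   int key = 0x20;
 *
 *   if (grspw_rmap_ctrl(dev, &opts, &key) != 0)
 *           ...RMAP not supported by this core...
 */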

void grspw_rmap_support(void *d, char *rmap, char *rmap_crc)
{
        struct grspw_priv *priv = d;

        if (rmap)
                *rmap = priv->hwsup.rmap;
        if (rmap_crc)
                *rmap_crc = priv->hwsup.rmap_crc;
}

/* Select port:
 * -1     = The currently selected port is returned
 * 0      = Port 0
 * 1      = Port 1
 * Others = Both Port 0 and Port 1
 */
int grspw_port_ctrl(void *d, int *port)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        if (port == NULL)
                return -1;

        if ((*port == 1) || (*port == 0)) {
                /* Select port user selected */
                if ((*port == 1) && (priv->hwsup.nports < 2))
                        return -1; /* Changing to Port 1, but only one port available */
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~(GRSPW_CTRL_NP | GRSPW_CTRL_PS);
                ctrl |= (*port & 1) << GRSPW_CTRL_PS_BIT;
                REG_WRITE(&regs->ctrl, ctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        } else if (*port > 1) {
                /* Select both ports */
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_NP);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        }

        /* Get current settings */
        ctrl = REG_READ(&regs->ctrl);
        if (ctrl & GRSPW_CTRL_NP) {
                /* Any port, selected by hardware */
                if (priv->hwsup.nports > 1)
                        *port = 3;
                else
                        *port = 0; /* Port0 the only port available */
        } else {
                *port = (ctrl & GRSPW_CTRL_PS) >> GRSPW_CTRL_PS_BIT;
        }

        return 0;
}
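
/* Illustrative sketch (not part of the driver): select Port 0 and read back
 * the resulting configuration and the currently active port.
 *
 *   int port = 0;
 *
 *   if (grspw_port_ctrl(dev, &port) == 0)
 *           printf("port config: %d, active port: %d\n",
 *                  port, grspw_port_active(dev));
 */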

/* Returns number of ports available in hardware */
int grspw_port_count(void *d)
{
        struct grspw_priv *priv = d;

        return priv->hwsup.nports;
}

/* Current active port: 0 or 1 */
int grspw_port_active(void *d)
{
        struct grspw_priv *priv = d;
        unsigned int status;

        status = REG_READ(&priv->regs->status);

        return (status & GRSPW_STS_AP) >> GRSPW_STS_AP_BIT;
}

void grspw_stats_read(void *d, struct grspw_core_stats *sts)
{
        struct grspw_priv *priv = d;

        if (sts == NULL)
                return;
        memcpy(sts, &priv->stats, sizeof(priv->stats));
}

void grspw_stats_clr(void *d)
{
        struct grspw_priv *priv = d;

        /* Clear most of the statistics */
        memset(&priv->stats, 0, sizeof(priv->stats));
}
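
/* Illustrative sketch (not part of the driver): snapshot and then reset the
 * core statistics. struct grspw_core_stats is declared in grspw_pkt.h.
 *
 *   struct grspw_core_stats sts;
 *
 *   grspw_stats_read(dev, &sts);
 *   grspw_stats_clr(dev);
 */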

/*** DMA Interface ***/

/* Initialize the RX and TX Descriptor Ring, empty of packets */
STATIC void grspw_bdrings_init(struct grspw_dma_priv *dma)
{
        struct grspw_ring *r;
        int i;

        /* Empty BD rings */
        dma->rx_ring_head = dma->rx_ring_base;
        dma->rx_ring_tail = dma->rx_ring_base;
        dma->tx_ring_head = dma->tx_ring_base;
        dma->tx_ring_tail = dma->tx_ring_base;

        /* Init RX Descriptors */
        r = (struct grspw_ring *)dma->rx_ring_base;
        for (i=0; i<GRSPW_RXBD_NR; i++) {

                /* Init Ring Entry */
                r[i].next = &r[i+1];
                r[i].bd.rx = &dma->rx_bds[i];
                r[i].pkt = NULL;

                /* Init HW Descriptor */
                BD_WRITE(&r[i].bd.rx->ctrl, 0);
                BD_WRITE(&r[i].bd.rx->addr, 0);
        }
        r[GRSPW_RXBD_NR-1].next = &r[0];

        /* Init TX Descriptors */
        r = (struct grspw_ring *)dma->tx_ring_base;
        for (i=0; i<GRSPW_TXBD_NR; i++) {

                /* Init Ring Entry */
                r[i].next = &r[i+1];
                r[i].bd.tx = &dma->tx_bds[i];
                r[i].pkt = NULL;

                /* Init HW Descriptor */
                BD_WRITE(&r[i].bd.tx->ctrl, 0);
                BD_WRITE(&r[i].bd.tx->haddr, 0);
                BD_WRITE(&r[i].bd.tx->dlen, 0);
                BD_WRITE(&r[i].bd.tx->daddr, 0);
        }
        r[GRSPW_TXBD_NR-1].next = &r[0];
}

/* Try to populate the descriptor ring with as many unused READY packet
 * buffers as possible. The packets assigned to a descriptor are put at the
 * end of the scheduled list.
 *
 * The number of Packets scheduled is returned.
 *
 *  - READY List -> RX-SCHED List
 *  - Descriptors are initialized and enabled for reception
 */
STATIC int grspw_rx_schedule_ready(struct grspw_dma_priv *dma)
{
        int cnt;
        unsigned int ctrl, dmactrl;
        void *hwaddr;
        struct grspw_rxring *curr_bd;
        struct grspw_pkt *curr_pkt, *last_pkt;
        struct grspw_list lst;
        IRQFLAGS_TYPE irqflags;

        /* Is Ready Q empty? */
        if (grspw_list_is_empty(&dma->ready))
                return 0;

        cnt = 0;
        lst.head = curr_pkt = dma->ready.head;
        curr_bd = dma->rx_ring_head;
        while (!curr_bd->pkt) {

                /* Assign Packet to descriptor */
                curr_bd->pkt = curr_pkt;

                /* Prepare descriptor address. */
                hwaddr = curr_pkt->data;
                if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
                        drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
                                         hwaddr, &hwaddr);
                        if (curr_pkt->data == hwaddr) /* translation needed? */
                                curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
                }
                BD_WRITE(&curr_bd->bd->addr, hwaddr);

                ctrl = GRSPW_RXBD_EN;
                if (curr_bd->next == dma->rx_ring_base) {
                        /* Wrap around (only needed when smaller descriptor
                         * table)
                         */
                        ctrl |= GRSPW_RXBD_WR;
                }

                /* Is this Packet going to be an interrupt Packet? */
                if ((--dma->rx_irq_en_cnt_curr) <= 0) {
                        if (dma->cfg.rx_irq_en_cnt == 0) {
                                /* IRQ is disabled. Use a big number to avoid
                                 * hitting zero too often
                                 */
                                dma->rx_irq_en_cnt_curr = 0x3fffffff;
                        } else {
                                dma->rx_irq_en_cnt_curr = dma->cfg.rx_irq_en_cnt;
                                ctrl |= GRSPW_RXBD_IE;
                        }
                }

                if (curr_pkt->flags & RXPKT_FLAG_IE)
                        ctrl |= GRSPW_RXBD_IE;

                /* Enable descriptor */
                BD_WRITE(&curr_bd->bd->ctrl, ctrl);

                last_pkt = curr_pkt;
                curr_bd = curr_bd->next;
                cnt++;

                /* Get Next Packet from Ready Queue */
                if (curr_pkt == dma->ready.tail) {
                        /* Handled all in ready queue. */
                        curr_pkt = NULL;
                        break;
                }
                curr_pkt = curr_pkt->next;
        }

        /* Have Packets been scheduled? */
        if (cnt > 0) {
                /* Prepare list for insertion/deletion */
                lst.tail = last_pkt;

                /* Remove scheduled packets from ready queue */
                grspw_list_remove_head_list(&dma->ready, &lst);
                dma->ready_cnt -= cnt;
                if (dma->stats.ready_cnt_min > dma->ready_cnt)
                        dma->stats.ready_cnt_min = dma->ready_cnt;

                /* Insert scheduled packets into scheduled queue */
                grspw_list_append_list(&dma->rx_sched, &lst);
                dma->rx_sched_cnt += cnt;
                if (dma->stats.rx_sched_cnt_max < dma->rx_sched_cnt)
                        dma->stats.rx_sched_cnt_max = dma->rx_sched_cnt;

                /* Update RX ring position */
                dma->rx_ring_head = curr_bd;

                /* Make hardware aware of the newly enabled descriptors
                 * We must protect from ISR which writes RI|TI
                 */
                SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
                dmactrl = REG_READ(&dma->regs->ctrl);
                dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
                dmactrl |= GRSPW_DMACTRL_RE | GRSPW_DMACTRL_RD;
                REG_WRITE(&dma->regs->ctrl, dmactrl);
                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
        }

        return cnt;
}

/* Scans the RX descriptor table for scheduled Packets that have been
 * received, and moves these Packets from the head of the scheduled queue to
 * the tail of the recv queue.
 *
 * Also, for all packets the status is updated.
 *
 *  - RX-SCHED List -> RECV List
 *
 * Return Value
 * Number of packets moved
 */
STATIC int grspw_rx_process_scheduled(struct grspw_dma_priv *dma)
{
        struct grspw_rxring *curr;
        struct grspw_pkt *last_pkt;
        int recv_pkt_cnt = 0;
        unsigned int ctrl;
        struct grspw_list lst;

        curr = dma->rx_ring_tail;

        /* Step into RX ring to find if packets have been scheduled for
         * reception.
         */
        if (!curr->pkt)
                return 0; /* No scheduled packets, thus no received, abort */

        /* Packets have been scheduled ==> scheduled Packets may have been
         * received and need to be collected into the RECV List.
         *
         * A temporary list "lst" with all received packets is created.
         */
        lst.head = curr->pkt;

        /* Loop until the first enabled "unreceived" SpW Packet is found.
         * An unused descriptor is indicated by an unassigned pkt field.
         */
        while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_RXBD_EN)) {
                /* Handle one received Packet */

                /* Remember last handled Packet so that insertion/removal from
                 * Packet lists go fast.
                 */
                last_pkt = curr->pkt;

                /* Get Length of Packet in bytes, and reception options */
                last_pkt->dlen = (ctrl & GRSPW_RXBD_LEN) >> GRSPW_RXBD_LEN_BIT;

                /* Set flags to indicate error(s) and CRC information,
                 * and Mark Received.
                 */
                last_pkt->flags = (last_pkt->flags & ~RXPKT_FLAG_OUTPUT_MASK) |
                                  ((ctrl >> 20) & RXPKT_FLAG_OUTPUT_MASK) |
                                  RXPKT_FLAG_RX;

                /* Packet was Truncated? */
                if (ctrl & GRSPW_RXBD_TR)
                        dma->stats.rx_err_trunk++;

                /* Error End-Of-Packet? */
                if (ctrl & GRSPW_RXBD_EP)
                        dma->stats.rx_err_endpkt++;
                curr->pkt = NULL; /* Mark descriptor unused */

                /* Increment */
                curr = curr->next;
                recv_pkt_cnt++;
        }

        /* 1. Remove all handled packets from scheduled queue
         * 2. Put all handled packets into recv queue
         */
        if (recv_pkt_cnt > 0) {

                /* Update Stats, Number of Received Packets */
                dma->stats.rx_pkts += recv_pkt_cnt;

                /* Save RX ring position */
                dma->rx_ring_tail = curr;

                /* Prepare list for insertion/deletion */
                lst.tail = last_pkt;

                /* Remove received Packets from RX-SCHED queue */
                grspw_list_remove_head_list(&dma->rx_sched, &lst);
                dma->rx_sched_cnt -= recv_pkt_cnt;
                if (dma->stats.rx_sched_cnt_min > dma->rx_sched_cnt)
                        dma->stats.rx_sched_cnt_min = dma->rx_sched_cnt;

                /* Insert received Packets into RECV queue */
                grspw_list_append_list(&dma->recv, &lst);
                dma->recv_cnt += recv_pkt_cnt;
                if (dma->stats.recv_cnt_max < dma->recv_cnt)
                        dma->stats.recv_cnt_max = dma->recv_cnt;
        }

        return recv_pkt_cnt;
}

/* Try to populate the descriptor ring with as many SEND packets as possible.
 * The packets assigned to a descriptor are put at the end of
 * the scheduled list.
 *
 * The number of Packets scheduled is returned.
 *
 *  - SEND List -> TX-SCHED List
 *  - Descriptors are initialized and enabled for transmission
 */
STATIC int grspw_tx_schedule_send(struct grspw_dma_priv *dma)
{
        int cnt;
        unsigned int ctrl, dmactrl;
        void *hwaddr;
        struct grspw_txring *curr_bd;
        struct grspw_pkt *curr_pkt, *last_pkt;
        struct grspw_list lst;
        IRQFLAGS_TYPE irqflags;

1436        /* Is the SEND Q empty? */
1437        if (grspw_list_is_empty(&dma->send))
1438                return 0;
1439
1440        cnt = 0;
1441        lst.head = curr_pkt = dma->send.head;
1442        curr_bd = dma->tx_ring_head;
1443        while (!curr_bd->pkt) {
1444
1445                /* Assign Packet to descriptor */
1446                curr_bd->pkt = curr_pkt;
1447
1448                /* Set up header transmission */
1449                if (curr_pkt->hdr && curr_pkt->hlen) {
1450                        hwaddr = curr_pkt->hdr;
1451                        if (curr_pkt->flags & PKT_FLAG_TR_HDR) {
1452                                drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
1453                                                 hwaddr, &hwaddr);
1454                                /* translation needed? */
1455                                if (curr_pkt->hdr == hwaddr)
1456                                        curr_pkt->flags &= ~PKT_FLAG_TR_HDR;
1457                        }
1458                        BD_WRITE(&curr_bd->bd->haddr, hwaddr);
1459                        ctrl = GRSPW_TXBD_EN | curr_pkt->hlen;
1460                } else {
1461                        ctrl = GRSPW_TXBD_EN;
1462                }
1463                /* Enable IRQ generation and CRC options as specified
1464                 * by user.
1465                 */
1466                ctrl |= (curr_pkt->flags & TXPKT_FLAG_INPUT_MASK) << 8;
1467
1468                if (curr_bd->next == dma->tx_ring_base) {
1469                        /* Wrap around (only needed when using a smaller descriptor table) */
1470                        ctrl |= GRSPW_TXBD_WR;
1471                }
1472
1473                /* Is this Packet going to be an interrupt Packet? */
1474                if ((--dma->tx_irq_en_cnt_curr) <= 0) {
1475                        if (dma->cfg.tx_irq_en_cnt == 0) {
1476                                /* IRQ is disabled.
1477                                 * A big number to avoid equal to zero too often
1478                                 */
1479                                dma->tx_irq_en_cnt_curr = 0x3fffffff;
1480                        } else {
1481                                dma->tx_irq_en_cnt_curr = dma->cfg.tx_irq_en_cnt;
1482                                ctrl |= GRSPW_TXBD_IE;
1483                        }
1484                }
1485
1486                /* Prepare descriptor data address. Parts of CTRL are written
1487                 * to DLEN for debug purposes only (CTRL is cleared by HW).
1488                 */
1489                if (curr_pkt->data && curr_pkt->dlen) {
1490                        hwaddr = curr_pkt->data;
1491                        if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
1492                                drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
1493                                                 hwaddr, &hwaddr);
1494                                /* translation needed? */
1495                                if (curr_pkt->data == hwaddr)
1496                                        curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
1497                        }
1498                        BD_WRITE(&curr_bd->bd->daddr, hwaddr);
1499                        BD_WRITE(&curr_bd->bd->dlen, curr_pkt->dlen |
1500                                                     ((ctrl & 0x3f000) << 12));
1501                } else {
1502                        BD_WRITE(&curr_bd->bd->daddr, 0);
1503                        BD_WRITE(&curr_bd->bd->dlen, ((ctrl & 0x3f000) << 12));
1504                }
1505
1506                /* Enable descriptor */
1507                BD_WRITE(&curr_bd->bd->ctrl, ctrl);
1508
1509                last_pkt = curr_pkt;
1510                curr_bd = curr_bd->next;
1511                cnt++;
1512
1513                /* Get Next Packet from the SEND queue */
1514                if (curr_pkt == dma->send.tail) {
1515                        /* Handled all in the SEND queue. */
1516                        curr_pkt = NULL;
1517                        break;
1518                }
1519                curr_pkt = curr_pkt->next;
1520        }
1521
1522        /* Have Packets been scheduled? */
1523        if (cnt > 0) {
1524                /* Prepare list for insertion/deletion */
1525                lst.tail = last_pkt;
1526
1527                /* Remove scheduled packets from the SEND queue */
1528                grspw_list_remove_head_list(&dma->send, &lst);
1529                dma->send_cnt -= cnt;
1530                if (dma->stats.send_cnt_min > dma->send_cnt)
1531                        dma->stats.send_cnt_min = dma->send_cnt;
1532
1533                /* Insert scheduled packets into scheduled queue */
1534                grspw_list_append_list(&dma->tx_sched, &lst);
1535                dma->tx_sched_cnt += cnt;
1536                if (dma->stats.tx_sched_cnt_max < dma->tx_sched_cnt)
1537                        dma->stats.tx_sched_cnt_max = dma->tx_sched_cnt;
1538
1539                /* Update TX ring position */
1540                dma->tx_ring_head = curr_bd;
1541
1542                /* Make hardware aware of the newly enabled descriptors */
1543                SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
1544                dmactrl = REG_READ(&dma->regs->ctrl);
1545                dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
1546                dmactrl |= GRSPW_DMACTRL_TE;
1547                REG_WRITE(&dma->regs->ctrl, dmactrl);
1548                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
1549        }
1550        return cnt;
1551}
1552
1553/* Scans the TX descriptor table for transmitted packets, and moves these
1554 * packets from the head of the scheduled queue to the tail of the sent queue.
1555 *
1556 * Also, for all packets the status is updated.
1557 *
1558 *  - SCHED List -> SENT List
1559 *
1560 * Return Value
1561 * Number of packets moved
1562 */
1563STATIC int grspw_tx_process_scheduled(struct grspw_dma_priv *dma)
1564{
1565        struct grspw_txring *curr;
1566        struct grspw_pkt *last_pkt;
1567        int sent_pkt_cnt = 0;
1568        unsigned int ctrl;
1569        struct grspw_list lst;
1570
1571        curr = dma->tx_ring_tail;
1572
1573        /* Step into TX ring to find if packets have been scheduled for
1574         * transmission.
1575         */
1576        if (!curr->pkt)
1577                return 0; /* No scheduled packets, thus none sent; abort */
1578
1579        /* Packets have been scheduled ==> scheduled packets may have been
1580         * transmitted and need to be collected into the SENT List.
1581         *
1582         * A temporary list "lst" with all sent packets is created.
1583         */
1584        lst.head = curr->pkt;
1585
1586        /* Loop until first enabled "un-transmitted" SpW Packet is found.
1587         * An unused descriptor is indicated by an unassigned pkt field.
1588         */
1589        while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_TXBD_EN)) {
1590                /* Handle one sent Packet */
1591
1592                /* Remember last handled Packet so that insertion/removal from
1593                 * packet lists go fast.
1594                 */
1595                last_pkt = curr->pkt;
1596
1597                /* Set flags to indicate error(s) and Mark Sent.
1598                 */
1599                last_pkt->flags = (last_pkt->flags & ~TXPKT_FLAG_OUTPUT_MASK) |
1600                                        (ctrl & TXPKT_FLAG_LINKERR) |
1601                                        TXPKT_FLAG_TX;
1602
1603                /* Sent packet experienced link error? */
1604                if (ctrl & GRSPW_TXBD_LE)
1605                        dma->stats.tx_err_link++;
1606
1607                curr->pkt = NULL; /* Mark descriptor unused */
1608
1609                /* Increment */
1610                curr = curr->next;
1611                sent_pkt_cnt++;
1612        }
1613
1614        /* 1. Remove all handled packets from TX-SCHED queue
1615         * 2. Put all handled packets into SENT queue
1616         */
1617        if (sent_pkt_cnt > 0) {
1618                /* Update Stats, Number of Transmitted Packets */
1619                dma->stats.tx_pkts += sent_pkt_cnt;
1620
1621                /* Save TX ring position */
1622                dma->tx_ring_tail = curr;
1623
1624                /* Prepare list for insertion/deletion */
1625                lst.tail = last_pkt;
1626
1627                /* Remove sent packets from TX-SCHED queue */
1628                grspw_list_remove_head_list(&dma->tx_sched, &lst);
1629                dma->tx_sched_cnt -= sent_pkt_cnt;
1630                if (dma->stats.tx_sched_cnt_min > dma->tx_sched_cnt)
1631                        dma->stats.tx_sched_cnt_min = dma->tx_sched_cnt;
1632
1633                /* Insert sent packets into the SENT queue */
1634                grspw_list_append_list(&dma->sent, &lst);
1635                dma->sent_cnt += sent_pkt_cnt;
1636                if (dma->stats.sent_cnt_max < dma->sent_cnt)
1637                        dma->stats.sent_cnt_max = dma->sent_cnt;
1638        }
1639
1640        return sent_pkt_cnt;
1641}
1642
1643void *grspw_dma_open(void *d, int chan_no)
1644{
1645        struct grspw_priv *priv = d;
1646        struct grspw_dma_priv *dma;
1647        int size;
1648
[3395ca99]1649        if ((chan_no < 0) || (priv->hwsup.ndma_chans <= chan_no))
[0f49c0e]1650                return NULL;
1651
1652        dma = &priv->dma[chan_no];
1653
1654        /* Take GRSPW lock */
1655        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1656            != RTEMS_SUCCESSFUL)
1657                return NULL;
1658
1659        if (dma->open) {
1660                dma = NULL;
1661                goto out;
1662        }
1663
1664        dma->started = 0;
1665
1666        /* Set Default Configuration:
1667         *
1668         *  - MAX RX Packet Length = DEFAULT_RXMAX
1669         *  - Disable RX and TX IRQ generation
1670         *  - No spilling of packets (DMAFLAG_NO_SPILL)
1671         */
1672        dma->cfg.rxmaxlen = DEFAULT_RXMAX;
1673        dma->cfg.rx_irq_en_cnt = 0;
1674        dma->cfg.tx_irq_en_cnt = 0;
1675        dma->cfg.flags = DMAFLAG_NO_SPILL;
1676
[57e1f4c3]1677        /* set to NULL so that error exit works correctly */
[0d31dcc]1678        dma->sem_rxdma = RTEMS_ID_NONE;
1679        dma->sem_txdma = RTEMS_ID_NONE;
[57e1f4c3]1680        dma->rx_wait.sem_wait = RTEMS_ID_NONE;
1681        dma->tx_wait.sem_wait = RTEMS_ID_NONE;
1682        dma->rx_ring_base = NULL;
1683
[0f49c0e]1684        /* DMA Channel RX and TX lock semaphores created with count = 1 */
1685        if (rtems_semaphore_create(
[0d31dcc]1686            rtems_build_name('S', 'D', '0' + priv->index, '0' + chan_no*2), 1,
[0f49c0e]1687            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1688            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
[0d31dcc]1689            RTEMS_NO_PRIORITY_CEILING, 0, &dma->sem_rxdma) != RTEMS_SUCCESSFUL) {
1690                dma->sem_rxdma = RTEMS_ID_NONE;
1691                goto err;
1692        }
1693        if (rtems_semaphore_create(
1694            rtems_build_name('S', 'D', '0' + priv->index, '0' + chan_no*2+1), 1,
1695            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1696            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1697            RTEMS_NO_PRIORITY_CEILING, 0, &dma->sem_txdma) != RTEMS_SUCCESSFUL) {
1698                dma->sem_txdma = RTEMS_ID_NONE;
[57e1f4c3]1699                goto err;
[0f49c0e]1700        }
1701
1702        /* Allocate memory for the two descriptor rings */
1703        size = sizeof(struct grspw_ring) * (GRSPW_RXBD_NR + GRSPW_TXBD_NR);
1704        dma->rx_ring_base = (struct grspw_rxring *)malloc(size);
1705        dma->tx_ring_base = (struct grspw_txring *)&dma->rx_ring_base[GRSPW_RXBD_NR];
[57e1f4c3]1706        if (dma->rx_ring_base == NULL)
1707                goto err;
[0f49c0e]1708
1709        /* Create DMA RX and TX Channel wait semaphores with count = 0 */
1710        if (rtems_semaphore_create(
1711            rtems_build_name('S', 'R', '0' + priv->index, '0' + chan_no), 0,
1712            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1713            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1714            RTEMS_NO_PRIORITY_CEILING, 0, &dma->rx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
[57e1f4c3]1715                dma->rx_wait.sem_wait = RTEMS_ID_NONE;
1716                goto err;
[0f49c0e]1717        }
1718        if (rtems_semaphore_create(
1719            rtems_build_name('S', 'T', '0' + priv->index, '0' + chan_no), 0,
1720            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1721            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1722            RTEMS_NO_PRIORITY_CEILING, 0, &dma->tx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
[57e1f4c3]1723                dma->tx_wait.sem_wait = RTEMS_ID_NONE;
1724                goto err;
[0f49c0e]1725        }
1726
1727        /* Reset software structures */
1728        grspw_dma_reset(dma);
1729
1730        /* Take the device */
1731        dma->open = 1;
1732out:
1733        /* Return GRSPW Lock */
1734        rtems_semaphore_release(grspw_sem);
1735
1736        return dma;
[57e1f4c3]1737
1738        /* initialization error happened */
1739err:
[0d31dcc]1740        if (dma->sem_rxdma != RTEMS_ID_NONE)
1741                rtems_semaphore_delete(dma->sem_rxdma);
1742        if (dma->sem_txdma != RTEMS_ID_NONE)
1743                rtems_semaphore_delete(dma->sem_txdma);
[57e1f4c3]1744        if (dma->rx_wait.sem_wait != RTEMS_ID_NONE)
1745                rtems_semaphore_delete(dma->rx_wait.sem_wait);
1746        if (dma->tx_wait.sem_wait != RTEMS_ID_NONE)
1747                rtems_semaphore_delete(dma->tx_wait.sem_wait);
1748        if (dma->rx_ring_base)
1749                free(dma->rx_ring_base);
1750        dma = NULL;
1751        goto out;
[0f49c0e]1752}
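
/* Usage sketch (illustrative only, not part of the driver): "dev" is assumed
 * to be a device handle previously returned by grspw_open(). DMA channel 0
 * is opened, used and closed again. Note that grspw_dma_close() fails as
 * long as the channel is started or has blocked waiters:
 *
 *      void *dma = grspw_dma_open(dev, 0);
 *      if (dma == NULL)
 *              return;              (bad channel, already open or lock error)
 *      ...
 *      grspw_dma_stop(dma);
 *      if (grspw_dma_close(dma) != 0)
 *              ...                  (channel still in use)
 */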
1753
1754/* Initialize Software Structures:
1755 *  - Clear all Queues
1756 *  - init BD ring
1757 *  - init IRQ counter
1758 *  - clear statistics counters
1759 *  - init wait structures and semaphores
1760 */
1761STATIC void grspw_dma_reset(struct grspw_dma_priv *dma)
1762{
1763        /* Empty RX and TX queues */
1764        grspw_list_clr(&dma->ready);
1765        grspw_list_clr(&dma->rx_sched);
1766        grspw_list_clr(&dma->recv);
1767        grspw_list_clr(&dma->send);
1768        grspw_list_clr(&dma->tx_sched);
1769        grspw_list_clr(&dma->sent);
1770        dma->ready_cnt = 0;
1771        dma->rx_sched_cnt = 0;
1772        dma->recv_cnt = 0;
1773        dma->send_cnt = 0;
1774        dma->tx_sched_cnt = 0;
1775        dma->sent_cnt = 0;
1776
1777        dma->rx_irq_en_cnt_curr = 0;
1778        dma->tx_irq_en_cnt_curr = 0;
1779
1780        grspw_bdrings_init(dma);
1781
1782        dma->rx_wait.waiting = 0;
1783        dma->tx_wait.waiting = 0;
1784
1785        grspw_dma_stats_clr(dma);
1786}
1787
[eb5a42f6]1788int grspw_dma_close(void *c)
[0f49c0e]1789{
1790        struct grspw_dma_priv *dma = c;
1791
1792        if (!dma->open)
[eb5a42f6]1793                return 0;
[0f49c0e]1794
1795        /* Take device lock - Wait until we get semaphore */
[0d31dcc]1796        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]1797            != RTEMS_SUCCESSFUL)
[eb5a42f6]1798                return -1;
[0d31dcc]1799        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1800            != RTEMS_SUCCESSFUL) {
1801                rtems_semaphore_release(dma->sem_rxdma);
1802                return -1;
1803        }
[0f49c0e]1804
[eb5a42f6]1805        /* Cannot close an active DMA channel. The user must stop DMA and make
1806         * sure no threads are active/blocked within the driver.
1807         */
1808        if (dma->started || dma->rx_wait.waiting || dma->tx_wait.waiting) {
[0d31dcc]1809                rtems_semaphore_release(dma->sem_txdma);
1810                rtems_semaphore_release(dma->sem_rxdma);
[eb5a42f6]1811                return 1;
1812        }
[0f49c0e]1813
1814        /* Free resources */
1815        rtems_semaphore_delete(dma->rx_wait.sem_wait);
1816        rtems_semaphore_delete(dma->tx_wait.sem_wait);
[57e1f4c3]1817        /* Release and delete lock. Operations requiring lock will fail */
[0d31dcc]1818        rtems_semaphore_delete(dma->sem_txdma);
1819        rtems_semaphore_delete(dma->sem_rxdma);
1820        dma->sem_txdma = RTEMS_ID_NONE;
1821        dma->sem_rxdma = RTEMS_ID_NONE;
[0f49c0e]1822
1823        /* Free memory */
1824        if (dma->rx_ring_base)
1825                free(dma->rx_ring_base);
1826        dma->rx_ring_base = NULL;
1827        dma->tx_ring_base = NULL;
1828
1829        dma->open = 0;
[eb5a42f6]1830        return 0;
[0f49c0e]1831}
1832
[72ec13ef]1833unsigned int grspw_dma_enable_int(void *c, int rxtx, int force)
1834{
1835        struct grspw_dma_priv *dma = c;
1836        int rc = 0;
1837        unsigned int ctrl, ctrl_old;
1838        IRQFLAGS_TYPE irqflags;
1839
1840        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
1841        if (dma->started == 0) {
1842                rc = 1; /* DMA stopped */
1843                goto out;
1844        }
1845        ctrl = REG_READ(&dma->regs->ctrl);
1846        ctrl_old = ctrl;
1847
1848        /* Read/Write DMA error ? */
1849        if (ctrl & GRSPW_DMA_STATUS_ERROR) {
1850                rc = 2; /* DMA error */
1851                goto out;
1852        }
1853
1854        /* DMA has finished a TX/RX packet and user wants work-task to
1855         * take care of DMA table processing.
1856         */
1857        ctrl &= ~GRSPW_DMACTRL_AT;
1858
1859        if ((rxtx & 1) == 0)
1860                ctrl &= ~GRSPW_DMACTRL_PR;
1861        else if (force || ((dma->cfg.rx_irq_en_cnt != 0) ||
1862                 (dma->cfg.flags & DMAFLAG2_RXIE)))
1863                ctrl |= GRSPW_DMACTRL_RI;
1864
1865        if ((rxtx & 2) == 0)
1866                ctrl &= ~GRSPW_DMACTRL_PS;
1867        else if (force || ((dma->cfg.tx_irq_en_cnt != 0) ||
1868                 (dma->cfg.flags & DMAFLAG2_TXIE)))
1869                ctrl |= GRSPW_DMACTRL_TI;
1870
1871        REG_WRITE(&dma->regs->ctrl, ctrl);
1872        /* Report back the RX/TX status bits (PR/PS) that were previously set */
1873        rc = ctrl_old & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS);
1874out:
1875        SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
1876        return rc;
1877}
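
/* Example sketch (illustrative): a custom work-task that has processed the
 * RX and TX descriptor tables would typically re-enable both IRQ sources
 * (rxtx = 1|2 = 3) without forcing them on when IRQ generation is disabled
 * in the DMA configuration:
 *
 *      switch (grspw_dma_enable_int(dma, 3, 0)) {
 *      case 1:  ...                 (DMA stopped)
 *      case 2:  ...                 (DMA error)
 *      default: ...                 (IRQs re-enabled as configured)
 *      }
 */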
1878
[0f49c0e]1879/* Schedule a list of packets for transmission at some point in
1880 * the future.
1881 *
1882 * 1. Move transmitted packets to SENT List (SCHED->SENT)
1883 * 2. Add the requested packets to the SEND List (USER->SEND)
1884 * 3. Schedule as many packets as possible (SEND->SCHED)
1885 */
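/* Example sketch (illustrative, not part of the driver): queue a single
 * application-owned packet "pkt" (hdr/hlen and data/dlen already set up) on
 * channel "dma" and let the driver schedule it onto the descriptor ring in
 * the same call (opts=0):
 *
 *      struct grspw_list lst;
 *
 *      pkt->next = NULL;
 *      lst.head = lst.tail = pkt;
 *      if (grspw_dma_tx_send(dma, 0, &lst, 1) != 0)
 *              ...                  (DMA stopped or lock error)
 */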
1886int grspw_dma_tx_send(void *c, int opts, struct grspw_list *pkts, int count)
1887{
1888        struct grspw_dma_priv *dma = c;
1889        int ret;
1890
1891        /* Take DMA channel lock */
[0d31dcc]1892        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]1893            != RTEMS_SUCCESSFUL)
1894                return -1;
1895
1896        if (dma->started == 0) {
1897                ret = 1; /* signal DMA has been stopped */
1898                goto out;
1899        }
1900        ret = 0;
1901
1902        /* 1. Move transmitted packets to SENT List (SCHED->SENT) */
1903        if ((opts & 1) == 0)
1904                grspw_tx_process_scheduled(dma);
1905
1906        /* 2. Add the requested packets to the SEND List (USER->SEND) */
[ef94150f]1907        if (pkts && (count > 0)) {
[0f49c0e]1908                grspw_list_append_list(&dma->send, pkts);
1909                dma->send_cnt += count;
1910                if (dma->stats.send_cnt_max < dma->send_cnt)
1911                        dma->stats.send_cnt_max = dma->send_cnt;
1912        }
1913
1914        /* 3. Schedule as many packets as possible (SEND->SCHED) */
1915        if ((opts & 2) == 0)
1916                grspw_tx_schedule_send(dma);
1917
1918out:
1919        /* Unlock DMA channel */
[0d31dcc]1920        rtems_semaphore_release(dma->sem_txdma);
[0f49c0e]1921
1922        return ret;
1923}
1924
1925int grspw_dma_tx_reclaim(void *c, int opts, struct grspw_list *pkts, int *count)
1926{
1927        struct grspw_dma_priv *dma = c;
1928        struct grspw_pkt *pkt, *lastpkt;
1929        int cnt, started;
1930
1931        /* Take DMA channel lock */
[0d31dcc]1932        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]1933            != RTEMS_SUCCESSFUL)
1934                return -1;
1935
1936        /* 1. Move transmitted packets to SENT List (SCHED->SENT) */
1937        started = dma->started;
1938        if ((started > 0) && ((opts & 1) == 0))
1939                grspw_tx_process_scheduled(dma);
1940
1941        /* 2. Move all/count SENT packets to the caller's list (SENT->USER) */
1942        if (pkts) {
1943                if ((count == NULL) || (*count == -1) ||
1944                    (*count >= dma->sent_cnt)) {
1945                        /* Move all SENT Packets */
1946                        *pkts = dma->sent;
1947                        grspw_list_clr(&dma->sent);
1948                        if (count)
1949                                *count = dma->sent_cnt;
1950                        dma->sent_cnt = 0;
1951                } else {
1952                        /* Move a number of SENT Packets */
1953                        pkts->head = pkt = lastpkt = dma->sent.head;
1954                        cnt = 0;
1955                        while (cnt < *count) {
1956                                lastpkt = pkt;
1957                                pkt = pkt->next;
1958                                cnt++;
1959                        }
1960                        if (cnt > 0) {
1961                                pkts->tail = lastpkt;
1962                                grspw_list_remove_head_list(&dma->sent, pkts);
1963                                dma->sent_cnt -= cnt;
1964                        } else {
1965                                grspw_list_clr(pkts);
1966                        }
1967                }
1968        } else if (count) {
1969                *count = 0;
1970        }
1971
1972        /* 3. Schedule as many packets as possible (SEND->SCHED) */
[c442647f]1973        if ((started > 0) && ((opts & 2) == 0))
[0f49c0e]1974                grspw_tx_schedule_send(dma);
1975
1976        /* Unlock DMA channel */
[0d31dcc]1977        rtems_semaphore_release(dma->sem_txdma);
[0f49c0e]1978
1979        return (~started) & 1; /* signal DMA has been stopped */
1980}
1981
[1ef9caa2]1982void grspw_dma_tx_count(void *c, int *send, int *sched, int *sent, int *hw)
[0f49c0e]1983{
1984        struct grspw_dma_priv *dma = c;
[1ef9caa2]1985        int sched_cnt, diff;
1986        unsigned int hwbd;
1987        struct grspw_txbd *tailbd;
1988
1989        /* Take device lock - Wait until we get semaphore.
1990         * The lock is taken so that the counters are in sync with each other
1991         * and that DMA descriptor table and tx_ring_tail is not being updated
1992         * during HW counter processing in this function.
1993         */
[0d31dcc]1994        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[1ef9caa2]1995            != RTEMS_SUCCESSFUL)
1996                return;
[0f49c0e]1997
1998        if (send)
1999                *send = dma->send_cnt;
[1ef9caa2]2000        sched_cnt = dma->tx_sched_cnt;
[0f49c0e]2001        if (sched)
[1ef9caa2]2002                *sched = sched_cnt;
[0f49c0e]2003        if (sent)
2004                *sent = dma->sent_cnt;
[1ef9caa2]2005        if (hw) {
2006                /* Calculate number of descriptors (processed by HW) between
2007                 * HW pointer and oldest SW pointer.
2008                 */
2009                hwbd = REG_READ(&dma->regs->txdesc);
2010                tailbd = dma->tx_ring_tail->bd;
2011                diff = ((hwbd - (unsigned int)tailbd) / GRSPW_TXBD_SIZE) &
2012                        (GRSPW_TXBD_NR - 1);
2013                /* Handle special case when HW and SW pointers are equal
2014                 * because all TX descriptors have been processed by HW.
2015                 */
2016                if ((diff == 0) && (sched_cnt == GRSPW_TXBD_NR) &&
2017                    ((BD_READ(&tailbd->ctrl) & GRSPW_TXBD_EN) == 0)) {
2018                        diff = GRSPW_TXBD_NR;
2019                }
2020                *hw = diff;
2021        }
2022
2023        /* Unlock DMA channel */
[0d31dcc]2024        rtems_semaphore_release(dma->sem_txdma);
[0f49c0e]2025}
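
/* Worked example (illustrative numbers, assuming 16-byte TX descriptors and
 * GRSPW_TXBD_NR = 64): with the HW descriptor register at 0x40000430 and the
 * SW tail descriptor at 0x40000400,
 *
 *      diff = ((0x40000430 - 0x40000400) / GRSPW_TXBD_SIZE) & (GRSPW_TXBD_NR - 1)
 *           = (0x30 / 16) & 63 = 3
 *
 * i.e. three descriptors have been processed by hardware since the oldest
 * SW position.
 */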
2026
2027static inline int grspw_tx_wait_eval(struct grspw_dma_priv *dma)
2028{
2029        int send_val, sent_val;
2030
2031        if (dma->tx_wait.send_cnt >= (dma->send_cnt + dma->tx_sched_cnt))
2032                send_val = 1;
2033        else
2034                send_val = 0;
2035
2036        if (dma->tx_wait.sent_cnt <= dma->sent_cnt)
2037                sent_val = 1;
2038        else
2039                sent_val = 0;
2040
2041        /* AND or OR ? */
2042        if (dma->tx_wait.op == 0)
2043                return send_val & sent_val; /* AND */
2044        else
2045                return send_val | sent_val; /* OR */
2046}
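
/* For example, a caller that passed send_cnt=2, op=0 (AND) and sent_cnt=5 is
 * woken up once at most two packets remain in the SEND+SCHED queues and at
 * least five packets are in the SENT queue.
 */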
2047
2048/* Block until the condition "send_cnt or fewer packets are queued in the
2049 * SEND and SCHED queues" op (AND or OR) "sent_cnt or more packets have been
2050 * sent (SENT queue)" is met.
2051 * If a link error occurs and Stop on Link error is configured, this function
2052 * also returns to the caller.
2053 */
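/* Example sketch (illustrative): block for at most 100 clock ticks until all
 * queued packets have left the SEND and SCHED queues. With send_cnt=0 and
 * op=0 (AND), the sent_cnt=0 part of the condition is always true:
 *
 *      int rc = grspw_dma_tx_wait(dma, 0, 0, 0, 100);
 *
 * rc: 0=condition met, 1=DMA stopped, 2=timeout, 3=other waiter, -1=error.
 */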
2054int grspw_dma_tx_wait(void *c, int send_cnt, int op, int sent_cnt, int timeout)
2055{
2056        struct grspw_dma_priv *dma = c;
[9cb7e5d]2057        int ret, rc, initialized = 0;
[0f49c0e]2058
2059        if (timeout == 0)
2060                timeout = RTEMS_NO_TIMEOUT;
2061
2062check_condition:
2063
2064        /* Take DMA channel lock */
[0d31dcc]2065        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]2066            != RTEMS_SUCCESSFUL)
2067                return -1;
2068
2069        /* Check that no other thread is waiting; this driver only supports
2070         * one waiter at a time.
2071         */
[9cb7e5d]2072        if (initialized == 0 && dma->tx_wait.waiting) {
2073                ret = 3;
2074                goto out_release;
[0f49c0e]2075        }
2076
[9cb7e5d]2077        /* Stop if link error or similar (DMA stopped), abort */
[0f49c0e]2078        if (dma->started == 0) {
2079                ret = 1;
[9cb7e5d]2080                goto out_release;
[0f49c0e]2081        }
2082
2083        /* Set up Condition */
2084        dma->tx_wait.send_cnt = send_cnt;
2085        dma->tx_wait.op = op;
2086        dma->tx_wait.sent_cnt = sent_cnt;
2087
2088        if (grspw_tx_wait_eval(dma) == 0) {
2089                /* Prepare Wait */
[9cb7e5d]2090                initialized = 1;
[0f49c0e]2091                dma->tx_wait.waiting = 1;
2092
2093                /* Release DMA channel lock */
[0d31dcc]2094                rtems_semaphore_release(dma->sem_txdma);
[0f49c0e]2095
2096                /* Try to take the Wait lock; if this fails the link may have
2097                 * gone down or the user stopped this DMA channel
2098                 */
2099                rc = rtems_semaphore_obtain(dma->tx_wait.sem_wait, RTEMS_WAIT,
2100                                                timeout);
2101                if (rc == RTEMS_TIMEOUT) {
[9cb7e5d]2102                        ret = 2;
2103                        goto out;
[0f49c0e]2104                } else if (rc == RTEMS_UNSATISFIED ||
2105                           rc == RTEMS_OBJECT_WAS_DELETED) {
[9cb7e5d]2106                        ret = 1; /* sem was flushed/deleted, means DMA stop */
2107                        goto out;
2108                } else if (rc != RTEMS_SUCCESSFUL) {
2109                        /* Unknown Error */
2110                        ret = -1;
2111                        goto out;
2112                } else if (dma->started == 0) {
2113                        ret = 1;
2114                        goto out;
2115                }
[0f49c0e]2116
2117                /* Check condition once more */
2118                goto check_condition;
2119        }
2120
2121        ret = 0;
[9cb7e5d]2122
2123out_release:
[0f49c0e]2124        /* Unlock DMA channel */
[0d31dcc]2125        rtems_semaphore_release(dma->sem_txdma);
[0f49c0e]2126
[9cb7e5d]2127out:
2128        if (initialized)
2129                dma->tx_wait.waiting = 0;
[0f49c0e]2130        return ret;
2131}
2132
2133int grspw_dma_rx_recv(void *c, int opts, struct grspw_list *pkts, int *count)
2134{
2135        struct grspw_dma_priv *dma = c;
2136        struct grspw_pkt *pkt, *lastpkt;
2137        int cnt, started;
2138
2139        /* Take DMA channel lock */
[0d31dcc]2140        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]2141            != RTEMS_SUCCESSFUL)
2142                return -1;
2143
2144        /* 1. Move Scheduled packets to RECV List (SCHED->RECV) */
2145        started = dma->started;
2146        if (((opts & 1) == 0) && (started > 0))
2147                grspw_rx_process_scheduled(dma);
2148
2149        /* 2. Move all/count RECV packets to the caller's list (RECV->USER) */
2150        if (pkts) {
2151                if ((count == NULL) || (*count == -1) ||
2152                    (*count >= dma->recv_cnt)) {
2153                        /* Move all Received packets */
2154                        *pkts = dma->recv;
2155                        grspw_list_clr(&dma->recv);
2156                        if (count)
2157                                *count = dma->recv_cnt;
2158                        dma->recv_cnt = 0;
2159                } else {
2160                        /* Move a number of RECV Packets */
2161                        pkts->head = pkt = lastpkt = dma->recv.head;
2162                        cnt = 0;
2163                        while (cnt < *count) {
2164                                lastpkt = pkt;
2165                                pkt = pkt->next;
2166                                cnt++;
2167                        }
2168                        if (cnt > 0) {
2169                                pkts->tail = lastpkt;
2170                                grspw_list_remove_head_list(&dma->recv, pkts);
2171                                dma->recv_cnt -= cnt;
2172                        } else {
2173                                grspw_list_clr(pkts);
2174                        }
2175                }
2176        } else if (count) {
2177                *count = 0;
2178        }
2179
2180        /* 3. Schedule as many free packet buffers as possible (READY->SCHED) */
2181        if (((opts & 2) == 0) && (started > 0))
2182                grspw_rx_schedule_ready(dma);
2183
2184        /* Unlock DMA channel */
[0d31dcc]2185        rtems_semaphore_release(dma->sem_rxdma);
[0f49c0e]2186
2187        return (~started) & 1;
2188}
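
/* Example sketch (illustrative): fetch up to 8 received packets from the
 * channel into a caller-owned list. On return "count" holds the number of
 * packets actually moved, and a negative return value indicates a lock
 * error:
 *
 *      struct grspw_list lst;
 *      int count = 8;
 *
 *      if (grspw_dma_rx_recv(dma, 0, &lst, &count) < 0)
 *              ...                  (lock error)
 */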
2189
2190int grspw_dma_rx_prepare(void *c, int opts, struct grspw_list *pkts, int count)
2191{
2192        struct grspw_dma_priv *dma = c;
2193        int ret;
2194
2195        /* Take DMA channel lock */
[0d31dcc]2196        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]2197            != RTEMS_SUCCESSFUL)
2198                return -1;
2199
2200        if (dma->started == 0) {
2201                ret = 1;
2202                goto out;
2203        }
2204
2205        /* 1. Move Received packets to RECV List (SCHED->RECV) */
2206        if ((opts & 1) == 0)
2207                grspw_rx_process_scheduled(dma);
2208
2209        /* 2. Add the "free/ready" packet buffers to the READY List (USER->READY) */
2210        if (pkts && (count > 0)) {
2211                grspw_list_append_list(&dma->ready, pkts);
2212                dma->ready_cnt += count;
2213                if (dma->stats.ready_cnt_max < dma->ready_cnt)
2214                        dma->stats.ready_cnt_max = dma->ready_cnt;
2215        }
2216
2217        /* 3. Schedule as many packets as possible (READY->SCHED) */
2218        if ((opts & 2) == 0)
2219                grspw_rx_schedule_ready(dma);
2220
2221        ret = 0;
2222out:
2223        /* Unlock DMA channel */
[0d31dcc]2224        rtems_semaphore_release(dma->sem_rxdma);
[0f49c0e]2225
2226        return ret;
2227}
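
/* Example sketch (illustrative): hand "cnt" empty packet buffers, linked via
 * pkt->next with lst.head/lst.tail set by the caller, back to the driver so
 * they can be scheduled for reception:
 *
 *      if (grspw_dma_rx_prepare(dma, 0, &lst, cnt) != 0)
 *              ...                  (DMA stopped or lock error)
 */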
2228
[1ef9caa2]2229void grspw_dma_rx_count(void *c, int *ready, int *sched, int *recv, int *hw)
[0f49c0e]2230{
2231        struct grspw_dma_priv *dma = c;
[1ef9caa2]2232        int sched_cnt, diff;
2233        unsigned int hwbd;
2234        struct grspw_rxbd *tailbd;
2235
2236        /* Take device lock - Wait until we get semaphore.
2237         * The lock is taken so that the counters are in sync with each other
2238         * and that DMA descriptor table and rx_ring_tail is not being updated
2239         * during HW counter processing in this function.
2240         */
[0d31dcc]2241        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[1ef9caa2]2242            != RTEMS_SUCCESSFUL)
2243                return;
[0f49c0e]2244
2245        if (ready)
2246                *ready = dma->ready_cnt;
[1ef9caa2]2247        sched_cnt = dma->rx_sched_cnt;
[0f49c0e]2248        if (sched)
[1ef9caa2]2249                *sched = sched_cnt;
[0f49c0e]2250        if (recv)
2251                *recv = dma->recv_cnt;
[1ef9caa2]2252        if (hw) {
2253                /* Calculate number of descriptors (processed by HW) between
2254                 * HW pointer and oldest SW pointer.
2255                 */
2256                hwbd = REG_READ(&dma->regs->rxdesc);
2257                tailbd = dma->rx_ring_tail->bd;
2258                diff = ((hwbd - (unsigned int)tailbd) / GRSPW_RXBD_SIZE) &
2259                        (GRSPW_RXBD_NR - 1);
2260                /* Handle special case when HW and SW pointers are equal
2261                 * because all RX descriptors have been processed by HW.
2262                 */
2263                if ((diff == 0) && (sched_cnt == GRSPW_RXBD_NR) &&
2264                    ((BD_READ(&tailbd->ctrl) & GRSPW_RXBD_EN) == 0)) {
2265                        diff = GRSPW_RXBD_NR;
2266                }
2267                *hw = diff;
2268        }
2269
2270        /* Unlock DMA channel */
[0d31dcc]2271        rtems_semaphore_release(dma->sem_rxdma);
[0f49c0e]2272}
2273
2274static inline int grspw_rx_wait_eval(struct grspw_dma_priv *dma)
2275{
2276        int ready_val, recv_val;
2277
2278        if (dma->rx_wait.ready_cnt >= (dma->ready_cnt + dma->rx_sched_cnt))
2279                ready_val = 1;
2280        else
2281                ready_val = 0;
2282
2283        if (dma->rx_wait.recv_cnt <= dma->recv_cnt)
2284                recv_val = 1;
2285        else
2286                recv_val = 0;
2287
2288        /* AND or OR ? */
2289        if (dma->rx_wait.op == 0)
2290                return ready_val & recv_val; /* AND */
2291        else
2292                return ready_val | recv_val; /* OR */
2293}
2294
2295/* Block until the condition "recv_cnt or more packets are queued in the
2296 * RECV queue" op (AND or OR) "ready_cnt or fewer packet buffers are
2297 * available in the READY and SCHED queues" is met.
2298 * If a link error occurs and Stop on Link error is configured, this function
2299 * also returns to the caller, however with an error.
2300 */
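/* Example sketch (illustrative): block for at most 100 clock ticks until at
 * least one packet is available in the RECV queue. With op=1 (OR) and
 * ready_cnt=-1 the READY part of the condition can never be true, so only
 * the RECV part matters:
 *
 *      int rc = grspw_dma_rx_wait(dma, 1, 1, -1, 100);
 *
 * rc: 0=condition met, 1=DMA stopped, 2=timeout, 3=other waiter, -1=error.
 */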
2301int grspw_dma_rx_wait(void *c, int recv_cnt, int op, int ready_cnt, int timeout)
2302{
2303        struct grspw_dma_priv *dma = c;
[9cb7e5d]2304        int ret, rc, initialized = 0;
[0f49c0e]2305
2306        if (timeout == 0)
2307                timeout = RTEMS_NO_TIMEOUT;
2308
2309check_condition:
2310
2311        /* Take DMA channel lock */
[0d31dcc]2312        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]2313            != RTEMS_SUCCESSFUL)
2314                return -1;
2315
2316        /* Check that no other thread is waiting; this driver only supports
2317         * one waiter at a time.
2318         */
[9cb7e5d]2319        if (initialized == 0 && dma->rx_wait.waiting) {
2320                ret = 3;
2321                goto out_release;
[0f49c0e]2322        }
2323
[9cb7e5d]2324        /* Stop if link error or similar (DMA stopped), abort */
[0f49c0e]2325        if (dma->started == 0) {
2326                ret = 1;
[9cb7e5d]2327                goto out_release;
[0f49c0e]2328        }
2329
2330        /* Set up Condition */
2331        dma->rx_wait.recv_cnt = recv_cnt;
2332        dma->rx_wait.op = op;
2333        dma->rx_wait.ready_cnt = ready_cnt;
2334
2335        if (grspw_rx_wait_eval(dma) == 0) {
2336                /* Prepare Wait */
[9cb7e5d]2337                initialized = 1;
[0f49c0e]2338                dma->rx_wait.waiting = 1;
2339
2340                /* Release channel lock */
[0d31dcc]2341                rtems_semaphore_release(dma->sem_rxdma);
[0f49c0e]2342
2343                /* Try to take the Wait lock; if this fails the link may have
2344                 * gone down or the user stopped this DMA channel
2345                 */
2346                rc = rtems_semaphore_obtain(dma->rx_wait.sem_wait, RTEMS_WAIT,
2347                                           timeout);
2348                if (rc == RTEMS_TIMEOUT) {
[9cb7e5d]2349                        ret = 2;
2350                        goto out;
[0f49c0e]2351                } else if (rc == RTEMS_UNSATISFIED ||
2352                           rc == RTEMS_OBJECT_WAS_DELETED) {
[9cb7e5d]2353                        ret = 1; /* sem was flushed/deleted, means DMA stop */
2354                        goto out;
2355                } else if (rc != RTEMS_SUCCESSFUL) {
2356                        /* Unknown Error */
2357                        ret = -1;
2358                        goto out;
2359                } else if (dma->started == 0) {
2360                        ret = 1;
2361                        goto out;
2362                }
[0f49c0e]2363
2364                /* Check condition once more */
2365                goto check_condition;
2366        }
[9cb7e5d]2367
[0f49c0e]2368        ret = 0;
2369
[9cb7e5d]2370out_release:
[0f49c0e]2371        /* Unlock DMA channel */
[0d31dcc]2372        rtems_semaphore_release(dma->sem_rxdma);
[0f49c0e]2373
[9cb7e5d]2374out:
2375        if (initialized)
2376                dma->rx_wait.waiting = 0;
[0f49c0e]2377        return ret;
2378}
2379
2380int grspw_dma_config(void *c, struct grspw_dma_config *cfg)
2381{
2382        struct grspw_dma_priv *dma = c;
2383
2384        if (dma->started || !cfg)
2385                return -1;
2386
[77856f6]2387        if (cfg->flags & ~(DMAFLAG_MASK | DMAFLAG2_MASK))
[0f49c0e]2388                return -1;
2389
2390        /* Update Configuration */
2391        memcpy(&dma->cfg, cfg, sizeof(*cfg));
2392
2393        return 0;
2394}
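
/* Example sketch (values are illustrative): configure a stopped channel for
 * 1024-byte maximum RX packets with an RX IRQ generated for every 8th
 * received packet and no packet spilling:
 *
 *      struct grspw_dma_config cfg;
 *
 *      memset(&cfg, 0, sizeof(cfg));
 *      cfg.rxmaxlen = 1024;
 *      cfg.rx_irq_en_cnt = 8;
 *      cfg.tx_irq_en_cnt = 0;
 *      cfg.flags = DMAFLAG_NO_SPILL;
 *      if (grspw_dma_config(dma, &cfg) != 0)
 *              ...                  (invalid config or channel started)
 */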
2395
2396void grspw_dma_config_read(void *c, struct grspw_dma_config *cfg)
2397{
2398        struct grspw_dma_priv *dma = c;
2399
2400        /* Copy Current Configuration */
2401        memcpy(cfg, &dma->cfg, sizeof(*cfg));
2402}
2403
2404void grspw_dma_stats_read(void *c, struct grspw_dma_stats *sts)
2405{
2406        struct grspw_dma_priv *dma = c;
2407
2408        memcpy(sts, &dma->stats, sizeof(dma->stats));
2409}
2410
2411void grspw_dma_stats_clr(void *c)
2412{
2413        struct grspw_dma_priv *dma = c;
2414
2415        /* Clear most of the statistics */     
2416        memset(&dma->stats, 0, sizeof(dma->stats));
2417
2418        /* Init proper default values so that comparisons will work the
2419         * first time.
2420         */
2421        dma->stats.send_cnt_min = 0x3fffffff;
2422        dma->stats.tx_sched_cnt_min = 0x3fffffff;
2423        dma->stats.ready_cnt_min = 0x3fffffff;
2424        dma->stats.rx_sched_cnt_min = 0x3fffffff;
2425}
2426
2427int grspw_dma_start(void *c)
2428{
2429        struct grspw_dma_priv *dma = c;
2430        struct grspw_dma_regs *dregs = dma->regs;
2431        unsigned int ctrl;
[6ecad1d]2432        IRQFLAGS_TYPE irqflags;
[0f49c0e]2433
2434        if (dma->started)
2435                return 0;
2436
2437        /* Initialize Software Structures:
2438         *  - Clear all Queues
2439         *  - init BD ring
2440         *  - init IRQ counter
2441         *  - clear statistics counters
2442         *  - init wait structures and semaphores
2443         */
2444        grspw_dma_reset(dma);
2445
2446        /* RX&RD and TX are not enabled until the user fills the SEND and
2447         * READY queues with SpaceWire packet buffers. So we do not have to
2448         * worry about IRQs for this channel just yet. However other DMA
2449         * channels may be active.
2450         *
2451         * Some functionality that is not changed during started mode is set up
2452         * once and for all here:
2453         *
2454         *   - RX MAX Packet length
2455         *   - TX Descriptor base address to first BD in TX ring (not enabled)
2456         *   - RX Descriptor base address to first BD in RX ring (not enabled)
2457         *   - IRQs (TX DMA, RX DMA, DMA ERROR)
2458         *   - Strip PID
2459         *   - Strip Address
2460         *   - No Spill
2461         *   - Receiver Enable
2462         *   - disable on link error (LE)
2463         *
2464         * Note that the address register and the address enable bit in DMACTRL
2465         * register must be left untouched, they are configured on a GRSPW
2466         * core level.
2467         *
2468         * Note that the receiver is enabled here, but since descriptors are
2469         * not enabled the GRSPW core may stop/pause RX (if NS bit set) until
2470         * descriptors are enabled or it may ignore RX packets (NS=0) until
2471         * descriptors are enabled (writing RD bit).
2472         */
2473        REG_WRITE(&dregs->txdesc, dma->tx_bds_hwa);
2474        REG_WRITE(&dregs->rxdesc, dma->rx_bds_hwa);
2475
2476        /* MAX Packet length */
2477        REG_WRITE(&dma->regs->rxmax, dma->cfg.rxmaxlen);
2478
2479        ctrl =  GRSPW_DMACTRL_AI | GRSPW_DMACTRL_PS | GRSPW_DMACTRL_PR |
2480                GRSPW_DMACTRL_TA | GRSPW_DMACTRL_RA | GRSPW_DMACTRL_RE |
2481                (dma->cfg.flags & DMAFLAG_MASK) << GRSPW_DMACTRL_NS_BIT;
[49cf776e]2482        if (dma->core->dis_link_on_err & LINKOPTS_DIS_ONERR)
[0f49c0e]2483                ctrl |= GRSPW_DMACTRL_LE;
[77856f6]2484        if (dma->cfg.rx_irq_en_cnt != 0 || dma->cfg.flags & DMAFLAG2_RXIE)
[0f49c0e]2485                ctrl |= GRSPW_DMACTRL_RI;
[77856f6]2486        if (dma->cfg.tx_irq_en_cnt != 0 || dma->cfg.flags & DMAFLAG2_TXIE)
[0f49c0e]2487                ctrl |= GRSPW_DMACTRL_TI;
[6ecad1d]2488        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
2489        ctrl |= REG_READ(&dma->regs->ctrl) & GRSPW_DMACTRL_EN;
[0f49c0e]2490        REG_WRITE(&dregs->ctrl, ctrl);
[6ecad1d]2491        SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
[0f49c0e]2492
2493        dma->started = 1; /* open up other DMA interfaces */
2494
2495        return 0;
2496}
2497
2498STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma)
2499{
2500        IRQFLAGS_TYPE irqflags;
2501
2502        if (dma->started == 0)
2503                return;
2504        dma->started = 0;
2505
2506        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
2507        grspw_hw_dma_stop(dma);
2508        SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2509
2510        /* From here no more packets will be sent, however
2511         * there may still exist scheduled packets that have been
2512         * sent, and packets in the SEND Queue waiting for free
2513         * descriptors. All packets are moved to the SENT Queue
2514         * so that the user may get its buffers back; the user
2515         * must look at the TXPKT_FLAG_TX flag to determine
2516         * whether the packet was sent or not.
2517         */
2518
2519        /* Retrieve all scheduled packets that have been sent */
2520        grspw_tx_process_scheduled(dma);
2521
2522        /* Move un-sent packets in the SEND and SCHED queues to the
2523         * SENT Queue (they are never marked sent).
2524         */
2525        if (!grspw_list_is_empty(&dma->tx_sched)) {
2526                grspw_list_append_list(&dma->sent, &dma->tx_sched);
2527                grspw_list_clr(&dma->tx_sched);
2528                dma->sent_cnt += dma->tx_sched_cnt;
2529                dma->tx_sched_cnt = 0;
2530        }
2531        if (!grspw_list_is_empty(&dma->send)) {
2532                grspw_list_append_list(&dma->sent, &dma->send);
2533                grspw_list_clr(&dma->send);
2534                dma->sent_cnt += dma->send_cnt;
2535                dma->send_cnt = 0;
2536        }
2537
2538        /* Similar for RX */
2539        grspw_rx_process_scheduled(dma);
2540        if (!grspw_list_is_empty(&dma->rx_sched)) {
2541                grspw_list_append_list(&dma->recv, &dma->rx_sched);
2542                grspw_list_clr(&dma->rx_sched);
2543                dma->recv_cnt += dma->rx_sched_cnt;
2544                dma->rx_sched_cnt = 0;
2545        }
2546        if (!grspw_list_is_empty(&dma->ready)) {
2547                grspw_list_append_list(&dma->recv, &dma->ready);
2548                grspw_list_clr(&dma->ready);
2549                dma->recv_cnt += dma->ready_cnt;
2550                dma->ready_cnt = 0;
2551        }
2552
2553        /* Throw out blocked threads */
2554        rtems_semaphore_flush(dma->rx_wait.sem_wait);
2555        rtems_semaphore_flush(dma->tx_wait.sem_wait);
2556}
2557
2558void grspw_dma_stop(void *c)
2559{
2560        struct grspw_dma_priv *dma = c;
2561
[eb5a42f6]2562        /* If DMA channel is closed we should not access the semaphore */
2563        if (!dma->open)
2564                return;
2565
[0f49c0e]2566        /* Take DMA Channel lock */
[0d31dcc]2567        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]2568            != RTEMS_SUCCESSFUL)
2569                return;
[0d31dcc]2570        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2571            != RTEMS_SUCCESSFUL) {
2572                rtems_semaphore_release(dma->sem_rxdma);
2573                return;
2574        }
[0f49c0e]2575
2576        grspw_dma_stop_locked(dma);
2577
[0d31dcc]2578        rtems_semaphore_release(dma->sem_txdma);
2579        rtems_semaphore_release(dma->sem_rxdma);
[0f49c0e]2580}
2581
2582/* Do general work, invoked indirectly from ISR */
2583static void grspw_work_shutdown_func(struct grspw_priv *priv)
2584{
2585        int i;
2586
2587        /* Link is down for some reason, and the user has configured
[9cb7e5d]2588         * that we stop all (open) DMA channels and throw out all their
2589         * blocked threads.
[0f49c0e]2590         */
2591        for (i=0; i<priv->hwsup.ndma_chans; i++)
2592                grspw_dma_stop(&priv->dma[i]);
2593        grspw_hw_stop(priv);
2594}
2595
2596/* Do DMA work on one channel, invoked indirectly from ISR */
[ab9b447]2597static void grspw_work_dma_func(struct grspw_dma_priv *dma, unsigned int msg)
[0f49c0e]2598{
[72ec13ef]2599        int tx_cond_true, rx_cond_true, rxtx;
[0f49c0e]2600
[eb5a42f6]2601        /* If DMA channel is closed we should not access the semaphore */
2602        if (dma->open == 0)
2603                return;
2604
[0f49c0e]2605        dma->stats.irq_cnt++;
2606
2607        /* Look at why we were woken up and clear the source */
[72ec13ef]2608        rxtx = 0;
2609        if (msg & WORK_DMA_RX_MASK)
2610                rxtx |= 1;
2611        if (msg & WORK_DMA_TX_MASK)
2612                rxtx |= 2;
2613        switch (grspw_dma_enable_int(dma, rxtx, 0)) {
2614        case 1:
2615                /* DMA stopped */
[0d31dcc]2616                return;
[72ec13ef]2617        case 2:
[0f49c0e]2618                /* DMA error -> Stop DMA channel (both RX and TX) */
[ab9b447]2619                if (msg & WORK_DMA_ER_MASK) {
2620                        /* DMA error and user wants work-task to handle error */
2621                        grspw_dma_stop(dma);
2622                        grspw_work_event(WORKTASK_EV_DMA_STOP, msg);
2623                }
[72ec13ef]2624                return;
2625        default:
2626                break;
2627        }
2628
2629        rx_cond_true = 0;
2630        tx_cond_true = 0;
2631
2632        if (msg & WORK_DMA_RX_MASK) {
2633                /* Do RX Work */
2634
2635                /* Take DMA channel RX lock */
2636                if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2637                    != RTEMS_SUCCESSFUL)
2638                        return;
2639
2640                dma->stats.rx_work_cnt++;
2641                grspw_rx_process_scheduled(dma);
2642                if (dma->started) {
2643                        dma->stats.rx_work_enabled +=
2644                                grspw_rx_schedule_ready(dma);
2645                        /* Check to see if condition for waking blocked
2646                         * USER task is fulfilled.
2647                         */
2648                        if (dma->rx_wait.waiting)
2649                                rx_cond_true = grspw_rx_wait_eval(dma);
[0f49c0e]2650                }
[72ec13ef]2651                rtems_semaphore_release(dma->sem_rxdma);
2652        }
2653
2654        if (msg & WORK_DMA_TX_MASK) {
2655                /* Do TX Work */
2656
2657                /* Take DMA channel TX lock */
2658                if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2659                    != RTEMS_SUCCESSFUL)
2660                        return;
2661
2662                dma->stats.tx_work_cnt++;
2663                grspw_tx_process_scheduled(dma);
2664                if (dma->started) {
2665                        dma->stats.tx_work_enabled +=
2666                                grspw_tx_schedule_send(dma);
2667                        /* Check to see if condition for waking blocked
2668                         * USER task is fulfilled.
2669                         */
2670                        if (dma->tx_wait.waiting)
2671                                tx_cond_true = grspw_tx_wait_eval(dma);
[0f49c0e]2672                }
[72ec13ef]2673                rtems_semaphore_release(dma->sem_txdma);
2674        }
[0f49c0e]2675
2676        if (rx_cond_true)
2677                rtems_semaphore_release(dma->rx_wait.sem_wait);
2678
2679        if (tx_cond_true)
2680                rtems_semaphore_release(dma->tx_wait.sem_wait);
2681}
2682
2683/* The work task receives work through the work message queue posted from
2684 * the ISR.
2685 */
[ab9b447]2686void grspw_work_func(rtems_id msgQ)
[0f49c0e]2687{
[ab9b447]2688        unsigned int message = 0, msg;
[0f49c0e]2689        size_t size;
2690        struct grspw_priv *priv;
2691        int i;
2692
[ab9b447]2693        /* Wait for ISR to schedule work */
2694        while (rtems_message_queue_receive(msgQ, &message, &size,
2695               RTEMS_WAIT, RTEMS_NO_TIMEOUT) == RTEMS_SUCCESSFUL) {
2696                if (message & WORK_QUIT_TASK)
[0f49c0e]2697                        break;
2698
2699                /* Handle work */
2700                priv = priv_tab[message >> WORK_CORE_BIT];
[ab9b447]2701                if (message & WORK_SHUTDOWN) {
[0f49c0e]2702                        grspw_work_shutdown_func(priv);
[ab9b447]2703                               
2704                        grspw_work_event(WORKTASK_EV_SHUTDOWN, message);
2705                } else if (message & WORK_DMA_MASK) {
2706                        for (i = 0; i < priv->hwsup.ndma_chans; i++) {
2707                                msg = message &
2708                                      (WORK_CORE_MASK | WORK_DMA_CHAN_MASK(i));
2709                                if (msg)
2710                                        grspw_work_dma_func(&priv->dma[i], msg);
[0f49c0e]2711                        }
2712                }
[ab9b447]2713                message = 0;
[0f49c0e]2714        }
[ab9b447]2715
2716        if (message & WORK_FREE_MSGQ)
2717                rtems_message_queue_delete(msgQ);
2718
2719        grspw_work_event(WORKTASK_EV_QUIT, message);
[0f49c0e]2720        rtems_task_delete(RTEMS_SELF);
2721}
2722
2723STATIC void grspw_isr(void *data)
2724{
2725        struct grspw_priv *priv = data;
[ab9b447]2726        unsigned int dma_stat, stat, stat_clrmsk, ctrl, icctrl, timecode, irqs;
[56fc7809]2727        unsigned int rxirq, rxack, intto;
[ab9b447]2728        int i, handled = 0, call_user_int_isr;
2729        unsigned int message = WORK_NONE;
[0f49c0e]2730#ifdef RTEMS_HAS_SMP
2731        IRQFLAGS_TYPE irqflags;
2732#endif
2733
2734        /* Get Status from Hardware */
2735        stat = REG_READ(&priv->regs->status);
[a7cc0da9]2736        stat_clrmsk = stat & (GRSPW_STS_TO | GRSPW_STAT_ERROR) &
2737                        (GRSPW_STS_TO | priv->stscfg);
[0f49c0e]2738
2739        /* Make sure to put the timecode handling first in order to get the
2740         * smallest possible interrupt latency
2741         */
2742        if ((stat & GRSPW_STS_TO) && (priv->tcisr != NULL)) {
[56fc7809]2743                ctrl = REG_READ(&priv->regs->ctrl);
2744                if (ctrl & GRSPW_CTRL_TQ) {
2745                        /* Timecode received. Let custom function handle this */
2746                        timecode = REG_READ(&priv->regs->time) &
2747                                        (GRSPW_TIME_CTRL | GRSPW_TIME_TCNT);
2748                        (priv->tcisr)(priv->tcisr_arg, timecode);
2749                }
2750        }
2751
2752        /* Get Interrupt status from hardware */
2753        icctrl = REG_READ(&priv->regs->icctrl);
2754        if ((icctrl & GRSPW_ICCTRL_IRQSRC_MASK) && (priv->icisr != NULL)) {
2755                call_user_int_isr = 0;
2756                rxirq = rxack = intto = 0;
2757
2758                if ((icctrl & GRSPW_ICCTRL_IQ) &&
2759                    (rxirq = REG_READ(&priv->regs->icrx)) != 0)
2760                        call_user_int_isr = 1;
2761
2762                if ((icctrl & GRSPW_ICCTRL_AQ) &&
2763                    (rxack = REG_READ(&priv->regs->icack)) != 0)
2764                        call_user_int_isr = 1;
2765
2766                if ((icctrl & GRSPW_ICCTRL_TQ) &&
2767                    (intto = REG_READ(&priv->regs->ictimeout)) != 0)
2768                        call_user_int_isr = 1;                 
2769
2770                /* Let custom functions handle this POTENTIAL SPW interrupt. The
2771                 * user function is called even if no such IRQ has happened!
2772                 * User must make sure to clear all interrupts that have been
2773                 * handled from the three registers by writing a one.
2774                 */
2775                if (call_user_int_isr)
2776                        priv->icisr(priv->icisr_arg, rxirq, rxack, intto);
[0f49c0e]2777        }
2778
2779        /* An Error occurred? */
2780        if (stat & GRSPW_STAT_ERROR) {
2781                /* Wake Global WorkQ */
2782                handled = 1;
2783
2784                if (stat & GRSPW_STS_EE)
2785                        priv->stats.err_eeop++;
2786
2787                if (stat & GRSPW_STS_IA)
2788                        priv->stats.err_addr++;
2789
2790                if (stat & GRSPW_STS_PE)
2791                        priv->stats.err_parity++;
2792
[ac7da5bc]2793                if (stat & GRSPW_STS_DE)
2794                        priv->stats.err_disconnect++;
2795
[0f49c0e]2796                if (stat & GRSPW_STS_ER)
2797                        priv->stats.err_escape++;
2798
2799                if (stat & GRSPW_STS_CE)
2800                        priv->stats.err_credit++;
2801
2802                if (stat & GRSPW_STS_WE)
2803                        priv->stats.err_wsync++;
2804
[49cf776e]2805                if ((priv->dis_link_on_err >> 16) & stat) {
[0f49c0e]2806                        /* Disable the link, no more transfers are expected
2807                         * on any DMA channel.
2808                         */
2809                        SPIN_LOCK(&priv->devlock, irqflags);
2810                        ctrl = REG_READ(&priv->regs->ctrl);
2811                        REG_WRITE(&priv->regs->ctrl, GRSPW_CTRL_LD |
2812                                (ctrl & ~(GRSPW_CTRL_IE|GRSPW_CTRL_LS)));
2813                        SPIN_UNLOCK(&priv->devlock, irqflags);
2814                        /* Signal to work-thread to stop DMA and clean up */
2815                        message = WORK_SHUTDOWN;
2816                }
2817        }
2818
2819        /* Clear Status Flags */
2820        if (stat_clrmsk) {
2821                handled = 1;
2822                REG_WRITE(&priv->regs->status, stat_clrmsk);
2823        }
2824
2825        /* Has a DMA transfer completed or has an error occurred? If so,
2826         * disable further IRQs from that DMA channel, then invoke the workQ.
2827         *
2828         * Note that the GI interrupt flag is not available in older designs;
2829         * it was added together with support for multiple DMA channels.
2830         */
2831        SPIN_LOCK(&priv->devlock, irqflags);
2832        for (i=0; i<priv->hwsup.ndma_chans; i++) {
2833                dma_stat = REG_READ(&priv->regs->dma[i].ctrl);
2834                /* Check for errors and whether packets have been sent or
2835                 * received, if the respective IRQs are enabled
2836                 */
[ab9b447]2837                irqs = (((dma_stat << 3) & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS))
2838                        | GRSPW_DMA_STATUS_ERROR) & dma_stat;
2839                if (!irqs)
2840                        continue;
2841
2842                /* Disable further IRQs (until enabled again)
2843                 * from this DMA channel. Let the status
2844                 * bits remain so that they can be handled
2845                 * by the work function.
2846                 */
2847                REG_WRITE(&priv->regs->dma[i].ctrl, dma_stat &
2848                        ~(GRSPW_DMACTRL_RI|GRSPW_DMACTRL_TI|
2849                        GRSPW_DMACTRL_PR|GRSPW_DMACTRL_PS|
2850                        GRSPW_DMACTRL_RA|GRSPW_DMACTRL_TA|
2851                        GRSPW_DMACTRL_AT));
2852                handled = 1;
2853
2854                /* DMA error has priority. If an error happens it is assumed
2855                 * that the common work-queue stops the DMA operation for that
2856                 * channel and makes the DMA tasks exit from their waiting
2857                 * functions (both RX and TX tasks).
2858                 */
2859                if (irqs & GRSPW_DMA_STATUS_ERROR) {
2860                        message |= WORK_DMA_ER(i);
2861                } else {
2862                        message |= WORK_DMA(i, irqs >> GRSPW_DMACTRL_PS_BIT);
[0f49c0e]2863                }
2864        }
2865        SPIN_UNLOCK(&priv->devlock, irqflags);
2866
2867        if (handled != 0)
2868                priv->stats.irq_cnt++;
2869
2870        /* Schedule work by sending message to work thread */
[ab9b447]2871        if (message != WORK_NONE && priv->wc.msgisr) {
2872                int status;
[0f49c0e]2873                message |= WORK_CORE(priv->index);
[ab9b447]2874                /* The func interface is compatible with msgQSend() on
2875                 * purpose, but the user can also assign a custom function that
2876                 * handles DMA RX/TX operations as indicated by "message" and
2877                 * clears the handled bits before it is given to msgQSend().
2878                 * See the example sketched after grspw_work_cfg() below. */
2879                status = priv->wc.msgisr(priv->wc.msgisr_arg, &message, 4);
2880                if (status != RTEMS_SUCCESSFUL) {
[0f49c0e]2881                        printk("grspw_isr(%d): message fail %d (0x%x)\n",
[ab9b447]2882                                priv->index, status, message);
2883                }
[0f49c0e]2884        }
2885}
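
/* Illustrative sketch (not part of the driver): a user SpaceWire interrupt
 * handler matching the icisr contract described in grspw_isr() above. The
 * driver leaves ICRX/ICACK/ICTIMEOUT untouched, so the handler acknowledges
 * what it has processed by writing ones back. Passing the register base as
 * the ISR argument is an assumption made for this example only.
 */
#if 0
static void example_spw_int_isr(void *arg, unsigned int rxirq,
	unsigned int rxack, unsigned int intto)
{
	struct grspw_regs *regs = arg; /* assumed: register base as argument */

	if (rxirq != 0)
		REG_WRITE(&regs->icrx, rxirq);		/* ack handled Int-IRQs */
	if (rxack != 0)
		REG_WRITE(&regs->icack, rxack);		/* ack handled Int-ACKs */
	if (intto != 0)
		REG_WRITE(&regs->ictimeout, intto);	/* ack handled timeouts */
}
#endif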
2886
2887STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma)
2888{
2889        unsigned int ctrl;
2890        struct grspw_dma_regs *dregs = dma->regs;
2891
2892        ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN |
2893               GRSPW_DMACTRL_SP | GRSPW_DMACTRL_SA | GRSPW_DMACTRL_NS);
2894        ctrl |= GRSPW_DMACTRL_AT;
2895        REG_WRITE(&dregs->ctrl, ctrl);
2896}
2897
2898STATIC void grspw_hw_dma_softreset(struct grspw_dma_priv *dma)
2899{
2900        unsigned int ctrl;
2901        struct grspw_dma_regs *dregs = dma->regs;
2902
2903        ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN);
2904        REG_WRITE(&dregs->ctrl, ctrl);
2905
2906        REG_WRITE(&dregs->rxmax, DEFAULT_RXMAX);
2907        REG_WRITE(&dregs->txdesc, 0);
2908        REG_WRITE(&dregs->rxdesc, 0);
2909}
2910
2911/* Hardware Action:
2912 *  - stop DMA
2913 *  - do not bring down the link (RMAP may be active)
2914 *  - RMAP settings untouched (RMAP may be active)
2915 *  - port select untouched (RMAP may be active)
2916 *  - timecodes are disabled
2917 *  - IRQ generation disabled
2918 *  - status not cleared (let user analyze it if requested later on)
2919 *  - Node address / first DMA channel's node address
2920 *    is untouched (RMAP may be active)
2921 */
2922STATIC void grspw_hw_stop(struct grspw_priv *priv)
2923{
2924        int i;
2925        unsigned int ctrl;
2926        IRQFLAGS_TYPE irqflags;
2927
2928        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
2929
2930        for (i=0; i<priv->hwsup.ndma_chans; i++)
2931                grspw_hw_dma_stop(&priv->dma[i]);
2932
2933        ctrl = REG_READ(&priv->regs->ctrl);
2934        REG_WRITE(&priv->regs->ctrl, ctrl & (
2935                GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS |
2936                GRSPW_CTRL_RE | GRSPW_CTRL_RD |
2937                GRSPW_CTRL_NP | GRSPW_CTRL_PS));
2938
2939        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
2940}
2941
2942/* Soft reset of GRSPW core registers */
2943STATIC void grspw_hw_softreset(struct grspw_priv *priv)
2944{
2945        int i;
[56fc7809]2946        unsigned int tmp;
[0f49c0e]2947
2948        for (i=0; i<priv->hwsup.ndma_chans; i++)
2949                grspw_hw_dma_softreset(&priv->dma[i]);
2950
2951        REG_WRITE(&priv->regs->status, 0xffffffff);
2952        REG_WRITE(&priv->regs->time, 0);
[56fc7809]2953        /* Clear all ICCTRL bits except the valuable reset values */
2954        tmp = REG_READ(&priv->regs->icctrl);
2955        tmp &= GRSPW_ICCTRL_INUM | GRSPW_ICCTRL_BIRQ | GRSPW_ICCTRL_TXIRQ;
2956        tmp |= GRSPW_ICCTRL_ID;
2957        REG_WRITE(&priv->regs->icctrl, tmp);
2958        REG_WRITE(&priv->regs->icrx, 0xffffffff);
2959        REG_WRITE(&priv->regs->icack, 0xffffffff);
2960        REG_WRITE(&priv->regs->ictimeout, 0xffffffff);
[0f49c0e]2961}
2962
2963int grspw_dev_count(void)
2964{
2965        return grspw_count;
2966}
2967
2968void grspw_initialize_user(void *(*devfound)(int), void (*devremove)(int,void*))
2969{
2970        int i;
2971        struct grspw_priv *priv;
2972
2973        /* Set new Device Found Handler */
2974        grspw_dev_add = devfound;
2975        grspw_dev_del = devremove;
2976
2977        if (grspw_initialized == 1 && grspw_dev_add) {
2978                /* Call callback for every previously found device */
2979                for (i=0; i<grspw_count; i++) {
2980                        priv = priv_tab[i];
2981                        if (priv)
2982                                priv->data = grspw_dev_add(i);
2983                }
2984        }
2985}
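
/* Illustrative sketch (not part of the driver): example callbacks passed to
 * grspw_initialize_user(). The driver stores the devfound() return value in
 * priv->data and hands it back on removal. The context structure and all
 * names below are hypothetical.
 */
#if 0
struct example_dev_ctx {
	int index;
};

static void *example_dev_found(int index)
{
	struct example_dev_ctx *ctx = malloc(sizeof(*ctx));

	if (ctx != NULL)
		ctx->index = index;
	return ctx;	/* becomes priv->data, returned in devremove() */
}

static void example_dev_remove(int index, void *data)
{
	free(data);	/* release the per-device context */
}

/* usage: grspw_initialize_user(example_dev_found, example_dev_remove); */
#endif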
2986
2987/******************* Driver manager interface ***********************/
2988
2989/* Driver prototypes */
2990static int grspw_common_init(void);
2991static int grspw2_init3(struct drvmgr_dev *dev);
2992
2993static struct drvmgr_drv_ops grspw2_ops =
2994{
2995        .init = {NULL,  NULL, grspw2_init3, NULL},
2996        .remove = NULL,
2997        .info = NULL
2998};
2999
3000static struct amba_dev_id grspw2_ids[] =
3001{
3002        {VENDOR_GAISLER, GAISLER_SPW}, /* not yet supported */
3003        {VENDOR_GAISLER, GAISLER_SPW2},
3004        {VENDOR_GAISLER, GAISLER_SPW2_DMA},
3005        {0, 0}          /* Mark end of table */
3006};
3007
3008static struct amba_drv_info grspw2_drv_info =
3009{
3010        {
3011                DRVMGR_OBJ_DRV,                 /* Driver */
3012                NULL,                           /* Next driver */
3013                NULL,                           /* Device list */
3014                DRIVER_AMBAPP_GAISLER_GRSPW2_ID,/* Driver ID */
3015                "GRSPW_PKT_DRV",                /* Driver Name */
3016                DRVMGR_BUS_TYPE_AMBAPP,         /* Bus Type */
3017                &grspw2_ops,
3018                NULL,                           /* Funcs */
3019                0,                              /* No devices yet */
3020                sizeof(struct grspw_priv),      /* Let DrvMgr alloc priv */
3021        },
3022        &grspw2_ids[0]
3023};
3024
3025void grspw2_register_drv (void)
3026{
3027        GRSPW_DBG("Registering GRSPW2 packet driver\n");
3028        drvmgr_drv_register(&grspw2_drv_info.general);
3029}
3030
3031static int grspw2_init3(struct drvmgr_dev *dev)
3032{
3033        struct grspw_priv *priv;
3034        struct amba_dev_info *ambadev;
3035        struct ambapp_core *pnpinfo;
3036        int i, size;
[56fc7809]3037        unsigned int ctrl, icctrl, numi;
[0f49c0e]3038        union drvmgr_key_value *value;
3039
3040        GRSPW_DBG("GRSPW[%d] on bus %s\n", dev->minor_drv,
3041                dev->parent->dev->name);
3042
3043        if (grspw_count >= GRSPW_MAX)
3044                return DRVMGR_ENORES;
3045
3046        priv = dev->priv;
3047        if (priv == NULL)
3048                return DRVMGR_NOMEM;
3049        priv->dev = dev;
3050
3051        /* If first device init common part of driver */
3052        if (grspw_common_init())
3053                return DRVMGR_FAIL;
3054
3055        /*** Now we take care of device initialization ***/
3056
3057        /* Get device information from AMBA PnP information */
3058        ambadev = (struct amba_dev_info *)dev->businfo;
3059        if (ambadev == NULL)
3060                return -1;
3061        pnpinfo = &ambadev->info;
3062        priv->irq = pnpinfo->irq;
3063        priv->regs = (struct grspw_regs *)pnpinfo->apb_slv->start;
3064
3065        /* Read Hardware Support from Control Register */
3066        ctrl = REG_READ(&priv->regs->ctrl);
3067        priv->hwsup.rmap = (ctrl & GRSPW_CTRL_RA) >> GRSPW_CTRL_RA_BIT;
3068        priv->hwsup.rmap_crc = (ctrl & GRSPW_CTRL_RC) >> GRSPW_CTRL_RC_BIT;
3069        priv->hwsup.rx_unalign = (ctrl & GRSPW_CTRL_RX) >> GRSPW_CTRL_RX_BIT;
3070        priv->hwsup.nports = 1 + ((ctrl & GRSPW_CTRL_PO) >> GRSPW_CTRL_PO_BIT);
3071        priv->hwsup.ndma_chans = 1 + ((ctrl & GRSPW_CTRL_NCH) >> GRSPW_CTRL_NCH_BIT);
[56fc7809]3072        priv->hwsup.irq = ((ctrl & GRSPW_CTRL_ID) >> GRSPW_CTRL_ID_BIT);
3073        icctrl = REG_READ(&priv->regs->icctrl);
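        /* The ICCTRL.NUMI field encodes the number of supported interrupt
         * numbers as 2^(NUMI-1); NUMI=0 means no interrupt support.
         */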
3074        numi = (icctrl & GRSPW_ICCTRL_NUMI) >> GRSPW_ICCTRL_NUMI_BIT;
3075        if (numi > 0)
3076                priv->hwsup.irq_num = 1 << (numi - 1);
3077        else
3078                priv->hwsup.irq_num = 0;
[0f49c0e]3079
3080        /* Construct hardware version identification */
3081        priv->hwsup.hw_version = pnpinfo->device << 16 | pnpinfo->apb_slv->ver;
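        /* e.g. a GRSPW2 core (assuming device ID 0x029) with APB revision 1
         * would yield hw_version 0x00290001 (illustrative values only)
         */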
3082
3083        if ((pnpinfo->device == GAISLER_SPW2) ||
3084            (pnpinfo->device == GAISLER_SPW2_DMA)) {
3085                priv->hwsup.strip_adr = 1; /* All GRSPW2 can strip Address */
3086                priv->hwsup.strip_pid = 1; /* All GRSPW2 can strip PID */
3087        } else {
3088                /* Autodetect GRSPW1 features? */
3089                priv->hwsup.strip_adr = 0;
3090                priv->hwsup.strip_pid = 0;
3091        }
3092
[56fc7809]3093        /* Probe the width of the SpaceWire interrupt ISR timers. All
3094         * have the same width, so only the first one is probed; if no
3095         * timer is present the result will be zero.
3096         */
3097        REG_WRITE(&priv->regs->icrlpresc, 0x7fffffff);
3098        ctrl = REG_READ(&priv->regs->icrlpresc);
3099        REG_WRITE(&priv->regs->icrlpresc, 0);
3100        priv->hwsup.itmr_width = 0;
3101        while (ctrl & 1) {
3102                priv->hwsup.itmr_width++;
3103                ctrl = ctrl >> 1;
3104        }
3105
[0f49c0e]3106        /* Let the user limit the number of DMA channels on this core to
3107         * save space. Only the first nDMA channels will be available (see
3108         * the configuration sketch after this function). */
[4d3e70f4]3109        value = drvmgr_dev_key_get(priv->dev, "nDMA", DRVMGR_KT_INT);
[0f49c0e]3110        if (value && (value->i < priv->hwsup.ndma_chans))
3111                priv->hwsup.ndma_chans = value->i;
3112
3113        /* Allocate and init Memory for all DMA channels */
3114        size = sizeof(struct grspw_dma_priv) * priv->hwsup.ndma_chans;
3115        priv->dma = (struct grspw_dma_priv *) malloc(size);
3116        if (priv->dma == NULL)
3117                return DRVMGR_NOMEM;
3118        memset(priv->dma, 0, size);
3119        for (i=0; i<priv->hwsup.ndma_chans; i++) {
3120                priv->dma[i].core = priv;
3121                priv->dma[i].index = i;
3122                priv->dma[i].regs = &priv->regs->dma[i];
3123        }
3124
3125        /* Startup Action:
3126         *  - stop DMA
3127         *  - do not bring down the link (RMAP may be active)
3128         *  - RMAP settings untouched (RMAP may be active)
3129         *  - port select untouched (RMAP may be active)
3130         *  - timecodes are disabled
3131         *  - IRQ generation disabled
3132         *  - status cleared
3133         *  - Node address / first DMA channel's node address
3134         *    is untouched (RMAP may be active)
3135         */
3136        grspw_hw_stop(priv);
3137        grspw_hw_softreset(priv);
3138
3139        /* Register device in the driver's device table */
3140        priv->index = grspw_count;
3141        priv_tab[priv->index] = priv;
3142        grspw_count++;
3143
3144        /* Device name */
3145        sprintf(priv->devname, "grspw%d", priv->index);
3146
3147        /* Tell above layer about new device */
3148        if (grspw_dev_add)
3149                priv->data = grspw_dev_add(priv->index);
3150
3151        return DRVMGR_OK;
3152}
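
/* Illustrative sketch (not part of the driver): how a BSP or project
 * configuration could limit GRSPW device 0 to two DMA channels through a
 * driver manager resource. The "nDMA" key name and DRVMGR_KT_INT type match
 * what grspw2_init3() reads above; the table names are hypothetical and the
 * bus resource must be registered with the AMBA bus as usual.
 */
#if 0
struct drvmgr_key example_grspw0_res[] = {
	{"nDMA", DRVMGR_KT_INT, {(unsigned int)2}},
	DRVMGR_KEY_EMPTY
};

struct drvmgr_bus_res example_grspw_bus_res = {
	.next = NULL,
	.resource = {
		{DRIVER_AMBAPP_GAISLER_GRSPW2_ID, 0, &example_grspw0_res[0]},
		DRVMGR_RES_EMPTY
	},
};
#endif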
3153
3154/******************* Driver Implementation ***********************/
[ab9b447]3155/* Creates a MsgQ (optional) and spawns a worker task associated with the
3156 * message Q. The task can also be associated with a custom MsgQ if *pMsgQ
3157 * is non-zero.
3158 */
3159rtems_id grspw_work_spawn(int prio, int stack, rtems_id *pMsgQ, int msgMax)
3160{
3161        rtems_id tid;
3162        int created_msgq = 0;
[8acfa94]3163        static char work_name = 'A';
[ab9b447]3164
3165        if (pMsgQ == NULL)
3166                return OBJECTS_ID_NONE;
3167
3168        if (*pMsgQ == OBJECTS_ID_NONE) {
3169                if (msgMax <= 0)
3170                        msgMax = 32;
3171
3172                if (rtems_message_queue_create(
[8acfa94]3173                        rtems_build_name('S', 'G', 'Q', work_name),
[ab9b447]3174                        msgMax, 4, RTEMS_FIFO, pMsgQ) !=
3175                        RTEMS_SUCCESSFUL)
3176                        return OBJECTS_ID_NONE;
3177                created_msgq = 1;
3178        }
3179
3180        if (prio < 0)
3181                prio = grspw_work_task_priority; /* default prio */
3182        if (stack < 0x800)
3183                stack = RTEMS_MINIMUM_STACK_SIZE; /* default stack size */
3184
[8acfa94]3185        if (rtems_task_create(rtems_build_name('S', 'G', 'T', work_name),
[ab9b447]3186                prio, stack, RTEMS_PREEMPT | RTEMS_NO_ASR,
3187                RTEMS_NO_FLOATING_POINT, &tid) != RTEMS_SUCCESSFUL)
3188                tid = OBJECTS_ID_NONE;
3189        else if (rtems_task_start(tid, (rtems_task_entry)grspw_work_func, *pMsgQ) !=
3190                    RTEMS_SUCCESSFUL) {
3191                rtems_task_delete(tid);
3192                tid = OBJECTS_ID_NONE;
3193        }
3194
3195        if (tid == OBJECTS_ID_NONE && created_msgq) {
3196                rtems_message_queue_delete(*pMsgQ);
3197                *pMsgQ = OBJECTS_ID_NONE;
[8acfa94]3198        } else {
3199                if (++work_name > 'Z')
3200                        work_name = 'A';
[ab9b447]3201        }
3202        return tid;
3203}
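
/* Illustrative sketch (not part of the driver): spawning a dedicated work
 * task with a driver-created message queue and wiring it to one device via
 * grspw_work_cfg(). "dev" is assumed to be a device handle as accepted by
 * grspw_work_cfg(); all other names are hypothetical.
 */
#if 0
static int example_private_work_task(void *dev)
{
	struct grspw_work_config wc;
	rtems_id msgq = OBJECTS_ID_NONE;

	/* -1/0 arguments select default priority, stack and queue depth */
	if (grspw_work_spawn(-1, 0, &msgq, 0) == OBJECTS_ID_NONE)
		return -1;

	wc.msgisr = (grspw_msgqisr_t)rtems_message_queue_send;
	wc.msgisr_arg = (void *)msgq;	/* queue id passed as ISR argument */
	grspw_work_cfg(dev, &wc);

	/* later: grspw_work_free(msgq, 1) stops the task and deletes the Q */
	return 0;
}
#endif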
3204
3205/* Free the task associated with the message queue and optionally also the
3206 * message queue itself. The message queue is deleted by the work task, so
3207 * its deletion is delayed until the work task resumes execution.
3208 */
3209rtems_status_code grspw_work_free(rtems_id msgQ, int freeMsgQ)
3210{
3211        int msg = WORK_QUIT_TASK;
3212        if (freeMsgQ)
3213                msg |= WORK_FREE_MSGQ;
3214        return rtems_message_queue_send(msgQ, &msg, 4);
3215}
3216
3217void grspw_work_cfg(void *d, struct grspw_work_config *wc)
3218{
3219        struct grspw_priv *priv = (struct grspw_priv *)d;
3220
3221        if (wc == NULL)
3222                wc = &grspw_wc_def; /* use default config */
3223        priv->wc = *wc;
3224}
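
/* Illustrative sketch (not part of the driver): a custom msgisr assigned
 * through struct grspw_work_config. It runs in interrupt context, must be
 * call-compatible with msgQSend(), and here only counts work requests
 * before forwarding them to the message queue whose id is carried in
 * "arg". All names are hypothetical.
 */
#if 0
static volatile unsigned int example_work_msg_cnt;

static int example_msgisr(void *arg, unsigned int *msg, unsigned int size)
{
	example_work_msg_cnt++;	/* custom pre-processing could go here */
	return rtems_message_queue_send((rtems_id)arg, msg, size);
}

/* usage: wc.msgisr = (grspw_msgqisr_t)example_msgisr;
 *        wc.msgisr_arg = (void *)msgq;
 *        grspw_work_cfg(dev, &wc);
 */
#endif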
[0f49c0e]3225
3226static int grspw_common_init(void)
3227{
3228        if (grspw_initialized == 1)
3229                return 0;
3230        if (grspw_initialized == -1)
3231                return -1;
3232        grspw_initialized = -1;
3233
3234        /* Device Semaphore created with count = 1 */
3235        if (rtems_semaphore_create(rtems_build_name('S', 'G', 'L', 'S'), 1,
3236            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
3237            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
3238            RTEMS_NO_PRIORITY_CEILING, 0, &grspw_sem) != RTEMS_SUCCESSFUL)
3239                return -1;
3240
3241        /* Work queue and work thread. Not created if the user disables it;
3242         * this can save resources when interrupts are not used.
3243         */
3244        if (grspw_work_task_priority != -1) {
[ab9b447]3245                grspw_work_task = grspw_work_spawn(-1, 0,
3246                        (rtems_id *)&grspw_wc_def.msgisr_arg, 0);
3247                if (grspw_work_task == OBJECTS_ID_NONE)
3248                        return -2;
3249                grspw_wc_def.msgisr =
3250                        (grspw_msgqisr_t) rtems_message_queue_send;
3251        } else {
3252                grspw_wc_def.msgisr = NULL;
3253                grspw_wc_def.msgisr_arg = NULL;
3254        }
[0f49c0e]3255
3256        grspw_initialized = 1;
3257        return 0;
3258}