source: rtems/c/src/lib/libbsp/sparc/shared/spw/grspw_pkt.c @ 090016a

Last change on this file: 090016a, checked in by Daniel Hellstrom <daniel@…>, on 01/22/17 at 10:34:25

leon, grspw_pkt: ISR activate shutdown work only if errintr enabled

On shared interrupt systems it can be a problem to handle an interrupt
regardless of whether the interrupt source is enabled. The ISR now takes the
same approach for the error interrupt as it already does for the DMA RX/TX
interrupts.

/*
 * Cobham Gaisler GRSPW/GRSPW2 SpaceWire Kernel Library Interface for RTEMS.
 *
 * This driver can be used to implement a standard I/O system "char"-driver
 * or used directly. NOTE SMP support has not been tested.
 *
 * COPYRIGHT (c) 2011
 * Cobham Gaisler AB
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#include <rtems.h>
#include <bsp.h>
#include <rtems/libio.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <ctype.h>
#include <malloc.h>
#include <rtems/bspIo.h>

#include <drvmgr/drvmgr.h>
#include <ambapp.h>
#include <drvmgr/ambapp_bus.h>
#include <bsp/grspw_pkt.h>

/* This driver has been prepared for SMP operation, however it has never been
 * tested on an SMP system - use at your own risk.
 */
#ifdef RTEMS_HAS_SMP

#include <rtems/score/smplock.h> /* spin-lock */

/* SPIN_LOCK() and SPIN_UNLOCK() NOT_IMPLEMENTED_BY_RTEMS. Use _IRQ version
 * to implement.
 */
#define SPIN_DECLARE(name) SMP_lock_spinlock_simple_Control name
#define SPIN_INIT(lock) _SMP_lock_spinlock_simple_Initialize(lock)
#define SPIN_LOCK(lock, level) SPIN_LOCK_IRQ(lock, level)
#define SPIN_LOCK_IRQ(lock, level) (level) = _SMP_lock_spinlock_simple_Obtain(lock)
#define SPIN_UNLOCK(lock, level) SPIN_UNLOCK_IRQ(lock, level)
#define SPIN_UNLOCK_IRQ(lock, level) _SMP_lock_spinlock_simple_Release(lock, level)
#define IRQFLAGS_TYPE ISR_Level

#else

#define SPIN_DECLARE(name)
#define SPIN_INIT(lock)
#define SPIN_LOCK(lock, level)
#define SPIN_LOCK_IRQ(lock, level) rtems_interrupt_disable(level)
#define SPIN_UNLOCK(lock, level)
#define SPIN_UNLOCK_IRQ(lock, level) rtems_interrupt_enable(level)
#define IRQFLAGS_TYPE rtems_interrupt_level

#endif

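/* Usage sketch (illustrative only, not part of the driver): this is the
 * pattern used throughout the driver to protect register read-modify-write
 * sequences against the ISR, in both the SMP and non-SMP builds.
 *
 *   IRQFLAGS_TYPE irqflags;
 *
 *   SPIN_LOCK_IRQ(&priv->devlock, irqflags);
 *   ctrl = REG_READ(&regs->ctrl);
 *   REG_WRITE(&regs->ctrl, ctrl | GRSPW_CTRL_TI);
 *   SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
 */
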
/*#define STATIC*/
#define STATIC static

/*#define GRSPW_DBG(args...) printk(args)*/
#define GRSPW_DBG(args...)

struct grspw_dma_regs {
        volatile unsigned int ctrl;     /* DMA Channel Control */
        volatile unsigned int rxmax;    /* RX Max Packet Length */
        volatile unsigned int txdesc;   /* TX Descriptor Base/Current */
        volatile unsigned int rxdesc;   /* RX Descriptor Base/Current */
        volatile unsigned int addr;     /* Address Register */
        volatile unsigned int resv[3];
};

struct grspw_regs {
        volatile unsigned int ctrl;
        volatile unsigned int status;
        volatile unsigned int nodeaddr;
        volatile unsigned int clkdiv;
        volatile unsigned int destkey;
        volatile unsigned int time;
        volatile unsigned int timer;    /* Used only in GRSPW1 */
        volatile unsigned int resv1;

        /* DMA Registers; ctrl.NCH determines the number of DMA channels,
         * up to 4 channels are supported
         */
        struct grspw_dma_regs dma[4];

        volatile unsigned int icctrl;
        volatile unsigned int icrx;
        volatile unsigned int icack;
        volatile unsigned int ictimeout;
        volatile unsigned int ictickomask;
        volatile unsigned int icaamask;
        volatile unsigned int icrlpresc;
        volatile unsigned int icrlisr;
        volatile unsigned int icrlintack;
        volatile unsigned int resv2;
        volatile unsigned int icisr;
        volatile unsigned int resv3;
};

/* GRSPW - Control Register - 0x00 */
#define GRSPW_CTRL_RA_BIT       31
#define GRSPW_CTRL_RX_BIT       30
#define GRSPW_CTRL_RC_BIT       29
#define GRSPW_CTRL_NCH_BIT      27
#define GRSPW_CTRL_PO_BIT       26
#define GRSPW_CTRL_ID_BIT       24
#define GRSPW_CTRL_LE_BIT       22
#define GRSPW_CTRL_PS_BIT       21
#define GRSPW_CTRL_NP_BIT       20
#define GRSPW_CTRL_RD_BIT       17
#define GRSPW_CTRL_RE_BIT       16
#define GRSPW_CTRL_TF_BIT       12
#define GRSPW_CTRL_TR_BIT       11
#define GRSPW_CTRL_TT_BIT       10
#define GRSPW_CTRL_LI_BIT       9
#define GRSPW_CTRL_TQ_BIT       8
#define GRSPW_CTRL_RS_BIT       6
#define GRSPW_CTRL_PM_BIT       5
#define GRSPW_CTRL_TI_BIT       4
#define GRSPW_CTRL_IE_BIT       3
#define GRSPW_CTRL_AS_BIT       2
#define GRSPW_CTRL_LS_BIT       1
#define GRSPW_CTRL_LD_BIT       0

#define GRSPW_CTRL_RA   (1<<GRSPW_CTRL_RA_BIT)
#define GRSPW_CTRL_RX   (1<<GRSPW_CTRL_RX_BIT)
#define GRSPW_CTRL_RC   (1<<GRSPW_CTRL_RC_BIT)
#define GRSPW_CTRL_NCH  (0x3<<GRSPW_CTRL_NCH_BIT)
#define GRSPW_CTRL_PO   (1<<GRSPW_CTRL_PO_BIT)
#define GRSPW_CTRL_ID   (1<<GRSPW_CTRL_ID_BIT)
#define GRSPW_CTRL_LE   (1<<GRSPW_CTRL_LE_BIT)
#define GRSPW_CTRL_PS   (1<<GRSPW_CTRL_PS_BIT)
#define GRSPW_CTRL_NP   (1<<GRSPW_CTRL_NP_BIT)
#define GRSPW_CTRL_RD   (1<<GRSPW_CTRL_RD_BIT)
#define GRSPW_CTRL_RE   (1<<GRSPW_CTRL_RE_BIT)
#define GRSPW_CTRL_TF   (1<<GRSPW_CTRL_TF_BIT)
#define GRSPW_CTRL_TR   (1<<GRSPW_CTRL_TR_BIT)
#define GRSPW_CTRL_TT   (1<<GRSPW_CTRL_TT_BIT)
#define GRSPW_CTRL_LI   (1<<GRSPW_CTRL_LI_BIT)
#define GRSPW_CTRL_TQ   (1<<GRSPW_CTRL_TQ_BIT)
#define GRSPW_CTRL_RS   (1<<GRSPW_CTRL_RS_BIT)
#define GRSPW_CTRL_PM   (1<<GRSPW_CTRL_PM_BIT)
#define GRSPW_CTRL_TI   (1<<GRSPW_CTRL_TI_BIT)
#define GRSPW_CTRL_IE   (1<<GRSPW_CTRL_IE_BIT)
#define GRSPW_CTRL_AS   (1<<GRSPW_CTRL_AS_BIT)
#define GRSPW_CTRL_LS   (1<<GRSPW_CTRL_LS_BIT)
#define GRSPW_CTRL_LD   (1<<GRSPW_CTRL_LD_BIT)

#define GRSPW_CTRL_IRQSRC_MASK \
        (GRSPW_CTRL_LI | GRSPW_CTRL_TQ)
#define GRSPW_ICCTRL_IRQSRC_MASK \
        (GRSPW_ICCTRL_TQ | GRSPW_ICCTRL_AQ | GRSPW_ICCTRL_IQ)


/* GRSPW - Status Register - 0x04 */
#define GRSPW_STS_LS_BIT        21
#define GRSPW_STS_AP_BIT        9
#define GRSPW_STS_EE_BIT        8
#define GRSPW_STS_IA_BIT        7
#define GRSPW_STS_WE_BIT        6       /* GRSPW1 */
#define GRSPW_STS_PE_BIT        4
#define GRSPW_STS_DE_BIT        3
#define GRSPW_STS_ER_BIT        2
#define GRSPW_STS_CE_BIT        1
#define GRSPW_STS_TO_BIT        0

#define GRSPW_STS_LS    (0x7<<GRSPW_STS_LS_BIT)
#define GRSPW_STS_AP    (1<<GRSPW_STS_AP_BIT)
#define GRSPW_STS_EE    (1<<GRSPW_STS_EE_BIT)
#define GRSPW_STS_IA    (1<<GRSPW_STS_IA_BIT)
#define GRSPW_STS_WE    (1<<GRSPW_STS_WE_BIT)   /* GRSPW1 */
#define GRSPW_STS_PE    (1<<GRSPW_STS_PE_BIT)
#define GRSPW_STS_DE    (1<<GRSPW_STS_DE_BIT)
#define GRSPW_STS_ER    (1<<GRSPW_STS_ER_BIT)
#define GRSPW_STS_CE    (1<<GRSPW_STS_CE_BIT)
#define GRSPW_STS_TO    (1<<GRSPW_STS_TO_BIT)

/* GRSPW - Default Address Register - 0x08 */
#define GRSPW_DEF_ADDR_BIT      0
#define GRSPW_DEF_MASK_BIT      8
#define GRSPW_DEF_ADDR  (0xff<<GRSPW_DEF_ADDR_BIT)
#define GRSPW_DEF_MASK  (0xff<<GRSPW_DEF_MASK_BIT)

/* GRSPW - Clock Divisor Register - 0x0C */
#define GRSPW_CLKDIV_START_BIT  8
#define GRSPW_CLKDIV_RUN_BIT    0
#define GRSPW_CLKDIV_START      (0xff<<GRSPW_CLKDIV_START_BIT)
#define GRSPW_CLKDIV_RUN        (0xff<<GRSPW_CLKDIV_RUN_BIT)
#define GRSPW_CLKDIV_MASK       (GRSPW_CLKDIV_START|GRSPW_CLKDIV_RUN)

/* GRSPW - Destination key Register - 0x10 */
#define GRSPW_DK_DESTKEY_BIT    0
#define GRSPW_DK_DESTKEY        (0xff<<GRSPW_DK_DESTKEY_BIT)

/* GRSPW - Time Register - 0x14 */
#define GRSPW_TIME_CTRL_BIT     6
#define GRSPW_TIME_CNT_BIT      0
#define GRSPW_TIME_CTRL         (0x3<<GRSPW_TIME_CTRL_BIT)
#define GRSPW_TIME_TCNT         (0x3f<<GRSPW_TIME_CNT_BIT)

/* GRSPW - DMA Control Register - 0x20*N */
#define GRSPW_DMACTRL_LE_BIT    16
#define GRSPW_DMACTRL_SP_BIT    15
#define GRSPW_DMACTRL_SA_BIT    14
#define GRSPW_DMACTRL_EN_BIT    13
#define GRSPW_DMACTRL_NS_BIT    12
#define GRSPW_DMACTRL_RD_BIT    11
#define GRSPW_DMACTRL_RX_BIT    10
#define GRSPW_DMACTRL_AT_BIT    9
#define GRSPW_DMACTRL_RA_BIT    8
#define GRSPW_DMACTRL_TA_BIT    7
#define GRSPW_DMACTRL_PR_BIT    6
#define GRSPW_DMACTRL_PS_BIT    5
#define GRSPW_DMACTRL_AI_BIT    4
#define GRSPW_DMACTRL_RI_BIT    3
#define GRSPW_DMACTRL_TI_BIT    2
#define GRSPW_DMACTRL_RE_BIT    1
#define GRSPW_DMACTRL_TE_BIT    0

#define GRSPW_DMACTRL_LE        (1<<GRSPW_DMACTRL_LE_BIT)
#define GRSPW_DMACTRL_SP        (1<<GRSPW_DMACTRL_SP_BIT)
#define GRSPW_DMACTRL_SA        (1<<GRSPW_DMACTRL_SA_BIT)
#define GRSPW_DMACTRL_EN        (1<<GRSPW_DMACTRL_EN_BIT)
#define GRSPW_DMACTRL_NS        (1<<GRSPW_DMACTRL_NS_BIT)
#define GRSPW_DMACTRL_RD        (1<<GRSPW_DMACTRL_RD_BIT)
#define GRSPW_DMACTRL_RX        (1<<GRSPW_DMACTRL_RX_BIT)
#define GRSPW_DMACTRL_AT        (1<<GRSPW_DMACTRL_AT_BIT)
#define GRSPW_DMACTRL_RA        (1<<GRSPW_DMACTRL_RA_BIT)
#define GRSPW_DMACTRL_TA        (1<<GRSPW_DMACTRL_TA_BIT)
#define GRSPW_DMACTRL_PR        (1<<GRSPW_DMACTRL_PR_BIT)
#define GRSPW_DMACTRL_PS        (1<<GRSPW_DMACTRL_PS_BIT)
#define GRSPW_DMACTRL_AI        (1<<GRSPW_DMACTRL_AI_BIT)
#define GRSPW_DMACTRL_RI        (1<<GRSPW_DMACTRL_RI_BIT)
#define GRSPW_DMACTRL_TI        (1<<GRSPW_DMACTRL_TI_BIT)
#define GRSPW_DMACTRL_RE        (1<<GRSPW_DMACTRL_RE_BIT)
#define GRSPW_DMACTRL_TE        (1<<GRSPW_DMACTRL_TE_BIT)

/* GRSPW - DMA Channel Max Packet Length Register - (0x20*N + 0x04) */
#define GRSPW_DMARXLEN_MAX_BIT  0
#define GRSPW_DMARXLEN_MAX      (0xffffff<<GRSPW_DMARXLEN_MAX_BIT)

/* GRSPW - DMA Channel Address Register - (0x20*N + 0x10) */
#define GRSPW_DMAADR_ADDR_BIT   0
#define GRSPW_DMAADR_MASK_BIT   8
#define GRSPW_DMAADR_ADDR       (0xff<<GRSPW_DMAADR_ADDR_BIT)
#define GRSPW_DMAADR_MASK       (0xff<<GRSPW_DMAADR_MASK_BIT)

/* GRSPW - Interrupt code receive register - 0xa4 */
#define GRSPW_ICCTRL_INUM_BIT   27
#define GRSPW_ICCTRL_IA_BIT     24
#define GRSPW_ICCTRL_LE_BIT     23
#define GRSPW_ICCTRL_PR_BIT     22
#define GRSPW_ICCTRL_DQ_BIT     21 /* never used */
#define GRSPW_ICCTRL_TQ_BIT     20
#define GRSPW_ICCTRL_AQ_BIT     19
#define GRSPW_ICCTRL_IQ_BIT     18
#define GRSPW_ICCTRL_IR_BIT     17
#define GRSPW_ICCTRL_IT_BIT     16
#define GRSPW_ICCTRL_NUMI_BIT   13
#define GRSPW_ICCTRL_BIRQ_BIT   8
#define GRSPW_ICCTRL_ID_BIT     7
#define GRSPW_ICCTRL_II_BIT     6
#define GRSPW_ICCTRL_TXIRQ_BIT  0
#define GRSPW_ICCTRL_INUM       (0x1f << GRSPW_ICCTRL_INUM_BIT)
#define GRSPW_ICCTRL_IA         (1 << GRSPW_ICCTRL_IA_BIT)
#define GRSPW_ICCTRL_LE         (1 << GRSPW_ICCTRL_LE_BIT)
#define GRSPW_ICCTRL_PR         (1 << GRSPW_ICCTRL_PR_BIT)
#define GRSPW_ICCTRL_DQ         (1 << GRSPW_ICCTRL_DQ_BIT)
#define GRSPW_ICCTRL_TQ         (1 << GRSPW_ICCTRL_TQ_BIT)
#define GRSPW_ICCTRL_AQ         (1 << GRSPW_ICCTRL_AQ_BIT)
#define GRSPW_ICCTRL_IQ         (1 << GRSPW_ICCTRL_IQ_BIT)
#define GRSPW_ICCTRL_IR         (1 << GRSPW_ICCTRL_IR_BIT)
#define GRSPW_ICCTRL_IT         (1 << GRSPW_ICCTRL_IT_BIT)
#define GRSPW_ICCTRL_NUMI       (0x7 << GRSPW_ICCTRL_NUMI_BIT)
#define GRSPW_ICCTRL_BIRQ       (0x1f << GRSPW_ICCTRL_BIRQ_BIT)
#define GRSPW_ICCTRL_ID         (1 << GRSPW_ICCTRL_ID_BIT)
#define GRSPW_ICCTRL_II         (1 << GRSPW_ICCTRL_II_BIT)
#define GRSPW_ICCTRL_TXIRQ      (0x3f << GRSPW_ICCTRL_TXIRQ_BIT)

/* RX Buffer Descriptor */
struct grspw_rxbd {
   volatile unsigned int ctrl;
   volatile unsigned int addr;
};

/* TX Buffer Descriptor */
struct grspw_txbd {
   volatile unsigned int ctrl;
   volatile unsigned int haddr;
   volatile unsigned int dlen;
   volatile unsigned int daddr;
};

/* GRSPW - DMA RXBD Ctrl */
#define GRSPW_RXBD_LEN_BIT 0
#define GRSPW_RXBD_LEN  (0x1ffffff<<GRSPW_RXBD_LEN_BIT)
#define GRSPW_RXBD_EN   (1<<25)
#define GRSPW_RXBD_WR   (1<<26)
#define GRSPW_RXBD_IE   (1<<27)
#define GRSPW_RXBD_EP   (1<<28)
#define GRSPW_RXBD_HC   (1<<29)
#define GRSPW_RXBD_DC   (1<<30)
#define GRSPW_RXBD_TR   (1<<31)

#define GRSPW_TXBD_HLEN (0xff<<0)
#define GRSPW_TXBD_NCL  (0xf<<8)
#define GRSPW_TXBD_EN   (1<<12)
#define GRSPW_TXBD_WR   (1<<13)
#define GRSPW_TXBD_IE   (1<<14)
#define GRSPW_TXBD_LE   (1<<15)
#define GRSPW_TXBD_HC   (1<<16)
#define GRSPW_TXBD_DC   (1<<17)

#define GRSPW_DMAADR_MASK_BIT   8
#define GRSPW_DMAADR_ADDR       (0xff<<GRSPW_DMAADR_ADDR_BIT)
#define GRSPW_DMAADR_MASK       (0xff<<GRSPW_DMAADR_MASK_BIT)


/* GRSPW Error Condition */
#define GRSPW_STAT_ERROR        (GRSPW_STS_EE | GRSPW_STS_IA | GRSPW_STS_WE | GRSPW_STS_PE | GRSPW_STS_DE | GRSPW_STS_ER | GRSPW_STS_CE)
#define GRSPW_DMA_STATUS_ERROR  (GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA)
/* GRSPW Link configuration options */
#define GRSPW_LINK_CFG          (GRSPW_CTRL_LI | GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS)
#define GRSPW_LINKSTATE(status) ((status & GRSPW_CTRL_LS) >> GRSPW_CTRL_LS_BIT)

/* Software Defaults */
#define DEFAULT_RXMAX 1024      /* 1 KBytes Max RX Packet Size */

/* GRSPW Constants */
#define GRSPW_TXBD_NR 64        /* Maximum number of TX Descriptors */
#define GRSPW_RXBD_NR 128       /* Maximum number of RX Descriptors */
#define GRSPW_TXBD_SIZE 16      /* Size in bytes of one TX descriptor */
#define GRSPW_RXBD_SIZE 8       /* Size in bytes of one RX descriptor */
#define BDTAB_SIZE 0x400        /* BD Table Size (RX or TX) */
#define BDTAB_ALIGN 0x400       /* BD Table Alignment Requirement */

/* Memory and HW Registers Access routines. All 32-bit access routines */
#define BD_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
/*#define BD_READ(addr) (*(volatile unsigned int *)(addr))*/
#define BD_READ(addr) leon_r32_no_cache((unsigned long)(addr))
#define REG_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
#define REG_READ(addr) (*(volatile unsigned int *)(addr))

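/* Access sketch (illustrative only): descriptors are always touched through
 * BD_WRITE()/BD_READ() so that descriptor reads bypass the LEON data cache,
 * while REG_WRITE()/REG_READ() are used for the APB registers. The "bd"
 * pointer below is a hypothetical RX descriptor.
 *
 *   BD_WRITE(&bd->addr, hwaddr);
 *   BD_WRITE(&bd->ctrl, GRSPW_RXBD_EN | GRSPW_RXBD_IE);
 *   ctrl = BD_READ(&bd->ctrl);
 */
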
struct grspw_ring {
        struct grspw_ring *next;        /* Next Descriptor */
        union {
                struct grspw_txbd *tx;  /* Descriptor Address */
                struct grspw_rxbd *rx;  /* Descriptor Address */
        } bd;
        struct grspw_pkt *pkt;          /* Packet description associated. NULL if none */
};

/* An entry in the TX descriptor Ring */
struct grspw_txring {
        struct grspw_txring *next;      /* Next Descriptor */
        struct grspw_txbd *bd;          /* Descriptor Address */
        struct grspw_pkt *pkt;          /* Packet description associated. NULL if none */
};

/* An entry in the RX descriptor Ring */
struct grspw_rxring {
        struct grspw_rxring *next;      /* Next Descriptor */
        struct grspw_rxbd *bd;          /* Descriptor Address */
        struct grspw_pkt *pkt;          /* Packet description associated. NULL if none */
};


struct grspw_dma_priv {
        struct grspw_priv *core;        /* GRSPW Core */
        struct grspw_dma_regs *regs;    /* DMA Channel Registers */
        int index;                      /* DMA Channel Index @ GRSPW core */
        int open;                       /* DMA Channel opened by user */
        int started;                    /* DMA Channel activity (start|stop) */
        rtems_id sem_rxdma;             /* DMA Channel RX Semaphore */
        rtems_id sem_txdma;             /* DMA Channel TX Semaphore */
        struct grspw_dma_stats stats;   /* DMA Channel Statistics */
        struct grspw_dma_config cfg;    /* DMA Channel Configuration */

        /*** RX ***/

        /* RX Descriptor Ring */
        struct grspw_rxbd *rx_bds;              /* Descriptor Address */
        struct grspw_rxbd *rx_bds_hwa;          /* Descriptor HW Address */
        struct grspw_rxring *rx_ring_base;
        struct grspw_rxring *rx_ring_head;      /* Next descriptor to enable */
        struct grspw_rxring *rx_ring_tail;      /* Oldest enabled Descriptor */
        int rx_irq_en_cnt_curr;
        struct {
                int waiting;
                int ready_cnt;
                int op;
                int recv_cnt;
                rtems_id sem_wait;              /* RX Semaphore used to implement RX blocking */
        } rx_wait;

        /* Queue of Packets READY to be scheduled */
        struct grspw_list ready;
        int ready_cnt;

        /* Scheduled RX Packets Queue */
        struct grspw_list rx_sched;
        int rx_sched_cnt;

        /* Queue of Packets that have been RECEIVED */
        struct grspw_list recv;
        int recv_cnt;


        /*** TX ***/

        /* TX Descriptor Ring */
        struct grspw_txbd *tx_bds;              /* Descriptor Address */
        struct grspw_txbd *tx_bds_hwa;          /* Descriptor HW Address */
        struct grspw_txring *tx_ring_base;
        struct grspw_txring *tx_ring_head;
        struct grspw_txring *tx_ring_tail;
        int tx_irq_en_cnt_curr;
        struct {
                int waiting;
                int send_cnt;
                int op;
                int sent_cnt;
                rtems_id sem_wait;              /* TX Semaphore used to implement TX blocking */
        } tx_wait;

        /* Queue of Packets ready to be scheduled for transmission */
        struct grspw_list send;
        int send_cnt;

        /* Scheduled TX Packets Queue */
        struct grspw_list tx_sched;
        int tx_sched_cnt;

        /* Queue of Packets that have been SENT */
        struct grspw_list sent;
        int sent_cnt;
};

struct grspw_priv {
        char devname[8];                /* Device name "grspw%d" */
        struct drvmgr_dev *dev;         /* Device */
        struct grspw_regs *regs;        /* Virtual Address of APB Registers */
        int irq;                        /* AMBA IRQ number of core */
        int index;                      /* Index in order it was probed */
        int core_index;                 /* Core Bus Index */
        int open;                       /* If Device is already opened (=1) or not (=0) */
        void *data;                     /* User private Data for this device instance, set by grspw_initialize_user */

        /* Features supported by Hardware */
        struct grspw_hw_sup hwsup;

        /* Pointer to an array of at most 4 DMA Channels */
        struct grspw_dma_priv *dma;

        /* Spin-lock ISR protection */
        SPIN_DECLARE(devlock);

        /* Descriptor Memory Area for TX & RX and all DMA channels */
        unsigned int bd_mem;
        unsigned int bd_mem_alloced;

        /*** Time Code Handling ***/
        void (*tcisr)(void *data, int timecode);
        void *tcisr_arg;

        /*** Interrupt-code Handling ***/
        spwpkt_ic_isr_t icisr;
        void *icisr_arg;

        /* Bit mask representing events which shall cause link disable. */
        unsigned int dis_link_on_err;

        /* Bit mask for link status bits to clear by ISR */
        unsigned int stscfg;

        /*** Message Queue Handling ***/
        struct grspw_work_config wc;

        /* "Core Global" Statistics gathered, not dependent on DMA channel */
        struct grspw_core_stats stats;
};

int grspw_initialized = 0;
int grspw_count = 0;
rtems_id grspw_sem;
static struct grspw_priv *priv_tab[GRSPW_MAX];

/* callback to upper layer when devices are discovered/removed */
void *(*grspw_dev_add)(int) = NULL;
void (*grspw_dev_del)(int,void*) = NULL;

/* Defaults to do nothing - user can override this function.
 * Called from work-task.
 */
void __attribute__((weak)) grspw_work_event(
        enum grspw_worktask_ev ev,
        unsigned int msg)
{

}

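/* Override sketch (illustrative only): since grspw_work_event() is declared
 * weak, an application may supply its own definition to be notified from the
 * work-task, for example:
 *
 *   void grspw_work_event(enum grspw_worktask_ev ev, unsigned int msg)
 *   {
 *           printk("GRSPW work event %d, msg 0x%08x\n", ev, msg);
 *   }
 */
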
/* USER OVERRIDABLE - The work task priority. Set to -1 to disable creating
 * the work-task and work-queue to save space.
 */
int grspw_work_task_priority __attribute__((weak)) = 100;
rtems_id grspw_work_task;
static struct grspw_work_config grspw_wc_def;

STATIC void grspw_hw_stop(struct grspw_priv *priv);
STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma);
STATIC void grspw_dma_reset(struct grspw_dma_priv *dma);
STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma);
STATIC void grspw_isr(void *data);

void *grspw_open(int dev_no)
{
        struct grspw_priv *priv;
        unsigned int bdtabsize, hwa;
        int i;
        union drvmgr_key_value *value;

        if (grspw_initialized != 1 || (dev_no >= grspw_count))
                return NULL;

        priv = priv_tab[dev_no];

        /* Take GRSPW lock - Wait until we get semaphore */
        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return NULL;

        if (priv->open) {
                priv = NULL;
                goto out;
        }

        /* Initialize Spin-lock for GRSPW Device. This is to protect
         * CTRL and DMACTRL registers from ISR.
         */
        SPIN_INIT(&priv->devlock);

        priv->tcisr = NULL;
        priv->tcisr_arg = NULL;
        priv->icisr = NULL;
        priv->icisr_arg = NULL;
        priv->stscfg = LINKSTS_MASK;

        /* Default to common work queue and message queue; if not created
         * during initialization then it is disabled.
         */
        grspw_work_cfg(priv, &grspw_wc_def);

        grspw_stats_clr(priv);

        /* Allocate TX & RX Descriptor memory area for all DMA
         * channels. Max-size descriptor area is allocated (or user assigned):
         *  - 128 RX descriptors per DMA Channel
         *  - 64 TX descriptors per DMA Channel
         * Specified address must be in CPU RAM.
         */
        bdtabsize = 2 * BDTAB_SIZE * priv->hwsup.ndma_chans;
        value = drvmgr_dev_key_get(priv->dev, "bdDmaArea", DRVMGR_KT_INT);
        if (value) {
                priv->bd_mem = value->i;
                priv->bd_mem_alloced = 0;
                if (priv->bd_mem & (BDTAB_ALIGN-1)) {
                        GRSPW_DBG("GRSPW[%d]: user-def DMA-area not aligned",
                                  priv->index);
                        priv = NULL;
                        goto out;
                }
        } else {
                priv->bd_mem_alloced = (unsigned int)malloc(bdtabsize + BDTAB_ALIGN - 1);
                if (priv->bd_mem_alloced == 0) {
                        priv = NULL;
                        goto out;
                }
                /* Align memory */
                priv->bd_mem = (priv->bd_mem_alloced + (BDTAB_ALIGN - 1)) &
                               ~(BDTAB_ALIGN-1);
        }

        /* Translate into DMA address that HW can use to access DMA
         * descriptors
         */
        drvmgr_translate_check(
                priv->dev,
                CPUMEM_TO_DMA,
                (void *)priv->bd_mem,
                (void **)&hwa,
                bdtabsize);

        GRSPW_DBG("GRSPW%d DMA descriptor table setup: (alloced:%p, bd_mem:%p, size: %d)\n",
                priv->index, priv->bd_mem_alloced, priv->bd_mem, bdtabsize + BDTAB_ALIGN - 1);
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                /* Do DMA Channel Init, other variables etc. are inited
                 * when respective DMA channel is opened.
                 *
                 * index & core are initialized by probe function.
                 */
                priv->dma[i].open = 0;
                priv->dma[i].rx_bds = (struct grspw_rxbd *)
                        (priv->bd_mem + i*BDTAB_SIZE*2);
                priv->dma[i].rx_bds_hwa = (struct grspw_rxbd *)
                        (hwa + BDTAB_SIZE*(2*i));
                priv->dma[i].tx_bds = (struct grspw_txbd *)
                        (priv->bd_mem + BDTAB_SIZE*(2*i+1));
                priv->dma[i].tx_bds_hwa = (struct grspw_txbd *)
                        (hwa + BDTAB_SIZE*(2*i+1));
                GRSPW_DBG("  DMA[%i]: RX %p - %p (%p - %p)   TX %p - %p (%p - %p)\n",
                        i,
                        priv->dma[i].rx_bds, (void *)priv->dma[i].rx_bds + BDTAB_SIZE - 1,
                        priv->dma[i].rx_bds_hwa, (void *)priv->dma[i].rx_bds_hwa + BDTAB_SIZE - 1,
                        priv->dma[i].tx_bds, (void *)priv->dma[i].tx_bds + BDTAB_SIZE - 1,
                        priv->dma[i].tx_bds_hwa, (void *)priv->dma[i].tx_bds_hwa + BDTAB_SIZE - 1);
        }

        /* Basic initialization of hardware, clear some registers but
         * keep Link/RMAP/Node-Address registers intact.
         */
        grspw_hw_stop(priv);

        /* Register Interrupt handler and enable IRQ at IRQ ctrl */
        drvmgr_interrupt_register(priv->dev, 0, priv->devname, grspw_isr, priv);

        /* Take the device */
        priv->open = 1;
out:
        rtems_semaphore_release(grspw_sem);
        return priv;
}

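/* Usage sketch (illustrative only): claim device 0, query its hardware
 * features and release it again. grspw_close() returns 1 while a DMA channel
 * is still open, and -1 if the driver lock cannot be taken.
 *
 *   void *dev = grspw_open(0);
 *   if (dev != NULL) {
 *           struct grspw_hw_sup hw;
 *
 *           grspw_hw_support(dev, &hw);
 *           grspw_close(dev);
 *   }
 */
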
int grspw_close(void *d)
{
        struct grspw_priv *priv = d;
        int i;

        /* Take GRSPW lock - Wait until we get semaphore */
        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return -1;

        /* Check that user has stopped and closed all DMA channels
         * appropriately. At this point the Hardware shall not be doing DMA
         * or generating Interrupts. We want HW in a "startup-state".
         */
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                if (priv->dma[i].open) {
                        rtems_semaphore_release(grspw_sem);
                        return 1;
                }
        }
        grspw_hw_stop(priv);

        /* Free descriptor table memory if allocated using malloc() */
        if (priv->bd_mem_alloced) {
                free((void *)priv->bd_mem_alloced);
                priv->bd_mem_alloced = 0;
        }

        /* Mark not open */
        priv->open = 0;
        rtems_semaphore_release(grspw_sem);
        return 0;
}

void grspw_hw_support(void *d, struct grspw_hw_sup *hw)
{
        struct grspw_priv *priv = d;

        *hw = priv->hwsup;
}

void grspw_addr_ctrl(void *d, struct grspw_addr_config *cfg)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl, nodeaddr;
        IRQFLAGS_TYPE irqflags;
        int i;

        if (!priv || !cfg)
                return;

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);

        if (cfg->promiscuous != -1) {
                /* Set Configuration */
                ctrl = REG_READ(&regs->ctrl);
                if (cfg->promiscuous)
                        ctrl |= GRSPW_CTRL_PM;
                else
                        ctrl &= ~GRSPW_CTRL_PM;
                REG_WRITE(&regs->ctrl, ctrl);
                REG_WRITE(&regs->nodeaddr, (cfg->def_mask<<8) | cfg->def_addr);

                for (i=0; i<priv->hwsup.ndma_chans; i++) {
                        ctrl = REG_READ(&regs->dma[i].ctrl);
                        ctrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
                        if (cfg->dma_nacfg[i].node_en) {
                                ctrl |= GRSPW_DMACTRL_EN;
                                REG_WRITE(&regs->dma[i].addr,
                                          (cfg->dma_nacfg[i].node_addr & 0xff) |
                                          ((cfg->dma_nacfg[i].node_mask & 0xff)<<8));
                        } else {
                                ctrl &= ~GRSPW_DMACTRL_EN;
                        }
                        REG_WRITE(&regs->dma[i].ctrl, ctrl);
                }
        }

        /* Read Current Configuration */
        cfg->promiscuous = REG_READ(&regs->ctrl) & GRSPW_CTRL_PM;
        nodeaddr = REG_READ(&regs->nodeaddr);
        cfg->def_addr = (nodeaddr & GRSPW_DEF_ADDR) >> GRSPW_DEF_ADDR_BIT;
        cfg->def_mask = (nodeaddr & GRSPW_DEF_MASK) >> GRSPW_DEF_MASK_BIT;
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                cfg->dma_nacfg[i].node_en = REG_READ(&regs->dma[i].ctrl) &
                                                GRSPW_DMACTRL_EN;
                ctrl = REG_READ(&regs->dma[i].addr);
                cfg->dma_nacfg[i].node_addr = (ctrl & GRSPW_DMAADR_ADDR) >>
                                                GRSPW_DMAADR_ADDR_BIT;
                cfg->dma_nacfg[i].node_mask = (ctrl & GRSPW_DMAADR_MASK) >>
                                                GRSPW_DMAADR_MASK_BIT;
        }
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        for (; i<4; i++) {
                cfg->dma_nacfg[i].node_en = 0;
                cfg->dma_nacfg[i].node_addr = 0;
                cfg->dma_nacfg[i].node_mask = 0;
        }
}

/* Return Current DMA CTRL/Status Register */
unsigned int grspw_dma_ctrlsts(void *c)
{
        struct grspw_dma_priv *dma = c;

        return REG_READ(&dma->regs->ctrl);
}

/* Return Current Status Register */
unsigned int grspw_link_status(void *d)
{
        struct grspw_priv *priv = d;

        return REG_READ(&priv->regs->status);
}

/* Clear Status Register bits */
void grspw_link_status_clr(void *d, unsigned int mask)
{
        struct grspw_priv *priv = d;

        REG_WRITE(&priv->regs->status, mask);
}

/* Return Current Link State */
spw_link_state_t grspw_link_state(void *d)
{
        struct grspw_priv *priv = d;
        unsigned int status = REG_READ(&priv->regs->status);

        return (status & GRSPW_STS_LS) >> GRSPW_STS_LS_BIT;
}

/* Enable Global IRQ only if some irq source is set */
static inline int grspw_is_irqsource_set(unsigned int ctrl, unsigned int icctrl)
{
        return (ctrl & GRSPW_CTRL_IRQSRC_MASK) ||
                (icctrl & GRSPW_ICCTRL_IRQSRC_MASK);
}


/* options, stscfg and clkdiv [in/out]: set to -1 to only read current config */
void grspw_link_ctrl(void *d, int *options, int *stscfg, int *clkdiv)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        /* Write? */
        if (clkdiv) {
                if (*clkdiv != -1)
                        REG_WRITE(&regs->clkdiv, *clkdiv & GRSPW_CLKDIV_MASK);
                *clkdiv = REG_READ(&regs->clkdiv) & GRSPW_CLKDIV_MASK;
        }
        if (options) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                if (*options != -1) {
                        ctrl = (ctrl & ~GRSPW_LINK_CFG) |
                                (*options & GRSPW_LINK_CFG);

                        /* Enable Global IRQ only if some irq source is set */
                        if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
                                ctrl |= GRSPW_CTRL_IE;
                        else
                                ctrl &= ~GRSPW_CTRL_IE;

                        REG_WRITE(&regs->ctrl, ctrl);
                        /* Store the link disable events for use in
                         * ISR. The LINKOPTS_DIS_ON_* options are actually the
                         * corresponding bits in the status register, shifted
                         * by 16.
                         */
                        priv->dis_link_on_err = *options &
                                (LINKOPTS_MASK_DIS_ON | LINKOPTS_DIS_ONERR);
                }
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
                *options = (ctrl & GRSPW_LINK_CFG) | priv->dis_link_on_err;
        }
        if (stscfg) {
                if (*stscfg != -1) {
                        priv->stscfg = *stscfg & LINKSTS_MASK;
                }
                *stscfg = priv->stscfg;
        }
}

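/* Usage sketch (illustrative only): read back the current link options,
 * status-clear mask and clock divisor without modifying them, by passing -1
 * in every field.
 *
 *   int opts = -1, stscfg = -1, clkdiv = -1;
 *
 *   grspw_link_ctrl(dev, &opts, &stscfg, &clkdiv);
 */
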
/* Generate Tick-In (increment Time Counter, Send Time Code) */
void grspw_tc_tx(void *d)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        IRQFLAGS_TYPE irqflags;

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_TI);
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
}

void grspw_tc_ctrl(void *d, int *options)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        if (options == NULL)
                return;

        /* Write? */
        if (*options != -1) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~(GRSPW_CTRL_TR|GRSPW_CTRL_TT|GRSPW_CTRL_TQ);
                ctrl |= (*options & 0xd) << GRSPW_CTRL_TQ_BIT;

                /* Enable Global IRQ only if some irq source is set */
                if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
                        ctrl |= GRSPW_CTRL_IE;
                else
                        ctrl &= ~GRSPW_CTRL_IE;

                REG_WRITE(&regs->ctrl, ctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        } else
                ctrl = REG_READ(&regs->ctrl);
        *options = (ctrl >> GRSPW_CTRL_TQ_BIT) & 0xd;
}

/* Assign ISR Function to TimeCode RX IRQ */
void grspw_tc_isr(void *d, void (*tcisr)(void *data, int tc), void *data)
{
        struct grspw_priv *priv = d;

        priv->tcisr_arg = data;
        priv->tcisr = tcisr;
}

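/* Usage sketch (illustrative only): hook a time-code handler. The handler
 * executes in interrupt context, so it should only do minimal work.
 *
 *   static volatile int last_tc;
 *
 *   void my_tc_isr(void *data, int timecode)
 *   {
 *           *(volatile int *)data = timecode;
 *   }
 *
 *   grspw_tc_isr(dev, my_tc_isr, (void *)&last_tc);
 */
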
/* Read/Write TCTRL and TIMECNT. Write if not -1, always read current value
 * TCTRL   = bits 7 and 6
 * TIMECNT = bits 5 to 0
 */
void grspw_tc_time(void *d, int *time)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        if (time == NULL)
                return;
        if (*time != -1)
                REG_WRITE(&regs->time, *time & (GRSPW_TIME_TCNT | GRSPW_TIME_CTRL));
        *time = REG_READ(&regs->time) & (GRSPW_TIME_TCNT | GRSPW_TIME_CTRL);
}

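/* Usage sketch (illustrative only): read the current time register, then
 * clear TIMECNT while preserving the TCTRL bits.
 *
 *   int t = -1;
 *
 *   grspw_tc_time(dev, &t);
 *   t &= GRSPW_TIME_CTRL;
 *   grspw_tc_time(dev, &t);
 */
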
/* Generate Tick-In for the given Interrupt-code and check for generation
 * error.
 *
 * Returns zero on success and non-zero on failure
 */
int grspw_ic_tickin(void *d, int ic)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        IRQFLAGS_TYPE irqflags;
        unsigned int icctrl, mask;

        /* Prepare before turning off IRQ */
        mask = 0x3f << GRSPW_ICCTRL_TXIRQ_BIT;
        ic = ((ic << GRSPW_ICCTRL_TXIRQ_BIT) & mask) |
             GRSPW_ICCTRL_II | GRSPW_ICCTRL_ID;

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        icctrl = REG_READ(&regs->icctrl);
        icctrl &= ~mask;
        icctrl |= ic;
        REG_WRITE(&regs->icctrl, icctrl); /* Generate SpW Interrupt Tick-In */
        /* the ID bit is valid after two clocks, so there is no need to wait here */
        icctrl = REG_READ(&regs->icctrl); /* Check SpW-Int generation error */
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);

        return icctrl & GRSPW_ICCTRL_ID;
}

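/* Usage sketch (illustrative only): transmit interrupt-code 5 and check for
 * a generation error, indicated by a non-zero return value.
 *
 *   if (grspw_ic_tickin(dev, 5) != 0)
 *           printk("GRSPW: SpW interrupt tick-in generation error\n");
 */
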
#define ICOPTS_CTRL_MASK ICOPTS_EN_FLAGFILTER
#define ICOPTS_ICCTRL_MASK                                              \
        (ICOPTS_INTNUM | ICOPTS_EN_SPWIRQ_ON_EE  | ICOPTS_EN_SPWIRQ_ON_IA | \
         ICOPTS_EN_PRIO | ICOPTS_EN_TIMEOUTIRQ | ICOPTS_EN_ACKIRQ | \
         ICOPTS_EN_TICKOUTIRQ | ICOPTS_EN_RX | ICOPTS_EN_TX | \
         ICOPTS_BASEIRQ)

/* Control Interrupt-code settings of core
 * Write if not pointing to -1, always read current value
 *
 * TODO: A lot of code duplication with grspw_tc_ctrl
 */
void grspw_ic_ctrl(void *d, unsigned int *options)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        unsigned int icctrl;
        IRQFLAGS_TYPE irqflags;

        if (options == NULL)
                return;

        if (*options != -1) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);

                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~GRSPW_CTRL_TF; /* Depends on one to one relation between
                                         * irqopts bits and ctrl bits */
                ctrl |= (*options & ICOPTS_CTRL_MASK) <<
                        (GRSPW_CTRL_TF_BIT - 0);

                icctrl = REG_READ(&regs->icctrl);
                icctrl &= ~ICOPTS_ICCTRL_MASK; /* Depends on one to one relation between
                                                * irqopts bits and icctrl bits */
                icctrl |= *options & ICOPTS_ICCTRL_MASK;

                /* Enable Global IRQ only if some irq source is set */
                if (grspw_is_irqsource_set(ctrl, icctrl))
                        ctrl |= GRSPW_CTRL_IE;
                else
                        ctrl &= ~GRSPW_CTRL_IE;

                REG_WRITE(&regs->ctrl, ctrl);
                REG_WRITE(&regs->icctrl, icctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        }
        *options = ((REG_READ(&regs->ctrl) & ICOPTS_CTRL_MASK) |
                    (REG_READ(&regs->icctrl) & ICOPTS_ICCTRL_MASK));
}

void grspw_ic_config(void *d, int rw, struct spwpkt_ic_config *cfg)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        if (!cfg)
                return;

        if (rw & 1) {
                REG_WRITE(&regs->ictickomask, cfg->tomask);
                REG_WRITE(&regs->icaamask, cfg->aamask);
                REG_WRITE(&regs->icrlpresc, cfg->scaler);
                REG_WRITE(&regs->icrlisr, cfg->isr_reload);
                REG_WRITE(&regs->icrlintack, cfg->ack_reload);
        }
        if (rw & 2) {
                cfg->tomask = REG_READ(&regs->ictickomask);
                cfg->aamask = REG_READ(&regs->icaamask);
                cfg->scaler = REG_READ(&regs->icrlpresc);
                cfg->isr_reload = REG_READ(&regs->icrlisr);
                cfg->ack_reload = REG_READ(&regs->icrlintack);
        }
}

/* Read or Write Interrupt-code status registers */
void grspw_ic_sts(void *d, unsigned int *rxirq, unsigned int *rxack, unsigned int *intto)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        /* No locking needed since the status bits are clear-on-write */

        if (rxirq) {
                if (*rxirq != 0)
                        REG_WRITE(&regs->icrx, *rxirq);
                else
                        *rxirq = REG_READ(&regs->icrx);
        }

        if (rxack) {
                if (*rxack != 0)
                        REG_WRITE(&regs->icack, *rxack);
                else
                        *rxack = REG_READ(&regs->icack);
        }

        if (intto) {
                if (*intto != 0)
                        REG_WRITE(&regs->ictimeout, *intto);
                else
                        *intto = REG_READ(&regs->ictimeout);
        }
}

/* Assign handler function to Interrupt-code tick out IRQ */
void grspw_ic_isr(void *d, spwpkt_ic_isr_t handler, void *data)
{
        struct grspw_priv *priv = d;

        priv->icisr_arg = data;
        priv->icisr = handler;
}

/* Set (not -1) and/or read RMAP options. */
int grspw_rmap_ctrl(void *d, int *options, int *dstkey)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        if (dstkey) {
                if (*dstkey != -1)
                        REG_WRITE(&regs->destkey, *dstkey & GRSPW_DK_DESTKEY);
                *dstkey = REG_READ(&regs->destkey) & GRSPW_DK_DESTKEY;
        }
        if (options) {
                if (*options != -1) {
                        if ((*options & RMAPOPTS_EN_RMAP) && !priv->hwsup.rmap)
                                return -1;

                        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                        ctrl = REG_READ(&regs->ctrl);
                        ctrl &= ~(GRSPW_CTRL_RE|GRSPW_CTRL_RD);
                        ctrl |= (*options & 0x3) << GRSPW_CTRL_RE_BIT;
                        REG_WRITE(&regs->ctrl, ctrl);
                        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
                }
                *options = (REG_READ(&regs->ctrl) >> GRSPW_CTRL_RE_BIT) & 0x3;
        }

        return 0;
}

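/* Usage sketch (illustrative only): enable the RMAP target and set
 * destination key 0x20; grspw_rmap_ctrl() returns -1 when RMAP is requested
 * but not supported by the hardware.
 *
 *   int opts = RMAPOPTS_EN_RMAP;
 *   int key = 0x20;
 *
 *   if (grspw_rmap_ctrl(dev, &opts, &key) != 0)
 *           printk("GRSPW: RMAP not supported by hardware\n");
 */
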
void grspw_rmap_support(void *d, char *rmap, char *rmap_crc)
{
        struct grspw_priv *priv = d;

        if (rmap)
                *rmap = priv->hwsup.rmap;
        if (rmap_crc)
                *rmap_crc = priv->hwsup.rmap_crc;
}

/* Select port, if
 * -1=The current selected port is returned
 * 0=Port 0
 * 1=Port 1
 * Others=Both Port0 and Port1
 */
int grspw_port_ctrl(void *d, int *port)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        if (port == NULL)
                return -1;

        if ((*port == 1) || (*port == 0)) {
                /* Select port user selected */
                if ((*port == 1) && (priv->hwsup.nports < 2))
                        return -1; /* Changing to Port 1, but only one port available */
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~(GRSPW_CTRL_NP | GRSPW_CTRL_PS);
                ctrl |= (*port & 1) << GRSPW_CTRL_PS_BIT;
                REG_WRITE(&regs->ctrl, ctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        } else if (*port > 1) {
                /* Select both ports */
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_NP);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        }

        /* Get current settings */
        ctrl = REG_READ(&regs->ctrl);
        if (ctrl & GRSPW_CTRL_NP) {
                /* Any port, selected by hardware */
                if (priv->hwsup.nports > 1)
                        *port = 3;
                else
                        *port = 0; /* Port0 the only port available */
        } else {
                *port = (ctrl & GRSPW_CTRL_PS) >> GRSPW_CTRL_PS_BIT;
        }

        return 0;
}

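/* Usage sketch (illustrative only): force port 0 when two ports exist,
 * otherwise just read back the current port selection with -1.
 *
 *   int port = (grspw_port_count(dev) > 1) ? 0 : -1;
 *
 *   grspw_port_ctrl(dev, &port);
 */
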
/* Returns Number ports available in hardware */
int grspw_port_count(void *d)
{
        struct grspw_priv *priv = d;

        return priv->hwsup.nports;
}

/* Current active port: 0 or 1 */
int grspw_port_active(void *d)
{
        struct grspw_priv *priv = d;
        unsigned int status;

        status = REG_READ(&priv->regs->status);

        return (status & GRSPW_STS_AP) >> GRSPW_STS_AP_BIT;
}

void grspw_stats_read(void *d, struct grspw_core_stats *sts)
{
        struct grspw_priv *priv = d;

        if (sts == NULL)
                return;
        memcpy(sts, &priv->stats, sizeof(priv->stats));
}

void grspw_stats_clr(void *d)
{
        struct grspw_priv *priv = d;

        /* Clear most of the statistics */
        memset(&priv->stats, 0, sizeof(priv->stats));
}

/*** DMA Interface ***/

/* Initialize the RX and TX Descriptor Ring, empty of packets */
STATIC void grspw_bdrings_init(struct grspw_dma_priv *dma)
{
        struct grspw_ring *r;
        int i;

        /* Empty BD rings */
        dma->rx_ring_head = dma->rx_ring_base;
        dma->rx_ring_tail = dma->rx_ring_base;
        dma->tx_ring_head = dma->tx_ring_base;
        dma->tx_ring_tail = dma->tx_ring_base;

        /* Init RX Descriptors */
        r = (struct grspw_ring *)dma->rx_ring_base;
        for (i=0; i<GRSPW_RXBD_NR; i++) {

                /* Init Ring Entry */
                r[i].next = &r[i+1];
                r[i].bd.rx = &dma->rx_bds[i];
                r[i].pkt = NULL;

                /* Init HW Descriptor */
                BD_WRITE(&r[i].bd.rx->ctrl, 0);
                BD_WRITE(&r[i].bd.rx->addr, 0);
        }
        r[GRSPW_RXBD_NR-1].next = &r[0];

        /* Init TX Descriptors */
        r = (struct grspw_ring *)dma->tx_ring_base;
        for (i=0; i<GRSPW_TXBD_NR; i++) {

                /* Init Ring Entry */
                r[i].next = &r[i+1];
                r[i].bd.tx = &dma->tx_bds[i];
                r[i].pkt = NULL;

                /* Init HW Descriptor */
                BD_WRITE(&r[i].bd.tx->ctrl, 0);
                BD_WRITE(&r[i].bd.tx->haddr, 0);
                BD_WRITE(&r[i].bd.tx->dlen, 0);
                BD_WRITE(&r[i].bd.tx->daddr, 0);
        }
        r[GRSPW_TXBD_NR-1].next = &r[0];
}

/* Try to populate the descriptor ring with as many READY unused packet
 * buffers as possible. The packets assigned to a descriptor are put at the
 * end of the scheduled list.
 *
 * The number of Packets scheduled is returned.
 *
 *  - READY List -> RX-SCHED List
 *  - Descriptors are initialized and enabled for reception
 */
STATIC int grspw_rx_schedule_ready(struct grspw_dma_priv *dma)
{
        int cnt;
        unsigned int ctrl, dmactrl;
        void *hwaddr;
        struct grspw_rxring *curr_bd;
        struct grspw_pkt *curr_pkt, *last_pkt;
        struct grspw_list lst;
        IRQFLAGS_TYPE irqflags;

        /* Is Ready Q empty? */
        if (grspw_list_is_empty(&dma->ready))
                return 0;

        cnt = 0;
        lst.head = curr_pkt = dma->ready.head;
        curr_bd = dma->rx_ring_head;
        while (!curr_bd->pkt) {

                /* Assign Packet to descriptor */
                curr_bd->pkt = curr_pkt;

                /* Prepare descriptor address. */
                hwaddr = curr_pkt->data;
                if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
                        drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
                                         hwaddr, &hwaddr);
                        if (curr_pkt->data == hwaddr) /* translation needed? */
                                curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
                }
                BD_WRITE(&curr_bd->bd->addr, hwaddr);

                ctrl = GRSPW_RXBD_EN;
                if (curr_bd->next == dma->rx_ring_base) {
                        /* Wrap around (only needed when smaller descriptor
                         * table)
                         */
                        ctrl |= GRSPW_RXBD_WR;
                }

                /* Is this Packet going to be an interrupt Packet? */
                if ((--dma->rx_irq_en_cnt_curr) <= 0) {
                        if (dma->cfg.rx_irq_en_cnt == 0) {
                                /* IRQ is disabled. A big number to avoid
                                 * equal to zero too often
                                 */
                                dma->rx_irq_en_cnt_curr = 0x3fffffff;
                        } else {
                                dma->rx_irq_en_cnt_curr = dma->cfg.rx_irq_en_cnt;
                                ctrl |= GRSPW_RXBD_IE;
                        }
                }

                if (curr_pkt->flags & RXPKT_FLAG_IE)
                        ctrl |= GRSPW_RXBD_IE;

                /* Enable descriptor */
                BD_WRITE(&curr_bd->bd->ctrl, ctrl);

                last_pkt = curr_pkt;
                curr_bd = curr_bd->next;
                cnt++;

                /* Get Next Packet from Ready Queue */
                if (curr_pkt == dma->ready.tail) {
                        /* Handled all in ready queue. */
                        curr_pkt = NULL;
                        break;
                }
                curr_pkt = curr_pkt->next;
        }

        /* Have Packets been scheduled? */
        if (cnt > 0) {
                /* Prepare list for insertion/deletion */
                lst.tail = last_pkt;

                /* Remove scheduled packets from ready queue */
                grspw_list_remove_head_list(&dma->ready, &lst);
                dma->ready_cnt -= cnt;
                if (dma->stats.ready_cnt_min > dma->ready_cnt)
                        dma->stats.ready_cnt_min = dma->ready_cnt;

                /* Insert scheduled packets into scheduled queue */
                grspw_list_append_list(&dma->rx_sched, &lst);
                dma->rx_sched_cnt += cnt;
                if (dma->stats.rx_sched_cnt_max < dma->rx_sched_cnt)
                        dma->stats.rx_sched_cnt_max = dma->rx_sched_cnt;

                /* Update RX ring position */
                dma->rx_ring_head = curr_bd;

                /* Make hardware aware of the newly enabled descriptors.
                 * We must protect from the ISR which writes RI|TI.
                 */
                SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
                dmactrl = REG_READ(&dma->regs->ctrl);
                dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
                dmactrl |= GRSPW_DMACTRL_RE | GRSPW_DMACTRL_RD;
                REG_WRITE(&dma->regs->ctrl, dmactrl);
                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
        }

        return cnt;
}

1324/* Scans the RX desciptor table for scheduled Packet that has been received,
1325 * and moves these Packet from the head of the scheduled queue to the
1326 * tail of the recv queue.
1327 *
1328 * Also, for all packets the status is updated.
1329 *
1330 *  - SCHED List -> SENT List
1331 *
1332 * Return Value
1333 * Number of packets moved
1334 */
1335STATIC int grspw_rx_process_scheduled(struct grspw_dma_priv *dma)
1336{
1337        struct grspw_rxring *curr;
1338        struct grspw_pkt *last_pkt;
1339        int recv_pkt_cnt = 0;
1340        unsigned int ctrl;
1341        struct grspw_list lst;
1342
1343        curr = dma->rx_ring_tail;
1344
1345        /* Step into RX ring to find if packets have been scheduled for
1346         * reception.
1347         */
1348        if (!curr->pkt)
1349                return 0; /* No scheduled packets, thus no received, abort */
1350
1351        /* There has been Packets scheduled ==> scheduled Packets may have been
1352         * received and needs to be collected into RECV List.
1353         *
1354         * A temporary list "lst" with all received packets is created.
1355         */
1356        lst.head = curr->pkt;
1357
1358        /* Loop until first enabled "unrecveived" SpW Packet is found.
1359         * An unused descriptor is indicated by an unassigned pkt field.
1360         */
1361        while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_RXBD_EN)) {
1362                /* Handle one received Packet */
1363
1364                /* Remember last handled Packet so that insertion/removal from
1365                 * Packet lists go fast.
1366                 */
1367                last_pkt = curr->pkt;
1368
1369                /* Get Length of Packet in bytes, and reception options */
1370                last_pkt->dlen = (ctrl & GRSPW_RXBD_LEN) >> GRSPW_RXBD_LEN_BIT;
1371
1372                /* Set flags to indicate error(s) and CRC information,
1373                 * and Mark Received.
1374                 */
1375                last_pkt->flags = (last_pkt->flags & ~RXPKT_FLAG_OUTPUT_MASK) |
1376                                  ((ctrl >> 20) & RXPKT_FLAG_OUTPUT_MASK) |
1377                                  RXPKT_FLAG_RX;
1378
1379                /* Packet was Truncated? */
1380                if (ctrl & GRSPW_RXBD_TR)
1381                        dma->stats.rx_err_trunk++;
1382
1383                /* Error End-Of-Packet? */
1384                if (ctrl & GRSPW_RXBD_EP)
1385                        dma->stats.rx_err_endpkt++;
1386                curr->pkt = NULL; /* Mark descriptor unused */
1387
1388                /* Increment */
1389                curr = curr->next;
1390                recv_pkt_cnt++;
1391        }
1392
1393        /* 1. Remove all handled packets from scheduled queue
1394         * 2. Put all handled packets into recv queue
1395         */
1396        if (recv_pkt_cnt > 0) {
1397
1398                /* Update Stats, Number of Received Packets */
1399                dma->stats.rx_pkts += recv_pkt_cnt;
1400
1401                /* Save RX ring posistion */
1402                dma->rx_ring_tail = curr;
1403
1404                /* Prepare list for insertion/deleation */
1405                lst.tail = last_pkt;
1406
1407                /* Remove received Packets from RX-SCHED queue */
1408                grspw_list_remove_head_list(&dma->rx_sched, &lst);
1409                dma->rx_sched_cnt -= recv_pkt_cnt;
1410                if (dma->stats.rx_sched_cnt_min > dma->rx_sched_cnt)
1411                        dma->stats.rx_sched_cnt_min = dma->rx_sched_cnt;
1412
1413                /* Insert received Packets into RECV queue */
1414                grspw_list_append_list(&dma->recv, &lst);
1415                dma->recv_cnt += recv_pkt_cnt;
1416                if (dma->stats.recv_cnt_max < dma->recv_cnt)
1417                        dma->stats.recv_cnt_max = dma->recv_cnt;
1418        }
1419
1420        return recv_pkt_cnt;
1421}
1422
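/* Illustrative sketch (not part of the driver): a caller that has fetched
 * packets processed by the function above, e.g. via grspw_dma_rx_recv(),
 * would typically check the fields set here. Only names used in this file
 * are assumed; handle_data() is a hypothetical consumer and the list is
 * assumed NULL-terminated:
 *
 *   struct grspw_pkt *pkt = lst.head;
 *   while (pkt != NULL) {
 *           if ((pkt->flags & RXPKT_FLAG_RX) &&
 *               !(pkt->flags & RXPKT_FLAG_OUTPUT_MASK))
 *                   handle_data(pkt->data, pkt->dlen); // no error/CRC bits
 *           pkt = pkt->next;
 *   }
 */
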
1423/* Try to populate the descriptor ring with as many SEND packets as possible.
1424 * The packets assigned to a descriptor are put at the end of
1425 * the scheduled list.
1426 *
1427 * The number of Packets scheduled is returned.
1428 *
1429 *  - SEND List -> TX-SCHED List
1430 *  - Descriptors are initialized and enabled for transmission
1431 */
1432STATIC int grspw_tx_schedule_send(struct grspw_dma_priv *dma)
1433{
1434        int cnt;
1435        unsigned int ctrl, dmactrl;
1436        void *hwaddr;
1437        struct grspw_txring *curr_bd;
1438        struct grspw_pkt *curr_pkt, *last_pkt;
1439        struct grspw_list lst;
1440        IRQFLAGS_TYPE irqflags;
1441
1442        /* Is the SEND queue empty? */
1443        if (grspw_list_is_empty(&dma->send))
1444                return 0;
1445
1446        cnt = 0;
1447        lst.head = curr_pkt = dma->send.head;
1448        curr_bd = dma->tx_ring_head;
1449        while (!curr_bd->pkt) {
1450
1451                /* Assign Packet to descriptor */
1452                curr_bd->pkt = curr_pkt;
1453
1454                /* Set up header transmission */
1455                if (curr_pkt->hdr && curr_pkt->hlen) {
1456                        hwaddr = curr_pkt->hdr;
1457                        if (curr_pkt->flags & PKT_FLAG_TR_HDR) {
1458                                drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
1459                                                 hwaddr, &hwaddr);
1460                                /* if translation was a no-op, skip it next time */
1461                                if (curr_pkt->hdr == hwaddr)
1462                                        curr_pkt->flags &= ~PKT_FLAG_TR_HDR;
1463                        }
1464                        BD_WRITE(&curr_bd->bd->haddr, hwaddr);
1465                        ctrl = GRSPW_TXBD_EN | curr_pkt->hlen;
1466                } else {
1467                        ctrl = GRSPW_TXBD_EN;
1468                }
1469                /* Enable IRQ generation and CRC options as specified
1470                 * by user.
1471                 */
1472                ctrl |= (curr_pkt->flags & TXPKT_FLAG_INPUT_MASK) << 8;
1473
1474                if (curr_bd->next == dma->tx_ring_base) {
1475                        /* Wrap around (only needed when using a smaller descriptor table) */
1476                        ctrl |= GRSPW_TXBD_WR;
1477                }
1478
1479                /* Is this Packet going to be an interrupt Packet? */
1480                if ((--dma->tx_irq_en_cnt_curr) <= 0) {
1481                        if (dma->cfg.tx_irq_en_cnt == 0) {
1482                                /* IRQ generation is disabled.
1483                                 * Use a big number to avoid hitting zero too often.
1484                                 */
1485                                dma->tx_irq_en_cnt_curr = 0x3fffffff;
1486                        } else {
1487                                dma->tx_irq_en_cnt_curr = dma->cfg.tx_irq_en_cnt;
1488                                ctrl |= GRSPW_TXBD_IE;
1489                        }
1490                }
1491
1492                /* Prepare descriptor data address. Parts of CTRL are written
1493                 * to DLEN for debug purposes only (CTRL is cleared by HW).
1494                 */
1495                if (curr_pkt->data && curr_pkt->dlen) {
1496                        hwaddr = curr_pkt->data;
1497                        if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
1498                                drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
1499                                                 hwaddr, &hwaddr);
1500                                /* if translation was a no-op, skip it next time */
1501                                if (curr_pkt->data == hwaddr)
1502                                        curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
1503                        }
1504                        BD_WRITE(&curr_bd->bd->daddr, hwaddr);
1505                        BD_WRITE(&curr_bd->bd->dlen, curr_pkt->dlen |
1506                                                     ((ctrl & 0x3f000) << 12));
1507                } else {
1508                        BD_WRITE(&curr_bd->bd->daddr, 0);
1509                        BD_WRITE(&curr_bd->bd->dlen, ((ctrl & 0x3f000) << 12));
1510                }
1511
1512                /* Enable descriptor */
1513                BD_WRITE(&curr_bd->bd->ctrl, ctrl);
1514
1515                last_pkt = curr_pkt;
1516                curr_bd = curr_bd->next;
1517                cnt++;
1518
1519                /* Get Next Packet from Send Queue */
1520                if (curr_pkt == dma->send.tail) {
1521                        /* Handled all in send queue. */
1522                        curr_pkt = NULL;
1523                        break;
1524                }
1525                curr_pkt = curr_pkt->next;
1526        }
1527
1528        /* Have Packets been scheduled? */
1529        if (cnt > 0) {
1530                /* Prepare list for insertion/deletion */
1531                lst.tail = last_pkt;
1532
1533                /* Remove scheduled packets from send queue */
1534                grspw_list_remove_head_list(&dma->send, &lst);
1535                dma->send_cnt -= cnt;
1536                if (dma->stats.send_cnt_min > dma->send_cnt)
1537                        dma->stats.send_cnt_min = dma->send_cnt;
1538
1539                /* Insert scheduled packets into scheduled queue */
1540                grspw_list_append_list(&dma->tx_sched, &lst);
1541                dma->tx_sched_cnt += cnt;
1542                if (dma->stats.tx_sched_cnt_max < dma->tx_sched_cnt)
1543                        dma->stats.tx_sched_cnt_max = dma->tx_sched_cnt;
1544
1545                /* Update TX ring position */
1546                dma->tx_ring_head = curr_bd;
1547
1548                /* Make hardware aware of the newly enabled descriptors */
1549                SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
1550                dmactrl = REG_READ(&dma->regs->ctrl);
1551                dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
1552                dmactrl |= GRSPW_DMACTRL_TE;
1553                REG_WRITE(&dma->regs->ctrl, dmactrl);
1554                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
1555        }
1556        return cnt;
1557}
1558
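/* Illustrative sketch (not part of the driver): a minimal SEND-list entry as
 * consumed by the scheduler above. Header/data pointers may be given in CPU
 * address space together with PKT_FLAG_TR_HDR/PKT_FLAG_TR_DATA so that they
 * are translated to DMA addresses once. The header contents are protocol
 * specific; the values below are placeholders only:
 *
 *   static unsigned char hdr[4] = { 5, 2, 0, 0 };
 *   static unsigned char buf[64];
 *   struct grspw_pkt p;
 *
 *   p.next = NULL;
 *   p.hdr = hdr;
 *   p.hlen = sizeof(hdr);
 *   p.data = buf;
 *   p.dlen = sizeof(buf);
 *   p.flags = PKT_FLAG_TR_HDR | PKT_FLAG_TR_DATA;
 */
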
1559/* Scans the TX descriptor table for transmitted packets, and moves these
1560 * packets from the head of the scheduled queue to the tail of the sent queue.
1561 *
1562 * Also, for all packets the status is updated.
1563 *
1564 *  - SCHED List -> SENT List
1565 *
1566 * Return Value
1567 * Number of packets moved
1568 */
1569STATIC int grspw_tx_process_scheduled(struct grspw_dma_priv *dma)
1570{
1571        struct grspw_txring *curr;
1572        struct grspw_pkt *last_pkt;
1573        int sent_pkt_cnt = 0;
1574        unsigned int ctrl;
1575        struct grspw_list lst;
1576
1577        curr = dma->tx_ring_tail;
1578
1579        /* Step into TX ring to find if packets have been scheduled for
1580         * transmission.
1581         */
1582        if (!curr->pkt)
1583                return 0; /* No scheduled packets, thus no sent, abort */
1584
1585        /* Packets have been scheduled ==> scheduled packets may have been
1586         * transmitted and need to be collected into the SENT List.
1587         *
1588         * A temporary list "lst" with all sent packets is created.
1589         */
1590        lst.head = curr->pkt;
1591
1592        /* Loop until the first enabled "un-transmitted" SpW Packet is found.
1593         * An unused descriptor is indicated by an unassigned pkt field.
1594         */
1595        while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_TXBD_EN)) {
1596                /* Handle one sent Packet */
1597
1598                /* Remember last handled Packet so that insertion/removal from
1599                 * packet lists go fast.
1600                 */
1601                last_pkt = curr->pkt;
1602
1603                /* Set flags to indicate error(s) and Mark Sent.
1604                 */
1605                last_pkt->flags = (last_pkt->flags & ~TXPKT_FLAG_OUTPUT_MASK) |
1606                                        (ctrl & TXPKT_FLAG_LINKERR) |
1607                                        TXPKT_FLAG_TX;
1608
1609                /* Sent packet experienced link error? */
1610                if (ctrl & GRSPW_TXBD_LE)
1611                        dma->stats.tx_err_link++;
1612
1613                curr->pkt = NULL; /* Mark descriptor unused */
1614
1615                /* Increment */
1616                curr = curr->next;
1617                sent_pkt_cnt++;
1618        }
1619
1620        /* 1. Remove all handled packets from TX-SCHED queue
1621         * 2. Put all handled packets into SENT queue
1622         */
1623        if (sent_pkt_cnt > 0) {
1624                /* Update Stats, Number of Transmitted Packets */
1625                dma->stats.tx_pkts += sent_pkt_cnt;
1626
1627                /* Save TX ring position */
1628                dma->tx_ring_tail = curr;
1629
1630                /* Prepare list for insertion/deletion */
1631                lst.tail = last_pkt;
1632
1633                /* Remove sent packets from TX-SCHED queue */
1634                grspw_list_remove_head_list(&dma->tx_sched, &lst);
1635                dma->tx_sched_cnt -= sent_pkt_cnt;
1636                if (dma->stats.tx_sched_cnt_min > dma->tx_sched_cnt)
1637                        dma->stats.tx_sched_cnt_min = dma->tx_sched_cnt;
1638
1639                /* Insert sent packets into SENT queue */
1640                grspw_list_append_list(&dma->sent, &lst);
1641                dma->sent_cnt += sent_pkt_cnt;
1642                if (dma->stats.sent_cnt_max < dma->sent_cnt)
1643                        dma->stats.sent_cnt_max = dma->sent_cnt;
1644        }
1645
1646        return sent_pkt_cnt;
1647}
1648
1649void *grspw_dma_open(void *d, int chan_no)
1650{
1651        struct grspw_priv *priv = d;
1652        struct grspw_dma_priv *dma;
1653        int size;
1654
[3395ca99]1655        if ((chan_no < 0) || (priv->hwsup.ndma_chans <= chan_no))
[0f49c0e]1656                return NULL;
1657
1658        dma = &priv->dma[chan_no];
1659
1660        /* Take GRSPW lock */
1661        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1662            != RTEMS_SUCCESSFUL)
1663                return NULL;
1664
1665        if (dma->open) {
1666                dma = NULL;
1667                goto out;
1668        }
1669
1670        dma->started = 0;
1671
1672        /* Set Default Configuration:
1673         *
1674         *  - Max RX Packet Length = DEFAULT_RXMAX
1675         *  - Disable IRQ generation (rx/tx_irq_en_cnt = 0)
1676         *  - No spilling (DMAFLAG_NO_SPILL)
1677         */
1678        dma->cfg.rxmaxlen = DEFAULT_RXMAX;
1679        dma->cfg.rx_irq_en_cnt = 0;
1680        dma->cfg.tx_irq_en_cnt = 0;
1681        dma->cfg.flags = DMAFLAG_NO_SPILL;
1682
[57e1f4c3]1683        /* set to RTEMS_ID_NONE so that the error exit works correctly */
[0d31dcc]1684        dma->sem_rxdma = RTEMS_ID_NONE;
1685        dma->sem_txdma = RTEMS_ID_NONE;
[57e1f4c3]1686        dma->rx_wait.sem_wait = RTEMS_ID_NONE;
1687        dma->tx_wait.sem_wait = RTEMS_ID_NONE;
1688        dma->rx_ring_base = NULL;
1689
[0f49c0e]1690        /* DMA Channel RX/TX lock semaphores created with count = 1 */
1691        if (rtems_semaphore_create(
[0d31dcc]1692            rtems_build_name('S', 'D', '0' + priv->index, '0' + chan_no*2), 1,
[0f49c0e]1693            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1694            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
[0d31dcc]1695            RTEMS_NO_PRIORITY_CEILING, 0, &dma->sem_rxdma) != RTEMS_SUCCESSFUL) {
1696                dma->sem_rxdma = RTEMS_ID_NONE;
1697                goto err;
1698        }
1699        if (rtems_semaphore_create(
1700            rtems_build_name('S', 'D', '0' + priv->index, '0' + chan_no*2+1), 1,
1701            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1702            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1703            RTEMS_NO_PRIORITY_CEILING, 0, &dma->sem_txdma) != RTEMS_SUCCESSFUL) {
1704                dma->sem_txdma = RTEMS_ID_NONE;
[57e1f4c3]1705                goto err;
[0f49c0e]1706        }
1707
1708        /* Allocate memory for the two descriptor rings */
1709        size = sizeof(struct grspw_ring) * (GRSPW_RXBD_NR + GRSPW_TXBD_NR);
1710        dma->rx_ring_base = (struct grspw_rxring *)malloc(size);
1711        dma->tx_ring_base = (struct grspw_txring *)&dma->rx_ring_base[GRSPW_RXBD_NR];
[57e1f4c3]1712        if (dma->rx_ring_base == NULL)
1713                goto err;
[0f49c0e]1714
1715        /* Create DMA RX and TX Channel wait semaphores with count = 0 */
1716        if (rtems_semaphore_create(
1717            rtems_build_name('S', 'R', '0' + priv->index, '0' + chan_no), 0,
1718            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1719            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1720            RTEMS_NO_PRIORITY_CEILING, 0, &dma->rx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
[57e1f4c3]1721                dma->rx_wait.sem_wait = RTEMS_ID_NONE;
1722                goto err;
[0f49c0e]1723        }
1724        if (rtems_semaphore_create(
1725            rtems_build_name('S', 'T', '0' + priv->index, '0' + chan_no), 0,
1726            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1727            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1728            RTEMS_NO_PRIORITY_CEILING, 0, &dma->tx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
[57e1f4c3]1729                dma->tx_wait.sem_wait = RTEMS_ID_NONE;
1730                goto err;
[0f49c0e]1731        }
1732
1733        /* Reset software structures */
1734        grspw_dma_reset(dma);
1735
1736        /* Take the device */
1737        dma->open = 1;
1738out:
1739        /* Return GRSPW Lock */
1740        rtems_semaphore_release(grspw_sem);
1741
1742        return dma;
[57e1f4c3]1743
1744        /* initialization error happened */
1745err:
[0d31dcc]1746        if (dma->sem_rxdma != RTEMS_ID_NONE)
1747                rtems_semaphore_delete(dma->sem_rxdma);
1748        if (dma->sem_txdma != RTEMS_ID_NONE)
1749                rtems_semaphore_delete(dma->sem_txdma);
[57e1f4c3]1750        if (dma->rx_wait.sem_wait != RTEMS_ID_NONE)
1751                rtems_semaphore_delete(dma->rx_wait.sem_wait);
1752        if (dma->tx_wait.sem_wait != RTEMS_ID_NONE)
1753                rtems_semaphore_delete(dma->tx_wait.sem_wait);
1754        if (dma->rx_ring_base)
1755                free(dma->rx_ring_base);
1756        dma = NULL;
1757        goto out;
[0f49c0e]1758}
1759
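/* Illustrative usage sketch (assuming "dev" is a device handle previously
 * returned by grspw_open() and that DMA channel 0 exists):
 *
 *   void *chan = grspw_dma_open(dev, 0);
 *   if (chan == NULL)
 *           return; // bad channel number, already open or lock failure
 *   // optionally call grspw_dma_config() here, before starting
 *   if (grspw_dma_start(chan) != 0)
 *           return;
 */
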
1760/* Initialize Software Structures:
1761 *  - Clear all Queues
1762 *  - init BD ring
1763 *  - init IRQ counter
1764 *  - clear statistics counters
1765 *  - init wait structures and semaphores
1766 */
1767STATIC void grspw_dma_reset(struct grspw_dma_priv *dma)
1768{
1769        /* Empty RX and TX queues */
1770        grspw_list_clr(&dma->ready);
1771        grspw_list_clr(&dma->rx_sched);
1772        grspw_list_clr(&dma->recv);
1773        grspw_list_clr(&dma->send);
1774        grspw_list_clr(&dma->tx_sched);
1775        grspw_list_clr(&dma->sent);
1776        dma->ready_cnt = 0;
1777        dma->rx_sched_cnt = 0;
1778        dma->recv_cnt = 0;
1779        dma->send_cnt = 0;
1780        dma->tx_sched_cnt = 0;
1781        dma->sent_cnt = 0;
1782
1783        dma->rx_irq_en_cnt_curr = 0;
1784        dma->tx_irq_en_cnt_curr = 0;
1785
1786        grspw_bdrings_init(dma);
1787
1788        dma->rx_wait.waiting = 0;
1789        dma->tx_wait.waiting = 0;
1790
1791        grspw_dma_stats_clr(dma);
1792}
1793
[eb5a42f6]1794int grspw_dma_close(void *c)
[0f49c0e]1795{
1796        struct grspw_dma_priv *dma = c;
1797
1798        if (!dma->open)
[eb5a42f6]1799                return 0;
[0f49c0e]1800
1801        /* Take device lock - Wait until we get semaphore */
[0d31dcc]1802        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]1803            != RTEMS_SUCCESSFUL)
[eb5a42f6]1804                return -1;
[0d31dcc]1805        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1806            != RTEMS_SUCCESSFUL) {
1807                rtems_semaphore_release(dma->sem_rxdma);
1808                return -1;
1809        }
[0f49c0e]1810
[eb5a42f6]1811        /* Cannot close an active DMA channel. The user must stop the DMA
1812         * and make sure no threads are active/blocked within the driver.
1813         */
1814        if (dma->started || dma->rx_wait.waiting || dma->tx_wait.waiting) {
[0d31dcc]1815                rtems_semaphore_release(dma->sem_txdma);
1816                rtems_semaphore_release(dma->sem_rxdma);
[eb5a42f6]1817                return 1;
1818        }
[0f49c0e]1819
1820        /* Free resources */
1821        rtems_semaphore_delete(dma->rx_wait.sem_wait);
1822        rtems_semaphore_delete(dma->tx_wait.sem_wait);
[57e1f4c3]1823        /* Release and delete lock. Operations requiring lock will fail */
[0d31dcc]1824        rtems_semaphore_delete(dma->sem_txdma);
1825        rtems_semaphore_delete(dma->sem_rxdma);
1826        dma->sem_txdma = RTEMS_ID_NONE;
1827        dma->sem_rxdma = RTEMS_ID_NONE;
[0f49c0e]1828
1829        /* Free memory */
1830        if (dma->rx_ring_base)
1831                free(dma->rx_ring_base);
1832        dma->rx_ring_base = NULL;
1833        dma->tx_ring_base = NULL;
1834
1835        dma->open = 0;
[eb5a42f6]1836        return 0;
[0f49c0e]1837}
1838
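/* Illustrative sketch: shutdown sequence matching the rules enforced above.
 * The DMA channel must be stopped and no thread may be blocked within the
 * driver before close can succeed:
 *
 *   grspw_dma_stop(chan);
 *   if (grspw_dma_close(chan) != 0) {
 *           // 1: channel still active or waited on, -1: locking error
 *   }
 */
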
[72ec13ef]1839unsigned int grspw_dma_enable_int(void *c, int rxtx, int force)
1840{
1841        struct grspw_dma_priv *dma = c;
1842        int rc = 0;
1843        unsigned int ctrl, ctrl_old;
1844        IRQFLAGS_TYPE irqflags;
1845
1846        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
1847        if (dma->started == 0) {
1848                rc = 1; /* DMA stopped */
1849                goto out;
1850        }
1851        ctrl = REG_READ(&dma->regs->ctrl);
1852        ctrl_old = ctrl;
1853
1854        /* Read/Write DMA error ? */
1855        if (ctrl & GRSPW_DMA_STATUS_ERROR) {
1856                rc = 2; /* DMA error */
1857                goto out;
1858        }
1859
1860        /* DMA has finished a TX/RX packet and user wants work-task to
1861         * take care of DMA table processing.
1862         */
1863        ctrl &= ~GRSPW_DMACTRL_AT;
1864
1865        if ((rxtx & 1) == 0)
1866                ctrl &= ~GRSPW_DMACTRL_PR;
1867        else if (force || ((dma->cfg.rx_irq_en_cnt != 0) ||
1868                 (dma->cfg.flags & DMAFLAG2_RXIE)))
1869                ctrl |= GRSPW_DMACTRL_RI;
1870
1871        if ((rxtx & 2) == 0)
1872                ctrl &= ~GRSPW_DMACTRL_PS;
1873        else if (force || ((dma->cfg.tx_irq_en_cnt != 0) ||
1874                 (dma->cfg.flags & DMAFLAG2_TXIE)))
1875                ctrl |= GRSPW_DMACTRL_TI;
1876
1877        REG_WRITE(&dma->regs->ctrl, ctrl);
1878        /* Return the previous state of the RX/TX status bits (PR/PS) */
1879        rc = ctrl_old & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS);
1880out:
1881        SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
1882        return rc;
1883}
1884
[0f49c0e]1885/* Schedule a list of packets for transmission at some point in
1886 * the future.
1887 *
1888 * 1. Move transmitted packets to SENT List (SCHED->SENT)
1889 * 2. Add the requested packets to the SEND List (USER->SEND)
1890 * 3. Schedule as many packets as possible (SEND->SCHED)
1891 */
1892int grspw_dma_tx_send(void *c, int opts, struct grspw_list *pkts, int count)
1893{
1894        struct grspw_dma_priv *dma = c;
1895        int ret;
1896
1897        /* Take DMA channel lock */
[0d31dcc]1898        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]1899            != RTEMS_SUCCESSFUL)
1900                return -1;
1901
1902        if (dma->started == 0) {
1903                ret = 1; /* signal DMA has been stopped */
1904                goto out;
1905        }
1906        ret = 0;
1907
1908        /* 1. Move transmitted packets to SENT List (SCHED->SENT) */
1909        if ((opts & 1) == 0)
1910                grspw_tx_process_scheduled(dma);
1911
1912        /* 2. Add the requested packets to the SEND List (USER->SEND) */
[ef94150f]1913        if (pkts && (count > 0)) {
[0f49c0e]1914                grspw_list_append_list(&dma->send, pkts);
1915                dma->send_cnt += count;
1916                if (dma->stats.send_cnt_max < dma->send_cnt)
1917                        dma->stats.send_cnt_max = dma->send_cnt;
1918        }
1919
1920        /* 3. Schedule as many packets as possible (SEND->SCHED) */
1921        if ((opts & 2) == 0)
1922                grspw_tx_schedule_send(dma);
1923
1924out:
1925        /* Unlock DMA channel */
[0d31dcc]1926        rtems_semaphore_release(dma->sem_txdma);
[0f49c0e]1927
1928        return ret;
1929}
1930
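/* Illustrative sketch (assuming "p" is a prepared struct grspw_pkt as in the
 * earlier TX example): queue one packet and let the driver both process
 * completed descriptors and schedule new ones in the same call (opts=0):
 *
 *   struct grspw_list lst;
 *
 *   p.next = NULL;
 *   lst.head = lst.tail = &p;
 *   if (grspw_dma_tx_send(chan, 0, &lst, 1) != 0) {
 *           // 1: DMA stopped, -1: locking error
 *   }
 */
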
1931int grspw_dma_tx_reclaim(void *c, int opts, struct grspw_list *pkts, int *count)
1932{
1933        struct grspw_dma_priv *dma = c;
1934        struct grspw_pkt *pkt, *lastpkt;
1935        int cnt, started;
1936
1937        /* Take DMA channel lock */
[0d31dcc]1938        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]1939            != RTEMS_SUCCESSFUL)
1940                return -1;
1941
1942        /* 1. Move transmitted packets to SENT List (SCHED->SENT) */
1943        started = dma->started;
1944        if ((started > 0) && ((opts & 1) == 0))
1945                grspw_tx_process_scheduled(dma);
1946
1947        /* 2. Move all or *count SENT packets to the caller's list (SENT->USER) */
1948        if (pkts) {
1949                if ((count == NULL) || (*count == -1) ||
1950                    (*count >= dma->sent_cnt)) {
1951                        /* Move all SENT Packets */
1952                        *pkts = dma->sent;
1953                        grspw_list_clr(&dma->sent);
1954                        if (count)
1955                                *count = dma->sent_cnt;
1956                        dma->sent_cnt = 0;
1957                } else {
1958                        /* Move a number of SENT Packets */
1959                        pkts->head = pkt = lastpkt = dma->sent.head;
1960                        cnt = 0;
1961                        while (cnt < *count) {
1962                                lastpkt = pkt;
1963                                pkt = pkt->next;
1964                                cnt++;
1965                        }
1966                        if (cnt > 0) {
1967                                pkts->tail = lastpkt;
1968                                grspw_list_remove_head_list(&dma->sent, pkts);
1969                                dma->sent_cnt -= cnt;
1970                        } else {
1971                                grspw_list_clr(pkts);
1972                        }
1973                }
1974        } else if (count) {
1975                *count = 0;
1976        }
1977
1978        /* 3. Schedule as many packets as possible (SEND->SCHED) */
[c442647f]1979        if ((started > 0) && ((opts & 2) == 0))
[0f49c0e]1980                grspw_tx_schedule_send(dma);
1981
1982        /* Unlock DMA channel */
[0d31dcc]1983        rtems_semaphore_release(dma->sem_txdma);
[0f49c0e]1984
1985        return (~started) & 1; /* signal DMA has been stopped */
1986}
1987
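/* Illustrative sketch: reclaim all sent buffers and check the per-packet
 * status flags set by grspw_tx_process_scheduled():
 *
 *   struct grspw_list done;
 *   struct grspw_pkt *pkt;
 *   int cnt = -1; // -1 requests all SENT packets
 *
 *   if (grspw_dma_tx_reclaim(chan, 0, &done, &cnt) >= 0) {
 *           pkt = done.head;
 *           while (cnt-- > 0) {
 *                   if ((pkt->flags & TXPKT_FLAG_TX) == 0)
 *                           printk("packet never transmitted\n");
 *                   else if (pkt->flags & TXPKT_FLAG_LINKERR)
 *                           printk("link error during transmission\n");
 *                   pkt = pkt->next;
 *           }
 *   }
 */
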
[1ef9caa2]1988void grspw_dma_tx_count(void *c, int *send, int *sched, int *sent, int *hw)
[0f49c0e]1989{
1990        struct grspw_dma_priv *dma = c;
[1ef9caa2]1991        int sched_cnt, diff;
1992        unsigned int hwbd;
1993        struct grspw_txbd *tailbd;
1994
1995        /* Take device lock - Wait until we get semaphore.
1996         * The lock is taken so that the counters are in sync with each other
1997         * and that DMA descriptor table and tx_ring_tail is not being updated
1998         * during HW counter processing in this function.
1999         */
[0d31dcc]2000        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[1ef9caa2]2001            != RTEMS_SUCCESSFUL)
2002                return;
[0f49c0e]2003
2004        if (send)
2005                *send = dma->send_cnt;
[1ef9caa2]2006        sched_cnt = dma->tx_sched_cnt;
[0f49c0e]2007        if (sched)
[1ef9caa2]2008                *sched = sched_cnt;
[0f49c0e]2009        if (sent)
2010                *sent = dma->sent_cnt;
[1ef9caa2]2011        if (hw) {
2012                /* Calculate number of descriptors (processed by HW) between
2013                 * HW pointer and oldest SW pointer.
2014                 */
2015                hwbd = REG_READ(&dma->regs->txdesc);
2016                tailbd = dma->tx_ring_tail->bd;
2017                diff = ((hwbd - (unsigned int)tailbd) / GRSPW_TXBD_SIZE) &
2018                        (GRSPW_TXBD_NR - 1);
2019                /* Handle special case when HW and SW pointers are equal
2020                 * because all TX descriptors have been processed by HW.
2021                 */
2022                if ((diff == 0) && (sched_cnt == GRSPW_TXBD_NR) &&
2023                    ((BD_READ(&tailbd->ctrl) & GRSPW_TXBD_EN) == 0)) {
2024                        diff = GRSPW_TXBD_NR;
2025                }
2026                *hw = diff;
2027        }
2028
2029        /* Unlock DMA channel */
[0d31dcc]2030        rtems_semaphore_release(dma->sem_txdma);
[0f49c0e]2031}
2032
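/* Illustrative sketch: take a consistent snapshot of the TX queue levels,
 * e.g. for periodic statistics logging:
 *
 *   int send, sched, sent, hwcnt;
 *
 *   grspw_dma_tx_count(chan, &send, &sched, &sent, &hwcnt);
 *   printk("TX: send=%d sched=%d sent=%d hw=%d\n", send, sched, sent, hwcnt);
 */
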
2033static inline int grspw_tx_wait_eval(struct grspw_dma_priv *dma)
2034{
2035        int send_val, sent_val;
2036
2037        if (dma->tx_wait.send_cnt >= (dma->send_cnt + dma->tx_sched_cnt))
2038                send_val = 1;
2039        else
2040                send_val = 0;
2041
2042        if (dma->tx_wait.sent_cnt <= dma->sent_cnt)
2043                sent_val = 1;
2044        else
2045                sent_val = 0;
2046
2047        /* AND or OR ? */
2048        if (dma->tx_wait.op == 0)
2049                return send_val & sent_val; /* AND */
2050        else
2051                return send_val | sent_val; /* OR */
2052}
2053
2054/* Block until the condition is met: send_cnt or fewer packets are queued in
2055 * the "Send and Scheduled" queues, op (AND or OR), sent_cnt or more packets
2056 * "have been sent" (Sent queue).
2057 * If a link error occurs and Stop on Link error is configured, this function
2058 * also returns to the caller.
2059 */
2060int grspw_dma_tx_wait(void *c, int send_cnt, int op, int sent_cnt, int timeout)
2061{
2062        struct grspw_dma_priv *dma = c;
[9cb7e5d]2063        int ret, rc, initialized = 0;
[0f49c0e]2064
2065        if (timeout == 0)
2066                timeout = RTEMS_NO_TIMEOUT;
2067
2068check_condition:
2069
2070        /* Take DMA channel lock */
[0d31dcc]2071        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]2072            != RTEMS_SUCCESSFUL)
2073                return -1;
2074
2075        /* Check that no other thread is waiting; this driver only supports
2076         * one waiter at a time.
2077         */
[9cb7e5d]2078        if (initialized == 0 && dma->tx_wait.waiting) {
2079                ret = 3;
2080                goto out_release;
[0f49c0e]2081        }
2082
[9cb7e5d]2083        /* DMA has been stopped (link error or similar), abort */
[0f49c0e]2084        if (dma->started == 0) {
2085                ret = 1;
[9cb7e5d]2086                goto out_release;
[0f49c0e]2087        }
2088
2089        /* Set up Condition */
2090        dma->tx_wait.send_cnt = send_cnt;
2091        dma->tx_wait.op = op;
2092        dma->tx_wait.sent_cnt = sent_cnt;
2093
2094        if (grspw_tx_wait_eval(dma) == 0) {
2095                /* Prepare Wait */
[9cb7e5d]2096                initialized = 1;
[0f49c0e]2097                dma->tx_wait.waiting = 1;
2098
2099                /* Release DMA channel lock */
[0d31dcc]2100                rtems_semaphore_release(dma->sem_txdma);
[0f49c0e]2101
2102                /* Try to take the Wait lock; if this fails, the link may have
2103                 * gone down or the user stopped this DMA channel
2104                 */
2105                rc = rtems_semaphore_obtain(dma->tx_wait.sem_wait, RTEMS_WAIT,
2106                                                timeout);
2107                if (rc == RTEMS_TIMEOUT) {
[9cb7e5d]2108                        ret = 2;
2109                        goto out;
[0f49c0e]2110                } else if (rc == RTEMS_UNSATISFIED ||
2111                           rc == RTEMS_OBJECT_WAS_DELETED) {
[9cb7e5d]2112                        ret = 1; /* sem was flushed/deleted, means DMA stop */
2113                        goto out;
2114                } else if (rc != RTEMS_SUCCESSFUL) {
2115                        /* Unknown Error */
2116                        ret = -1;
2117                        goto out;
2118                } else if (dma->started == 0) {
2119                        ret = 1;
2120                        goto out;
2121                }
[0f49c0e]2122
2123                /* Check condition once more */
2124                goto check_condition;
2125        }
2126
2127        ret = 0;
[9cb7e5d]2128
2129out_release:
[0f49c0e]2130        /* Unlock DMA channel */
[0d31dcc]2131        rtems_semaphore_release(dma->sem_txdma);
[0f49c0e]2132
[9cb7e5d]2133out:
2134        if (initialized)
2135                dma->tx_wait.waiting = 0;
[0f49c0e]2136        return ret;
2137}
2138
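/* Illustrative sketch: block until the SEND and TX-SCHED queues have drained
 * completely. With send_cnt=0 and op=0 (AND), sent_cnt=0 makes the sent
 * condition trivially true, so only the "0 or fewer queued" condition
 * remains:
 *
 *   int rc = grspw_dma_tx_wait(chan, 0, 0, 0, 0); // timeout=0: wait forever
 *   // rc: 0=condition met, 1=DMA stopped, 2=timeout,
 *   //     3=another thread already waiting, -1=error
 */
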
2139int grspw_dma_rx_recv(void *c, int opts, struct grspw_list *pkts, int *count)
2140{
2141        struct grspw_dma_priv *dma = c;
2142        struct grspw_pkt *pkt, *lastpkt;
2143        int cnt, started;
2144
2145        /* Take DMA channel lock */
[0d31dcc]2146        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]2147            != RTEMS_SUCCESSFUL)
2148                return -1;
2149
2150        /* 1. Move Scheduled packets to RECV List (SCHED->RECV) */
2151        started = dma->started;
2152        if (((opts & 1) == 0) && (started > 0))
2153                grspw_rx_process_scheduled(dma);
2154
2155        /* 2. Move all or *count RECV packets to the caller's list (RECV->USER) */
2156        if (pkts) {
2157                if ((count == NULL) || (*count == -1) ||
2158                    (*count >= dma->recv_cnt)) {
2159                        /* Move all Received packets */
2160                        *pkts = dma->recv;
2161                        grspw_list_clr(&dma->recv);
2162                        if (count)
2163                                *count = dma->recv_cnt;
2164                        dma->recv_cnt = 0;
2165                } else {
2166                        /* Move a number of RECV Packets */
2167                        pkts->head = pkt = lastpkt = dma->recv.head;
2168                        cnt = 0;
2169                        while (cnt < *count) {
2170                                lastpkt = pkt;
2171                                pkt = pkt->next;
2172                                cnt++;
2173                        }
2174                        if (cnt > 0) {
2175                                pkts->tail = lastpkt;
2176                                grspw_list_remove_head_list(&dma->recv, pkts);
2177                                dma->recv_cnt -= cnt;
2178                        } else {
2179                                grspw_list_clr(pkts);
2180                        }
2181                }
2182        } else if (count) {
2183                *count = 0;
2184        }
2185
2186        /* 3. Schedule as many free packet buffers as possible (READY->SCHED) */
2187        if (((opts & 2) == 0) && (started > 0))
2188                grspw_rx_schedule_ready(dma);
2189
2190        /* Unlock DMA channel */
[0d31dcc]2191        rtems_semaphore_release(dma->sem_rxdma);
[0f49c0e]2192
2193        return (~started) & 1;
2194}
2195
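/* Illustrative sketch: fetch all received packets; handle_rx() is a
 * hypothetical consumer (see the flag check in the earlier RX example).
 * The buffers should eventually be handed back via grspw_dma_rx_prepare()
 * below:
 *
 *   struct grspw_list lst;
 *   struct grspw_pkt *pkt;
 *   int cnt = -1; // -1 requests all RECV packets
 *
 *   if (grspw_dma_rx_recv(chan, 0, &lst, &cnt) >= 0) {
 *           pkt = lst.head;
 *           while (cnt-- > 0) {
 *                   handle_rx(pkt->data, pkt->dlen);
 *                   pkt = pkt->next;
 *           }
 *   }
 */
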
2196int grspw_dma_rx_prepare(void *c, int opts, struct grspw_list *pkts, int count)
2197{
2198        struct grspw_dma_priv *dma = c;
2199        int ret;
2200
2201        /* Take DMA channel lock */
[0d31dcc]2202        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]2203            != RTEMS_SUCCESSFUL)
2204                return -1;
2205
2206        if (dma->started == 0) {
2207                ret = 1;
2208                goto out;
2209        }
2210
2211        /* 1. Move Received packets to RECV List (SCHED->RECV) */
2212        if ((opts & 1) == 0)
2213                grspw_rx_process_scheduled(dma);
2214
2215        /* 2. Add the "free/ready" packet buffers to the READY List (USER->READY) */
2216        if (pkts && (count > 0)) {
2217                grspw_list_append_list(&dma->ready, pkts);
2218                dma->ready_cnt += count;
2219                if (dma->stats.ready_cnt_max < dma->ready_cnt)
2220                        dma->stats.ready_cnt_max = dma->ready_cnt;
2221        }
2222
2223        /* 3. Schedule as many packets as possible (READY->SCHED) */
2224        if ((opts & 2) == 0)
2225                grspw_rx_schedule_ready(dma);
2226
2227        ret = 0;
2228out:
2229        /* Unlock DMA channel */
[0d31dcc]2230        rtems_semaphore_release(dma->sem_rxdma);
[0f49c0e]2231
2232        return ret;
2233}
2234
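/* Illustrative sketch: hand a caller-allocated pool of empty buffers to the
 * driver, e.g. once at startup. PKT_NUM, pkts[] and bufs[][] are
 * hypothetical; the buffer size should match the configured rxmaxlen and
 * PKT_FLAG_TR_DATA is assumed to request CPU-to-DMA address translation as
 * on the TX path:
 *
 *   struct grspw_list lst;
 *   int i;
 *
 *   lst.head = lst.tail = NULL;
 *   for (i = 0; i < PKT_NUM; i++) {
 *           pkts[i].data = &bufs[i][0];
 *           pkts[i].flags = PKT_FLAG_TR_DATA;
 *           pkts[i].next = NULL;
 *           if (lst.tail == NULL)
 *                   lst.head = &pkts[i];
 *           else
 *                   lst.tail->next = &pkts[i];
 *           lst.tail = &pkts[i];
 *   }
 *   grspw_dma_rx_prepare(chan, 0, &lst, PKT_NUM);
 */
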
[1ef9caa2]2235void grspw_dma_rx_count(void *c, int *ready, int *sched, int *recv, int *hw)
[0f49c0e]2236{
2237        struct grspw_dma_priv *dma = c;
[1ef9caa2]2238        int sched_cnt, diff;
2239        unsigned int hwbd;
2240        struct grspw_rxbd *tailbd;
2241
2242        /* Take device lock - Wait until we get semaphore.
2243         * The lock is taken so that the counters are in sync with each other
2244         * and that DMA descriptor table and rx_ring_tail is not being updated
2245         * during HW counter processing in this function.
2246         */
[0d31dcc]2247        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[1ef9caa2]2248            != RTEMS_SUCCESSFUL)
2249                return;
[0f49c0e]2250
2251        if (ready)
2252                *ready = dma->ready_cnt;
[1ef9caa2]2253        sched_cnt = dma->rx_sched_cnt;
[0f49c0e]2254        if (sched)
[1ef9caa2]2255                *sched = sched_cnt;
[0f49c0e]2256        if (recv)
2257                *recv = dma->recv_cnt;
[1ef9caa2]2258        if (hw) {
2259                /* Calculate number of descriptors (processed by HW) between
2260                 * HW pointer and oldest SW pointer.
2261                 */
2262                hwbd = REG_READ(&dma->regs->rxdesc);
2263                tailbd = dma->rx_ring_tail->bd;
2264                diff = ((hwbd - (unsigned int)tailbd) / GRSPW_RXBD_SIZE) &
2265                        (GRSPW_RXBD_NR - 1);
2266                /* Handle special case when HW and SW pointers are equal
2267                 * because all RX descriptors have been processed by HW.
2268                 */
2269                if ((diff == 0) && (sched_cnt == GRSPW_RXBD_NR) &&
2270                    ((BD_READ(&tailbd->ctrl) & GRSPW_RXBD_EN) == 0)) {
2271                        diff = GRSPW_RXBD_NR;
2272                }
2273                *hw = diff;
2274        }
2275
2276        /* Unlock DMA channel */
[0d31dcc]2277        rtems_semaphore_release(dma->sem_rxdma);
[0f49c0e]2278}
2279
2280static inline int grspw_rx_wait_eval(struct grspw_dma_priv *dma)
2281{
2282        int ready_val, recv_val;
2283
2284        if (dma->rx_wait.ready_cnt >= (dma->ready_cnt + dma->rx_sched_cnt))
2285                ready_val = 1;
2286        else
2287                ready_val = 0;
2288
2289        if (dma->rx_wait.recv_cnt <= dma->recv_cnt)
2290                recv_val = 1;
2291        else
2292                recv_val = 0;
2293
2294        /* AND or OR ? */
2295        if (dma->rx_wait.op == 0)
2296                return ready_val & recv_val; /* AND */
2297        else
2298                return ready_val | recv_val; /* OR */
2299}
2300
2301/* Block until recv_cnt or more packets are Queued in RECV Q, op (AND or OR),
2302 * ready_cnt or fewer packet buffers are available in the "READY and Scheduled" Q,
2303 * condition is met.
2304 * If a link error occurs and the Stop on Link error is defined, this function
2305 * will also return to caller, however with an error.
2306 */
2307int grspw_dma_rx_wait(void *c, int recv_cnt, int op, int ready_cnt, int timeout)
2308{
2309        struct grspw_dma_priv *dma = c;
[9cb7e5d]2310        int ret, rc, initialized = 0;
[0f49c0e]2311
2312        if (timeout == 0)
2313                timeout = RTEMS_NO_TIMEOUT;
2314
2315check_condition:
2316
2317        /* Take DMA channel lock */
[0d31dcc]2318        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]2319            != RTEMS_SUCCESSFUL)
2320                return -1;
2321
2322        /* Check that no other thread is waiting; this driver only supports
2323         * one waiter at a time.
2324         */
[9cb7e5d]2325        if (initialized == 0 && dma->rx_wait.waiting) {
2326                ret = 3;
2327                goto out_release;
[0f49c0e]2328        }
2329
[9cb7e5d]2330        /* DMA has been stopped (link error or similar), abort */
[0f49c0e]2331        if (dma->started == 0) {
2332                ret = 1;
[9cb7e5d]2333                goto out_release;
[0f49c0e]2334        }
2335
2336        /* Set up Condition */
2337        dma->rx_wait.recv_cnt = recv_cnt;
2338        dma->rx_wait.op = op;
2339        dma->rx_wait.ready_cnt = ready_cnt;
2340
2341        if (grspw_rx_wait_eval(dma) == 0) {
2342                /* Prepare Wait */
[9cb7e5d]2343                initialized = 1;
[0f49c0e]2344                dma->rx_wait.waiting = 1;
2345
2346                /* Release channel lock */
[0d31dcc]2347                rtems_semaphore_release(dma->sem_rxdma);
[0f49c0e]2348
2349                /* Try to take the Wait lock; if this fails, the link may have
2350                 * gone down or the user stopped this DMA channel
2351                 */
2352                rc = rtems_semaphore_obtain(dma->rx_wait.sem_wait, RTEMS_WAIT,
2353                                           timeout);
2354                if (rc == RTEMS_TIMEOUT) {
[9cb7e5d]2355                        ret = 2;
2356                        goto out;
[0f49c0e]2357                } else if (rc == RTEMS_UNSATISFIED ||
2358                           rc == RTEMS_OBJECT_WAS_DELETED) {
[9cb7e5d]2359                        ret = 1; /* sem was flushed/deleted, means DMA stop */
2360                        goto out;
2361                } else if (rc != RTEMS_SUCCESSFUL) {
2362                        /* Unknown Error */
2363                        ret = -1;
2364                        goto out;
2365                } else if (dma->started == 0) {
2366                        ret = 1;
2367                        goto out;
2368                }
[0f49c0e]2369
2370                /* Check condition once more */
2371                goto check_condition;
2372        }
[9cb7e5d]2373
[0f49c0e]2374        ret = 0;
2375
[9cb7e5d]2376out_release:
[0f49c0e]2377        /* Unlock DMA channel */
[0d31dcc]2378        rtems_semaphore_release(dma->sem_rxdma);
[0f49c0e]2379
[9cb7e5d]2380out:
2381        if (initialized)
2382                dma->rx_wait.waiting = 0;
[0f49c0e]2383        return ret;
2384}
2385
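/* Illustrative sketch: block until at least one packet is in the RECV queue.
 * A very large ready_cnt makes the ready-buffer condition trivially true, so
 * with op=0 (AND) only the receive condition matters:
 *
 *   int rc = grspw_dma_rx_wait(chan, 1, 0, 0x7fffffff, 0);
 *   // rc: 0=condition met, 1=DMA stopped, 2=timeout,
 *   //     3=another thread already waiting, -1=error
 */
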
2386int grspw_dma_config(void *c, struct grspw_dma_config *cfg)
2387{
2388        struct grspw_dma_priv *dma = c;
2389
2390        if (dma->started || !cfg)
2391                return -1;
2392
[77856f6]2393        if (cfg->flags & ~(DMAFLAG_MASK | DMAFLAG2_MASK))
[0f49c0e]2394                return -1;
2395
2396        /* Update Configuration */
2397        memcpy(&dma->cfg, cfg, sizeof(*cfg));
2398
2399        return 0;
2400}
2401
2402void grspw_dma_config_read(void *c, struct grspw_dma_config *cfg)
2403{
2404        struct grspw_dma_priv *dma = c;
2405
2406        /* Copy Current Configuration */
2407        memcpy(cfg, &dma->cfg, sizeof(*cfg));
2408}
2409
2410void grspw_dma_stats_read(void *c, struct grspw_dma_stats *sts)
2411{
2412        struct grspw_dma_priv *dma = c;
2413
2414        memcpy(sts, &dma->stats, sizeof(dma->stats));
2415}
2416
2417void grspw_dma_stats_clr(void *c)
2418{
2419        struct grspw_dma_priv *dma = c;
2420
2421        /* Clear most of the statistics */     
2422        memset(&dma->stats, 0, sizeof(dma->stats));
2423
2424        /* Init proper default values so that comparisons will work the
2425         * first time.
2426         */
2427        dma->stats.send_cnt_min = 0x3fffffff;
2428        dma->stats.tx_sched_cnt_min = 0x3fffffff;
2429        dma->stats.ready_cnt_min = 0x3fffffff;
2430        dma->stats.rx_sched_cnt_min = 0x3fffffff;
2431}
2432
2433int grspw_dma_start(void *c)
2434{
2435        struct grspw_dma_priv *dma = c;
2436        struct grspw_dma_regs *dregs = dma->regs;
2437        unsigned int ctrl;
[6ecad1d]2438        IRQFLAGS_TYPE irqflags;
[0f49c0e]2439
2440        if (dma->started)
2441                return 0;
2442
2443        /* Initialize Software Structures:
2444         *  - Clear all Queues
2445         *  - init BD ring
2446         *  - init IRQ counter
2447         *  - clear statistics counters
2448         *  - init wait structures and semaphores
2449         */
2450        grspw_dma_reset(dma);
2451
2452        /* RX (RD) and TX are not enabled until the user fills the SEND and READY
2453         * Queues with SpaceWire Packet buffers. So we do not have to worry about
2454         * IRQs for this channel just yet. However other DMA channels
2455         * may be active.
2456         *
2457         * Some functionality that is not changed during started mode is set up
2458         * once and for all here:
2459         *
2460         *   - RX MAX Packet length
2461         *   - TX Descriptor base address to first BD in TX ring (not enabled)
2462         *   - RX Descriptor base address to first BD in RX ring (not enabled)
2463         *   - IRQs (TX DMA, RX DMA, DMA ERROR)
2464         *   - Strip PID
2465         *   - Strip Address
2466         *   - No Spill
2467         *   - Receiver Enable
2468         *   - disable on link error (LE)
2469         *
2470         * Note that the address register and the address enable bit in DMACTRL
2471         * register must be left untouched; they are configured at the GRSPW
2472         * core level.
2473         *
2474         * Note that the receiver is enabled here, but since descriptors are
2475         * not enabled the GRSPW core may stop/pause RX (if NS bit set) until
2476         * descriptors are enabled or it may ignore RX packets (NS=0) until
2477         * descriptors are enabled (writing RD bit).
2478         */
2479        REG_WRITE(&dregs->txdesc, dma->tx_bds_hwa);
2480        REG_WRITE(&dregs->rxdesc, dma->rx_bds_hwa);
2481
2482        /* MAX Packet length */
2483        REG_WRITE(&dma->regs->rxmax, dma->cfg.rxmaxlen);
2484
2485        ctrl =  GRSPW_DMACTRL_AI | GRSPW_DMACTRL_PS | GRSPW_DMACTRL_PR |
2486                GRSPW_DMACTRL_TA | GRSPW_DMACTRL_RA | GRSPW_DMACTRL_RE |
2487                (dma->cfg.flags & DMAFLAG_MASK) << GRSPW_DMACTRL_NS_BIT;
[49cf776e]2488        if (dma->core->dis_link_on_err & LINKOPTS_DIS_ONERR)
[0f49c0e]2489                ctrl |= GRSPW_DMACTRL_LE;
[77856f6]2490        if (dma->cfg.rx_irq_en_cnt != 0 || dma->cfg.flags & DMAFLAG2_RXIE)
[0f49c0e]2491                ctrl |= GRSPW_DMACTRL_RI;
[77856f6]2492        if (dma->cfg.tx_irq_en_cnt != 0 || dma->cfg.flags & DMAFLAG2_TXIE)
[0f49c0e]2493                ctrl |= GRSPW_DMACTRL_TI;
[6ecad1d]2494        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
2495        ctrl |= REG_READ(&dma->regs->ctrl) & GRSPW_DMACTRL_EN;
[0f49c0e]2496        REG_WRITE(&dregs->ctrl, ctrl);
[6ecad1d]2497        SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
[0f49c0e]2498
2499        dma->started = 1; /* open up other DMA interfaces */
2500
2501        return 0;
2502}
2503
2504STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma)
2505{
2506        IRQFLAGS_TYPE irqflags;
2507
2508        if (dma->started == 0)
2509                return;
2510        dma->started = 0;
2511
2512        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
2513        grspw_hw_dma_stop(dma);
2514        SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2515
2516        /* From here no more packets will be sent, however
2517         * there may still exist scheduled packets that have been
2518         * sent, and packets in the SEND Queue waiting for free
2519         * descriptors. All packets are moved to the SENT Queue
2520         * so that the user can get the buffers back; the user
2521         * must look at TXPKT_FLAG_TX in order to determine
2522         * if a packet was sent or not.
2523         */
2524
2525        /* Retrieve all sent packets from the scheduled queue */
2526        grspw_tx_process_scheduled(dma);
2527
2528        /* Move un-sent packets in the SEND and SCHED queues to the
2529         * SENT Queue (they are never marked sent).
2530         */
2531        if (!grspw_list_is_empty(&dma->tx_sched)) {
2532                grspw_list_append_list(&dma->sent, &dma->tx_sched);
2533                grspw_list_clr(&dma->tx_sched);
2534                dma->sent_cnt += dma->tx_sched_cnt;
2535                dma->tx_sched_cnt = 0;
2536        }
2537        if (!grspw_list_is_empty(&dma->send)) {
2538                grspw_list_append_list(&dma->sent, &dma->send);
2539                grspw_list_clr(&dma->send);
2540                dma->sent_cnt += dma->send_cnt;
2541                dma->send_cnt = 0;
2542        }
2543
2544        /* Similar for RX */
2545        grspw_rx_process_scheduled(dma);
2546        if (!grspw_list_is_empty(&dma->rx_sched)) {
2547                grspw_list_append_list(&dma->recv, &dma->rx_sched);
2548                grspw_list_clr(&dma->rx_sched);
2549                dma->recv_cnt += dma->rx_sched_cnt;
2550                dma->rx_sched_cnt = 0;
2551        }
2552        if (!grspw_list_is_empty(&dma->ready)) {
2553                grspw_list_append_list(&dma->recv, &dma->ready);
2554                grspw_list_clr(&dma->ready);
2555                dma->recv_cnt += dma->ready_cnt;
2556                dma->ready_cnt = 0;
2557        }
2558
2559        /* Throw out blocked threads */
2560        rtems_semaphore_flush(dma->rx_wait.sem_wait);
2561        rtems_semaphore_flush(dma->tx_wait.sem_wait);
2562}
2563
2564void grspw_dma_stop(void *c)
2565{
2566        struct grspw_dma_priv *dma = c;
2567
[eb5a42f6]2568        /* If DMA channel is closed we should not access the semaphore */
2569        if (!dma->open)
2570                return;
2571
[0f49c0e]2572        /* Take DMA Channel lock */
[0d31dcc]2573        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]2574            != RTEMS_SUCCESSFUL)
2575                return;
[0d31dcc]2576        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2577            != RTEMS_SUCCESSFUL) {
2578                rtems_semaphore_release(dma->sem_rxdma);
2579                return;
2580        }
[0f49c0e]2581
2582        grspw_dma_stop_locked(dma);
2583
[0d31dcc]2584        rtems_semaphore_release(dma->sem_txdma);
2585        rtems_semaphore_release(dma->sem_rxdma);
[0f49c0e]2586}
2587
2588/* Do general work, invoked indirectly from ISR */
2589static void grspw_work_shutdown_func(struct grspw_priv *priv)
2590{
2591        int i;
2592
2593        /* Link is down for some reason, and the user has configured
[9cb7e5d]2594         * that we stop all (open) DMA channels and throw out all their
2595         * blocked threads.
[0f49c0e]2596         */
2597        for (i=0; i<priv->hwsup.ndma_chans; i++)
2598                grspw_dma_stop(&priv->dma[i]);
2599        grspw_hw_stop(priv);
2600}
2601
2602/* Do DMA work on one channel, invoked indirectly from ISR */
[ab9b447]2603static void grspw_work_dma_func(struct grspw_dma_priv *dma, unsigned int msg)
[0f49c0e]2604{
[72ec13ef]2605        int tx_cond_true, rx_cond_true, rxtx;
[0f49c0e]2606
[eb5a42f6]2607        /* If DMA channel is closed we should not access the semaphore */
2608        if (dma->open == 0)
2609                return;
2610
[0f49c0e]2611        dma->stats.irq_cnt++;
2612
2613        /* Look at why we were woken up and clear the source */
[72ec13ef]2614        rxtx = 0;
2615        if (msg & WORK_DMA_RX_MASK)
2616                rxtx |= 1;
2617        if (msg & WORK_DMA_TX_MASK)
2618                rxtx |= 2;
2619        switch (grspw_dma_enable_int(dma, rxtx, 0)) {
2620        case 1:
2621                /* DMA stopped */
[0d31dcc]2622                return;
[72ec13ef]2623        case 2:
[0f49c0e]2624                /* DMA error -> Stop DMA channel (both RX and TX) */
[ab9b447]2625                if (msg & WORK_DMA_ER_MASK) {
2626                        /* DMA error and user wants work-task to handle error */
2627                        grspw_dma_stop(dma);
2628                        grspw_work_event(WORKTASK_EV_DMA_STOP, msg);
2629                }
[72ec13ef]2630                return;
2631        default:
2632                break;
2633        }
[94fb377b]2634        if (msg == 0)
2635                return;
[72ec13ef]2636
2637        rx_cond_true = 0;
2638        tx_cond_true = 0;
2639
[94fb377b]2640        if ((dma->cfg.flags & DMAFLAG2_IRQD_MASK) == DMAFLAG2_IRQD_BOTH) {
2641                /* In case both interrupt sources are disabled simultaneously
2642                 * by the ISR the re-enabling of the interrupt source must also
2643                 * do so to avoid missing interrupts. Both RX and TX process
2644                 * will be forced.
2645                 */
2646                msg |= WORK_DMA_RX_MASK | WORK_DMA_TX_MASK;
2647        }
2648
[72ec13ef]2649        if (msg & WORK_DMA_RX_MASK) {
2650                /* Do RX Work */
2651
2652                /* Take DMA channel RX lock */
2653                if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2654                    != RTEMS_SUCCESSFUL)
2655                        return;
2656
2657                dma->stats.rx_work_cnt++;
2658                grspw_rx_process_scheduled(dma);
2659                if (dma->started) {
2660                        dma->stats.rx_work_enabled +=
2661                                grspw_rx_schedule_ready(dma);
2662                        /* Check to see if condition for waking blocked
2663                         * USER task is fulfilled.
2664                         */
2665                        if (dma->rx_wait.waiting)
2666                                rx_cond_true = grspw_rx_wait_eval(dma);
[0f49c0e]2667                }
[72ec13ef]2668                rtems_semaphore_release(dma->sem_rxdma);
2669        }
2670
2671        if (msg & WORK_DMA_TX_MASK) {
2672                /* Do TX Work */
2673
2674                /* Take DMA channel TX lock */
2675                if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2676                    != RTEMS_SUCCESSFUL)
2677                        return;
2678
2679                dma->stats.tx_work_cnt++;
2680                grspw_tx_process_scheduled(dma);
2681                if (dma->started) {
2682                        dma->stats.tx_work_enabled +=
2683                                grspw_tx_schedule_send(dma);
2684                        /* Check to see if condition for waking blocked
2685                         * USER task is fulfilled.
2686                         */
2687                        if (dma->tx_wait.waiting)
2688                                tx_cond_true = grspw_tx_wait_eval(dma);
[0f49c0e]2689                }
[72ec13ef]2690                rtems_semaphore_release(dma->sem_txdma);
2691        }
[0f49c0e]2692
2693        if (rx_cond_true)
2694                rtems_semaphore_release(dma->rx_wait.sem_wait);
2695
2696        if (tx_cond_true)
2697                rtems_semaphore_release(dma->tx_wait.sem_wait);
2698}
2699
2700/* Work task is receiving work for the work message queue posted from
2701 * the ISR.
2702 */
[ab9b447]2703void grspw_work_func(rtems_id msgQ)
[0f49c0e]2704{
[ab9b447]2705        unsigned int message = 0, msg;
[0f49c0e]2706        size_t size;
2707        struct grspw_priv *priv;
2708        int i;
2709
[ab9b447]2710        /* Wait for ISR to schedule work */
2711        while (rtems_message_queue_receive(msgQ, &message, &size,
2712               RTEMS_WAIT, RTEMS_NO_TIMEOUT) == RTEMS_SUCCESSFUL) {
2713                if (message & WORK_QUIT_TASK)
[0f49c0e]2714                        break;
2715
2716                /* Handle work */
2717                priv = priv_tab[message >> WORK_CORE_BIT];
[ab9b447]2718                if (message & WORK_SHUTDOWN) {
[0f49c0e]2719                        grspw_work_shutdown_func(priv);
[ab9b447]2720
2721                        grspw_work_event(WORKTASK_EV_SHUTDOWN, message);
2722                } else if (message & WORK_DMA_MASK) {
2723                        for (i = 0; i < priv->hwsup.ndma_chans; i++) {
2724                                msg = message &
2725                                      (WORK_CORE_MASK | WORK_DMA_CHAN_MASK(i));
2726                                if (msg)
2727                                        grspw_work_dma_func(&priv->dma[i], msg);
[0f49c0e]2728                        }
2729                }
[ab9b447]2730                message = 0;
[0f49c0e]2731        }
[ab9b447]2732
2733        if (message & WORK_FREE_MSGQ)
2734                rtems_message_queue_delete(msgQ);
2735
2736        grspw_work_event(WORKTASK_EV_QUIT, message);
[0f49c0e]2737        rtems_task_delete(RTEMS_SELF);
2738}
2739
2740STATIC void grspw_isr(void *data)
2741{
2742        struct grspw_priv *priv = data;
[ab9b447]2743        unsigned int dma_stat, stat, stat_clrmsk, ctrl, icctrl, timecode, irqs;
[56fc7809]2744        unsigned int rxirq, rxack, intto;
[ab9b447]2745        int i, handled = 0, call_user_int_isr;
[94fb377b]2746        unsigned int message = WORK_NONE, dma_en;
[0f49c0e]2747#ifdef RTEMS_HAS_SMP
2748        IRQFLAGS_TYPE irqflags;
2749#endif
2750
2751        /* Get Status from Hardware */
2752        stat = REG_READ(&priv->regs->status);
[a7cc0da9]2753        stat_clrmsk = stat & (GRSPW_STS_TO | GRSPW_STAT_ERROR) &
2754                        (GRSPW_STS_TO | priv->stscfg);
[0f49c0e]2755
2756        /* Make sure to put the timecode handling first in order to get the
2757         * smallest possible interrupt latency
2758         */
2759        if ((stat & GRSPW_STS_TO) && (priv->tcisr != NULL)) {
[56fc7809]2760                ctrl = REG_READ(&priv->regs->ctrl);
2761                if (ctrl & GRSPW_CTRL_TQ) {
2762                        /* Timecode received. Let custom function handle this */
2763                        timecode = REG_READ(&priv->regs->time) &
2764                                        (GRSPW_TIME_CTRL | GRSPW_TIME_TCNT);
2765                        (priv->tcisr)(priv->tcisr_arg, timecode);
2766                }
2767        }
2768
2769        /* Get Interrupt status from hardware */
2770        icctrl = REG_READ(&priv->regs->icctrl);
2771        if ((icctrl & GRSPW_ICCTRL_IRQSRC_MASK) && (priv->icisr != NULL)) {
2772                call_user_int_isr = 0;
2773                rxirq = rxack = intto = 0;
2774
2775                if ((icctrl & GRSPW_ICCTRL_IQ) &&
2776                    (rxirq = REG_READ(&priv->regs->icrx)) != 0)
2777                        call_user_int_isr = 1;
2778
2779                if ((icctrl & GRSPW_ICCTRL_AQ) &&
2780                    (rxack = REG_READ(&priv->regs->icack)) != 0)
2781                        call_user_int_isr = 1;
2782
2783                if ((icctrl & GRSPW_ICCTRL_TQ) &&
2784                    (intto = REG_READ(&priv->regs->ictimeout)) != 0)
2785                        call_user_int_isr = 1;                 
2786
2787                /* Let custom functions handle this POTENTIAL SPW interrupt. The
2788                 * user function is called even if no such IRQ has happened!
2789                 * User must make sure to clear all interrupts that have been
2790                 * handled from the three registers by writing a one.
2791                 */
2792                if (call_user_int_isr)
2793                        priv->icisr(priv->icisr_arg, rxirq, rxack, intto);
[0f49c0e]2794        }
2795
2796        /* An Error occurred? */
2797        if (stat & GRSPW_STAT_ERROR) {
2798                /* Wake Global WorkQ */
2799                handled = 1;
2800
2801                if (stat & GRSPW_STS_EE)
2802                        priv->stats.err_eeop++;
2803
2804                if (stat & GRSPW_STS_IA)
2805                        priv->stats.err_addr++;
2806
2807                if (stat & GRSPW_STS_PE)
2808                        priv->stats.err_parity++;
2809
[ac7da5bc]2810                if (stat & GRSPW_STS_DE)
2811                        priv->stats.err_disconnect++;
2812
[0f49c0e]2813                if (stat & GRSPW_STS_ER)
2814                        priv->stats.err_escape++;
2815
2816                if (stat & GRSPW_STS_CE)
2817                        priv->stats.err_credit++;
2818
2819                if (stat & GRSPW_STS_WE)
2820                        priv->stats.err_wsync++;
2821
[090016a]2822                if (((priv->dis_link_on_err >> 16) & stat) &&
2823                    (REG_READ(&priv->regs->ctrl) & GRSPW_CTRL_IE)) {
[0f49c0e]2824                        /* Disable the link, no more transfers are expected
2825                         * on any DMA channel.
2826                         */
2827                        SPIN_LOCK(&priv->devlock, irqflags);
2828                        ctrl = REG_READ(&priv->regs->ctrl);
2829                        REG_WRITE(&priv->regs->ctrl, GRSPW_CTRL_LD |
2830                                (ctrl & ~(GRSPW_CTRL_IE|GRSPW_CTRL_LS)));
2831                        SPIN_UNLOCK(&priv->devlock, irqflags);
2832                        /* Signal to work-thread to stop DMA and clean up */
2833                        message = WORK_SHUTDOWN;
2834                }
2835        }
2836
2837        /* Clear Status Flags */
2838        if (stat_clrmsk) {
2839                handled = 1;
2840                REG_WRITE(&priv->regs->status, stat_clrmsk);
2841        }
2842
2843        /* A DMA transfer or Error occurred? In that case disable further
2844         * IRQs from the DMA channel, then invoke the workQ.
2845         *
2846         * Note that the GI interrupt flag may not be available on older
2847         * designs (it was added together with multiple DMA channels).
2848         */
2849        SPIN_LOCK(&priv->devlock, irqflags);
2850        for (i=0; i<priv->hwsup.ndma_chans; i++) {
2851                dma_stat = REG_READ(&priv->regs->dma[i].ctrl);
2852                /* Check for Errors and if Packets been sent or received if
2853                 * respective IRQ are enabled
2854                 */
[ab9b447]2855                irqs = (((dma_stat << 3) & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS))
2856                        | GRSPW_DMA_STATUS_ERROR) & dma_stat;
2857                if (!irqs)
2858                        continue;
2859
2860                handled = 1;
2861
2862                /* A DMA error has priority; if an error happens it is assumed
2863                 * that the common work-queue stops the DMA operation for that
2864                 * channel and makes the DMA tasks exit from their waiting
2865                 * functions (both RX and TX tasks).
[94fb377b]2866                 *
2867                 * Disable further IRQs (until enabled again)
2868                 * from this DMA channel. Let the status
2869                 * bits remain set so that they can be handled
2870                 * by the work function.
[ab9b447]2871                 */
2872                if (irqs & GRSPW_DMA_STATUS_ERROR) {
[94fb377b]2873                        REG_WRITE(&priv->regs->dma[i].ctrl, dma_stat &
2874                                ~(GRSPW_DMACTRL_RI | GRSPW_DMACTRL_TI |
2875                                  GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS |
2876                                  GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA |
2877                                  GRSPW_DMACTRL_AT));
[ab9b447]2878                        message |= WORK_DMA_ER(i);
2879                } else {
[94fb377b]2880                        /* determine if RX/TX interrupt source(s) shall remain
2881                         * enabled.
2882                         */
2883                        if (priv->dma[i].cfg.flags & DMAFLAG2_IRQD_SRC) {
2884                                dma_en = ~irqs >> 3;
2885                        } else {
2886                                dma_en = priv->dma[i].cfg.flags >>
2887                                 (DMAFLAG2_IRQD_BIT - GRSPW_DMACTRL_TI_BIT);
2888                        }
2889                        dma_en &= (GRSPW_DMACTRL_RI | GRSPW_DMACTRL_TI);
2890                        REG_WRITE(&priv->regs->dma[i].ctrl, dma_stat &
2891                                (~(GRSPW_DMACTRL_RI | GRSPW_DMACTRL_TI |
2892                                   GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS |
2893                                   GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA |
2894                                   GRSPW_DMACTRL_AT) | dma_en));
[ab9b447]2895                        message |= WORK_DMA(i, irqs >> GRSPW_DMACTRL_PS_BIT);
[0f49c0e]2896                }
2897        }
2898        SPIN_UNLOCK(&priv->devlock, irqflags);
2899
2900        if (handled != 0)
2901                priv->stats.irq_cnt++;
2902
2903        /* Schedule work by sending message to work thread */
[ab9b447]2904        if (message != WORK_NONE && priv->wc.msgisr) {
2905                int status;
[0f49c0e]2906                message |= WORK_CORE(priv->index);
[ab9b447]2907                /* The function interface is compatible with msgQSend() on
2908                 * purpose, but the user can also assign a custom function that
2909                 * handles the DMA RX/TX operations indicated by "message" and
2910                 * clears the handled bits before passing it on to msgQSend().
2911                 * (A sketch follows after this function.) */
2912                status = priv->wc.msgisr(priv->wc.msgisr_arg, &message, 4);
2913                if (status != RTEMS_SUCCESSFUL) {
[0f49c0e]2914                        printk("grspw_isr(%d): message fail %d (0x%x)\n",
[ab9b447]2915                                priv->index, status, message);
2916                }
[0f49c0e]2917        }
2918}
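
/* A minimal sketch (compiled out, not part of the driver) of a custom
 * work-message handler with the msgQSend()-compatible interface invoked
 * above. It inspects the event bits in ISR context and then defers the
 * real work to the message queue, just like the default handler does.
 * The names my_msgisr and my_shutdown_count are hypothetical.
 */
#if 0
static unsigned int my_shutdown_count; /* hypothetical event counter */

static int my_msgisr(void *data, unsigned int *msg, unsigned int size)
{
        /* Only cheap bookkeeping here; this runs in interrupt context */
        if (*msg & WORK_SHUTDOWN)
                my_shutdown_count++;

        /* Defer DMA RX/TX and shutdown processing to the work task by
         * forwarding the message unmodified. "data" holds the message
         * queue ID when set up as in grspw_common_init() below.
         */
        return rtems_message_queue_send((rtems_id)data, msg, size);
}
#endif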
2919
2920STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma)
2921{
2922        unsigned int ctrl;
2923        struct grspw_dma_regs *dregs = dma->regs;
2924
2925        ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN |
2926               GRSPW_DMACTRL_SP | GRSPW_DMACTRL_SA | GRSPW_DMACTRL_NS);
2927        ctrl |= GRSPW_DMACTRL_AT;
2928        REG_WRITE(&dregs->ctrl, ctrl);
2929}
2930
2931STATIC void grspw_hw_dma_softreset(struct grspw_dma_priv *dma)
2932{
2933        unsigned int ctrl;
2934        struct grspw_dma_regs *dregs = dma->regs;
2935
2936        ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN);
2937        REG_WRITE(&dregs->ctrl, ctrl);
2938
2939        REG_WRITE(&dregs->rxmax, DEFAULT_RXMAX);
2940        REG_WRITE(&dregs->txdesc, 0);
2941        REG_WRITE(&dregs->rxdesc, 0);
2942}
2943
2944/* Hardware Action:
2945 *  - stop DMA
2946 *  - do not bring down the link (RMAP may be active)
2947 *  - RMAP settings untouched (RMAP may be active)
2948 *  - port select untouched (RMAP may be active)
2949 *  - timecodes are disabled
2950 *  - IRQ generation disabled
2951 *  - status not cleared (let user analyze it if requested later on)
2952 *  - Node address / First DMA channels Node address
2953 *    is untouched (RMAP may be active)
2954 */
2955STATIC void grspw_hw_stop(struct grspw_priv *priv)
2956{
2957        int i;
2958        unsigned int ctrl;
2959        IRQFLAGS_TYPE irqflags;
2960
2961        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
2962
2963        for (i=0; i<priv->hwsup.ndma_chans; i++)
2964                grspw_hw_dma_stop(&priv->dma[i]);
2965
2966        ctrl = REG_READ(&priv->regs->ctrl);
2967        REG_WRITE(&priv->regs->ctrl, ctrl & (
2968                GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS |
2969                GRSPW_CTRL_RE | GRSPW_CTRL_RD |
2970                GRSPW_CTRL_NP | GRSPW_CTRL_PS));
2971
2972        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
2973}
2974
2975/* Soft reset of GRSPW core registers */
2976STATIC void grspw_hw_softreset(struct grspw_priv *priv)
2977{
2978        int i;
[56fc7809]2979        unsigned int tmp;
[0f49c0e]2980
2981        for (i=0; i<priv->hwsup.ndma_chans; i++)
2982                grspw_hw_dma_softreset(&priv->dma[i]);
2983
2984        REG_WRITE(&priv->regs->status, 0xffffffff);
2985        REG_WRITE(&priv->regs->time, 0);
[56fc7809]2986        /* Clear ICCTRL except for fields holding valuable reset values */
2987        tmp = REG_READ(&priv->regs->icctrl);
2988        tmp &= GRSPW_ICCTRL_INUM | GRSPW_ICCTRL_BIRQ | GRSPW_ICCTRL_TXIRQ;
2989        tmp |= GRSPW_ICCTRL_ID;
2990        REG_WRITE(&priv->regs->icctrl, tmp);
2991        REG_WRITE(&priv->regs->icrx, 0xffffffff);
2992        REG_WRITE(&priv->regs->icack, 0xffffffff);
2993        REG_WRITE(&priv->regs->ictimeout, 0xffffffff);
[0f49c0e]2994}
2995
2996int grspw_dev_count(void)
2997{
2998        return grspw_count;
2999}
3000
3001void grspw_initialize_user(void *(*devfound)(int), void (*devremove)(int,void*))
3002{
3003        int i;
3004        struct grspw_priv *priv;
3005
3006        /* Set new Device Found Handler */
3007        grspw_dev_add = devfound;
3008        grspw_dev_del = devremove;
3009
3010        if (grspw_initialized == 1 && grspw_dev_add) {
3011                /* Call callback for every previously found device */
3012                for (i=0; i<grspw_count; i++) {
3013                        priv = priv_tab[i];
3014                        if (priv)
3015                                priv->data = grspw_dev_add(i);
3016                }
3017        }
3018}
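
/* Usage sketch (compiled out): register device found/removed callbacks so
 * that the application learns about GRSPW devices. The pointer returned
 * by the devfound callback is stored in the device's private data and is
 * handed back on removal. The names app_dev_found, app_dev_remove and
 * struct app_dev are hypothetical.
 */
#if 0
struct app_dev {
        int index;
};

static void *app_dev_found(int index)
{
        struct app_dev *adev = malloc(sizeof(*adev));

        if (adev)
                adev->index = index;
        return adev; /* stored by the driver, returned in devremove */
}

static void app_dev_remove(int index, void *data)
{
        free(data);
}

/* From application initialization:
 *   grspw_initialize_user(app_dev_found, app_dev_remove);
 */
#endif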
3019
[1b559e3]3020/* Get a value of at least 6.4 us, in number of clock cycles */
3021static unsigned int grspw1_calc_timer64(int freq_khz)
3022{
3023        unsigned int timer64 = (freq_khz * 64 + 9999) / 10000;
3024        return timer64 & 0xfff;
3025}
3026
3027/* Get a value of at least 850 ns, in number of clock cycles, minus 3 */
3028static unsigned int grspw1_calc_discon(int freq_khz)
3029{
3030        unsigned int discon = ((freq_khz * 85 + 99999) / 100000) - 3;
3031        return discon & 0x3ff;
3032}
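
/* Worked example (illustrative): with a 100 MHz APB clock, freq_khz is
 * 100000, so 6.4 us corresponds to (100000*64 + 9999)/10000 = 640 clock
 * cycles and grspw1_calc_timer64() returns 640, while 850 ns corresponds
 * to (100000*85 + 99999)/100000 = 85 cycles and grspw1_calc_discon()
 * returns 85 - 3 = 82.
 */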
3033
[0f49c0e]3034/******************* Driver manager interface ***********************/
3035
3036/* Driver prototypes */
3037static int grspw_common_init(void);
3038static int grspw2_init3(struct drvmgr_dev *dev);
3039
3040static struct drvmgr_drv_ops grspw2_ops =
3041{
3042        .init = {NULL,  NULL, grspw2_init3, NULL},
3043        .remove = NULL,
3044        .info = NULL
3045};
3046
3047static struct amba_dev_id grspw2_ids[] =
3048{
3049        {VENDOR_GAISLER, GAISLER_SPW}, /* not yet supported */
3050        {VENDOR_GAISLER, GAISLER_SPW2},
3051        {VENDOR_GAISLER, GAISLER_SPW2_DMA},
3052        {0, 0}          /* Mark end of table */
3053};
3054
3055static struct amba_drv_info grspw2_drv_info =
3056{
3057        {
3058                DRVMGR_OBJ_DRV,                 /* Driver */
3059                NULL,                           /* Next driver */
3060                NULL,                           /* Device list */
3061                DRIVER_AMBAPP_GAISLER_GRSPW2_ID,/* Driver ID */
3062                "GRSPW_PKT_DRV",                /* Driver Name */
3063                DRVMGR_BUS_TYPE_AMBAPP,         /* Bus Type */
3064                &grspw2_ops,
3065                NULL,                           /* Funcs */
3066                0,                              /* No devices yet */
3067                sizeof(struct grspw_priv),      /* Let DrvMgr alloc priv */
3068        },
3069        &grspw2_ids[0]
3070};
3071
3072void grspw2_register_drv (void)
3073{
3074        GRSPW_DBG("Registering GRSPW2 packet driver\n");
3075        drvmgr_drv_register(&grspw2_drv_info.general);
3076}
3077
3078static int grspw2_init3(struct drvmgr_dev *dev)
3079{
3080        struct grspw_priv *priv;
3081        struct amba_dev_info *ambadev;
3082        struct ambapp_core *pnpinfo;
3083        int i, size;
[56fc7809]3084        unsigned int ctrl, icctrl, numi;
[0f49c0e]3085        union drvmgr_key_value *value;
3086
3087        GRSPW_DBG("GRSPW[%d] on bus %s\n", dev->minor_drv,
3088                dev->parent->dev->name);
3089
3090        if (grspw_count > GRSPW_MAX)
3091                return DRVMGR_ENORES;
3092
3093        priv = dev->priv;
3094        if (priv == NULL)
3095                return DRVMGR_NOMEM;
3096        priv->dev = dev;
3097
3098        /* If this is the first device, init the common part of the driver */
3099        if (grspw_common_init())
3100                return DRVMGR_FAIL;
3101
3102        /*** Now we take care of device initialization ***/
3103
3104        /* Get device information from AMBA PnP information */
3105        ambadev = (struct amba_dev_info *)dev->businfo;
3106        if (ambadev == NULL)
3107                return -1;
3108        pnpinfo = &ambadev->info;
3109        priv->irq = pnpinfo->irq;
3110        priv->regs = (struct grspw_regs *)pnpinfo->apb_slv->start;
3111
3112        /* Read Hardware Support from Control Register */
3113        ctrl = REG_READ(&priv->regs->ctrl);
3114        priv->hwsup.rmap = (ctrl & GRSPW_CTRL_RA) >> GRSPW_CTRL_RA_BIT;
3115        priv->hwsup.rmap_crc = (ctrl & GRSPW_CTRL_RC) >> GRSPW_CTRL_RC_BIT;
3116        priv->hwsup.rx_unalign = (ctrl & GRSPW_CTRL_RX) >> GRSPW_CTRL_RX_BIT;
3117        priv->hwsup.nports = 1 + ((ctrl & GRSPW_CTRL_PO) >> GRSPW_CTRL_PO_BIT);
3118        priv->hwsup.ndma_chans = 1 + ((ctrl & GRSPW_CTRL_NCH) >> GRSPW_CTRL_NCH_BIT);
[56fc7809]3119        priv->hwsup.irq = ((ctrl & GRSPW_CTRL_ID) >> GRSPW_CTRL_ID_BIT);
3120        icctrl = REG_READ(&priv->regs->icctrl);
3121        numi = (icctrl & GRSPW_ICCTRL_NUMI) >> GRSPW_ICCTRL_NUMI_BIT;
3122        if (numi > 0)
3123                priv->hwsup.irq_num = 1 << (numi - 1);
3124        else
3125                priv->hwsup.irq_num = 0;
[0f49c0e]3126
3127        /* Construct hardware version identification */
3128        priv->hwsup.hw_version = pnpinfo->device << 16 | pnpinfo->apb_slv->ver;
3129
3130        if ((pnpinfo->device == GAISLER_SPW2) ||
3131            (pnpinfo->device == GAISLER_SPW2_DMA)) {
3132                priv->hwsup.strip_adr = 1; /* All GRSPW2 can strip Address */
3133                priv->hwsup.strip_pid = 1; /* All GRSPW2 can strip PID */
3134        } else {
[1b559e3]3135                unsigned int apb_hz, apb_khz;
3136
[0f49c0e]3137                /* Autodetect GRSPW1 features? */
3138                priv->hwsup.strip_adr = 0;
3139                priv->hwsup.strip_pid = 0;
[1b559e3]3140
3141                drvmgr_freq_get(dev, DEV_APB_SLV, &apb_hz);
3142                apb_khz = apb_hz / 1000;
3143
3144                REG_WRITE(&priv->regs->timer,
3145                        ((grspw1_calc_discon(apb_khz) & 0x3FF) << 12) |
3146                        (grspw1_calc_timer64(apb_khz) & 0xFFF));
[0f49c0e]3147        }
3148
[56fc7809]3149        /* Probe width of the SpaceWire interrupt ISR timers. All have the
3150         * same width, so only the first is probed; if no timer is present
3151         * the result will be zero (a read-back of 0x7f gives width 7).
3152         */
3153        REG_WRITE(&priv->regs->icrlpresc, 0x7fffffff);
3154        ctrl = REG_READ(&priv->regs->icrlpresc);
3155        REG_WRITE(&priv->regs->icrlpresc, 0);
3156        priv->hwsup.itmr_width = 0;
3157        while (ctrl & 1) {
3158                priv->hwsup.itmr_width++;
3159                ctrl = ctrl >> 1;
3160        }
3161
[0f49c0e]3162        /* Let the user limit the number of DMA channels on this core to
3163         * save space; only the first nDMA channels will be available. (See
3164         * the resource sketch after this function.) */
[4d3e70f4]3165        value = drvmgr_dev_key_get(priv->dev, "nDMA", DRVMGR_KT_INT);
[0f49c0e]3166        if (value && (value->i < priv->hwsup.ndma_chans))
3167                priv->hwsup.ndma_chans = value->i;
3168
3169        /* Allocate and init memory for all DMA channels */
3170        size = sizeof(struct grspw_dma_priv) * priv->hwsup.ndma_chans;
3171        priv->dma = (struct grspw_dma_priv *) malloc(size);
3172        if (priv->dma == NULL)
3173                return DRVMGR_NOMEM;
3174        memset(priv->dma, 0, size);
3175        for (i=0; i<priv->hwsup.ndma_chans; i++) {
3176                priv->dma[i].core = priv;
3177                priv->dma[i].index = i;
3178                priv->dma[i].regs = &priv->regs->dma[i];
3179        }
3180
3181        /* Startup Action:
3182         *  - stop DMA
3183         *  - do not bring down the link (RMAP may be active)
3184         *  - RMAP settings untouched (RMAP may be active)
3185         *  - port select untouched (RMAP may be active)
3186         *  - timecodes are disabled
3187         *  - IRQ generation disabled
3188         *  - status cleared
3189         *  - Node address / First DMA channels Node address
3190         *    is untouched (RMAP may be active)
3191         */
3192        grspw_hw_stop(priv);
3193        grspw_hw_softreset(priv);
3194
3195        /* Register device in the driver's device table */
3196        priv->index = grspw_count;
3197        priv_tab[priv->index] = priv;
3198        grspw_count++;
3199
3200        /* Device name */
3201        sprintf(priv->devname, "grspw%d", priv->index);
3202
3203        /* Tell above layer about new device */
3204        if (grspw_dev_add)
3205                priv->data = grspw_dev_add(priv->index);
3206
3207        return DRVMGR_OK;
3208}
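
/* Sketch (compiled out) of how a BSP or application could limit grspw0 to
 * two DMA channels through a driver manager resource; the "nDMA" integer
 * key is what grspw2_init3() above looks up. Hooking the key array into
 * the bus resources is left out since it depends on the BSP's drvmgr
 * configuration.
 */
#if 0
struct drvmgr_key grspw0_res[] = {
        {"nDMA", DRVMGR_KT_INT, {(unsigned int)2}},
        DRVMGR_KEY_EMPTY
};
#endif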
3209
3210/******************* Driver Implementation ***********************/
[ab9b447]3211/* Creates a MsgQ (optional) and spawns a worker task associated with the
3212 * message Q. The task can also be associated with a custom MsgQ if *pMsgQ
3213 * is non-zero.
3214 */
3215rtems_id grspw_work_spawn(int prio, int stack, rtems_id *pMsgQ, int msgMax)
3216{
3217        rtems_id tid;
3218        int created_msgq = 0;
[8acfa94]3219        static char work_name = 'A';
[ab9b447]3220
3221        if (pMsgQ == NULL)
3222                return OBJECTS_ID_NONE;
3223
3224        if (*pMsgQ == OBJECTS_ID_NONE) {
3225                if (msgMax <= 0)
3226                        msgMax = 32;
3227
3228                if (rtems_message_queue_create(
[8acfa94]3229                        rtems_build_name('S', 'G', 'Q', work_name),
[ab9b447]3230                        msgMax, 4, RTEMS_FIFO, pMsgQ) !=
3231                        RTEMS_SUCCESSFUL)
3232                        return OBJECTS_ID_NONE;
3233                created_msgq = 1;
3234        }
3235
3236        if (prio < 0)
3237                prio = grspw_work_task_priority; /* default prio */
3238        if (stack < 0x800)
3239                stack = RTEMS_MINIMUM_STACK_SIZE; /* default stack size */
3240
[8acfa94]3241        if (rtems_task_create(rtems_build_name('S', 'G', 'T', work_name),
[ab9b447]3242                prio, stack, RTEMS_PREEMPT | RTEMS_NO_ASR,
3243                RTEMS_NO_FLOATING_POINT, &tid) != RTEMS_SUCCESSFUL)
3244                tid = OBJECTS_ID_NONE;
3245        else if (rtems_task_start(tid, (rtems_task_entry)grspw_work_func, *pMsgQ) !=
3246                    RTEMS_SUCCESSFUL) {
3247                rtems_task_delete(tid);
3248                tid = OBJECTS_ID_NONE;
3249        }
3250
3251        if (tid == OBJECTS_ID_NONE && created_msgq) {
3252                rtems_message_queue_delete(*pMsgQ);
3253                *pMsgQ = OBJECTS_ID_NONE;
[8acfa94]3254        } else {
3255                if (++work_name > 'Z')
3256                        work_name = 'A';
[ab9b447]3257        }
3258        return tid;
3259}
3260
3261/* Free the task associated with a message queue and optionally also the
3262 * message queue itself. The message queue is deleted by the work task, so
3263 * its deletion is delayed until the work task resumes execution. (A usage
3264 * sketch follows after grspw_work_cfg() below.) */
3265rtems_status_code grspw_work_free(rtems_id msgQ, int freeMsgQ)
3266{
3267        int msg = WORK_QUIT_TASK;
3268        if (freeMsgQ)
3269                msg |= WORK_FREE_MSGQ;
3270        return rtems_message_queue_send(msgQ, &msg, 4);
3271}
3272
3273void grspw_work_cfg(void *d, struct grspw_work_config *wc)
3274{
3275        struct grspw_priv *priv = (struct grspw_priv *)d;
3276
3277        if (wc == NULL)
3278                wc = &grspw_wc_def; /* use default config */
3279        priv->wc = *wc;
3280}
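
/* Usage sketch (compiled out): give one device a private work task and
 * message queue instead of the shared default, then tear both down. The
 * device handle "dh" is assumed to come from grspw_open(); error handling
 * is omitted for brevity.
 */
#if 0
static void example_private_work(void *dh)
{
        struct grspw_work_config wc;
        rtems_id msgq = OBJECTS_ID_NONE;
        rtems_id tid;

        /* default priority and stack, private queue holding 8 messages */
        tid = grspw_work_spawn(-1, 0, &msgq, 8);
        if (tid == OBJECTS_ID_NONE)
                return;

        wc.msgisr = (grspw_msgqisr_t) rtems_message_queue_send;
        wc.msgisr_arg = (void *) msgq;
        grspw_work_cfg(dh, &wc);

        /* ... DMA traffic with interrupts enabled ... */

        /* ask the work task to quit and delete the queue when it does */
        grspw_work_free(msgq, 1);
}
#endif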
[0f49c0e]3281
3282static int grspw_common_init(void)
3283{
3284        if (grspw_initialized == 1)
3285                return 0;
3286        if (grspw_initialized == -1)
3287                return -1;
3288        grspw_initialized = -1;
3289
3290        /* Device Semaphore created with count = 1 */
3291        if (rtems_semaphore_create(rtems_build_name('S', 'G', 'L', 'S'), 1,
3292            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
3293            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
3294            RTEMS_NO_PRIORITY_CEILING, 0, &grspw_sem) != RTEMS_SUCCESSFUL)
3295                return -1;
3296
3297        /* Work queue and work thread; not created if the user disables it.
3298         * The user can disable it to save resources when interrupts are
3299         * not used (see the sketch after this function). */
3300        if (grspw_work_task_priority != -1) {
[ab9b447]3301                grspw_work_task = grspw_work_spawn(-1, 0,
3302                        (rtems_id *)&grspw_wc_def.msgisr_arg, 0);
3303                if (grspw_work_task == OBJECTS_ID_NONE)
3304                        return -2;
3305                grspw_wc_def.msgisr =
3306                        (grspw_msgqisr_t) rtems_message_queue_send;
3307        } else {
3308                grspw_wc_def.msgisr = NULL;
3309                grspw_wc_def.msgisr_arg = NULL;
3310        }
[0f49c0e]3311
3312        grspw_initialized = 1;
3313        return 0;
3314}
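
/* Sketch: an application that never enables interrupts can avoid creating
 * the default work task and message queue by setting the global priority
 * variable to -1 before the driver manager initializes the driver, since
 * grspw_common_init() above consults it:
 *
 *   grspw_work_task_priority = -1;
 */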