source: rtems/bsps/sparc/shared/spw/grspw_pkt.c @ 408fad3

Last change on this file since 408fad3 was 408fad3, checked in by Daniel Hellstrom <daniel@…>, on 06/20/18 at 07:23:31

leon,grspw_pkt: remove incorrect comment on SMP not being tested

To clarify, SMP with GRSPW_PKT driver API has been extended to
take advantage of multi-core, is now SMP-safe, and has been tested
on GR740.

/*
 * Cobham Gaisler GRSPW/GRSPW2 SpaceWire Kernel Library Interface for RTEMS.
 *
 * This driver can be used to implement a standard I/O system "char"-driver
 * or used directly.
 *
 * COPYRIGHT (c) 2011
 * Cobham Gaisler AB
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#include <rtems.h>
#include <bsp.h>
#include <rtems/libio.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <ctype.h>
#include <malloc.h>
#include <rtems/bspIo.h>

#include <drvmgr/drvmgr.h>
#include <ambapp.h>
#include <drvmgr/ambapp_bus.h>
#include <bsp/grspw_pkt.h>
/* Use interrupt lock primitives compatible with SMP defined in
 * RTEMS 4.11.99 and higher.
 */
#if (((__RTEMS_MAJOR__ << 16) | (__RTEMS_MINOR__ << 8) | __RTEMS_REVISION__) >= 0x040b63)

#include <rtems/score/isrlock.h> /* spin-lock */

/* map via ISR lock: */
#define SPIN_DECLARE(lock) ISR_LOCK_MEMBER(lock)
#define SPIN_INIT(lock, name) _ISR_lock_Initialize(lock, name)
#define SPIN_LOCK(lock, level) _ISR_lock_Acquire_inline(lock, &level)
#define SPIN_LOCK_IRQ(lock, level) _ISR_lock_ISR_disable_and_acquire(lock, &level)
#define SPIN_UNLOCK(lock, level) _ISR_lock_Release_inline(lock, &level)
#define SPIN_UNLOCK_IRQ(lock, level) _ISR_lock_Release_and_ISR_enable(lock, &level)
#define SPIN_IRQFLAGS(k) ISR_lock_Context k
#define SPIN_ISR_IRQFLAGS(k) SPIN_IRQFLAGS(k)

#else

/* maintain single-core compatibility with older versions of RTEMS: */
#define SPIN_DECLARE(name)
#define SPIN_INIT(lock, name)
#define SPIN_LOCK(lock, level)
#define SPIN_LOCK_IRQ(lock, level) rtems_interrupt_disable(level)
#define SPIN_UNLOCK(lock, level)
#define SPIN_UNLOCK_IRQ(lock, level) rtems_interrupt_enable(level)
#define SPIN_IRQFLAGS(k) rtems_interrupt_level k
#define SPIN_ISR_IRQFLAGS(k)

#ifdef RTEMS_SMP
#error SMP mode not compatible with these interrupt lock primitives
#endif

#endif
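
/* Usage sketch of the SPIN_ wrappers above (illustrative only; the type and
 * function below are hypothetical and compiled out). A read-modify-write of
 * a device register is protected against the ISR the same way on SMP and on
 * older single-core RTEMS.
 */
#if 0
struct example_dev {
        volatile unsigned int *reg;
        SPIN_DECLARE(lock);
};

static void example_reg_set_bits(struct example_dev *dev, unsigned int bits)
{
        SPIN_IRQFLAGS(irqflags);

        SPIN_LOCK_IRQ(&dev->lock, irqflags);    /* disable IRQ + take spin-lock */
        *dev->reg |= bits;                      /* protected read-modify-write */
        SPIN_UNLOCK_IRQ(&dev->lock, irqflags);  /* release + restore IRQ */
}
#endif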

/*#define STATIC*/
#define STATIC static

/*#define GRSPW_DBG(args...) printk(args)*/
#define GRSPW_DBG(args...)

struct grspw_dma_regs {
        volatile unsigned int ctrl;     /* DMA Channel Control */
        volatile unsigned int rxmax;    /* RX Max Packet Length */
        volatile unsigned int txdesc;   /* TX Descriptor Base/Current */
        volatile unsigned int rxdesc;   /* RX Descriptor Base/Current */
        volatile unsigned int addr;     /* Address Register */
        volatile unsigned int resv[3];
};

struct grspw_regs {
        volatile unsigned int ctrl;
        volatile unsigned int status;
        volatile unsigned int nodeaddr;
        volatile unsigned int clkdiv;
        volatile unsigned int destkey;
        volatile unsigned int time;
        volatile unsigned int timer;    /* Used only in GRSPW1 */
        volatile unsigned int resv1;

        /* DMA Registers, ctrl.NCH determines the number of DMA channels;
         * up to 4 channels are supported
         */
        struct grspw_dma_regs dma[4];

        volatile unsigned int icctrl;
        volatile unsigned int icrx;
        volatile unsigned int icack;
        volatile unsigned int ictimeout;
        volatile unsigned int ictickomask;
        volatile unsigned int icaamask;
        volatile unsigned int icrlpresc;
        volatile unsigned int icrlisr;
        volatile unsigned int icrlintack;
        volatile unsigned int resv2;
        volatile unsigned int icisr;
        volatile unsigned int resv3;
};

/* GRSPW - Control Register - 0x00 */
#define GRSPW_CTRL_RA_BIT       31
#define GRSPW_CTRL_RX_BIT       30
#define GRSPW_CTRL_RC_BIT       29
#define GRSPW_CTRL_NCH_BIT      27
#define GRSPW_CTRL_PO_BIT       26
#define GRSPW_CTRL_ID_BIT       24
#define GRSPW_CTRL_LE_BIT       22
#define GRSPW_CTRL_PS_BIT       21
#define GRSPW_CTRL_NP_BIT       20
#define GRSPW_CTRL_RD_BIT       17
#define GRSPW_CTRL_RE_BIT       16
#define GRSPW_CTRL_TF_BIT       12
#define GRSPW_CTRL_TR_BIT       11
#define GRSPW_CTRL_TT_BIT       10
#define GRSPW_CTRL_LI_BIT       9
#define GRSPW_CTRL_TQ_BIT       8
#define GRSPW_CTRL_RS_BIT       6
#define GRSPW_CTRL_PM_BIT       5
#define GRSPW_CTRL_TI_BIT       4
#define GRSPW_CTRL_IE_BIT       3
#define GRSPW_CTRL_AS_BIT       2
#define GRSPW_CTRL_LS_BIT       1
#define GRSPW_CTRL_LD_BIT       0

#define GRSPW_CTRL_RA   (1<<GRSPW_CTRL_RA_BIT)
#define GRSPW_CTRL_RX   (1<<GRSPW_CTRL_RX_BIT)
#define GRSPW_CTRL_RC   (1<<GRSPW_CTRL_RC_BIT)
#define GRSPW_CTRL_NCH  (0x3<<GRSPW_CTRL_NCH_BIT)
#define GRSPW_CTRL_PO   (1<<GRSPW_CTRL_PO_BIT)
#define GRSPW_CTRL_ID   (1<<GRSPW_CTRL_ID_BIT)
#define GRSPW_CTRL_LE   (1<<GRSPW_CTRL_LE_BIT)
#define GRSPW_CTRL_PS   (1<<GRSPW_CTRL_PS_BIT)
#define GRSPW_CTRL_NP   (1<<GRSPW_CTRL_NP_BIT)
#define GRSPW_CTRL_RD   (1<<GRSPW_CTRL_RD_BIT)
#define GRSPW_CTRL_RE   (1<<GRSPW_CTRL_RE_BIT)
#define GRSPW_CTRL_TF   (1<<GRSPW_CTRL_TF_BIT)
#define GRSPW_CTRL_TR   (1<<GRSPW_CTRL_TR_BIT)
#define GRSPW_CTRL_TT   (1<<GRSPW_CTRL_TT_BIT)
#define GRSPW_CTRL_LI   (1<<GRSPW_CTRL_LI_BIT)
#define GRSPW_CTRL_TQ   (1<<GRSPW_CTRL_TQ_BIT)
#define GRSPW_CTRL_RS   (1<<GRSPW_CTRL_RS_BIT)
#define GRSPW_CTRL_PM   (1<<GRSPW_CTRL_PM_BIT)
#define GRSPW_CTRL_TI   (1<<GRSPW_CTRL_TI_BIT)
#define GRSPW_CTRL_IE   (1<<GRSPW_CTRL_IE_BIT)
#define GRSPW_CTRL_AS   (1<<GRSPW_CTRL_AS_BIT)
#define GRSPW_CTRL_LS   (1<<GRSPW_CTRL_LS_BIT)
#define GRSPW_CTRL_LD   (1<<GRSPW_CTRL_LD_BIT)

#define GRSPW_CTRL_IRQSRC_MASK \
        (GRSPW_CTRL_LI | GRSPW_CTRL_TQ)
#define GRSPW_ICCTRL_IRQSRC_MASK \
        (GRSPW_ICCTRL_TQ | GRSPW_ICCTRL_AQ | GRSPW_ICCTRL_IQ)


/* GRSPW - Status Register - 0x04 */
#define GRSPW_STS_LS_BIT        21
#define GRSPW_STS_AP_BIT        9
#define GRSPW_STS_EE_BIT        8
#define GRSPW_STS_IA_BIT        7
#define GRSPW_STS_WE_BIT        6       /* GRSPW1 */
#define GRSPW_STS_PE_BIT        4
#define GRSPW_STS_DE_BIT        3
#define GRSPW_STS_ER_BIT        2
#define GRSPW_STS_CE_BIT        1
#define GRSPW_STS_TO_BIT        0

#define GRSPW_STS_LS    (0x7<<GRSPW_STS_LS_BIT)
#define GRSPW_STS_AP    (1<<GRSPW_STS_AP_BIT)
#define GRSPW_STS_EE    (1<<GRSPW_STS_EE_BIT)
#define GRSPW_STS_IA    (1<<GRSPW_STS_IA_BIT)
#define GRSPW_STS_WE    (1<<GRSPW_STS_WE_BIT)   /* GRSPW1 */
#define GRSPW_STS_PE    (1<<GRSPW_STS_PE_BIT)
#define GRSPW_STS_DE    (1<<GRSPW_STS_DE_BIT)
#define GRSPW_STS_ER    (1<<GRSPW_STS_ER_BIT)
#define GRSPW_STS_CE    (1<<GRSPW_STS_CE_BIT)
#define GRSPW_STS_TO    (1<<GRSPW_STS_TO_BIT)

/* GRSPW - Default Address Register - 0x08 */
#define GRSPW_DEF_ADDR_BIT      0
#define GRSPW_DEF_MASK_BIT      8
#define GRSPW_DEF_ADDR  (0xff<<GRSPW_DEF_ADDR_BIT)
#define GRSPW_DEF_MASK  (0xff<<GRSPW_DEF_MASK_BIT)

/* GRSPW - Clock Divisor Register - 0x0C */
#define GRSPW_CLKDIV_START_BIT  8
#define GRSPW_CLKDIV_RUN_BIT    0
#define GRSPW_CLKDIV_START      (0xff<<GRSPW_CLKDIV_START_BIT)
#define GRSPW_CLKDIV_RUN        (0xff<<GRSPW_CLKDIV_RUN_BIT)
#define GRSPW_CLKDIV_MASK       (GRSPW_CLKDIV_START|GRSPW_CLKDIV_RUN)

/* GRSPW - Destination key Register - 0x10 */
#define GRSPW_DK_DESTKEY_BIT    0
#define GRSPW_DK_DESTKEY        (0xff<<GRSPW_DK_DESTKEY_BIT)

/* GRSPW - Time Register - 0x14 */
#define GRSPW_TIME_CTRL_BIT     6
#define GRSPW_TIME_CNT_BIT      0
#define GRSPW_TIME_CTRL         (0x3<<GRSPW_TIME_CTRL_BIT)
#define GRSPW_TIME_TCNT         (0x3f<<GRSPW_TIME_CNT_BIT)

/* GRSPW - DMA Control Register - 0x20*N */
#define GRSPW_DMACTRL_LE_BIT    16
#define GRSPW_DMACTRL_SP_BIT    15
#define GRSPW_DMACTRL_SA_BIT    14
#define GRSPW_DMACTRL_EN_BIT    13
#define GRSPW_DMACTRL_NS_BIT    12
#define GRSPW_DMACTRL_RD_BIT    11
#define GRSPW_DMACTRL_RX_BIT    10
#define GRSPW_DMACTRL_AT_BIT    9
#define GRSPW_DMACTRL_RA_BIT    8
#define GRSPW_DMACTRL_TA_BIT    7
#define GRSPW_DMACTRL_PR_BIT    6
#define GRSPW_DMACTRL_PS_BIT    5
#define GRSPW_DMACTRL_AI_BIT    4
#define GRSPW_DMACTRL_RI_BIT    3
#define GRSPW_DMACTRL_TI_BIT    2
#define GRSPW_DMACTRL_RE_BIT    1
#define GRSPW_DMACTRL_TE_BIT    0

#define GRSPW_DMACTRL_LE        (1<<GRSPW_DMACTRL_LE_BIT)
#define GRSPW_DMACTRL_SP        (1<<GRSPW_DMACTRL_SP_BIT)
#define GRSPW_DMACTRL_SA        (1<<GRSPW_DMACTRL_SA_BIT)
#define GRSPW_DMACTRL_EN        (1<<GRSPW_DMACTRL_EN_BIT)
#define GRSPW_DMACTRL_NS        (1<<GRSPW_DMACTRL_NS_BIT)
#define GRSPW_DMACTRL_RD        (1<<GRSPW_DMACTRL_RD_BIT)
#define GRSPW_DMACTRL_RX        (1<<GRSPW_DMACTRL_RX_BIT)
#define GRSPW_DMACTRL_AT        (1<<GRSPW_DMACTRL_AT_BIT)
#define GRSPW_DMACTRL_RA        (1<<GRSPW_DMACTRL_RA_BIT)
#define GRSPW_DMACTRL_TA        (1<<GRSPW_DMACTRL_TA_BIT)
#define GRSPW_DMACTRL_PR        (1<<GRSPW_DMACTRL_PR_BIT)
#define GRSPW_DMACTRL_PS        (1<<GRSPW_DMACTRL_PS_BIT)
#define GRSPW_DMACTRL_AI        (1<<GRSPW_DMACTRL_AI_BIT)
#define GRSPW_DMACTRL_RI        (1<<GRSPW_DMACTRL_RI_BIT)
#define GRSPW_DMACTRL_TI        (1<<GRSPW_DMACTRL_TI_BIT)
#define GRSPW_DMACTRL_RE        (1<<GRSPW_DMACTRL_RE_BIT)
#define GRSPW_DMACTRL_TE        (1<<GRSPW_DMACTRL_TE_BIT)

/* GRSPW - DMA Channel Max Packet Length Register - (0x20*N + 0x04) */
#define GRSPW_DMARXLEN_MAX_BIT  0
#define GRSPW_DMARXLEN_MAX      (0xffffff<<GRSPW_DMARXLEN_MAX_BIT)

/* GRSPW - DMA Channel Address Register - (0x20*N + 0x10) */
#define GRSPW_DMAADR_ADDR_BIT   0
#define GRSPW_DMAADR_MASK_BIT   8
#define GRSPW_DMAADR_ADDR       (0xff<<GRSPW_DMAADR_ADDR_BIT)
#define GRSPW_DMAADR_MASK       (0xff<<GRSPW_DMAADR_MASK_BIT)

/* GRSPW - Interrupt code receive register - 0xa4 */
#define GRSPW_ICCTRL_INUM_BIT   27
#define GRSPW_ICCTRL_IA_BIT     24
#define GRSPW_ICCTRL_LE_BIT     23
#define GRSPW_ICCTRL_PR_BIT     22
#define GRSPW_ICCTRL_DQ_BIT     21 /* never used */
#define GRSPW_ICCTRL_TQ_BIT     20
#define GRSPW_ICCTRL_AQ_BIT     19
#define GRSPW_ICCTRL_IQ_BIT     18
#define GRSPW_ICCTRL_IR_BIT     17
#define GRSPW_ICCTRL_IT_BIT     16
#define GRSPW_ICCTRL_NUMI_BIT   13
#define GRSPW_ICCTRL_BIRQ_BIT   8
#define GRSPW_ICCTRL_ID_BIT     7
#define GRSPW_ICCTRL_II_BIT     6
#define GRSPW_ICCTRL_TXIRQ_BIT  0
#define GRSPW_ICCTRL_INUM       (0x1f << GRSPW_ICCTRL_INUM_BIT)
#define GRSPW_ICCTRL_IA         (1 << GRSPW_ICCTRL_IA_BIT)
#define GRSPW_ICCTRL_LE         (1 << GRSPW_ICCTRL_LE_BIT)
#define GRSPW_ICCTRL_PR         (1 << GRSPW_ICCTRL_PR_BIT)
#define GRSPW_ICCTRL_DQ         (1 << GRSPW_ICCTRL_DQ_BIT)
#define GRSPW_ICCTRL_TQ         (1 << GRSPW_ICCTRL_TQ_BIT)
#define GRSPW_ICCTRL_AQ         (1 << GRSPW_ICCTRL_AQ_BIT)
#define GRSPW_ICCTRL_IQ         (1 << GRSPW_ICCTRL_IQ_BIT)
#define GRSPW_ICCTRL_IR         (1 << GRSPW_ICCTRL_IR_BIT)
#define GRSPW_ICCTRL_IT         (1 << GRSPW_ICCTRL_IT_BIT)
#define GRSPW_ICCTRL_NUMI       (0x7 << GRSPW_ICCTRL_NUMI_BIT)
#define GRSPW_ICCTRL_BIRQ       (0x1f << GRSPW_ICCTRL_BIRQ_BIT)
#define GRSPW_ICCTRL_ID         (1 << GRSPW_ICCTRL_ID_BIT)
#define GRSPW_ICCTRL_II         (1 << GRSPW_ICCTRL_II_BIT)
#define GRSPW_ICCTRL_TXIRQ      (0x3f << GRSPW_ICCTRL_TXIRQ_BIT)

/* RX Buffer Descriptor */
struct grspw_rxbd {
   volatile unsigned int ctrl;
   volatile unsigned int addr;
};

/* TX Buffer Descriptor */
struct grspw_txbd {
   volatile unsigned int ctrl;
   volatile unsigned int haddr;
   volatile unsigned int dlen;
   volatile unsigned int daddr;
};

/* GRSPW - DMA RXBD Ctrl */
#define GRSPW_RXBD_LEN_BIT 0
#define GRSPW_RXBD_LEN  (0x1ffffff<<GRSPW_RXBD_LEN_BIT)
#define GRSPW_RXBD_EN   (1<<25)
#define GRSPW_RXBD_WR   (1<<26)
#define GRSPW_RXBD_IE   (1<<27)
#define GRSPW_RXBD_EP   (1<<28)
#define GRSPW_RXBD_HC   (1<<29)
#define GRSPW_RXBD_DC   (1<<30)
#define GRSPW_RXBD_TR   (1<<31)

#define GRSPW_TXBD_HLEN (0xff<<0)
#define GRSPW_TXBD_NCL  (0xf<<8)
#define GRSPW_TXBD_EN   (1<<12)
#define GRSPW_TXBD_WR   (1<<13)
#define GRSPW_TXBD_IE   (1<<14)
#define GRSPW_TXBD_LE   (1<<15)
#define GRSPW_TXBD_HC   (1<<16)
#define GRSPW_TXBD_DC   (1<<17)

#define GRSPW_DMAADR_MASK_BIT   8
#define GRSPW_DMAADR_ADDR       (0xff<<GRSPW_DMAADR_ADDR_BIT)
#define GRSPW_DMAADR_MASK       (0xff<<GRSPW_DMAADR_MASK_BIT)


/* GRSPW Error Condition */
#define GRSPW_STAT_ERROR        (GRSPW_STS_EE | GRSPW_STS_IA | GRSPW_STS_WE | GRSPW_STS_PE | GRSPW_STS_DE | GRSPW_STS_ER | GRSPW_STS_CE)
#define GRSPW_DMA_STATUS_ERROR  (GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA)
/* GRSPW Link configuration options */
#define GRSPW_LINK_CFG          (GRSPW_CTRL_LI | GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS)
#define GRSPW_LINKSTATE(status) ((status & GRSPW_CTRL_LS) >> GRSPW_CTRL_LS_BIT)

/* Software Defaults */
#define DEFAULT_RXMAX 1024      /* 1 KBytes Max RX Packet Size */

/* GRSPW Constants */
#define GRSPW_TXBD_NR 64        /* Maximum number of TX Descriptors */
#define GRSPW_RXBD_NR 128       /* Maximum number of RX Descriptors */
#define GRSPW_TXBD_SIZE 16      /* Size in bytes of one TX descriptor */
#define GRSPW_RXBD_SIZE 8       /* Size in bytes of one RX descriptor */
#define BDTAB_SIZE 0x400        /* BD Table Size (RX or TX) */
#define BDTAB_ALIGN 0x400       /* BD Table Alignment Requirement */

/* Memory and HW register access routines. All accesses are 32-bit */
#define BD_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
/*#define BD_READ(addr) (*(volatile unsigned int *)(addr))*/
#define BD_READ(addr) leon_r32_no_cache((unsigned long)(addr))
#define REG_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
#define REG_READ(addr) (*(volatile unsigned int *)(addr))

struct grspw_ring {
        struct grspw_ring *next;        /* Next Descriptor */
        union {
                struct grspw_txbd *tx;  /* Descriptor Address */
                struct grspw_rxbd *rx;  /* Descriptor Address */
        } bd;
        struct grspw_pkt *pkt;          /* Packet description associated. NULL if none */
};

/* An entry in the TX descriptor Ring */
struct grspw_txring {
        struct grspw_txring *next;      /* Next Descriptor */
        struct grspw_txbd *bd;          /* Descriptor Address */
        struct grspw_pkt *pkt;          /* Packet description associated. NULL if none */
};

/* An entry in the RX descriptor Ring */
struct grspw_rxring {
        struct grspw_rxring *next;      /* Next Descriptor */
        struct grspw_rxbd *bd;          /* Descriptor Address */
        struct grspw_pkt *pkt;          /* Packet description associated. NULL if none */
};


struct grspw_dma_priv {
        struct grspw_priv *core;        /* GRSPW Core */
        struct grspw_dma_regs *regs;    /* DMA Channel Registers */
        int index;                      /* DMA Channel Index @ GRSPW core */
        int open;                       /* DMA Channel opened by user */
        int started;                    /* DMA Channel activity (start|stop) */
        rtems_id sem_rxdma;             /* DMA Channel RX Semaphore */
        rtems_id sem_txdma;             /* DMA Channel TX Semaphore */
        struct grspw_dma_stats stats;   /* DMA Channel Statistics */
        struct grspw_dma_config cfg;    /* DMA Channel Configuration */

        /*** RX ***/

        /* RX Descriptor Ring */
        struct grspw_rxbd *rx_bds;              /* Descriptor Address */
        struct grspw_rxbd *rx_bds_hwa;          /* Descriptor HW Address */
        struct grspw_rxring *rx_ring_base;
        struct grspw_rxring *rx_ring_head;      /* Next descriptor to enable */
        struct grspw_rxring *rx_ring_tail;      /* Oldest enabled Descriptor */
        int rx_irq_en_cnt_curr;
        struct {
                int waiting;
                int ready_cnt;
                int op;
                int recv_cnt;
                rtems_id sem_wait;              /* RX Semaphore used to implement RX blocking */
        } rx_wait;

        /* Queue of Packets READY to be scheduled */
        struct grspw_list ready;
        int ready_cnt;

        /* Scheduled RX Packets Queue */
        struct grspw_list rx_sched;
        int rx_sched_cnt;

        /* Queue of Packets that have been RECEIVED */
        struct grspw_list recv;
        int recv_cnt;


        /*** TX ***/

        /* TX Descriptor Ring */
        struct grspw_txbd *tx_bds;              /* Descriptor Address */
        struct grspw_txbd *tx_bds_hwa;          /* Descriptor HW Address */
        struct grspw_txring *tx_ring_base;
        struct grspw_txring *tx_ring_head;
        struct grspw_txring *tx_ring_tail;
        int tx_irq_en_cnt_curr;
        struct {
                int waiting;
                int send_cnt;
                int op;
                int sent_cnt;
                rtems_id sem_wait;              /* TX Semaphore used to implement TX blocking */
        } tx_wait;

        /* Queue of Packets ready to be scheduled for transmission */
        struct grspw_list send;
        int send_cnt;

        /* Scheduled TX Packets Queue */
        struct grspw_list tx_sched;
        int tx_sched_cnt;

        /* Queue of Packets that have been SENT */
        struct grspw_list sent;
        int sent_cnt;
};

struct grspw_priv {
        char devname[8];                /* Device name "grspw%d" */
        struct drvmgr_dev *dev;         /* Device */
        struct grspw_regs *regs;        /* Virtual Address of APB Registers */
        int irq;                        /* AMBA IRQ number of core */
        int index;                      /* Index in order it was probed */
        int core_index;                 /* Core Bus Index */
        int open;                       /* If Device is already opened (=1) or not (=0) */
        void *data;                     /* User private Data for this device instance, set by grspw_initialize_user */

        /* Features supported by Hardware */
        struct grspw_hw_sup hwsup;

        /* Pointer to an array of Maximally 4 DMA Channels */
        struct grspw_dma_priv *dma;

        /* Spin-lock ISR protection */
        SPIN_DECLARE(devlock);

        /* Descriptor Memory Area for TX & RX and all DMA channels */
        unsigned int bd_mem;
        unsigned int bd_mem_alloced;

        /*** Time Code Handling ***/
        void (*tcisr)(void *data, int timecode);
        void *tcisr_arg;

        /*** Interrupt-code Handling ***/
        spwpkt_ic_isr_t icisr;
        void *icisr_arg;

        /* Bit mask representing events which shall cause link disable. */
        unsigned int dis_link_on_err;

        /* Bit mask for link status bits to clear by ISR */
        unsigned int stscfg;

        /*** Message Queue Handling ***/
        struct grspw_work_config wc;

        /* "Core Global" Statistics gathered, not dependent on DMA channel */
        struct grspw_core_stats stats;
};

int grspw_initialized = 0;
int grspw_count = 0;
rtems_id grspw_sem;
static struct grspw_priv *priv_tab[GRSPW_MAX];

/* callback to upper layer when devices are discovered/removed */
void *(*grspw_dev_add)(int) = NULL;
void (*grspw_dev_del)(int,void*) = NULL;

/* Defaults to do nothing - user can override this function.
 * Called from work-task.
 */
void __attribute__((weak)) grspw_work_event(
        enum grspw_worktask_ev ev,
        unsigned int msg)
{

}

/* USER OVERRIDABLE - The work task priority. Set to -1 to disable creating
 * the work-task and work-queue to save space.
 */
int grspw_work_task_priority __attribute__((weak)) = 100;
rtems_id grspw_work_task;
static struct grspw_work_config grspw_wc_def;

STATIC void grspw_hw_stop(struct grspw_priv *priv);
STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma);
STATIC void grspw_dma_reset(struct grspw_dma_priv *dma);
STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma);
STATIC void grspw_isr(void *data);

void *grspw_open(int dev_no)
{
        struct grspw_priv *priv;
        unsigned int bdtabsize, hwa;
        int i;
        union drvmgr_key_value *value;

        if (grspw_initialized != 1 || (dev_no >= grspw_count))
                return NULL;

        priv = priv_tab[dev_no];

        /* Take GRSPW lock - Wait until we get semaphore */
        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return NULL;

        if (priv->open) {
                priv = NULL;
                goto out;
        }

        /* Initialize Spin-lock for GRSPW Device. This is to protect
         * CTRL and DMACTRL registers from ISR.
         */
        SPIN_INIT(&priv->devlock, priv->devname);

        priv->tcisr = NULL;
        priv->tcisr_arg = NULL;
        priv->icisr = NULL;
        priv->icisr_arg = NULL;
        priv->stscfg = LINKSTS_MASK;

        /* Default to common work queue and message queue; if they were not
         * created during initialization then this is disabled.
         */
        grspw_work_cfg(priv, &grspw_wc_def);

        grspw_stats_clr(priv);

        /* Allocate TX & RX Descriptor memory area for all DMA
         * channels. Max-size descriptor area is allocated (or user assigned):
         *  - 128 RX descriptors per DMA Channel
         *  - 64 TX descriptors per DMA Channel
         * Specified address must be in CPU RAM.
         */
        bdtabsize = 2 * BDTAB_SIZE * priv->hwsup.ndma_chans;
        value = drvmgr_dev_key_get(priv->dev, "bdDmaArea", DRVMGR_KT_INT);
        if (value) {
                priv->bd_mem = value->i;
                priv->bd_mem_alloced = 0;
                if (priv->bd_mem & (BDTAB_ALIGN-1)) {
                        GRSPW_DBG("GRSPW[%d]: user-def DMA-area not aligned",
                                  priv->index);
                        priv = NULL;
                        goto out;
                }
        } else {
                priv->bd_mem_alloced = (unsigned int)malloc(bdtabsize + BDTAB_ALIGN - 1);
                if (priv->bd_mem_alloced == 0) {
                        priv = NULL;
                        goto out;
                }
                /* Align memory */
                priv->bd_mem = (priv->bd_mem_alloced + (BDTAB_ALIGN - 1)) &
                               ~(BDTAB_ALIGN-1);
        }

        /* Translate into DMA address that HW can use to access DMA
         * descriptors
         */
        drvmgr_translate_check(
                priv->dev,
                CPUMEM_TO_DMA,
                (void *)priv->bd_mem,
                (void **)&hwa,
                bdtabsize);

        GRSPW_DBG("GRSPW%d DMA descriptor table setup: (alloced:%p, bd_mem:%p, size: %d)\n",
                priv->index, priv->bd_mem_alloced, priv->bd_mem, bdtabsize + BDTAB_ALIGN - 1);
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                /* Do DMA Channel Init; other variables etc. are initialized
                 * when the respective DMA channel is opened.
                 *
                 * index & core are initialized by probe function.
                 */
                priv->dma[i].open = 0;
                priv->dma[i].rx_bds = (struct grspw_rxbd *)
                        (priv->bd_mem + i*BDTAB_SIZE*2);
                priv->dma[i].rx_bds_hwa = (struct grspw_rxbd *)
                        (hwa + BDTAB_SIZE*(2*i));
                priv->dma[i].tx_bds = (struct grspw_txbd *)
                        (priv->bd_mem + BDTAB_SIZE*(2*i+1));
                priv->dma[i].tx_bds_hwa = (struct grspw_txbd *)
                        (hwa + BDTAB_SIZE*(2*i+1));
                GRSPW_DBG("  DMA[%i]: RX %p - %p (%p - %p)   TX %p - %p (%p - %p)\n",
                        i,
                        priv->dma[i].rx_bds, (void *)priv->dma[i].rx_bds + BDTAB_SIZE - 1,
                        priv->dma[i].rx_bds_hwa, (void *)priv->dma[i].rx_bds_hwa + BDTAB_SIZE - 1,
                        priv->dma[i].tx_bds, (void *)priv->dma[i].tx_bds + BDTAB_SIZE - 1,
                        priv->dma[i].tx_bds_hwa, (void *)priv->dma[i].tx_bds_hwa + BDTAB_SIZE - 1);
        }

        /* Basic initialization of hardware, clear some registers but
         * keep Link/RMAP/Node-Address registers intact.
         */
        grspw_hw_stop(priv);

        /* Register Interrupt handler and enable IRQ at IRQ ctrl */
        drvmgr_interrupt_register(priv->dev, 0, priv->devname, grspw_isr, priv);

        /* Take the device */
        priv->open = 1;
out:
        rtems_semaphore_release(grspw_sem);
        return priv;
}

int grspw_close(void *d)
{
        struct grspw_priv *priv = d;
        int i;

        /* Take GRSPW lock - Wait until we get semaphore */
        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return -1;

        /* Check that user has stopped and closed all DMA channels
         * appropriately. At this point the Hardware shall not be doing DMA
         * or generating Interrupts. We want HW in a "startup-state".
         */
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                if (priv->dma[i].open) {
                        rtems_semaphore_release(grspw_sem);
                        return 1;
                }
        }
        grspw_hw_stop(priv);

        /* Uninstall Interrupt handler */
        drvmgr_interrupt_unregister(priv->dev, 0, grspw_isr, priv);

        /* Free descriptor table memory if allocated using malloc() */
        if (priv->bd_mem_alloced) {
                free((void *)priv->bd_mem_alloced);
                priv->bd_mem_alloced = 0;
        }

        /* Mark not open */
        priv->open = 0;
        rtems_semaphore_release(grspw_sem);
        return 0;
}
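
/* Usage sketch (illustrative only, compiled out): typical application-side
 * open/close sequence for the functions above. Device number 0 and the
 * function name are assumptions made for the example.
 */
#if 0
void example_grspw_user(void)
{
        void *dev = grspw_open(0);      /* claim GRSPW device 0 */

        if (dev == NULL)
                return; /* driver not initialized, no such device or busy */

        /* ... configure link, open/start DMA channels, transfer data ... */

        if (grspw_close(dev) == 1) {
                /* a DMA channel is still open: close all channels first,
                 * then call grspw_close() again
                 */
        }
}
#endif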

void grspw_hw_support(void *d, struct grspw_hw_sup *hw)
{
        struct grspw_priv *priv = d;

        *hw = priv->hwsup;
}

void grspw_addr_ctrl(void *d, struct grspw_addr_config *cfg)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs;
        unsigned int ctrl, nodeaddr;
        SPIN_IRQFLAGS(irqflags);
        int i;

        if (!priv || !cfg)
                return;

        regs = priv->regs;

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);

        if (cfg->promiscuous != -1) {
                /* Set Configuration */
                ctrl = REG_READ(&regs->ctrl);
                if (cfg->promiscuous)
                        ctrl |= GRSPW_CTRL_PM;
                else
                        ctrl &= ~GRSPW_CTRL_PM;
                REG_WRITE(&regs->ctrl, ctrl);
                REG_WRITE(&regs->nodeaddr, (cfg->def_mask<<8) | cfg->def_addr);

                for (i=0; i<priv->hwsup.ndma_chans; i++) {
                        ctrl = REG_READ(&regs->dma[i].ctrl);
                        ctrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
                        if (cfg->dma_nacfg[i].node_en) {
                                ctrl |= GRSPW_DMACTRL_EN;
                                REG_WRITE(&regs->dma[i].addr,
                                          (cfg->dma_nacfg[i].node_addr & 0xff) |
                                          ((cfg->dma_nacfg[i].node_mask & 0xff)<<8));
                        } else {
                                ctrl &= ~GRSPW_DMACTRL_EN;
                        }
                        REG_WRITE(&regs->dma[i].ctrl, ctrl);
                }
        }

        /* Read Current Configuration */
        cfg->promiscuous = REG_READ(&regs->ctrl) & GRSPW_CTRL_PM;
        nodeaddr = REG_READ(&regs->nodeaddr);
        cfg->def_addr = (nodeaddr & GRSPW_DEF_ADDR) >> GRSPW_DEF_ADDR_BIT;
        cfg->def_mask = (nodeaddr & GRSPW_DEF_MASK) >> GRSPW_DEF_MASK_BIT;
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                cfg->dma_nacfg[i].node_en = REG_READ(&regs->dma[i].ctrl) &
                                                GRSPW_DMACTRL_EN;
                ctrl = REG_READ(&regs->dma[i].addr);
                cfg->dma_nacfg[i].node_addr = (ctrl & GRSPW_DMAADR_ADDR) >>
                                                GRSPW_DMAADR_ADDR_BIT;
                cfg->dma_nacfg[i].node_mask = (ctrl & GRSPW_DMAADR_MASK) >>
                                                GRSPW_DMAADR_MASK_BIT;
        }
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        for (; i<4; i++) {
                cfg->dma_nacfg[i].node_en = 0;
                cfg->dma_nacfg[i].node_addr = 0;
                cfg->dma_nacfg[i].node_mask = 0;
        }
}

/* Return Current DMA CTRL/Status Register */
unsigned int grspw_dma_ctrlsts(void *c)
{
        struct grspw_dma_priv *dma = c;

        return REG_READ(&dma->regs->ctrl);
}

/* Return Current Status Register */
unsigned int grspw_link_status(void *d)
{
        struct grspw_priv *priv = d;

        return REG_READ(&priv->regs->status);
}

/* Clear Status Register bits */
void grspw_link_status_clr(void *d, unsigned int mask)
{
        struct grspw_priv *priv = d;

        REG_WRITE(&priv->regs->status, mask);
}

/* Return Current Link State */
spw_link_state_t grspw_link_state(void *d)
{
        struct grspw_priv *priv = d;
        unsigned int status = REG_READ(&priv->regs->status);

        return (status & GRSPW_STS_LS) >> GRSPW_STS_LS_BIT;
}

/* Enable Global IRQ only if some irq source is set */
static inline int grspw_is_irqsource_set(unsigned int ctrl, unsigned int icctrl)
{
        return (ctrl & GRSPW_CTRL_IRQSRC_MASK) ||
                (icctrl & GRSPW_ICCTRL_IRQSRC_MASK);
}


/* options and clkdiv [in/out]: set to -1 to only read current config */
void grspw_link_ctrl(void *d, int *options, int *stscfg, int *clkdiv)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        SPIN_IRQFLAGS(irqflags);

        /* Write? */
        if (clkdiv) {
                if (*clkdiv != -1)
                        REG_WRITE(&regs->clkdiv, *clkdiv & GRSPW_CLKDIV_MASK);
                *clkdiv = REG_READ(&regs->clkdiv) & GRSPW_CLKDIV_MASK;
        }
        if (options) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                if (*options != -1) {
                        ctrl = (ctrl & ~GRSPW_LINK_CFG) |
                                (*options & GRSPW_LINK_CFG);

                        /* Enable Global IRQ only if some irq source is set */
                        if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
                                ctrl |= GRSPW_CTRL_IE;
                        else
                                ctrl &= ~GRSPW_CTRL_IE;

                        REG_WRITE(&regs->ctrl, ctrl);
                        /* Store the link disable events for use in
                         * ISR. The LINKOPTS_DIS_ON_* options are actually
                         * the corresponding bits in the status register,
                         * shifted by 16.
                         */
                        priv->dis_link_on_err = *options &
                                (LINKOPTS_MASK_DIS_ON | LINKOPTS_DIS_ONERR);
                }
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
                *options = (ctrl & GRSPW_LINK_CFG) | priv->dis_link_on_err;
        }
        if (stscfg) {
                if (*stscfg != -1) {
                        priv->stscfg = *stscfg & LINKSTS_MASK;
                }
                *stscfg = priv->stscfg;
        }
}
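
/* Usage sketch (illustrative only, compiled out): because options, stscfg
 * and clkdiv are in/out parameters, writing -1 reads back the current
 * configuration without touching the hardware.
 */
#if 0
void example_link_query(void *dev)
{
        int options = -1, stscfg = -1, clkdiv = -1;

        grspw_link_ctrl(dev, &options, &stscfg, &clkdiv);
        /* options/stscfg/clkdiv now hold the current link configuration */
}
#endif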

/* Generate Tick-In (increment Time Counter, Send Time Code) */
void grspw_tc_tx(void *d)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        SPIN_IRQFLAGS(irqflags);

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_TI);
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
}

void grspw_tc_ctrl(void *d, int *options)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        SPIN_IRQFLAGS(irqflags);

        if (options == NULL)
                return;

        /* Write? */
        if (*options != -1) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~(GRSPW_CTRL_TR|GRSPW_CTRL_TT|GRSPW_CTRL_TQ);
                ctrl |= (*options & 0xd) << GRSPW_CTRL_TQ_BIT;

                /* Enable Global IRQ only if some irq source is set */
                if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
                        ctrl |= GRSPW_CTRL_IE;
                else
                        ctrl &= ~GRSPW_CTRL_IE;

                REG_WRITE(&regs->ctrl, ctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        } else
                ctrl = REG_READ(&regs->ctrl);
        *options = (ctrl >> GRSPW_CTRL_TQ_BIT) & 0xd;
}

/* Assign ISR Function to TimeCode RX IRQ */
void grspw_tc_isr(void *d, void (*tcisr)(void *data, int tc), void *data)
{
        struct grspw_priv *priv = d;

        priv->tcisr_arg = data;
        priv->tcisr = tcisr;
}

/* Read/Write TCTRL and TIMECNT. Write if not -1, always read current value
 * TCTRL   = bits 7 and 6
 * TIMECNT = bits 5 to 0
 */
void grspw_tc_time(void *d, int *time)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        if (time == NULL)
                return;
        if (*time != -1)
                REG_WRITE(&regs->time, *time & (GRSPW_TIME_TCNT | GRSPW_TIME_CTRL));
        *time = REG_READ(&regs->time) & (GRSPW_TIME_TCNT | GRSPW_TIME_CTRL);
}
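
/* Usage sketch (illustrative only, compiled out): transmitting a time code
 * with the helpers above. The initial counter value is an assumption made
 * for the example.
 */
#if 0
void example_timecode_tx(void *dev)
{
        int time = 0;           /* load time counter with zero */

        grspw_tc_time(dev, &time);
        grspw_tc_tx(dev);       /* Tick-In: increment counter, send code */

        time = -1;              /* -1 only reads back TCTRL/TIMECNT */
        grspw_tc_time(dev, &time);
}
#endif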

/* Generate Tick-In for the given Interrupt-code and check for generation
 * error.
 *
 * Returns zero on success and non-zero on failure
 */
int grspw_ic_tickin(void *d, int ic)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        SPIN_IRQFLAGS(irqflags);
        unsigned int icctrl, mask;

        /* Prepare before turning off IRQ */
        mask = 0x3f << GRSPW_ICCTRL_TXIRQ_BIT;
        ic = ((ic << GRSPW_ICCTRL_TXIRQ_BIT) & mask) |
             GRSPW_ICCTRL_II | GRSPW_ICCTRL_ID;

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        icctrl = REG_READ(&regs->icctrl);
        icctrl &= ~mask;
        icctrl |= ic;
        REG_WRITE(&regs->icctrl, icctrl); /* Generate SpW Interrupt Tick-In */
        /* the ID bit is valid after two clocks, so we do not need to wait here */
        icctrl = REG_READ(&regs->icctrl); /* Check SpW-Int generation error */
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);

        return icctrl & GRSPW_ICCTRL_ID;
}

#define ICOPTS_CTRL_MASK ICOPTS_EN_FLAGFILTER
#define ICOPTS_ICCTRL_MASK                                              \
        (ICOPTS_INTNUM | ICOPTS_EN_SPWIRQ_ON_EE  | ICOPTS_EN_SPWIRQ_ON_IA | \
         ICOPTS_EN_PRIO | ICOPTS_EN_TIMEOUTIRQ | ICOPTS_EN_ACKIRQ | \
         ICOPTS_EN_TICKOUTIRQ | ICOPTS_EN_RX | ICOPTS_EN_TX | \
         ICOPTS_BASEIRQ)

/* Control Interrupt-code settings of core
 * Write if not pointing to -1, always read current value
 *
 * TODO: A lot of code duplication with grspw_tc_ctrl
 */
void grspw_ic_ctrl(void *d, unsigned int *options)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        unsigned int icctrl;
        SPIN_IRQFLAGS(irqflags);

        if (options == NULL)
                return;

        if (*options != -1) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);

                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~GRSPW_CTRL_TF; /* Depends on one to one relation between
                                         * irqopts bits and ctrl bits */
                ctrl |= (*options & ICOPTS_CTRL_MASK) <<
                        (GRSPW_CTRL_TF_BIT - 0);

                icctrl = REG_READ(&regs->icctrl);
                icctrl &= ~ICOPTS_ICCTRL_MASK; /* Depends on one to one relation between
                                                * irqopts bits and icctrl bits */
                icctrl |= *options & ICOPTS_ICCTRL_MASK;

                /* Enable Global IRQ only if some irq source is set */
                if (grspw_is_irqsource_set(ctrl, icctrl))
                        ctrl |= GRSPW_CTRL_IE;
                else
                        ctrl &= ~GRSPW_CTRL_IE;

                REG_WRITE(&regs->ctrl, ctrl);
                REG_WRITE(&regs->icctrl, icctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        }
        *options = ((REG_READ(&regs->ctrl) & ICOPTS_CTRL_MASK) |
                    (REG_READ(&regs->icctrl) & ICOPTS_ICCTRL_MASK));
}

void grspw_ic_config(void *d, int rw, struct spwpkt_ic_config *cfg)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        if (!cfg)
                return;

        if (rw & 1) {
                REG_WRITE(&regs->ictickomask, cfg->tomask);
                REG_WRITE(&regs->icaamask, cfg->aamask);
                REG_WRITE(&regs->icrlpresc, cfg->scaler);
                REG_WRITE(&regs->icrlisr, cfg->isr_reload);
                REG_WRITE(&regs->icrlintack, cfg->ack_reload);
        }
        if (rw & 2) {
                cfg->tomask = REG_READ(&regs->ictickomask);
                cfg->aamask = REG_READ(&regs->icaamask);
                cfg->scaler = REG_READ(&regs->icrlpresc);
                cfg->isr_reload = REG_READ(&regs->icrlisr);
                cfg->ack_reload = REG_READ(&regs->icrlintack);
        }
}

/* Read or Write Interrupt-code status registers */
void grspw_ic_sts(void *d, unsigned int *rxirq, unsigned int *rxack, unsigned int *intto)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        /* No locking needed since the status bits are clear-on-write */

        if (rxirq) {
                if (*rxirq != 0)
                        REG_WRITE(&regs->icrx, *rxirq);
                else
                        *rxirq = REG_READ(&regs->icrx);
        }

        if (rxack) {
                if (*rxack != 0)
                        REG_WRITE(&regs->icack, *rxack);
                else
                        *rxack = REG_READ(&regs->icack);
        }

        if (intto) {
                if (*intto != 0)
                        REG_WRITE(&regs->ictimeout, *intto);
                else
                        *intto = REG_READ(&regs->ictimeout);
        }
}

/* Assign handler function to Interrupt-code tick out IRQ */
void grspw_ic_isr(void *d, spwpkt_ic_isr_t handler, void *data)
{
        struct grspw_priv *priv = d;

        priv->icisr_arg = data;
        priv->icisr = handler;
}

/* Set (not -1) and/or read RMAP options. */
int grspw_rmap_ctrl(void *d, int *options, int *dstkey)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        SPIN_IRQFLAGS(irqflags);

        if (dstkey) {
                if (*dstkey != -1)
                        REG_WRITE(&regs->destkey, *dstkey & GRSPW_DK_DESTKEY);
                *dstkey = REG_READ(&regs->destkey) & GRSPW_DK_DESTKEY;
        }
        if (options) {
                if (*options != -1) {
                        if ((*options & RMAPOPTS_EN_RMAP) && !priv->hwsup.rmap)
                                return -1;

                        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                        ctrl = REG_READ(&regs->ctrl);
                        ctrl &= ~(GRSPW_CTRL_RE|GRSPW_CTRL_RD);
                        ctrl |= (*options & 0x3) << GRSPW_CTRL_RE_BIT;
                        REG_WRITE(&regs->ctrl, ctrl);
                        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
                }
                *options = (REG_READ(&regs->ctrl) >> GRSPW_CTRL_RE_BIT) & 0x3;
        }

        return 0;
}

void grspw_rmap_support(void *d, char *rmap, char *rmap_crc)
{
        struct grspw_priv *priv = d;

        if (rmap)
                *rmap = priv->hwsup.rmap;
        if (rmap_crc)
                *rmap_crc = priv->hwsup.rmap_crc;
}

/* Select port:
 * -1=Only return the currently selected port
 * 0=Port 0
 * 1=Port 1
 * Others=Both Port0 and Port1
 */
int grspw_port_ctrl(void *d, int *port)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        SPIN_IRQFLAGS(irqflags);

        if (port == NULL)
                return -1;

        if ((*port == 1) || (*port == 0)) {
                /* Select port user selected */
                if ((*port == 1) && (priv->hwsup.nports < 2))
                        return -1; /* Changing to Port 1, but only one port available */
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~(GRSPW_CTRL_NP | GRSPW_CTRL_PS);
                ctrl |= (*port & 1) << GRSPW_CTRL_PS_BIT;
                REG_WRITE(&regs->ctrl, ctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        } else if (*port > 1) {
                /* Select both ports */
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_NP);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        }

        /* Get current settings */
        ctrl = REG_READ(&regs->ctrl);
        if (ctrl & GRSPW_CTRL_NP) {
                /* Any port, selected by hardware */
                if (priv->hwsup.nports > 1)
                        *port = 3;
                else
                        *port = 0; /* Port0 the only port available */
        } else {
                *port = (ctrl & GRSPW_CTRL_PS) >> GRSPW_CTRL_PS_BIT;
        }

        return 0;
}
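
/* Usage sketch (illustrative only, compiled out): select Port 1 when the
 * hardware actually implements two ports, otherwise keep the current
 * selection.
 */
#if 0
void example_select_port(void *dev)
{
        int port = 1;

        if (grspw_port_count(dev) > 1)
                grspw_port_ctrl(dev, &port);    /* port reads back selection */
}
#endif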

/* Returns the number of ports available in hardware */
int grspw_port_count(void *d)
{
        struct grspw_priv *priv = d;

        return priv->hwsup.nports;
}

/* Current active port: 0 or 1 */
int grspw_port_active(void *d)
{
        struct grspw_priv *priv = d;
        unsigned int status;

        status = REG_READ(&priv->regs->status);

        return (status & GRSPW_STS_AP) >> GRSPW_STS_AP_BIT;
}

void grspw_stats_read(void *d, struct grspw_core_stats *sts)
{
        struct grspw_priv *priv = d;

        if (sts == NULL)
                return;
        memcpy(sts, &priv->stats, sizeof(priv->stats));
}

void grspw_stats_clr(void *d)
{
        struct grspw_priv *priv = d;

        /* Clear most of the statistics */
        memset(&priv->stats, 0, sizeof(priv->stats));
}

/*** DMA Interface ***/

/* Initialize the RX and TX Descriptor Ring, empty of packets */
STATIC void grspw_bdrings_init(struct grspw_dma_priv *dma)
{
        struct grspw_ring *r;
        int i;

        /* Empty BD rings */
        dma->rx_ring_head = dma->rx_ring_base;
        dma->rx_ring_tail = dma->rx_ring_base;
        dma->tx_ring_head = dma->tx_ring_base;
        dma->tx_ring_tail = dma->tx_ring_base;

        /* Init RX Descriptors */
        r = (struct grspw_ring *)dma->rx_ring_base;
        for (i=0; i<GRSPW_RXBD_NR; i++) {

                /* Init Ring Entry */
                r[i].next = &r[i+1];
                r[i].bd.rx = &dma->rx_bds[i];
                r[i].pkt = NULL;

                /* Init HW Descriptor */
                BD_WRITE(&r[i].bd.rx->ctrl, 0);
                BD_WRITE(&r[i].bd.rx->addr, 0);
        }
        r[GRSPW_RXBD_NR-1].next = &r[0];

        /* Init TX Descriptors */
        r = (struct grspw_ring *)dma->tx_ring_base;
        for (i=0; i<GRSPW_TXBD_NR; i++) {

                /* Init Ring Entry */
                r[i].next = &r[i+1];
                r[i].bd.tx = &dma->tx_bds[i];
                r[i].pkt = NULL;

                /* Init HW Descriptor */
                BD_WRITE(&r[i].bd.tx->ctrl, 0);
                BD_WRITE(&r[i].bd.tx->haddr, 0);
                BD_WRITE(&r[i].bd.tx->dlen, 0);
                BD_WRITE(&r[i].bd.tx->daddr, 0);
        }
        r[GRSPW_TXBD_NR-1].next = &r[0];
}

/* Try to populate the descriptor ring with as many unused READY packet
 * buffers as possible. The packets assigned to a descriptor are put at the
 * end of the scheduled list.
 *
 * The number of Packets scheduled is returned.
 *
 *  - READY List -> RX-SCHED List
 *  - Descriptors are initialized and enabled for reception
 */
STATIC int grspw_rx_schedule_ready(struct grspw_dma_priv *dma)
{
        int cnt;
        unsigned int ctrl, dmactrl;
        void *hwaddr;
        struct grspw_rxring *curr_bd;
        struct grspw_pkt *curr_pkt, *last_pkt;
        struct grspw_list lst;
        SPIN_IRQFLAGS(irqflags);

        /* Is Ready Q empty? */
        if (grspw_list_is_empty(&dma->ready))
                return 0;

        cnt = 0;
        lst.head = curr_pkt = dma->ready.head;
        curr_bd = dma->rx_ring_head;
        while (!curr_bd->pkt) {

                /* Assign Packet to descriptor */
                curr_bd->pkt = curr_pkt;

                /* Prepare descriptor address. */
                hwaddr = curr_pkt->data;
                if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
                        drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
                                         hwaddr, &hwaddr);
                        if (curr_pkt->data == hwaddr) /* translation needed? */
                                curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
                }
                BD_WRITE(&curr_bd->bd->addr, hwaddr);

                ctrl = GRSPW_RXBD_EN;
                if (curr_bd->next == dma->rx_ring_base) {
                        /* Wrap around (only needed when smaller descriptor
                         * table)
                         */
                        ctrl |= GRSPW_RXBD_WR;
                }

                /* Is this Packet going to be an interrupt Packet? */
                if ((--dma->rx_irq_en_cnt_curr) <= 0) {
                        if (dma->cfg.rx_irq_en_cnt == 0) {
                                /* IRQ is disabled. Use a large value to
                                 * avoid hitting zero too often.
                                 */
                                dma->rx_irq_en_cnt_curr = 0x3fffffff;
                        } else {
                                dma->rx_irq_en_cnt_curr = dma->cfg.rx_irq_en_cnt;
                                ctrl |= GRSPW_RXBD_IE;
                        }
                }

                if (curr_pkt->flags & RXPKT_FLAG_IE)
                        ctrl |= GRSPW_RXBD_IE;

                /* Enable descriptor */
                BD_WRITE(&curr_bd->bd->ctrl, ctrl);

                last_pkt = curr_pkt;
                curr_bd = curr_bd->next;
                cnt++;

                /* Get Next Packet from Ready Queue */
                if (curr_pkt == dma->ready.tail) {
                        /* Handled all in ready queue. */
                        curr_pkt = NULL;
                        break;
                }
                curr_pkt = curr_pkt->next;
        }

        /* Have Packets been scheduled? */
        if (cnt > 0) {
                /* Prepare list for insertion/deletion */
                lst.tail = last_pkt;

                /* Remove scheduled packets from ready queue */
                grspw_list_remove_head_list(&dma->ready, &lst);
                dma->ready_cnt -= cnt;
                if (dma->stats.ready_cnt_min > dma->ready_cnt)
                        dma->stats.ready_cnt_min = dma->ready_cnt;

                /* Insert scheduled packets into scheduled queue */
                grspw_list_append_list(&dma->rx_sched, &lst);
                dma->rx_sched_cnt += cnt;
                if (dma->stats.rx_sched_cnt_max < dma->rx_sched_cnt)
                        dma->stats.rx_sched_cnt_max = dma->rx_sched_cnt;

                /* Update RX ring position */
                dma->rx_ring_head = curr_bd;

                /* Make hardware aware of the newly enabled descriptors.
                 * We must protect from ISR which writes RI|TI
                 */
                SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
                dmactrl = REG_READ(&dma->regs->ctrl);
                dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
                dmactrl |= GRSPW_DMACTRL_RE | GRSPW_DMACTRL_RD;
                REG_WRITE(&dma->regs->ctrl, dmactrl);
                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
        }

        return cnt;
}

/* Scans the RX descriptor table for scheduled Packets that have been
 * received, and moves these Packets from the head of the scheduled queue
 * to the tail of the recv queue.
 *
 * Also, for all packets the status is updated.
 *
 *  - RX-SCHED List -> RECV List
 *
 * Return Value
 * Number of packets moved
 */
STATIC int grspw_rx_process_scheduled(struct grspw_dma_priv *dma)
{
        struct grspw_rxring *curr;
        struct grspw_pkt *last_pkt;
        int recv_pkt_cnt = 0;
        unsigned int ctrl;
        struct grspw_list lst;

        curr = dma->rx_ring_tail;

        /* Step into RX ring to find if packets have been scheduled for
         * reception.
         */
        if (!curr->pkt)
                return 0; /* No scheduled packets, thus no received, abort */

        /* There have been Packets scheduled ==> scheduled Packets may have
         * been received and need to be collected into RECV List.
         *
         * A temporary list "lst" with all received packets is created.
         */
        lst.head = curr->pkt;

        /* Loop until first enabled "unreceived" SpW Packet is found.
         * An unused descriptor is indicated by an unassigned pkt field.
         */
        while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_RXBD_EN)) {
                /* Handle one received Packet */

                /* Remember last handled Packet so that insertion/removal from
                 * Packet lists go fast.
                 */
                last_pkt = curr->pkt;

                /* Get Length of Packet in bytes, and reception options */
                last_pkt->dlen = (ctrl & GRSPW_RXBD_LEN) >> GRSPW_RXBD_LEN_BIT;

                /* Set flags to indicate error(s) and CRC information,
                 * and Mark Received.
                 */
                last_pkt->flags = (last_pkt->flags & ~RXPKT_FLAG_OUTPUT_MASK) |
                                  ((ctrl >> 20) & RXPKT_FLAG_OUTPUT_MASK) |
                                  RXPKT_FLAG_RX;

                /* Packet was Truncated? */
                if (ctrl & GRSPW_RXBD_TR)
                        dma->stats.rx_err_trunk++;

                /* Error End-Of-Packet? */
                if (ctrl & GRSPW_RXBD_EP)
                        dma->stats.rx_err_endpkt++;
                curr->pkt = NULL; /* Mark descriptor unused */

                /* Increment */
                curr = curr->next;
                recv_pkt_cnt++;
        }

        /* 1. Remove all handled packets from scheduled queue
         * 2. Put all handled packets into recv queue
         */
        if (recv_pkt_cnt > 0) {

                /* Update Stats, Number of Received Packets */
                dma->stats.rx_pkts += recv_pkt_cnt;

                /* Save RX ring position */
                dma->rx_ring_tail = curr;

                /* Prepare list for insertion/deletion */
                lst.tail = last_pkt;

                /* Remove received Packets from RX-SCHED queue */
                grspw_list_remove_head_list(&dma->rx_sched, &lst);
                dma->rx_sched_cnt -= recv_pkt_cnt;
                if (dma->stats.rx_sched_cnt_min > dma->rx_sched_cnt)
                        dma->stats.rx_sched_cnt_min = dma->rx_sched_cnt;

                /* Insert received Packets into RECV queue */
                grspw_list_append_list(&dma->recv, &lst);
                dma->recv_cnt += recv_pkt_cnt;
                if (dma->stats.recv_cnt_max < dma->recv_cnt)
                        dma->stats.recv_cnt_max = dma->recv_cnt;
        }

        return recv_pkt_cnt;
}
1430
1431/* Try to populate descriptor ring with as many SEND packets as possible. The
1432 * packets assigned with to a descriptor are put in the end of
1433 * the scheduled list.
1434 *
1435 * The number of Packets scheduled is returned.
1436 *
1437 *  - SEND List -> TX-SCHED List
1438 *  - Descriptors are initialized and enabled for transmission
1439 */
1440STATIC int grspw_tx_schedule_send(struct grspw_dma_priv *dma)
1441{
1442        int cnt;
1443        unsigned int ctrl, dmactrl;
1444        void *hwaddr;
1445        struct grspw_txring *curr_bd;
1446        struct grspw_pkt *curr_pkt, *last_pkt;
1447        struct grspw_list lst;
1448        SPIN_IRQFLAGS(irqflags);
1449
1450        /* Is the SEND (TX ready) queue empty? */
1451        if (grspw_list_is_empty(&dma->send))
1452                return 0;
1453
1454        cnt = 0;
1455        lst.head = curr_pkt = dma->send.head;
1456        curr_bd = dma->tx_ring_head;
1457        while (!curr_bd->pkt) {
1458
1459                /* Assign Packet to descriptor */
1460                curr_bd->pkt = curr_pkt;
1461
1462                /* Set up header transmission */
1463                if (curr_pkt->hdr && curr_pkt->hlen) {
1464                        hwaddr = curr_pkt->hdr;
1465                        if (curr_pkt->flags & PKT_FLAG_TR_HDR) {
1466                                drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
1467                                                 hwaddr, &hwaddr);
1468                                /* translation needed? */
1469                                if (curr_pkt->hdr == hwaddr)
1470                                        curr_pkt->flags &= ~PKT_FLAG_TR_HDR;
1471                        }
1472                        BD_WRITE(&curr_bd->bd->haddr, hwaddr);
1473                        ctrl = GRSPW_TXBD_EN | curr_pkt->hlen;
1474                } else {
1475                        ctrl = GRSPW_TXBD_EN;
1476                }
1477                /* Enable IRQ generation and CRC options as specified
1478                 * by user.
1479                 */
1480                ctrl |= (curr_pkt->flags & TXPKT_FLAG_INPUT_MASK) << 8;
1481
1482                if (curr_bd->next == dma->tx_ring_base) {
1483                        /* Wrap around (only needed when smaller descriptor table) */
1484                        ctrl |= GRSPW_TXBD_WR;
1485                }
1486
1487                /* Is this Packet going to be an interrupt Packet? */
1488                if ((--dma->tx_irq_en_cnt_curr) <= 0) {
1489                        if (dma->cfg.tx_irq_en_cnt == 0) {
1490                                /* IRQ is disabled.
1491                                 * Use a large value so the counter rarely reaches zero.
1492                                 */
1493                                dma->tx_irq_en_cnt_curr = 0x3fffffff;
1494                        } else {
1495                                dma->tx_irq_en_cnt_curr = dma->cfg.tx_irq_en_cnt;
1496                                ctrl |= GRSPW_TXBD_IE;
1497                        }
1498                }
1499
1500                /* Prepare descriptor address. Parts of CTRL are written to
1501                 * DLEN for debug purposes only (CTRL is cleared by HW).
1502                 */
1503                if (curr_pkt->data && curr_pkt->dlen) {
1504                        hwaddr = curr_pkt->data;
1505                        if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
1506                                drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
1507                                                 hwaddr, &hwaddr);
1508                                /* translation needed? */
1509                                if (curr_pkt->data == hwaddr)
1510                                        curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
1511                        }
1512                        BD_WRITE(&curr_bd->bd->daddr, hwaddr);
1513                        BD_WRITE(&curr_bd->bd->dlen, curr_pkt->dlen |
1514                                                     ((ctrl & 0x3f000) << 12));
1515                } else {
1516                        BD_WRITE(&curr_bd->bd->daddr, 0);
1517                        BD_WRITE(&curr_bd->bd->dlen, ((ctrl & 0x3f000) << 12));
1518                }
1519
1520                /* Enable descriptor */
1521                BD_WRITE(&curr_bd->bd->ctrl, ctrl);
1522
1523                last_pkt = curr_pkt;
1524                curr_bd = curr_bd->next;
1525                cnt++;
1526
1527                /* Get Next Packet from SEND Queue */
1528                if (curr_pkt == dma->send.tail) {
1529                        /* Handled all packets in the SEND queue. */
1530                        curr_pkt = NULL;
1531                        break;
1532                }
1533                curr_pkt = curr_pkt->next;
1534        }
1535
1536        /* Have Packets been scheduled? */
1537        if (cnt > 0) {
1538                /* Prepare list for insertion/deletion */
1539                lst.tail = last_pkt;
1540
1541                /* Remove scheduled packets from SEND queue */
1542                grspw_list_remove_head_list(&dma->send, &lst);
1543                dma->send_cnt -= cnt;
1544                if (dma->stats.send_cnt_min > dma->send_cnt)
1545                        dma->stats.send_cnt_min = dma->send_cnt;
1546
1547                /* Insert scheduled packets into TX-SCHED queue */
1548                grspw_list_append_list(&dma->tx_sched, &lst);
1549                dma->tx_sched_cnt += cnt;
1550                if (dma->stats.tx_sched_cnt_max < dma->tx_sched_cnt)
1551                        dma->stats.tx_sched_cnt_max = dma->tx_sched_cnt;
1552
1553                /* Update TX ring position */
1554                dma->tx_ring_head = curr_bd;
1555
1556                /* Make hardware aware of the newly enabled descriptors */
1557                SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
1558                dmactrl = REG_READ(&dma->regs->ctrl);
1559                dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
1560                dmactrl |= GRSPW_DMACTRL_TE;
1561                REG_WRITE(&dma->regs->ctrl, dmactrl);
1562                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
1563        }
1564        return cnt;
1565}
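
/* Example (illustrative sketch): a TX packet with a separate header, matching
 * how grspw_tx_schedule_send() maps hdr/hlen and data/dlen to the descriptor.
 * The pkt_pool, cmd_hdr and cmd_data names are hypothetical application
 * buffers.
 *
 *   struct grspw_pkt *p = &pkt_pool[0];
 *
 *   p->hdr = cmd_hdr;
 *   p->hlen = 16;           // header bytes transmitted before the payload
 *   p->data = cmd_data;
 *   p->dlen = 128;          // payload length in bytes
 *   p->flags = PKT_FLAG_TR_HDR | PKT_FLAG_TR_DATA; // CPU->DMA translation
 */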
1566
1567/* Scans the TX descriptor table for transmitted packets, and moves these
1568 * packets from the head of the scheduled queue to the tail of the sent queue.
1569 *
1570 * Also, for all packets the status is updated.
1571 *
1572 *  - SCHED List -> SENT List
1573 *
1574 * Return Value
1575 * Number of packets moved
1576 */
1577STATIC int grspw_tx_process_scheduled(struct grspw_dma_priv *dma)
1578{
1579        struct grspw_txring *curr;
1580        struct grspw_pkt *last_pkt;
1581        int sent_pkt_cnt = 0;
1582        unsigned int ctrl;
1583        struct grspw_list lst;
1584
1585        curr = dma->tx_ring_tail;
1586
1587        /* Step into TX ring to find if packets have been scheduled for
1588         * transmission.
1589         */
1590        if (!curr->pkt)
1591                return 0; /* No scheduled packets, thus no sent, abort */
1592
1593        /* Packets have been scheduled ==> scheduled Packets may have been
1594         * transmitted and need to be collected into the SENT List.
1595         *
1596         * A temporary list "lst" with all sent packets is created.
1597         */
1598        lst.head = curr->pkt;
1599
1600        /* Loop until first enabled "un-transmitted" SpW Packet is found.
1601         * An unused descriptor is indicated by an unassigned pkt field.
1602         */
1603        while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_TXBD_EN)) {
1604                /* Handle one sent Packet */
1605
1606                /* Remember last handled Packet so that insertion/removal from
1607                 * packet lists go fast.
1608                 */
1609                last_pkt = curr->pkt;
1610
1611                /* Set flags to indicate error(s) and Mark Sent.
1612                 */
1613                last_pkt->flags = (last_pkt->flags & ~TXPKT_FLAG_OUTPUT_MASK) |
1614                                        (ctrl & TXPKT_FLAG_LINKERR) |
1615                                        TXPKT_FLAG_TX;
1616
1617                /* Sent packet experienced link error? */
1618                if (ctrl & GRSPW_TXBD_LE)
1619                        dma->stats.tx_err_link++;
1620
1621                curr->pkt = NULL; /* Mark descriptor unused */
1622
1623                /* Increment */
1624                curr = curr->next;
1625                sent_pkt_cnt++;
1626        }
1627
1628        /* 1. Remove all handled packets from TX-SCHED queue
1629         * 2. Put all handled packets into SENT queue
1630         */
1631        if (sent_pkt_cnt > 0) {
1632                /* Update Stats, Number of Transmitted Packets */
1633                dma->stats.tx_pkts += sent_pkt_cnt;
1634
1635                /* Save TX ring position */
1636                dma->tx_ring_tail = curr;
1637
1638                /* Prepare list for insertion/deletion */
1639                lst.tail = last_pkt;
1640
1641                /* Remove sent packets from TX-SCHED queue */
1642                grspw_list_remove_head_list(&dma->tx_sched, &lst);
1643                dma->tx_sched_cnt -= sent_pkt_cnt;
1644                if (dma->stats.tx_sched_cnt_min > dma->tx_sched_cnt)
1645                        dma->stats.tx_sched_cnt_min = dma->tx_sched_cnt;
1646
1647                /* Insert sent packets into SENT queue */
1648                grspw_list_append_list(&dma->sent, &lst);
1649                dma->sent_cnt += sent_pkt_cnt;
1650                if (dma->stats.sent_cnt_max < dma->sent_cnt)
1651                        dma->stats.sent_cnt_max = dma->sent_cnt;
1652        }
1653
1654        return sent_pkt_cnt;
1655}
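
/* Example (illustrative sketch): decoding the flags set above once a packet
 * has been returned to the application by grspw_dma_tx_reclaim().
 *
 *   if (p->flags & TXPKT_FLAG_TX) {
 *           if (p->flags & TXPKT_FLAG_LINKERR)
 *                   puts("transmitted, but a link error occurred");
 *   } else {
 *           puts("never transmitted (e.g. DMA channel was stopped)");
 *   }
 */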
1656
1657void *grspw_dma_open(void *d, int chan_no)
1658{
1659        struct grspw_priv *priv = d;
1660        struct grspw_dma_priv *dma;
1661        int size;
1662
1663        if ((chan_no < 0) || (priv->hwsup.ndma_chans <= chan_no))
1664                return NULL;
1665
1666        dma = &priv->dma[chan_no];
1667
1668        /* Take GRSPW lock */
1669        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1670            != RTEMS_SUCCESSFUL)
1671                return NULL;
1672
1673        if (dma->open) {
1674                dma = NULL;
1675                goto out;
1676        }
1677
1678        dma->started = 0;
1679
1680        /* Set Default Configuration:
1681         *
1682         *  - MAX RX Packet Length = DEFAULT_RXMAX
1683         *  - Disable RX and TX IRQ generation
1684         *  - No spill (DMAFLAG_NO_SPILL)
1685         */
1686        dma->cfg.rxmaxlen = DEFAULT_RXMAX;
1687        dma->cfg.rx_irq_en_cnt = 0;
1688        dma->cfg.tx_irq_en_cnt = 0;
1689        dma->cfg.flags = DMAFLAG_NO_SPILL;
1690
1691        /* set to NULL so that error exit works correctly */
1692        dma->sem_rxdma = RTEMS_ID_NONE;
1693        dma->sem_txdma = RTEMS_ID_NONE;
1694        dma->rx_wait.sem_wait = RTEMS_ID_NONE;
1695        dma->tx_wait.sem_wait = RTEMS_ID_NONE;
1696        dma->rx_ring_base = NULL;
1697
1698        /* DMA Channel Semaphore created with count = 1 */
1699        if (rtems_semaphore_create(
1700            rtems_build_name('S', 'D', '0' + priv->index, '0' + chan_no*2), 1,
1701            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1702            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1703            RTEMS_NO_PRIORITY_CEILING, 0, &dma->sem_rxdma) != RTEMS_SUCCESSFUL) {
1704                dma->sem_rxdma = RTEMS_ID_NONE;
1705                goto err;
1706        }
1707        if (rtems_semaphore_create(
1708            rtems_build_name('S', 'D', '0' + priv->index, '0' + chan_no*2+1), 1,
1709            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1710            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1711            RTEMS_NO_PRIORITY_CEILING, 0, &dma->sem_txdma) != RTEMS_SUCCESSFUL) {
1712                dma->sem_txdma = RTEMS_ID_NONE;
1713                goto err;
1714        }
1715
1716        /* Allocate memory for the two descriptor rings */
1717        size = sizeof(struct grspw_ring) * (GRSPW_RXBD_NR + GRSPW_TXBD_NR);
1718        dma->rx_ring_base = (struct grspw_rxring *)malloc(size);
1719        dma->tx_ring_base = (struct grspw_txring *)&dma->rx_ring_base[GRSPW_RXBD_NR];
1720        if (dma->rx_ring_base == NULL)
1721                goto err;
1722
1723        /* Create the DMA RX and TX Channel wait semaphores with count = 0 */
1724        if (rtems_semaphore_create(
1725            rtems_build_name('S', 'R', '0' + priv->index, '0' + chan_no), 0,
1726            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1727            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1728            RTEMS_NO_PRIORITY_CEILING, 0, &dma->rx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
1729                dma->rx_wait.sem_wait = RTEMS_ID_NONE;
1730                goto err;
1731        }
1732        if (rtems_semaphore_create(
1733            rtems_build_name('S', 'T', '0' + priv->index, '0' + chan_no), 0,
1734            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1735            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1736            RTEMS_NO_PRIORITY_CEILING, 0, &dma->tx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
1737                dma->tx_wait.sem_wait = RTEMS_ID_NONE;
1738                goto err;
1739        }
1740
1741        /* Reset software structures */
1742        grspw_dma_reset(dma);
1743
1744        /* Take the device */
1745        dma->open = 1;
1746out:
1747        /* Return GRSPW Lock */
1748        rtems_semaphore_release(grspw_sem);
1749
1750        return dma;
1751
1752        /* initialization error happened */
1753err:
1754        if (dma->sem_rxdma != RTEMS_ID_NONE)
1755                rtems_semaphore_delete(dma->sem_rxdma);
1756        if (dma->sem_txdma != RTEMS_ID_NONE)
1757                rtems_semaphore_delete(dma->sem_txdma);
1758        if (dma->rx_wait.sem_wait != RTEMS_ID_NONE)
1759                rtems_semaphore_delete(dma->rx_wait.sem_wait);
1760        if (dma->tx_wait.sem_wait != RTEMS_ID_NONE)
1761                rtems_semaphore_delete(dma->tx_wait.sem_wait);
1762        if (dma->rx_ring_base)
1763                free(dma->rx_ring_base);
1764        dma = NULL;
1765        goto out;
1766}
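
/* Example (illustrative sketch): opening DMA channel 0 of device 0. The
 * grspw_open() call is assumed from the public grspw_pkt API.
 *
 *   void *dev, *chan;
 *
 *   dev = grspw_open(0);
 *   if (dev == NULL)
 *           return;         // no such device
 *   chan = grspw_dma_open(dev, 0);
 *   if (chan == NULL)
 *           return;         // bad channel number, already open or no memory
 *   grspw_dma_start(chan);
 */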
1767
1768/* Initialize Software Structures:
1769 *  - Clear all Queues
1770 *  - init BD ring
1771 *  - init IRQ counter
1772 *  - clear statistics counters
1773 *  - init wait structures and semaphores
1774 */
1775STATIC void grspw_dma_reset(struct grspw_dma_priv *dma)
1776{
1777        /* Empty RX and TX queues */
1778        grspw_list_clr(&dma->ready);
1779        grspw_list_clr(&dma->rx_sched);
1780        grspw_list_clr(&dma->recv);
1781        grspw_list_clr(&dma->send);
1782        grspw_list_clr(&dma->tx_sched);
1783        grspw_list_clr(&dma->sent);
1784        dma->ready_cnt = 0;
1785        dma->rx_sched_cnt = 0;
1786        dma->recv_cnt = 0;
1787        dma->send_cnt = 0;
1788        dma->tx_sched_cnt = 0;
1789        dma->sent_cnt = 0;
1790
1791        dma->rx_irq_en_cnt_curr = 0;
1792        dma->tx_irq_en_cnt_curr = 0;
1793
1794        grspw_bdrings_init(dma);
1795
1796        dma->rx_wait.waiting = 0;
1797        dma->tx_wait.waiting = 0;
1798
1799        grspw_dma_stats_clr(dma);
1800}
1801
1802int grspw_dma_close(void *c)
1803{
1804        struct grspw_dma_priv *dma = c;
1805
1806        if (!dma->open)
1807                return 0;
1808
1809        /* Take device lock - Wait until we get semaphore */
1810        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1811            != RTEMS_SUCCESSFUL)
1812                return -1;
1813        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1814            != RTEMS_SUCCESSFUL) {
1815                rtems_semaphore_release(dma->sem_rxdma);
1816                return -1;
1817        }
1818
1819        /* Cannot close an active DMA channel. The user must stop the DMA
1820         * channel and make sure no threads are active/blocked within the driver.
1821         */
1822        if (dma->started || dma->rx_wait.waiting || dma->tx_wait.waiting) {
1823                rtems_semaphore_release(dma->sem_txdma);
1824                rtems_semaphore_release(dma->sem_rxdma);
1825                return 1;
1826        }
1827
1828        /* Free resources */
1829        rtems_semaphore_delete(dma->rx_wait.sem_wait);
1830        rtems_semaphore_delete(dma->tx_wait.sem_wait);
1831        /* Release and delete lock. Operations requiring lock will fail */
1832        rtems_semaphore_delete(dma->sem_txdma);
1833        rtems_semaphore_delete(dma->sem_rxdma);
1834        dma->sem_txdma = RTEMS_ID_NONE;
1835        dma->sem_rxdma = RTEMS_ID_NONE;
1836
1837        /* Free memory */
1838        if (dma->rx_ring_base)
1839                free(dma->rx_ring_base);
1840        dma->rx_ring_base = NULL;
1841        dma->tx_ring_base = NULL;
1842
1843        dma->open = 0;
1844        return 0;
1845}
1846
1847unsigned int grspw_dma_enable_int(void *c, int rxtx, int force)
1848{
1849        struct grspw_dma_priv *dma = c;
1850        int rc = 0;
1851        unsigned int ctrl, ctrl_old;
1852        SPIN_IRQFLAGS(irqflags);
1853
1854        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
1855        if (dma->started == 0) {
1856                rc = 1; /* DMA stopped */
1857                goto out;
1858        }
1859        ctrl = REG_READ(&dma->regs->ctrl);
1860        ctrl_old = ctrl;
1861
1862        /* Read/Write DMA error ? */
1863        if (ctrl & GRSPW_DMA_STATUS_ERROR) {
1864                rc = 2; /* DMA error */
1865                goto out;
1866        }
1867
1868        /* DMA has finished a TX/RX packet and user wants work-task to
1869         * take care of DMA table processing.
1870         */
1871        ctrl &= ~GRSPW_DMACTRL_AT;
1872
1873        if ((rxtx & 1) == 0)
1874                ctrl &= ~GRSPW_DMACTRL_PR;
1875        else if (force || ((dma->cfg.rx_irq_en_cnt != 0) ||
1876                 (dma->cfg.flags & DMAFLAG2_RXIE)))
1877                ctrl |= GRSPW_DMACTRL_RI;
1878
1879        if ((rxtx & 2) == 0)
1880                ctrl &= ~GRSPW_DMACTRL_PS;
1881        else if (force || ((dma->cfg.tx_irq_en_cnt != 0) ||
1882                 (dma->cfg.flags & DMAFLAG2_TXIE)))
1883                ctrl |= GRSPW_DMACTRL_TI;
1884
1885        REG_WRITE(&dma->regs->ctrl, ctrl);
1886        /* Return the RX/TX status bits (PR/PS) that were previously set */
1887        rc = ctrl_old & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS);
1888out:
1889        SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
1890        return rc;
1891}
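
/* Example (illustrative sketch): a custom work handler re-enabling both the
 * RX (bit 0) and TX (bit 1) interrupt sources without forcing, mirroring
 * the call made by grspw_work_dma_func() below.
 *
 *   switch (grspw_dma_enable_int(chan, 3, 0)) {
 *   case 1:
 *           return;         // DMA channel stopped
 *   case 2:
 *           return;         // DMA error, needs channel stop/restart
 *   default:
 *           break;          // IRQ sources re-enabled according to config
 *   }
 */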
1892
1893/* Schedule List of packets for transmission at some point in
1894 * the future.
1895 *
1896 * 1. Move transmitted packets to SENT List (SCHED->SENT)
1897 * 2. Add the requested packets to the SEND List (USER->SEND)
1898 * 3. Schedule as many packets as possible (SEND->SCHED)
1899 */
1900int grspw_dma_tx_send(void *c, int opts, struct grspw_list *pkts, int count)
1901{
1902        struct grspw_dma_priv *dma = c;
1903        int ret;
1904
1905        /* Take DMA channel lock */
1906        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1907            != RTEMS_SUCCESSFUL)
1908                return -1;
1909
1910        if (dma->started == 0) {
1911                ret = 1; /* signal DMA has been stopped */
1912                goto out;
1913        }
1914        ret = 0;
1915
1916        /* 1. Move transmitted packets to SENT List (SCHED->SENT) */
1917        if ((opts & 1) == 0)
1918                grspw_tx_process_scheduled(dma);
1919
1920        /* 2. Add the requested packets to the SEND List (USER->SEND) */
1921        if (pkts && (count > 0)) {
1922                grspw_list_append_list(&dma->send, pkts);
1923                dma->send_cnt += count;
1924                if (dma->stats.send_cnt_max < dma->send_cnt)
1925                        dma->stats.send_cnt_max = dma->send_cnt;
1926        }
1927
1928        /* 3. Schedule as many packets as possible (SEND->SCHED) */
1929        if ((opts & 2) == 0)
1930                grspw_tx_schedule_send(dma);
1931
1932out:
1933        /* Unlock DMA channel */
1934        rtems_semaphore_release(dma->sem_txdma);
1935
1936        return ret;
1937}
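
/* Example (illustrative sketch): queue a single packet for transmission.
 * pkt_pool and tx_buf are hypothetical application-owned memory.
 *
 *   struct grspw_list lst;
 *   struct grspw_pkt *p = &pkt_pool[0];
 *
 *   p->next = NULL;
 *   p->hdr = NULL;          // no separate header part
 *   p->hlen = 0;
 *   p->data = tx_buf;
 *   p->dlen = 64;
 *   p->flags = PKT_FLAG_TR_DATA;
 *   lst.head = lst.tail = p;
 *   if (grspw_dma_tx_send(chan, 0, &lst, 1) != 0) {
 *           // 1: DMA stopped, -1: failed to take the channel lock
 *   }
 */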
1938
1939int grspw_dma_tx_reclaim(void *c, int opts, struct grspw_list *pkts, int *count)
1940{
1941        struct grspw_dma_priv *dma = c;
1942        struct grspw_pkt *pkt, *lastpkt;
1943        int cnt, started;
1944
1945        /* Take DMA channel lock */
1946        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1947            != RTEMS_SUCCESSFUL)
1948                return -1;
1949
1950        /* 1. Move transmitted packets to SENT List (SCHED->SENT) */
1951        started = dma->started;
1952        if ((started > 0) && ((opts & 1) == 0))
1953                grspw_tx_process_scheduled(dma);
1954
1955        /* 2. Move all/count SENT packets to the caller's list (SENT->USER) */
1956        if (pkts) {
1957                if ((count == NULL) || (*count == -1) ||
1958                    (*count >= dma->sent_cnt)) {
1959                        /* Move all SENT Packets */
1960                        *pkts = dma->sent;
1961                        grspw_list_clr(&dma->sent);
1962                        if (count)
1963                                *count = dma->sent_cnt;
1964                        dma->sent_cnt = 0;
1965                } else {
1966                        /* Move a number of SENT Packets */
1967                        pkts->head = pkt = lastpkt = dma->sent.head;
1968                        cnt = 0;
1969                        while (cnt < *count) {
1970                                lastpkt = pkt;
1971                                pkt = pkt->next;
1972                                cnt++;
1973                        }
1974                        if (cnt > 0) {
1975                                pkts->tail = lastpkt;
1976                                grspw_list_remove_head_list(&dma->sent, pkts);
1977                                dma->sent_cnt -= cnt;
1978                        } else {
1979                                grspw_list_clr(pkts);
1980                        }
1981                }
1982        } else if (count) {
1983                *count = 0;
1984        }
1985
1986        /* 3. Schedule as many packets as possible (SEND->SCHED) */
1987        if ((started > 0) && ((opts & 2) == 0))
1988                grspw_tx_schedule_send(dma);
1989
1990        /* Unlock DMA channel */
1991        rtems_semaphore_release(dma->sem_txdma);
1992
1993        return (~started) & 1; /* signal DMA has been stopped */
1994}
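
/* Example (illustrative sketch): reclaim all packet buffers from the SENT
 * queue. Status flags can then be decoded as in the example after
 * grspw_tx_process_scheduled() above.
 *
 *   struct grspw_list done;
 *   int cnt = -1;           // -1 means: take everything in the SENT queue
 *
 *   if (grspw_dma_tx_reclaim(chan, 0, &done, &cnt) < 0)
 *           return;         // failed to take the channel lock
 *   // done.head now chains cnt packets owned by the application again
 */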
1995
1996void grspw_dma_tx_count(void *c, int *send, int *sched, int *sent, int *hw)
1997{
1998        struct grspw_dma_priv *dma = c;
1999        int sched_cnt, diff;
2000        unsigned int hwbd;
2001        struct grspw_txbd *tailbd;
2002
2003        /* Take device lock - Wait until we get semaphore.
2004         * The lock is taken so that the counters are in sync with each other
2005         * and that DMA descriptor table and tx_ring_tail is not being updated
2006         * during HW counter processing in this function.
2007         */
2008        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2009            != RTEMS_SUCCESSFUL)
2010                return;
2011
2012        if (send)
2013                *send = dma->send_cnt;
2014        sched_cnt = dma->tx_sched_cnt;
2015        if (sched)
2016                *sched = sched_cnt;
2017        if (sent)
2018                *sent = dma->sent_cnt;
2019        if (hw) {
2020                /* Calculate number of descriptors (processed by HW) between
2021                 * HW pointer and oldest SW pointer.
2022                 */
2023                hwbd = REG_READ(&dma->regs->txdesc);
2024                tailbd = dma->tx_ring_tail->bd;
2025                diff = ((hwbd - (unsigned int)tailbd) / GRSPW_TXBD_SIZE) &
2026                        (GRSPW_TXBD_NR - 1);
2027                /* Handle special case when HW and SW pointers are equal
2028                 * because all TX descriptors have been processed by HW.
2029                 */
2030                if ((diff == 0) && (sched_cnt == GRSPW_TXBD_NR) &&
2031                    ((BD_READ(&tailbd->ctrl) & GRSPW_TXBD_EN) == 0)) {
2032                        diff = GRSPW_TXBD_NR;
2033                }
2034                *hw = diff;
2035        }
2036
2037        /* Unlock DMA channel */
2038        rtems_semaphore_release(dma->sem_txdma);
2039}
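
/* Example (illustrative sketch): query the TX queue levels. Any pointer may
 * be NULL when that counter is of no interest.
 *
 *   int send, sched, sent, hw;
 *
 *   grspw_dma_tx_count(chan, &send, &sched, &sent, &hw);
 *   printf("SEND=%d TX-SCHED=%d SENT=%d HW-processed=%d\n",
 *          send, sched, sent, hw);
 */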
2040
2041static inline int grspw_tx_wait_eval(struct grspw_dma_priv *dma)
2042{
2043        int send_val, sent_val;
2044
2045        if (dma->tx_wait.send_cnt >= (dma->send_cnt + dma->tx_sched_cnt))
2046                send_val = 1;
2047        else
2048                send_val = 0;
2049
2050        if (dma->tx_wait.sent_cnt <= dma->sent_cnt)
2051                sent_val = 1;
2052        else
2053                sent_val = 0;
2054
2055        /* AND or OR ? */
2056        if (dma->tx_wait.op == 0)
2057                return send_val & sent_val; /* AND */
2058        else
2059                return send_val | sent_val; /* OR */
2060}
2061
2062/* Block until the condition is met: send_cnt or fewer packets are queued in
2063 * the "Send and Scheduled" queues, combined by op (0=AND, 1=OR) with sent_cnt
2064 * or more packets having been sent (SENT queue).
2065 * If a link error occurs and the Stop on Link error is defined, this function
2066 * will also return to caller.
2067 */
2068int grspw_dma_tx_wait(void *c, int send_cnt, int op, int sent_cnt, int timeout)
2069{
2070        struct grspw_dma_priv *dma = c;
2071        int ret, rc, initialized = 0;
2072
2073        if (timeout == 0)
2074                timeout = RTEMS_NO_TIMEOUT;
2075
2076check_condition:
2077
2078        /* Take DMA channel lock */
2079        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2080            != RTEMS_SUCCESSFUL)
2081                return -1;
2082
2083        /* Check that no other thread is waiting; this driver only supports
2084         * one waiter at a time.
2085         */
2086        if (initialized == 0 && dma->tx_wait.waiting) {
2087                ret = 3;
2088                goto out_release;
2089        }
2090
2091        /* Stop if link error or similar (DMA stopped), abort */
2092        if (dma->started == 0) {
2093                ret = 1;
2094                goto out_release;
2095        }
2096
2097        /* Set up Condition */
2098        dma->tx_wait.send_cnt = send_cnt;
2099        dma->tx_wait.op = op;
2100        dma->tx_wait.sent_cnt = sent_cnt;
2101
2102        if (grspw_tx_wait_eval(dma) == 0) {
2103                /* Prepare Wait */
2104                initialized = 1;
2105                dma->tx_wait.waiting = 1;
2106
2107                /* Release DMA channel lock */
2108                rtems_semaphore_release(dma->sem_txdma);
2109
2110                /* Try to take the Wait lock; if this fails the link may have
2111                 * gone down or the user stopped this DMA channel
2112                 */
2113                rc = rtems_semaphore_obtain(dma->tx_wait.sem_wait, RTEMS_WAIT,
2114                                                timeout);
2115                if (rc == RTEMS_TIMEOUT) {
2116                        ret = 2;
2117                        goto out;
2118                } else if (rc == RTEMS_UNSATISFIED ||
2119                           rc == RTEMS_OBJECT_WAS_DELETED) {
2120                        ret = 1; /* sem was flushed/deleted, means DMA stop */
2121                        goto out;
2122                } else if (rc != RTEMS_SUCCESSFUL) {
2123                        /* Unknown Error */
2124                        ret = -1;
2125                        goto out;
2126                } else if (dma->started == 0) {
2127                        ret = 1;
2128                        goto out;
2129                }
2130
2131                /* Check condition once more */
2132                goto check_condition;
2133        }
2134
2135        ret = 0;
2136
2137out_release:
2138        /* Unlock DMA channel */
2139        rtems_semaphore_release(dma->sem_txdma);
2140
2141out:
2142        if (initialized)
2143                dma->tx_wait.waiting = 0;
2144        return ret;
2145}
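
/* Example (illustrative sketch): block until all queued packets have been
 * transmitted. With send_cnt=0 and op=0 (AND) the call returns when the SEND
 * and TX-SCHED queues are empty; the sent_cnt=0 condition is always true.
 *
 *   switch (grspw_dma_tx_wait(chan, 0, 0, 0, 0)) {   // timeout 0 = forever
 *   case 0:
 *           break;          // condition met: all packets transmitted
 *   case 1:
 *           break;          // DMA channel was stopped
 *   case 2:
 *           break;          // timeout (not possible here)
 *   case 3:
 *           break;          // another task is already waiting
 *   default:
 *           break;          // unknown error (-1)
 *   }
 */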
2146
2147int grspw_dma_rx_recv(void *c, int opts, struct grspw_list *pkts, int *count)
2148{
2149        struct grspw_dma_priv *dma = c;
2150        struct grspw_pkt *pkt, *lastpkt;
2151        int cnt, started;
2152
2153        /* Take DMA channel lock */
2154        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2155            != RTEMS_SUCCESSFUL)
2156                return -1;
2157
2158        /* 1. Move Scheduled packets to RECV List (SCHED->RECV) */
2159        started = dma->started;
2160        if (((opts & 1) == 0) && (started > 0))
2161                grspw_rx_process_scheduled(dma);
2162
2163        /* 2. Move all/count RECV packets to the caller's list (RECV->USER) */
2164        if (pkts) {
2165                if ((count == NULL) || (*count == -1) ||
2166                    (*count >= dma->recv_cnt)) {
2167                        /* Move all Received packets */
2168                        *pkts = dma->recv;
2169                        grspw_list_clr(&dma->recv);
2170                        if (count)
2171                                *count = dma->recv_cnt;
2172                        dma->recv_cnt = 0;
2173                } else {
2174                        /* Move a number of RECV Packets */
2175                        pkts->head = pkt = lastpkt = dma->recv.head;
2176                        cnt = 0;
2177                        while (cnt < *count) {
2178                                lastpkt = pkt;
2179                                pkt = pkt->next;
2180                                cnt++;
2181                        }
2182                        if (cnt > 0) {
2183                                pkts->tail = lastpkt;
2184                                grspw_list_remove_head_list(&dma->recv, pkts);
2185                                dma->recv_cnt -= cnt;
2186                        } else {
2187                                grspw_list_clr(pkts);
2188                        }
2189                }
2190        } else if (count) {
2191                *count = 0;
2192        }
2193
2194        /* 3. Schedule as many free packet buffers as possible (READY->SCHED) */
2195        if (((opts & 2) == 0) && (started > 0))
2196                grspw_rx_schedule_ready(dma);
2197
2198        /* Unlock DMA channel */
2199        rtems_semaphore_release(dma->sem_rxdma);
2200
2201        return (~started) & 1;
2202}
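
/* Example (illustrative sketch): fetch up to 16 received packets and process
 * them. process_packet() is a hypothetical application consumer.
 *
 *   struct grspw_list lst;
 *   struct grspw_pkt *p;
 *   int cnt = 16, i;
 *
 *   if (grspw_dma_rx_recv(chan, 0, &lst, &cnt) < 0)
 *           return;         // failed to take the channel lock
 *   for (p = lst.head, i = 0; i < cnt; i++, p = p->next)
 *           process_packet(p);      // p->dlen bytes are valid
 */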
2203
2204int grspw_dma_rx_prepare(void *c, int opts, struct grspw_list *pkts, int count)
2205{
2206        struct grspw_dma_priv *dma = c;
2207        int ret;
2208
2209        /* Take DMA channel lock */
2210        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2211            != RTEMS_SUCCESSFUL)
2212                return -1;
2213
2214        if (dma->started == 0) {
2215                ret = 1;
2216                goto out;
2217        }
2218
2219        /* 1. Move Received packets to RECV List (SCHED->RECV) */
2220        if ((opts & 1) == 0)
2221                grspw_rx_process_scheduled(dma);
2222
2223        /* 2. Add the "free/ready" packet buffers to the READY List (USER->READY) */
2224        if (pkts && (count > 0)) {
2225                grspw_list_append_list(&dma->ready, pkts);
2226                dma->ready_cnt += count;
2227                if (dma->stats.ready_cnt_max < dma->ready_cnt)
2228                        dma->stats.ready_cnt_max = dma->ready_cnt;
2229        }
2230
2231        /* 3. Schedule as many packets as possible (READY->SCHED) */
2232        if ((opts & 2) == 0)
2233                grspw_rx_schedule_ready(dma);
2234
2235        ret = 0;
2236out:
2237        /* Unlock DMA channel */
2238        rtems_semaphore_release(dma->sem_rxdma);
2239
2240        return ret;
2241}
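
/* Example (illustrative sketch): hand a chain of N empty buffers to the
 * driver. pkt_pool and rx_bufs are hypothetical application-owned arrays;
 * the maximum packet size is governed by the channel's rxmaxlen
 * configuration, not per packet.
 *
 *   struct grspw_list lst;
 *   int i;
 *
 *   for (i = 0; i < N; i++) {
 *           pkt_pool[i].data = rx_bufs[i];
 *           pkt_pool[i].flags = PKT_FLAG_TR_DATA;   // CPU->DMA translation
 *           pkt_pool[i].next = &pkt_pool[i + 1];
 *   }
 *   pkt_pool[N - 1].next = NULL;
 *   lst.head = &pkt_pool[0];
 *   lst.tail = &pkt_pool[N - 1];
 *   grspw_dma_rx_prepare(chan, 0, &lst, N);
 */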
2242
2243void grspw_dma_rx_count(void *c, int *ready, int *sched, int *recv, int *hw)
2244{
2245        struct grspw_dma_priv *dma = c;
2246        int sched_cnt, diff;
2247        unsigned int hwbd;
2248        struct grspw_rxbd *tailbd;
2249
2250        /* Take device lock - Wait until we get semaphore.
2251         * The lock is taken so that the counters are in sync with each other
2252         * and that DMA descriptor table and rx_ring_tail is not being updated
2253         * during HW counter processing in this function.
2254         */
2255        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2256            != RTEMS_SUCCESSFUL)
2257                return;
2258
2259        if (ready)
2260                *ready = dma->ready_cnt;
2261        sched_cnt = dma->rx_sched_cnt;
2262        if (sched)
2263                *sched = sched_cnt;
2264        if (recv)
2265                *recv = dma->recv_cnt;
2266        if (hw) {
2267                /* Calculate number of descriptors (processed by HW) between
2268                 * HW pointer and oldest SW pointer.
2269                 */
2270                hwbd = REG_READ(&dma->regs->rxdesc);
2271                tailbd = dma->rx_ring_tail->bd;
2272                diff = ((hwbd - (unsigned int)tailbd) / GRSPW_RXBD_SIZE) &
2273                        (GRSPW_RXBD_NR - 1);
2274                /* Handle special case when HW and SW pointers are equal
2275                 * because all RX descriptors have been processed by HW.
2276                 */
2277                if ((diff == 0) && (sched_cnt == GRSPW_RXBD_NR) &&
2278                    ((BD_READ(&tailbd->ctrl) & GRSPW_RXBD_EN) == 0)) {
2279                        diff = GRSPW_RXBD_NR;
2280                }
2281                *hw = diff;
2282        }
2283
2284        /* Unlock DMA channel */
2285        rtems_semaphore_release(dma->sem_rxdma);
2286}
2287
2288static inline int grspw_rx_wait_eval(struct grspw_dma_priv *dma)
2289{
2290        int ready_val, recv_val;
2291
2292        if (dma->rx_wait.ready_cnt >= (dma->ready_cnt + dma->rx_sched_cnt))
2293                ready_val = 1;
2294        else
2295                ready_val = 0;
2296
2297        if (dma->rx_wait.recv_cnt <= dma->recv_cnt)
2298                recv_val = 1;
2299        else
2300                recv_val = 0;
2301
2302        /* AND or OR ? */
2303        if (dma->rx_wait.op == 0)
2304                return ready_val & recv_val; /* AND */
2305        else
2306                return ready_val | recv_val; /* OR */
2307}
2308
2309/* Block until the condition is met: recv_cnt or more packets are queued in
2310 * the RECV queue, combined by op (0=AND, 1=OR) with ready_cnt or fewer packet
2311 * buffers being available in the "READY and Scheduled" queues.
2312 * If a link error occurs and the Stop on Link error is defined, this function
2313 * will also return to caller, however with an error.
2314 */
2315int grspw_dma_rx_wait(void *c, int recv_cnt, int op, int ready_cnt, int timeout)
2316{
2317        struct grspw_dma_priv *dma = c;
2318        int ret, rc, initialized = 0;
2319
2320        if (timeout == 0)
2321                timeout = RTEMS_NO_TIMEOUT;
2322
2323check_condition:
2324
2325        /* Take DMA channel lock */
2326        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2327            != RTEMS_SUCCESSFUL)
2328                return -1;
2329
2330        /* Check that no other thread is waiting; this driver only supports
2331         * one waiter at a time.
2332         */
2333        if (initialized == 0 && dma->rx_wait.waiting) {
2334                ret = 3;
2335                goto out_release;
2336        }
2337
2338        /* Stop if link error or similar (DMA stopped), abort */
2339        if (dma->started == 0) {
2340                ret = 1;
2341                goto out_release;
2342        }
2343
2344        /* Set up Condition */
2345        dma->rx_wait.recv_cnt = recv_cnt;
2346        dma->rx_wait.op = op;
2347        dma->rx_wait.ready_cnt = ready_cnt;
2348
2349        if (grspw_rx_wait_eval(dma) == 0) {
2350                /* Prepare Wait */
2351                initialized = 1;
2352                dma->rx_wait.waiting = 1;
2353
2354                /* Release channel lock */
2355                rtems_semaphore_release(dma->sem_rxdma);
2356
2357                /* Try to take the Wait lock; if this fails the link may have
2358                 * gone down or the user stopped this DMA channel
2359                 */
2360                rc = rtems_semaphore_obtain(dma->rx_wait.sem_wait, RTEMS_WAIT,
2361                                           timeout);
2362                if (rc == RTEMS_TIMEOUT) {
2363                        ret = 2;
2364                        goto out;
2365                } else if (rc == RTEMS_UNSATISFIED ||
2366                           rc == RTEMS_OBJECT_WAS_DELETED) {
2367                        ret = 1; /* sem was flushed/deleted, means DMA stop */
2368                        goto out;
2369                } else if (rc != RTEMS_SUCCESSFUL) {
2370                        /* Unknown Error */
2371                        ret = -1;
2372                        goto out;
2373                } else if (dma->started == 0) {
2374                        ret = 1;
2375                        goto out;
2376                }
2377
2378                /* Check condition once more */
2379                goto check_condition;
2380        }
2381
2382        ret = 0;
2383
2384out_release:
2385        /* Unlock DMA channel */
2386        rtems_semaphore_release(dma->sem_rxdma);
2387
2388out:
2389        if (initialized)
2390                dma->rx_wait.waiting = 0;
2391        return ret;
2392}
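
/* Example (illustrative sketch): block until at least 8 packets are waiting
 * in the RECV queue. A very large ready_cnt makes the READY-side condition
 * always true, so the AND expression reduces to the RECV-side test.
 *
 *   if (grspw_dma_rx_wait(chan, 8, 0, 0x7fffffff, 100) == 0) {
 *           // at least 8 packets can now be taken with grspw_dma_rx_recv()
 *   }
 */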
2393
2394int grspw_dma_config(void *c, struct grspw_dma_config *cfg)
2395{
2396        struct grspw_dma_priv *dma = c;
2397
2398        if (dma->started || !cfg)
2399                return -1;
2400
2401        if (cfg->flags & ~(DMAFLAG_MASK | DMAFLAG2_MASK))
2402                return -1;
2403
2404        /* Update Configuration */
2405        memcpy(&dma->cfg, cfg, sizeof(*cfg));
2406
2407        return 0;
2408}
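
/* Example (illustrative sketch): adjust the channel configuration before
 * starting the channel; grspw_dma_config() fails once the channel is started.
 *
 *   struct grspw_dma_config cfg;
 *
 *   grspw_dma_config_read(chan, &cfg);
 *   cfg.rxmaxlen = 1024;            // max RX packet length in bytes
 *   cfg.rx_irq_en_cnt = 4;          // IRQ on every 4th received packet
 *   cfg.tx_irq_en_cnt = 4;          // IRQ on every 4th transmitted packet
 *   if (grspw_dma_config(chan, &cfg) != 0) {
 *           // invalid flags or channel already started
 *   }
 */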
2409
2410void grspw_dma_config_read(void *c, struct grspw_dma_config *cfg)
2411{
2412        struct grspw_dma_priv *dma = c;
2413
2414        /* Copy Current Configuration */
2415        memcpy(cfg, &dma->cfg, sizeof(*cfg));
2416}
2417
2418void grspw_dma_stats_read(void *c, struct grspw_dma_stats *sts)
2419{
2420        struct grspw_dma_priv *dma = c;
2421
2422        memcpy(sts, &dma->stats, sizeof(dma->stats));
2423}
2424
2425void grspw_dma_stats_clr(void *c)
2426{
2427        struct grspw_dma_priv *dma = c;
2428
2429        /* Clear most of the statistics */
2430        memset(&dma->stats, 0, sizeof(dma->stats));
2431
2432        /* Init proper default values so that comparisons will work the
2433         * first time.
2434         */
2435        dma->stats.send_cnt_min = 0x3fffffff;
2436        dma->stats.tx_sched_cnt_min = 0x3fffffff;
2437        dma->stats.ready_cnt_min = 0x3fffffff;
2438        dma->stats.rx_sched_cnt_min = 0x3fffffff;
2439}
2440
2441int grspw_dma_start(void *c)
2442{
2443        struct grspw_dma_priv *dma = c;
2444        struct grspw_dma_regs *dregs = dma->regs;
2445        unsigned int ctrl;
2446        SPIN_IRQFLAGS(irqflags);
2447
2448        if (dma->started)
2449                return 0;
2450
2451        /* Initialize Software Structures:
2452         *  - Clear all Queues
2453         *  - init BD ring
2454         *  - init IRQ counter
2455         *  - clear statistics counters
2456         *  - init wait structures and semaphores
2457         */
2458        grspw_dma_reset(dma);
2459
2460        /* RX (RD bit) and TX (TE bit) are not enabled until the user fills
2461         * the READY and SEND queues with SpaceWire packet buffers. So we need
2462         * not worry about IRQs for this channel just yet. However other DMA channels
2463         * may be active.
2464         *
2465         * Some functionality that is not changed during started mode is set up
2466         * once and for all here:
2467         *
2468         *   - RX MAX Packet length
2469         *   - TX Descriptor base address to first BD in TX ring (not enabled)
2470         *   - RX Descriptor base address to first BD in RX ring (not enabled)
2471         *   - IRQs (TX DMA, RX DMA, DMA ERROR)
2472         *   - Strip PID
2473         *   - Strip Address
2474         *   - No Spill
2475         *   - Receiver Enable
2476         *   - disable on link error (LE)
2477         *
2478         * Note that the address register and the address enable bit in DMACTRL
2479         * register must be left untouched; they are configured at the GRSPW
2480         * core level.
2481         *
2482         * Note that the receiver is enabled here, but since descriptors are
2483         * not enabled the GRSPW core may stop/pause RX (if NS bit set) until
2484         * descriptors are enabled or it may ignore RX packets (NS=0) until
2485         * descriptors are enabled (writing RD bit).
2486         */
2487        REG_WRITE(&dregs->txdesc, dma->tx_bds_hwa);
2488        REG_WRITE(&dregs->rxdesc, dma->rx_bds_hwa);
2489
2490        /* MAX Packet length */
2491        REG_WRITE(&dma->regs->rxmax, dma->cfg.rxmaxlen);
2492
2493        ctrl =  GRSPW_DMACTRL_AI | GRSPW_DMACTRL_PS | GRSPW_DMACTRL_PR |
2494                GRSPW_DMACTRL_TA | GRSPW_DMACTRL_RA | GRSPW_DMACTRL_RE |
2495                (dma->cfg.flags & DMAFLAG_MASK) << GRSPW_DMACTRL_NS_BIT;
2496        if (dma->core->dis_link_on_err & LINKOPTS_DIS_ONERR)
2497                ctrl |= GRSPW_DMACTRL_LE;
2498        if (dma->cfg.rx_irq_en_cnt != 0 || dma->cfg.flags & DMAFLAG2_RXIE)
2499                ctrl |= GRSPW_DMACTRL_RI;
2500        if (dma->cfg.tx_irq_en_cnt != 0 || dma->cfg.flags & DMAFLAG2_TXIE)
2501                ctrl |= GRSPW_DMACTRL_TI;
2502        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
2503        ctrl |= REG_READ(&dma->regs->ctrl) & GRSPW_DMACTRL_EN;
2504        REG_WRITE(&dregs->ctrl, ctrl);
2505        SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2506
2507        dma->started = 1; /* open up other DMA interfaces */
2508
2509        return 0;
2510}
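
/* Example (illustrative sketch): a typical DMA channel life cycle. After
 * grspw_dma_stop() all packets end up in the RECV/SENT queues, so the
 * application can reclaim every buffer before closing. The rxlst/txlst/
 * done/rxdone lists and nbufs/npkts counts are hypothetical.
 *
 *   grspw_dma_start(chan);
 *   grspw_dma_rx_prepare(chan, 0, &rxlst, nbufs);   // hand over RX buffers
 *   grspw_dma_tx_send(chan, 0, &txlst, npkts);      // queue TX packets
 *   grspw_dma_stop(chan);
 *   grspw_dma_tx_reclaim(chan, 0, &done, NULL);     // take back TX buffers
 *   grspw_dma_rx_recv(chan, 0, &rxdone, NULL);      // take back RX buffers
 *   grspw_dma_close(chan);
 */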
2511
2512STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma)
2513{
2514        SPIN_IRQFLAGS(irqflags);
2515
2516        if (dma->started == 0)
2517                return;
2518        dma->started = 0;
2519
2520        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
2521        grspw_hw_dma_stop(dma);
2522        SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2523
2524        /* From here on no more packets will be sent. However, there
2525         * may still exist scheduled packets that have been sent, and
2526         * packets in the SEND Queue waiting for free descriptors. All
2527         * packets are moved to the SENT Queue so that the user may
2528         * get its buffers back; the user must check TXPKT_FLAG_TX in
2529         * order to determine whether a packet was actually sent or
2530         * not.
2531         */
2532
2533        /* Retrieve all scheduled packets that have been sent */
2534        grspw_tx_process_scheduled(dma);
2535
2536        /* Move un-sent packets in SEND and SCHED queue to the
2537         * SENT Queue. (never marked sent)
2538         */
2539        if (!grspw_list_is_empty(&dma->tx_sched)) {
2540                grspw_list_append_list(&dma->sent, &dma->tx_sched);
2541                grspw_list_clr(&dma->tx_sched);
2542                dma->sent_cnt += dma->tx_sched_cnt;
2543                dma->tx_sched_cnt = 0;
2544        }
2545        if (!grspw_list_is_empty(&dma->send)) {
2546                grspw_list_append_list(&dma->sent, &dma->send);
2547                grspw_list_clr(&dma->send);
2548                dma->sent_cnt += dma->send_cnt;
2549                dma->send_cnt = 0;
2550        }
2551
2552        /* Similar for RX */
2553        grspw_rx_process_scheduled(dma);
2554        if (!grspw_list_is_empty(&dma->rx_sched)) {
2555                grspw_list_append_list(&dma->recv, &dma->rx_sched);
2556                grspw_list_clr(&dma->rx_sched);
2557                dma->recv_cnt += dma->rx_sched_cnt;
2558                dma->rx_sched_cnt = 0;
2559        }
2560        if (!grspw_list_is_empty(&dma->ready)) {
2561                grspw_list_append_list(&dma->recv, &dma->ready);
2562                grspw_list_clr(&dma->ready);
2563                dma->recv_cnt += dma->ready_cnt;
2564                dma->ready_cnt = 0;
2565        }
2566
2567        /* Throw out blocked threads */
2568        rtems_semaphore_flush(dma->rx_wait.sem_wait);
2569        rtems_semaphore_flush(dma->tx_wait.sem_wait);
2570}
2571
2572void grspw_dma_stop(void *c)
2573{
2574        struct grspw_dma_priv *dma = c;
2575
2576        /* If DMA channel is closed we should not access the semaphore */
2577        if (!dma->open)
2578                return;
2579
2580        /* Take DMA Channel lock */
2581        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2582            != RTEMS_SUCCESSFUL)
2583                return;
2584        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2585            != RTEMS_SUCCESSFUL) {
2586                rtems_semaphore_release(dma->sem_rxdma);
2587                return;
2588        }
2589
2590        grspw_dma_stop_locked(dma);
2591
2592        rtems_semaphore_release(dma->sem_txdma);
2593        rtems_semaphore_release(dma->sem_rxdma);
2594}
2595
2596/* Do general work, invoked indirectly from ISR */
2597static void grspw_work_shutdown_func(struct grspw_priv *priv)
2598{
2599        int i;
2600
2601        /* Link is down for some reason, and the user has configured
2602         * that we stop all (open) DMA channels and throw out all their
2603         * blocked threads.
2604         */
2605        for (i=0; i<priv->hwsup.ndma_chans; i++)
2606                grspw_dma_stop(&priv->dma[i]);
2607        grspw_hw_stop(priv);
2608}
2609
2610/* Do DMA work on one channel, invoked indirectly from ISR */
2611static void grspw_work_dma_func(struct grspw_dma_priv *dma, unsigned int msg)
2612{
2613        int tx_cond_true, rx_cond_true, rxtx;
2614
2615        /* If DMA channel is closed we should not access the semaphore */
2616        if (dma->open == 0)
2617                return;
2618
2619        dma->stats.irq_cnt++;
2620
2621        /* Look at the cause of the wakeup and clear the source */
2622        rxtx = 0;
2623        if (msg & WORK_DMA_RX_MASK)
2624                rxtx |= 1;
2625        if (msg & WORK_DMA_TX_MASK)
2626                rxtx |= 2;
2627        switch (grspw_dma_enable_int(dma, rxtx, 0)) {
2628        case 1:
2629                /* DMA stopped */
2630                return;
2631        case 2:
2632                /* DMA error -> Stop DMA channel (both RX and TX) */
2633                if (msg & WORK_DMA_ER_MASK) {
2634                        /* DMA error and user wants work-task to handle error */
2635                        grspw_dma_stop(dma);
2636                        grspw_work_event(WORKTASK_EV_DMA_STOP, msg);
2637                }
2638                return;
2639        default:
2640                break;
2641        }
2642        if (msg == 0)
2643                return;
2644
2645        rx_cond_true = 0;
2646        tx_cond_true = 0;
2647
2648        if ((dma->cfg.flags & DMAFLAG2_IRQD_MASK) == DMAFLAG2_IRQD_BOTH) {
2649                /* If the ISR disables both interrupt sources simultaneously,
2650                 * both sources must also be re-enabled together to avoid
2651                 * missing interrupts. Both RX and TX processing will be
2652                 * forced.
2653                 */
2654                msg |= WORK_DMA_RX_MASK | WORK_DMA_TX_MASK;
2655        }
2656
2657        if (msg & WORK_DMA_RX_MASK) {
2658                /* Do RX Work */
2659
2660                /* Take DMA channel RX lock */
2661                if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2662                    != RTEMS_SUCCESSFUL)
2663                        return;
2664
2665                dma->stats.rx_work_cnt++;
2666                grspw_rx_process_scheduled(dma);
2667                if (dma->started) {
2668                        dma->stats.rx_work_enabled +=
2669                                grspw_rx_schedule_ready(dma);
2670                        /* Check to see if condition for waking blocked
2671                         * USER task is fulfilled.
2672                         */
2673                        if (dma->rx_wait.waiting)
2674                                rx_cond_true = grspw_rx_wait_eval(dma);
2675                }
2676                rtems_semaphore_release(dma->sem_rxdma);
2677        }
2678
2679        if (msg & WORK_DMA_TX_MASK) {
2680                /* Do TX Work */
2681
2682                /* Take DMA channel TX lock */
2683                if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2684                    != RTEMS_SUCCESSFUL)
2685                        return;
2686
2687                dma->stats.tx_work_cnt++;
2688                grspw_tx_process_scheduled(dma);
2689                if (dma->started) {
2690                        dma->stats.tx_work_enabled +=
2691                                grspw_tx_schedule_send(dma);
2692                        /* Check to see if condition for waking blocked
2693                         * USER task is fulfilled.
2694                         */
2695                        if (dma->tx_wait.waiting)
2696                                tx_cond_true = grspw_tx_wait_eval(dma);
2697                }
2698                rtems_semaphore_release(dma->sem_txdma);
2699        }
2700
2701        if (rx_cond_true)
2702                rtems_semaphore_release(dma->rx_wait.sem_wait);
2703
2704        if (tx_cond_true)
2705                rtems_semaphore_release(dma->tx_wait.sem_wait);
2706}
2707
2708/* Work task that receives work from the work message queue posted by
2709 * the ISR.
2710 */
2711void grspw_work_func(rtems_id msgQ)
2712{
2713        unsigned int message = 0, msg;
2714        size_t size;
2715        struct grspw_priv *priv;
2716        int i;
2717
2718        /* Wait for ISR to schedule work */
2719        while (rtems_message_queue_receive(msgQ, &message, &size,
2720               RTEMS_WAIT, RTEMS_NO_TIMEOUT) == RTEMS_SUCCESSFUL) {
2721                if (message & WORK_QUIT_TASK)
2722                        break;
2723
2724                /* Handle work */
2725                priv = priv_tab[message >> WORK_CORE_BIT];
2726                if (message & WORK_SHUTDOWN) {
2727                        grspw_work_shutdown_func(priv);
2728
2729                        grspw_work_event(WORKTASK_EV_SHUTDOWN, message);
2730                } else if (message & WORK_DMA_MASK) {
2731                        for (i = 0; i < priv->hwsup.ndma_chans; i++) {
2732                                msg = message &
2733                                      (WORK_CORE_MASK | WORK_DMA_CHAN_MASK(i));
2734                                if (msg)
2735                                        grspw_work_dma_func(&priv->dma[i], msg);
2736                        }
2737                }
2738                message = 0;
2739        }
2740
2741        if (message & WORK_FREE_MSGQ)
2742                rtems_message_queue_delete(msgQ);
2743
2744        grspw_work_event(WORKTASK_EV_QUIT, message);
2745        rtems_task_delete(RTEMS_SELF);
2746}
2747
2748STATIC void grspw_isr(void *data)
2749{
2750        struct grspw_priv *priv = data;
2751        unsigned int dma_stat, stat, stat_clrmsk, ctrl, icctrl, timecode, irqs;
2752        unsigned int rxirq, rxack, intto;
2753        int i, handled = 0, call_user_int_isr;
2754        unsigned int message = WORK_NONE, dma_en;
2755        SPIN_ISR_IRQFLAGS(irqflags);
2756
2757        /* Get Status from Hardware */
2758        stat = REG_READ(&priv->regs->status);
2759        stat_clrmsk = stat & (GRSPW_STS_TO | GRSPW_STAT_ERROR) &
2760                        (GRSPW_STS_TO | priv->stscfg);
2761
2762        /* Make sure to put the timecode handling first in order to get the
2763         * smallest possible interrupt latency
2764         */
2765        if ((stat & GRSPW_STS_TO) && (priv->tcisr != NULL)) {
2766                ctrl = REG_READ(&priv->regs->ctrl);
2767                if (ctrl & GRSPW_CTRL_TQ) {
2768                        /* Timecode received. Let custom function handle this */
2769                        timecode = REG_READ(&priv->regs->time) &
2770                                        (GRSPW_TIME_CTRL | GRSPW_TIME_TCNT);
2771                        (priv->tcisr)(priv->tcisr_arg, timecode);
2772                }
2773        }
2774
2775        /* Get Interrupt status from hardware */
2776        icctrl = REG_READ(&priv->regs->icctrl);
2777        if ((icctrl & GRSPW_ICCTRL_IRQSRC_MASK) && (priv->icisr != NULL)) {
2778                call_user_int_isr = 0;
2779                rxirq = rxack = intto = 0;
2780
2781                if ((icctrl & GRSPW_ICCTRL_IQ) &&
2782                    (rxirq = REG_READ(&priv->regs->icrx)) != 0)
2783                        call_user_int_isr = 1;
2784
2785                if ((icctrl & GRSPW_ICCTRL_AQ) &&
2786                    (rxack = REG_READ(&priv->regs->icack)) != 0)
2787                        call_user_int_isr = 1;
2788
2789                if ((icctrl & GRSPW_ICCTRL_TQ) &&
2790                    (intto = REG_READ(&priv->regs->ictimeout)) != 0)
2791                        call_user_int_isr = 1;
2792
2793                /* Let custom functions handle this POTENTIAL SPW interrupt. The
2794                 * user function is called even if no such IRQ has happened!
2795                 * User must make sure to clear all interrupts that have been
2796                 * handled from the three registers by writing a one.
2797                 */
2798                if (call_user_int_isr)
2799                        priv->icisr(priv->icisr_arg, rxirq, rxack, intto);
2800        }
2801
2802        /* An Error occurred? */
2803        if (stat & GRSPW_STAT_ERROR) {
2804                /* Wake Global WorkQ */
2805                handled = 1;
2806
2807                if (stat & GRSPW_STS_EE)
2808                        priv->stats.err_eeop++;
2809
2810                if (stat & GRSPW_STS_IA)
2811                        priv->stats.err_addr++;
2812
2813                if (stat & GRSPW_STS_PE)
2814                        priv->stats.err_parity++;
2815
2816                if (stat & GRSPW_STS_DE)
2817                        priv->stats.err_disconnect++;
2818
2819                if (stat & GRSPW_STS_ER)
2820                        priv->stats.err_escape++;
2821
2822                if (stat & GRSPW_STS_CE)
2823                        priv->stats.err_credit++;
2824
2825                if (stat & GRSPW_STS_WE)
2826                        priv->stats.err_wsync++;
2827
2828                if (((priv->dis_link_on_err >> 16) & stat) &&
2829                    (REG_READ(&priv->regs->ctrl) & GRSPW_CTRL_IE)) {
2830                        /* Disable the link, no more transfers are expected
2831                         * on any DMA channel.
2832                         */
2833                        SPIN_LOCK(&priv->devlock, irqflags);
2834                        ctrl = REG_READ(&priv->regs->ctrl);
2835                        REG_WRITE(&priv->regs->ctrl, GRSPW_CTRL_LD |
2836                                (ctrl & ~(GRSPW_CTRL_IE|GRSPW_CTRL_LS)));
2837                        SPIN_UNLOCK(&priv->devlock, irqflags);
2838                        /* Signal to work-thread to stop DMA and clean up */
2839                        message = WORK_SHUTDOWN;
2840                }
2841        }
2842
2843        /* Clear Status Flags */
2844        if (stat_clrmsk) {
2845                handled = 1;
2846                REG_WRITE(&priv->regs->status, stat_clrmsk);
2847        }
2848
2849        /* A DMA transfer or Error occurred? In that case disable more IRQs
2850         * from the DMA channel, then invoke the workQ.
2851         *
2852         * Also note that the GI interrupt flag may not be available in older
2853         * designs (it was added together with multiple DMA channel support).
2854         */
2855        SPIN_LOCK(&priv->devlock, irqflags);
2856        for (i=0; i<priv->hwsup.ndma_chans; i++) {
2857                dma_stat = REG_READ(&priv->regs->dma[i].ctrl);
2858                /* Check for errors and whether packets have been sent or
2859                 * received, if the respective IRQs are enabled
2860                 */
2861                irqs = (((dma_stat << 3) & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS))
2862                        | GRSPW_DMA_STATUS_ERROR) & dma_stat;
2863                if (!irqs)
2864                        continue;
2865
2866                handled = 1;
2867
2868                /* DMA error has priority; if an error happens it is assumed
2869                 * that the common work-queue stops the DMA operation for that
2870                 * channel and makes the DMA tasks exit from their waiting
2871                 * functions (both RX and TX tasks).
2872                 *
2873                 * Disable further IRQs (until enabled again)
2874                 * from this DMA channel. Let the status
2875                 * bits remain so that they can be handled by
2876                 * the work function.
2877                 */
2878                if (irqs & GRSPW_DMA_STATUS_ERROR) {
2879                        REG_WRITE(&priv->regs->dma[i].ctrl, dma_stat &
2880                                ~(GRSPW_DMACTRL_RI | GRSPW_DMACTRL_TI |
2881                                  GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS |
2882                                  GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA |
2883                                  GRSPW_DMACTRL_AT));
2884                        message |= WORK_DMA_ER(i);
2885                } else {
2886                        /* determine if RX/TX interrupt source(s) shall remain
2887                         * enabled.
2888                         */
2889                        if (priv->dma[i].cfg.flags & DMAFLAG2_IRQD_SRC) {
2890                                dma_en = ~irqs >> 3;
2891                        } else {
2892                                dma_en = priv->dma[i].cfg.flags >>
2893                                 (DMAFLAG2_IRQD_BIT - GRSPW_DMACTRL_TI_BIT);
2894                        }
2895                        dma_en &= (GRSPW_DMACTRL_RI | GRSPW_DMACTRL_TI);
2896                        REG_WRITE(&priv->regs->dma[i].ctrl, dma_stat &
2897                                (~(GRSPW_DMACTRL_RI | GRSPW_DMACTRL_TI |
2898                                   GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS |
2899                                   GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA |
2900                                   GRSPW_DMACTRL_AT) | dma_en));
2901                        message |= WORK_DMA(i, irqs >> GRSPW_DMACTRL_PS_BIT);
2902                }
2903        }
2904        SPIN_UNLOCK(&priv->devlock, irqflags);
2905
2906        if (handled != 0)
2907                priv->stats.irq_cnt++;
2908
2909        /* Schedule work by sending message to work thread */
2910        if (message != WORK_NONE && priv->wc.msgisr) {
2911                int status;
2912                message |= WORK_CORE(priv->index);
2913                /* The function interface is compatible with msgQSend() on
2914                 * purpose, but the user can assign a custom function that
2915                 * handles DMA RX/TX operations as indicated by the "message"
2916                 * and clears the handled bits before it is given to msgQSend().
2917                 */
2918                status = priv->wc.msgisr(priv->wc.msgisr_arg, &message, 4);
2919                if (status != RTEMS_SUCCESSFUL) {
2920                        printk("grspw_isr(%d): message fail %d (0x%x)\n",
2921                                priv->index, status, message);
2922                }
2923        }
2924}
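/* Example sketch of a custom message-ISR hook (illustrative only; the
 * signature is assumed from the call site above, msgisr(arg, &message, 4)).
 * The hypothetical hook counts ISR notifications and then forwards the
 * message like the default rtems_message_queue_send() hook does, with "arg"
 * assumed to hold the rtems_id of the work queue.
 */
static volatile unsigned int example_isr_events;

static int example_msgisr(void *arg, unsigned int *msg, size_t size)
{
        example_isr_events++;
        return rtems_message_queue_send((rtems_id)arg, msg, size);
}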
2925
2926STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma)
2927{
2928        unsigned int ctrl;
2929        struct grspw_dma_regs *dregs = dma->regs;
2930
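        /* Keep only the DMA configuration bits (LE/EN/SP/SA/NS), drop the
         * IRQ enables, and set AT to request abort of any ongoing TX.
         */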
2931        ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN |
2932               GRSPW_DMACTRL_SP | GRSPW_DMACTRL_SA | GRSPW_DMACTRL_NS);
2933        ctrl |= GRSPW_DMACTRL_AT;
2934        REG_WRITE(&dregs->ctrl, ctrl);
2935}
2936
2937STATIC void grspw_hw_dma_softreset(struct grspw_dma_priv *dma)
2938{
2939        unsigned int ctrl;
2940        struct grspw_dma_regs *dregs = dma->regs;
2941
2942        ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN);
2943        REG_WRITE(&dregs->ctrl, ctrl);
2944
2945        REG_WRITE(&dregs->rxmax, DEFAULT_RXMAX);
2946        REG_WRITE(&dregs->txdesc, 0);
2947        REG_WRITE(&dregs->rxdesc, 0);
2948}
2949
2950/* Hardware Action:
2951 *  - stop DMA
2952 *  - do not bring down the link (RMAP may be active)
2953 *  - RMAP settings untouched (RMAP may be active)
2954 *  - port select untouched (RMAP may be active)
2955 *  - timecodes are disabled
2956 *  - IRQ generation disabled
2957 *  - status not cleared (let user analyze it if requested later on)
2958 *  - Node address / First DMA channel's node address
2959 *    is untouched (RMAP may be active)
2960 */
2961STATIC void grspw_hw_stop(struct grspw_priv *priv)
2962{
2963        int i;
2964        unsigned int ctrl;
2965        SPIN_IRQFLAGS(irqflags);
2966
2967        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
2968
2969        for (i=0; i<priv->hwsup.ndma_chans; i++)
2970                grspw_hw_dma_stop(&priv->dma[i]);
2971
2972        ctrl = REG_READ(&priv->regs->ctrl);
2973        REG_WRITE(&priv->regs->ctrl, ctrl & (
2974                GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS |
2975                GRSPW_CTRL_RE | GRSPW_CTRL_RD |
2976                GRSPW_CTRL_NP | GRSPW_CTRL_PS));
2977
2978        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
2979}
2980
2981/* Soft reset of GRSPW core registers */
2982STATIC void grspw_hw_softreset(struct grspw_priv *priv)
2983{
2984        int i;
2985        unsigned int tmp;
2986
2987        for (i=0; i<priv->hwsup.ndma_chans; i++)
2988                grspw_hw_dma_softreset(&priv->dma[i]);
2989
2990        REG_WRITE(&priv->regs->status, 0xffffffff);
2991        REG_WRITE(&priv->regs->time, 0);
2992        /* Clear all ICCTRL bits except those with valuable reset values */
2993        tmp = REG_READ(&priv->regs->icctrl);
2994        tmp &= GRSPW_ICCTRL_INUM | GRSPW_ICCTRL_BIRQ | GRSPW_ICCTRL_TXIRQ;
2995        tmp |= GRSPW_ICCTRL_ID;
2996        REG_WRITE(&priv->regs->icctrl, tmp);
2997        REG_WRITE(&priv->regs->icrx, 0xffffffff);
2998        REG_WRITE(&priv->regs->icack, 0xffffffff);
2999        REG_WRITE(&priv->regs->ictimeout, 0xffffffff);
3000}
3001
3002int grspw_dev_count(void)
3003{
3004        return grspw_count;
3005}
3006
3007void grspw_initialize_user(void *(*devfound)(int), void (*devremove)(int,void*))
3008{
3009        int i;
3010        struct grspw_priv *priv;
3011
3012        /* Set new Device Found Handler */
3013        grspw_dev_add = devfound;
3014        grspw_dev_del = devremove;
3015
3016        if (grspw_initialized == 1 && grspw_dev_add) {
3017                /* Call callback for every previously found device */
3018                for (i=0; i<grspw_count; i++) {
3019                        priv = priv_tab[i];
3020                        if (priv)
3021                                priv->data = grspw_dev_add(i);
3022                }
3023        }
3024}
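/* Example sketch (illustrative only; "example_dev_found"/"example_dev_remove"
 * are hypothetical names): the pointer returned by the devfound handler is
 * stored as the device's user data and is assumed to be handed back to the
 * devremove handler. Typical registration, done once at startup:
 *   grspw_initialize_user(example_dev_found, example_dev_remove);
 */
static void *example_dev_found(int index)
{
        printk("grspw%d found\n", index);
        return NULL; /* stored as priv->data for this device */
}

static void example_dev_remove(int index, void *data)
{
        (void)data;
        printk("grspw%d removed\n", index);
}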
3025
3026/* Compute the number of clock cycles covering at least 6.4 us */
3027static unsigned int grspw1_calc_timer64(int freq_khz)
3028{
3029        unsigned int timer64 = (freq_khz * 64 + 9999) / 10000;
3030        return timer64 & 0xfff;
3031}
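/* Example: with a 40 MHz (40000 kHz) APB clock, grspw1_calc_timer64() yields
 * (40000*64 + 9999)/10000 = 256 cycles, i.e. exactly 6.4 us.
 */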
3032
3033/* Compute the number of clock cycles covering at least 850 ns, minus 3 */
3034static unsigned int grspw1_calc_discon(int freq_khz)
3035{
3036        unsigned int discon = ((freq_khz * 85 + 99999) / 100000) - 3;
3037        return discon & 0x3ff;
3038}
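/* Example: with a 40 MHz APB clock, (40000*85 + 99999)/100000 = 34 cycles
 * covers 850 ns, so the value programmed is 34 - 3 = 31.
 */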
3039
3040/******************* Driver manager interface ***********************/
3041
3042/* Driver prototypes */
3043static int grspw_common_init(void);
3044static int grspw2_init3(struct drvmgr_dev *dev);
3045
3046static struct drvmgr_drv_ops grspw2_ops =
3047{
3048        .init = {NULL,  NULL, grspw2_init3, NULL},
3049        .remove = NULL,
3050        .info = NULL
3051};
3052
3053static struct amba_dev_id grspw2_ids[] =
3054{
3055        {VENDOR_GAISLER, GAISLER_SPW}, /* not yet supported */
3056        {VENDOR_GAISLER, GAISLER_SPW2},
3057        {VENDOR_GAISLER, GAISLER_SPW2_DMA},
3058        {0, 0}          /* Mark end of table */
3059};
3060
3061static struct amba_drv_info grspw2_drv_info =
3062{
3063        {
3064                DRVMGR_OBJ_DRV,                 /* Driver */
3065                NULL,                           /* Next driver */
3066                NULL,                           /* Device list */
3067                DRIVER_AMBAPP_GAISLER_GRSPW2_ID,/* Driver ID */
3068                "GRSPW_PKT_DRV",                /* Driver Name */
3069                DRVMGR_BUS_TYPE_AMBAPP,         /* Bus Type */
3070                &grspw2_ops,
3071                NULL,                           /* Funcs */
3072                0,                              /* No devices yet */
3073                sizeof(struct grspw_priv),      /* Let DrvMgr alloc priv */
3074        },
3075        &grspw2_ids[0]
3076};
3077
3078void grspw2_register_drv (void)
3079{
3080        GRSPW_DBG("Registering GRSPW2 packet driver\n");
3081        drvmgr_drv_register(&grspw2_drv_info.general);
3082}
3083
3084static int grspw2_init3(struct drvmgr_dev *dev)
3085{
3086        struct grspw_priv *priv;
3087        struct amba_dev_info *ambadev;
3088        struct ambapp_core *pnpinfo;
3089        int i, size;
3090        unsigned int ctrl, icctrl, numi;
3091        union drvmgr_key_value *value;
3092
3093        GRSPW_DBG("GRSPW[%d] on bus %s\n", dev->minor_drv,
3094                dev->parent->dev->name);
3095
3096        if (grspw_count >= GRSPW_MAX)
3097                return DRVMGR_ENORES;
3098
3099        priv = dev->priv;
3100        if (priv == NULL)
3101                return DRVMGR_NOMEM;
3102        priv->dev = dev;
3103
3104        /* If this is the first device, init the common part of the driver */
3105        if (grspw_common_init())
3106                return DRVMGR_FAIL;
3107
3108        /*** Now we take care of device initialization ***/
3109
3110        /* Get device information from AMBA PnP information */
3111        ambadev = (struct amba_dev_info *)dev->businfo;
3112        if (ambadev == NULL)
3113                return -1;
3114        pnpinfo = &ambadev->info;
3115        priv->irq = pnpinfo->irq;
3116        priv->regs = (struct grspw_regs *)pnpinfo->apb_slv->start;
3117
3118        /* Read Hardware Support from Control Register */
3119        ctrl = REG_READ(&priv->regs->ctrl);
3120        priv->hwsup.rmap = (ctrl & GRSPW_CTRL_RA) >> GRSPW_CTRL_RA_BIT;
3121        priv->hwsup.rmap_crc = (ctrl & GRSPW_CTRL_RC) >> GRSPW_CTRL_RC_BIT;
3122        priv->hwsup.rx_unalign = (ctrl & GRSPW_CTRL_RX) >> GRSPW_CTRL_RX_BIT;
3123        priv->hwsup.nports = 1 + ((ctrl & GRSPW_CTRL_PO) >> GRSPW_CTRL_PO_BIT);
3124        priv->hwsup.ndma_chans = 1 + ((ctrl & GRSPW_CTRL_NCH) >> GRSPW_CTRL_NCH_BIT);
3125        priv->hwsup.irq = ((ctrl & GRSPW_CTRL_ID) >> GRSPW_CTRL_ID_BIT);
3126        icctrl = REG_READ(&priv->regs->icctrl);
3127        numi = (icctrl & GRSPW_ICCTRL_NUMI) >> GRSPW_ICCTRL_NUMI_BIT;
3128        if (numi > 0)
3129                priv->hwsup.irq_num = 1 << (numi - 1);
3130        else
3131                priv->hwsup.irq_num = 0;
3132
3133        /* Construct hardware version identification */
3134        priv->hwsup.hw_version = pnpinfo->device << 16 | pnpinfo->apb_slv->ver;
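        /* e.g. a GRSPW2 core (AMBA device ID 0x029, an assumption for
         * illustration) with APB version 1 gives hw_version 0x00290001.
         */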
3135
3136        if ((pnpinfo->device == GAISLER_SPW2) ||
3137            (pnpinfo->device == GAISLER_SPW2_DMA)) {
3138                priv->hwsup.strip_adr = 1; /* All GRSPW2 can strip Address */
3139                priv->hwsup.strip_pid = 1; /* All GRSPW2 can strip PID */
3140        } else {
3141                unsigned int apb_hz, apb_khz;
3142
3143                /* Autodetect GRSPW1 features? */
3144                priv->hwsup.strip_adr = 0;
3145                priv->hwsup.strip_pid = 0;
3146
3147                drvmgr_freq_get(dev, DEV_APB_SLV, &apb_hz);
3148                apb_khz = apb_hz / 1000;
3149
3150                REG_WRITE(&priv->regs->timer,
3151                        ((grspw1_calc_discon(apb_khz) & 0x3FF) << 12) |
3152                        (grspw1_calc_timer64(apb_khz) & 0xFFF));
3153        }
3154
3155        /* Probe the width of the SpaceWire interrupt ISR timers. All have
3156         * the same width, so only the first is probed; if no timer is
3157         * present the result will be zero.
3158         */
3159        REG_WRITE(&priv->regs->icrlpresc, 0x7fffffff);
3160        ctrl = REG_READ(&priv->regs->icrlpresc);
3161        REG_WRITE(&priv->regs->icrlpresc, 0);
3162        priv->hwsup.itmr_width = 0;
3163        while (ctrl & 1) {
3164                priv->hwsup.itmr_width++;
3165                ctrl = ctrl >> 1;
3166        }
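        /* e.g. a core with 16-bit timers reads back 0xffff here and yields
         * itmr_width = 16; without timers the read returns 0 and so does
         * itmr_width.
         */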
3167
3168        /* Let user limit the number of DMA channels on this core to save
3169         * space. Only the first nDMA channels will be available.
3170         */
3171        value = drvmgr_dev_key_get(priv->dev, "nDMA", DRVMGR_KT_INT);
3172        if (value && (value->i < priv->hwsup.ndma_chans))
3173                priv->hwsup.ndma_chans = value->i;
3174
3175        /* Allocate and init Memory for all DMA channels */
3176        size = sizeof(struct grspw_dma_priv) * priv->hwsup.ndma_chans;
3177        priv->dma = (struct grspw_dma_priv *) malloc(size);
3178        if (priv->dma == NULL)
3179                return DRVMGR_NOMEM;
3180        memset(priv->dma, 0, size);
3181        for (i=0; i<priv->hwsup.ndma_chans; i++) {
3182                priv->dma[i].core = priv;
3183                priv->dma[i].index = i;
3184                priv->dma[i].regs = &priv->regs->dma[i];
3185        }
3186
3187        /* Startup Action:
3188         *  - stop DMA
3189         *  - do not bring down the link (RMAP may be active)
3190         *  - RMAP settings untouched (RMAP may be active)
3191         *  - port select untouched (RMAP may be active)
3192         *  - timecodes are disabled
3193         *  - IRQ generation disabled
3194         *  - status cleared
3195         *  - Node address / First DMA channel's node address
3196         *    is untouched (RMAP may be active)
3197         */
3198        grspw_hw_stop(priv);
3199        grspw_hw_softreset(priv);
3200
3201        /* Register the device in the driver's device table */
3202        priv->index = grspw_count;
3203        priv_tab[priv->index] = priv;
3204        grspw_count++;
3205
3206        /* Device name */
3207        sprintf(priv->devname, "grspw%d", priv->index);
3208
3209        /* Tell above layer about new device */
3210        if (grspw_dev_add)
3211                priv->data = grspw_dev_add(priv->index);
3212
3213        return DRVMGR_OK;
3214}
3215
3216/******************* Driver Implementation ***********************/
3217/* Creates a MsgQ (optional) and spawns a worker task associated with the
3218 * message Q. The task can also be associated with a custom msgQ if *pMsgQ
3219 * is non-zero (see the example sketch after grspw_work_cfg() below).
3220 */
3221rtems_id grspw_work_spawn(int prio, int stack, rtems_id *pMsgQ, int msgMax)
3222{
3223        rtems_id tid;
3224        int created_msgq = 0;
3225        static char work_name = 'A';
3226
3227        if (pMsgQ == NULL)
3228                return OBJECTS_ID_NONE;
3229
3230        if (*pMsgQ == OBJECTS_ID_NONE) {
3231                if (msgMax <= 0)
3232                        msgMax = 32;
3233
3234                if (rtems_message_queue_create(
3235                        rtems_build_name('S', 'G', 'Q', work_name),
3236                        msgMax, 4, RTEMS_FIFO, pMsgQ) !=
3237                        RTEMS_SUCCESSFUL)
3238                        return OBJECTS_ID_NONE;
3239                created_msgq = 1;
3240        }
3241
3242        if (prio < 0)
3243                prio = grspw_work_task_priority; /* default prio */
3244        if (stack < 0x800)
3245                stack = RTEMS_MINIMUM_STACK_SIZE; /* default stack size */
3246
3247        if (rtems_task_create(rtems_build_name('S', 'G', 'T', work_name),
3248                prio, stack, RTEMS_PREEMPT | RTEMS_NO_ASR,
3249                RTEMS_NO_FLOATING_POINT, &tid) != RTEMS_SUCCESSFUL)
3250                tid = OBJECTS_ID_NONE;
3251        else if (rtems_task_start(tid, (rtems_task_entry)grspw_work_func, *pMsgQ) !=
3252                    RTEMS_SUCCESSFUL) {
3253                rtems_task_delete(tid);
3254                tid = OBJECTS_ID_NONE;
3255        }
3256
3257        if (tid == OBJECTS_ID_NONE && created_msgq) {
3258                rtems_message_queue_delete(*pMsgQ);
3259                *pMsgQ = OBJECTS_ID_NONE;
3260        } else {
3261                if (++work_name > 'Z')
3262                        work_name = 'A';
3263        }
3264        return tid;
3265}
3266
3267/* Free the task associated with the message queue and optionally also the
3268 * message queue itself. The message queue is deleted by the work task itself,
3269 * so deletion is delayed until the work task resumes its execution.
3270 */
3271rtems_status_code grspw_work_free(rtems_id msgQ, int freeMsgQ)
3272{
3273        int msg = WORK_QUIT_TASK;
3274        if (freeMsgQ)
3275                msg |= WORK_FREE_MSGQ;
3276        return rtems_message_queue_send(msgQ, &msg, 4);
3277}
3278
3279void grspw_work_cfg(void *d, struct grspw_work_config *wc)
3280{
3281        struct grspw_priv *priv = (struct grspw_priv *)d;
3282
3283        if (wc == NULL)
3284                wc = &grspw_wc_def; /* use default config */
3285        priv->wc = *wc;
3286}
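/* Example sketch (illustrative only): give one device a private work task
 * instead of the shared default one. "dev" is assumed to be the device
 * handle (as returned by grspw_open()); grspw_work_spawn() creates the
 * message queue, and the default rtems_message_queue_send() hook is reused
 * just as grspw_common_init() does for the global work task below.
 */
static int example_private_work(void *dev)
{
        struct grspw_work_config wc;
        rtems_id msgq = OBJECTS_ID_NONE;

        if (grspw_work_spawn(-1, 0, &msgq, 8) == OBJECTS_ID_NONE)
                return -1;

        wc.msgisr = (grspw_msgqisr_t)rtems_message_queue_send;
        wc.msgisr_arg = (void *)msgq;
        grspw_work_cfg(dev, &wc);
        return 0;
}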
3287
3288#ifdef RTEMS_SMP
3289int grspw_isr_affinity(void *d, const cpu_set_t *cpus)
3290{
3291        return -1; /* BSP support only static configured IRQ affinity */
3292}
3293#endif
3294
3295static int grspw_common_init(void)
3296{
3297        if (grspw_initialized == 1)
3298                return 0;
3299        if (grspw_initialized == -1)
3300                return -1;
3301        grspw_initialized = -1;
3302
3303        /* Device Semaphore created with count = 1 */
3304        if (rtems_semaphore_create(rtems_build_name('S', 'G', 'L', 'S'), 1,
3305            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
3306            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
3307            RTEMS_NO_PRIORITY_CEILING, 0, &grspw_sem) != RTEMS_SUCCESSFUL)
3308                return -1;
3309
3310        /* Work queue and work thread. Not created if the user disables it;
3311         * the user can disable it to save resources when interrupts are not used.
3312         */
3313        if (grspw_work_task_priority != -1) {
3314                grspw_work_task = grspw_work_spawn(-1, 0,
3315                        (rtems_id *)&grspw_wc_def.msgisr_arg, 0);
3316                if (grspw_work_task == OBJECTS_ID_NONE)
3317                        return -2;
3318                grspw_wc_def.msgisr =
3319                        (grspw_msgqisr_t) rtems_message_queue_send;
3320        } else {
3321                grspw_wc_def.msgisr = NULL;
3322                grspw_wc_def.msgisr_arg = NULL;
3323        }
3324
3325        grspw_initialized = 1;
3326        return 0;
3327}