source: rtems/c/src/lib/libbsp/sparc/shared/spw/grspw_pkt.c @ 77856f6

Last change on this file since 77856f6 was 77856f6, checked in by Daniel Hellstrom <daniel@…>, on 03/30/16 at 12:29:52

leon, grspw_pkt: allow user controlled DMA intr

The user already has the ability to control which DMA buffer
will generate an interrupt, but there was no clean way to enable
RX/TX interrupts on a DMA channel. Without this patch the user had
to init DMA config rx/tx_irq_en_cnt to a very large value.
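
For illustration, a minimal sketch of the workaround this patch removes
(assuming grspw_dma_config()/grspw_dma_open() and the flag names from the
public header bsp/grspw_pkt.h; "dma" is an already opened DMA channel):

    #include <string.h>
    #include <bsp/grspw_pkt.h>

    /* Request an RX interrupt per received buffer. Pre-patch, the pacing
     * counter had to be set to a huge value just to keep the channel's RX
     * IRQ enable on; the per-buffer RXPKT_FLAG_IE flag then selects which
     * buffers actually interrupt.
     */
    static void rx_irq_per_buffer(void *dma, struct grspw_pkt *pkt)
    {
            struct grspw_dma_config cfg;

            memset(&cfg, 0, sizeof(cfg));
            cfg.rx_irq_en_cnt = 0x3fffffff; /* pacing IRQ effectively never */
            grspw_dma_config(dma, &cfg);    /* apply channel configuration */

            pkt->flags |= RXPKT_FLAG_IE;    /* IRQ when this buffer completes */
    }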

  • Property mode set to 100644
File size: 84.1 KB
/*
 * Cobham Gaisler GRSPW/GRSPW2 SpaceWire Kernel Library Interface for RTEMS.
 *
 * This driver can be used to implement a standard I/O system "char"-driver
 * or used directly. NOTE: SMP support has not been tested.
 *
 * COPYRIGHT (c) 2011
 * Cobham Gaisler AB
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#include <rtems.h>
#include <bsp.h>
#include <rtems/libio.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <ctype.h>
#include <malloc.h>
#include <rtems/bspIo.h>

#include <drvmgr/drvmgr.h>
#include <ambapp.h>
#include <drvmgr/ambapp_bus.h>
#include <bsp/grspw_pkt.h>

/* This driver has been prepared for SMP operation, but has never been tested
 * on an SMP system - use at your own risk.
 */
#ifdef RTEMS_HAS_SMP

#include <rtems/score/smplock.h> /* spin-lock */

/* SPIN_LOCK() and SPIN_UNLOCK() are not implemented by RTEMS. Use the _IRQ
 * versions to implement them.
 */
#define SPIN_DECLARE(name) SMP_lock_spinlock_simple_Control name
#define SPIN_INIT(lock) _SMP_lock_spinlock_simple_Initialize(lock)
#define SPIN_LOCK(lock, level) SPIN_LOCK_IRQ(lock, level)
#define SPIN_LOCK_IRQ(lock, level) (level) = _SMP_lock_spinlock_simple_Obtain(lock)
#define SPIN_UNLOCK(lock, level) SPIN_UNLOCK_IRQ(lock, level)
#define SPIN_UNLOCK_IRQ(lock, level) _SMP_lock_spinlock_simple_Release(lock, level)
#define IRQFLAGS_TYPE ISR_Level

#else

#define SPIN_DECLARE(name)
#define SPIN_INIT(lock)
#define SPIN_LOCK(lock, level)
#define SPIN_LOCK_IRQ(lock, level) rtems_interrupt_disable(level)
#define SPIN_UNLOCK(lock, level)
#define SPIN_UNLOCK_IRQ(lock, level) rtems_interrupt_enable(level)
#define IRQFLAGS_TYPE rtems_interrupt_level

#endif
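
/* Illustrative note (added, not upstream): every critical section in this
 * file that must be safe against the GRSPW ISR follows the pattern
 *
 *      IRQFLAGS_TYPE irqflags;
 *
 *      SPIN_LOCK_IRQ(&priv->devlock, irqflags);
 *      ... read-modify-write of the CTRL/DMACTRL registers ...
 *      SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
 *
 * On non-SMP builds the macros degenerate to plain interrupt
 * disable/enable on the local CPU.
 */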

/*#define STATIC*/
#define STATIC static

/*#define GRSPW_DBG(args...) printk(args)*/
#define GRSPW_DBG(args...)

struct grspw_dma_regs {
        volatile unsigned int ctrl;     /* DMA Channel Control */
        volatile unsigned int rxmax;    /* RX Max Packet Length */
        volatile unsigned int txdesc;   /* TX Descriptor Base/Current */
        volatile unsigned int rxdesc;   /* RX Descriptor Base/Current */
        volatile unsigned int addr;     /* Address Register */
        volatile unsigned int resv[3];
};

struct grspw_regs {
        volatile unsigned int ctrl;
        volatile unsigned int status;
        volatile unsigned int nodeaddr;
        volatile unsigned int clkdiv;
        volatile unsigned int destkey;
        volatile unsigned int time;
        volatile unsigned int timer;    /* Used only in GRSPW1 */
        volatile unsigned int resv1;

        /* DMA Registers, ctrl.NCH determines the number of DMA channels,
         * up to 4 channels are supported
         */
        struct grspw_dma_regs dma[4];

        volatile unsigned int icctrl;
        volatile unsigned int icrx;
        volatile unsigned int icack;
        volatile unsigned int ictimeout;
        volatile unsigned int ictickomask;
        volatile unsigned int icaamask;
        volatile unsigned int icrlpresc;
        volatile unsigned int icrlisr;
        volatile unsigned int icrlintack;
        volatile unsigned int resv2;
        volatile unsigned int icisr;
        volatile unsigned int resv3;
};
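
/* Layout sanity sketch (added, not upstream): with 32-bit registers the
 * structure above yields the documented offsets, which can be checked at
 * compile time with <stddef.h>, e.g.:
 *
 *      _Static_assert(offsetof(struct grspw_regs, dma) == 0x20, "dma base");
 *      _Static_assert(offsetof(struct grspw_regs, dma[1]) == 0x40, "0x20*N stride");
 */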

/* GRSPW - Control Register - 0x00 */
#define GRSPW_CTRL_RA_BIT       31
#define GRSPW_CTRL_RX_BIT       30
#define GRSPW_CTRL_RC_BIT       29
#define GRSPW_CTRL_NCH_BIT      27
#define GRSPW_CTRL_PO_BIT       26
#define GRSPW_CTRL_ID_BIT       24
#define GRSPW_CTRL_LE_BIT       22
#define GRSPW_CTRL_PS_BIT       21
#define GRSPW_CTRL_NP_BIT       20
#define GRSPW_CTRL_RD_BIT       17
#define GRSPW_CTRL_RE_BIT       16
#define GRSPW_CTRL_TF_BIT       12
#define GRSPW_CTRL_TR_BIT       11
#define GRSPW_CTRL_TT_BIT       10
#define GRSPW_CTRL_LI_BIT       9
#define GRSPW_CTRL_TQ_BIT       8
#define GRSPW_CTRL_RS_BIT       6
#define GRSPW_CTRL_PM_BIT       5
#define GRSPW_CTRL_TI_BIT       4
#define GRSPW_CTRL_IE_BIT       3
#define GRSPW_CTRL_AS_BIT       2
#define GRSPW_CTRL_LS_BIT       1
#define GRSPW_CTRL_LD_BIT       0

#define GRSPW_CTRL_RA   (1<<GRSPW_CTRL_RA_BIT)
#define GRSPW_CTRL_RX   (1<<GRSPW_CTRL_RX_BIT)
#define GRSPW_CTRL_RC   (1<<GRSPW_CTRL_RC_BIT)
#define GRSPW_CTRL_NCH  (0x3<<GRSPW_CTRL_NCH_BIT)
#define GRSPW_CTRL_PO   (1<<GRSPW_CTRL_PO_BIT)
#define GRSPW_CTRL_ID   (1<<GRSPW_CTRL_ID_BIT)
#define GRSPW_CTRL_LE   (1<<GRSPW_CTRL_LE_BIT)
#define GRSPW_CTRL_PS   (1<<GRSPW_CTRL_PS_BIT)
#define GRSPW_CTRL_NP   (1<<GRSPW_CTRL_NP_BIT)
#define GRSPW_CTRL_RD   (1<<GRSPW_CTRL_RD_BIT)
#define GRSPW_CTRL_RE   (1<<GRSPW_CTRL_RE_BIT)
#define GRSPW_CTRL_TF   (1<<GRSPW_CTRL_TF_BIT)
#define GRSPW_CTRL_TR   (1<<GRSPW_CTRL_TR_BIT)
#define GRSPW_CTRL_TT   (1<<GRSPW_CTRL_TT_BIT)
#define GRSPW_CTRL_LI   (1<<GRSPW_CTRL_LI_BIT)
#define GRSPW_CTRL_TQ   (1<<GRSPW_CTRL_TQ_BIT)
#define GRSPW_CTRL_RS   (1<<GRSPW_CTRL_RS_BIT)
#define GRSPW_CTRL_PM   (1<<GRSPW_CTRL_PM_BIT)
#define GRSPW_CTRL_TI   (1<<GRSPW_CTRL_TI_BIT)
#define GRSPW_CTRL_IE   (1<<GRSPW_CTRL_IE_BIT)
#define GRSPW_CTRL_AS   (1<<GRSPW_CTRL_AS_BIT)
#define GRSPW_CTRL_LS   (1<<GRSPW_CTRL_LS_BIT)
#define GRSPW_CTRL_LD   (1<<GRSPW_CTRL_LD_BIT)

#define GRSPW_CTRL_IRQSRC_MASK \
        (GRSPW_CTRL_LI | GRSPW_CTRL_TQ)
#define GRSPW_ICCTRL_IRQSRC_MASK \
        (GRSPW_ICCTRL_TQ | GRSPW_ICCTRL_AQ | GRSPW_ICCTRL_IQ)


/* GRSPW - Status Register - 0x04 */
#define GRSPW_STS_LS_BIT        21
#define GRSPW_STS_AP_BIT        9
#define GRSPW_STS_EE_BIT        8
#define GRSPW_STS_IA_BIT        7
#define GRSPW_STS_WE_BIT        6       /* GRSPW1 */
#define GRSPW_STS_PE_BIT        4
#define GRSPW_STS_DE_BIT        3
#define GRSPW_STS_ER_BIT        2
#define GRSPW_STS_CE_BIT        1
#define GRSPW_STS_TO_BIT        0

#define GRSPW_STS_LS    (0x7<<GRSPW_STS_LS_BIT)
#define GRSPW_STS_AP    (1<<GRSPW_STS_AP_BIT)
#define GRSPW_STS_EE    (1<<GRSPW_STS_EE_BIT)
#define GRSPW_STS_IA    (1<<GRSPW_STS_IA_BIT)
#define GRSPW_STS_WE    (1<<GRSPW_STS_WE_BIT)   /* GRSPW1 */
#define GRSPW_STS_PE    (1<<GRSPW_STS_PE_BIT)
#define GRSPW_STS_DE    (1<<GRSPW_STS_DE_BIT)
#define GRSPW_STS_ER    (1<<GRSPW_STS_ER_BIT)
#define GRSPW_STS_CE    (1<<GRSPW_STS_CE_BIT)
#define GRSPW_STS_TO    (1<<GRSPW_STS_TO_BIT)

/* GRSPW - Default Address Register - 0x08 */
#define GRSPW_DEF_ADDR_BIT      0
#define GRSPW_DEF_MASK_BIT      8
#define GRSPW_DEF_ADDR  (0xff<<GRSPW_DEF_ADDR_BIT)
#define GRSPW_DEF_MASK  (0xff<<GRSPW_DEF_MASK_BIT)

/* GRSPW - Clock Divisor Register - 0x0C */
#define GRSPW_CLKDIV_START_BIT  8
#define GRSPW_CLKDIV_RUN_BIT    0
#define GRSPW_CLKDIV_START      (0xff<<GRSPW_CLKDIV_START_BIT)
#define GRSPW_CLKDIV_RUN        (0xff<<GRSPW_CLKDIV_RUN_BIT)
#define GRSPW_CLKDIV_MASK       (GRSPW_CLKDIV_START|GRSPW_CLKDIV_RUN)

/* GRSPW - Destination key Register - 0x10 */
#define GRSPW_DK_DESTKEY_BIT    0
#define GRSPW_DK_DESTKEY        (0xff<<GRSPW_DK_DESTKEY_BIT)

/* GRSPW - Time Register - 0x14 */
#define GRSPW_TIME_CTRL_BIT     6
#define GRSPW_TIME_CNT_BIT      0
#define GRSPW_TIME_CTRL         (0x3<<GRSPW_TIME_CTRL_BIT)
#define GRSPW_TIME_TCNT         (0x3f<<GRSPW_TIME_CNT_BIT)

/* GRSPW - DMA Control Register - 0x20*N */
#define GRSPW_DMACTRL_LE_BIT    16
#define GRSPW_DMACTRL_SP_BIT    15
#define GRSPW_DMACTRL_SA_BIT    14
#define GRSPW_DMACTRL_EN_BIT    13
#define GRSPW_DMACTRL_NS_BIT    12
#define GRSPW_DMACTRL_RD_BIT    11
#define GRSPW_DMACTRL_RX_BIT    10
#define GRSPW_DMACTRL_AT_BIT    9
#define GRSPW_DMACTRL_RA_BIT    8
#define GRSPW_DMACTRL_TA_BIT    7
#define GRSPW_DMACTRL_PR_BIT    6
#define GRSPW_DMACTRL_PS_BIT    5
#define GRSPW_DMACTRL_AI_BIT    4
#define GRSPW_DMACTRL_RI_BIT    3
#define GRSPW_DMACTRL_TI_BIT    2
#define GRSPW_DMACTRL_RE_BIT    1
#define GRSPW_DMACTRL_TE_BIT    0

#define GRSPW_DMACTRL_LE        (1<<GRSPW_DMACTRL_LE_BIT)
#define GRSPW_DMACTRL_SP        (1<<GRSPW_DMACTRL_SP_BIT)
#define GRSPW_DMACTRL_SA        (1<<GRSPW_DMACTRL_SA_BIT)
#define GRSPW_DMACTRL_EN        (1<<GRSPW_DMACTRL_EN_BIT)
#define GRSPW_DMACTRL_NS        (1<<GRSPW_DMACTRL_NS_BIT)
#define GRSPW_DMACTRL_RD        (1<<GRSPW_DMACTRL_RD_BIT)
#define GRSPW_DMACTRL_RX        (1<<GRSPW_DMACTRL_RX_BIT)
#define GRSPW_DMACTRL_AT        (1<<GRSPW_DMACTRL_AT_BIT)
#define GRSPW_DMACTRL_RA        (1<<GRSPW_DMACTRL_RA_BIT)
#define GRSPW_DMACTRL_TA        (1<<GRSPW_DMACTRL_TA_BIT)
#define GRSPW_DMACTRL_PR        (1<<GRSPW_DMACTRL_PR_BIT)
#define GRSPW_DMACTRL_PS        (1<<GRSPW_DMACTRL_PS_BIT)
#define GRSPW_DMACTRL_AI        (1<<GRSPW_DMACTRL_AI_BIT)
#define GRSPW_DMACTRL_RI        (1<<GRSPW_DMACTRL_RI_BIT)
#define GRSPW_DMACTRL_TI        (1<<GRSPW_DMACTRL_TI_BIT)
#define GRSPW_DMACTRL_RE        (1<<GRSPW_DMACTRL_RE_BIT)
#define GRSPW_DMACTRL_TE        (1<<GRSPW_DMACTRL_TE_BIT)

/* GRSPW - DMA Channel Max Packet Length Register - (0x20*N + 0x04) */
#define GRSPW_DMARXLEN_MAX_BIT  0
#define GRSPW_DMARXLEN_MAX      (0xffffff<<GRSPW_DMARXLEN_MAX_BIT)

/* GRSPW - DMA Channel Address Register - (0x20*N + 0x10) */
#define GRSPW_DMAADR_ADDR_BIT   0
#define GRSPW_DMAADR_MASK_BIT   8
#define GRSPW_DMAADR_ADDR       (0xff<<GRSPW_DMAADR_ADDR_BIT)
#define GRSPW_DMAADR_MASK       (0xff<<GRSPW_DMAADR_MASK_BIT)

/* GRSPW - Interrupt code receive register - 0xa4 */
#define GRSPW_ICCTRL_INUM_BIT   27
#define GRSPW_ICCTRL_IA_BIT     24
#define GRSPW_ICCTRL_LE_BIT     23
#define GRSPW_ICCTRL_PR_BIT     22
#define GRSPW_ICCTRL_DQ_BIT     21 /* never used */
#define GRSPW_ICCTRL_TQ_BIT     20
#define GRSPW_ICCTRL_AQ_BIT     19
#define GRSPW_ICCTRL_IQ_BIT     18
#define GRSPW_ICCTRL_IR_BIT     17
#define GRSPW_ICCTRL_IT_BIT     16
#define GRSPW_ICCTRL_NUMI_BIT   13
#define GRSPW_ICCTRL_BIRQ_BIT   8
#define GRSPW_ICCTRL_ID_BIT     7
#define GRSPW_ICCTRL_II_BIT     6
#define GRSPW_ICCTRL_TXIRQ_BIT  0
#define GRSPW_ICCTRL_INUM       (0x3f << GRSPW_ICCTRL_INUM_BIT)
#define GRSPW_ICCTRL_IA         (1 << GRSPW_ICCTRL_IA_BIT)
#define GRSPW_ICCTRL_LE         (1 << GRSPW_ICCTRL_LE_BIT)
#define GRSPW_ICCTRL_PR         (1 << GRSPW_ICCTRL_PR_BIT)
#define GRSPW_ICCTRL_DQ         (1 << GRSPW_ICCTRL_DQ_BIT)
#define GRSPW_ICCTRL_TQ         (1 << GRSPW_ICCTRL_TQ_BIT)
#define GRSPW_ICCTRL_AQ         (1 << GRSPW_ICCTRL_AQ_BIT)
#define GRSPW_ICCTRL_IQ         (1 << GRSPW_ICCTRL_IQ_BIT)
#define GRSPW_ICCTRL_IR         (1 << GRSPW_ICCTRL_IR_BIT)
#define GRSPW_ICCTRL_IT         (1 << GRSPW_ICCTRL_IT_BIT)
#define GRSPW_ICCTRL_NUMI       (0x7 << GRSPW_ICCTRL_NUMI_BIT)
#define GRSPW_ICCTRL_BIRQ       (0x1f << GRSPW_ICCTRL_BIRQ_BIT)
#define GRSPW_ICCTRL_ID         (1 << GRSPW_ICCTRL_ID_BIT)
#define GRSPW_ICCTRL_II         (1 << GRSPW_ICCTRL_II_BIT)
#define GRSPW_ICCTRL_TXIRQ      (0x3f << GRSPW_ICCTRL_TXIRQ_BIT)

/* RX Buffer Descriptor */
struct grspw_rxbd {
   volatile unsigned int ctrl;
   volatile unsigned int addr;
};

/* TX Buffer Descriptor */
struct grspw_txbd {
   volatile unsigned int ctrl;
   volatile unsigned int haddr;
   volatile unsigned int dlen;
   volatile unsigned int daddr;
};

/* GRSPW - DMA RXBD Ctrl */
#define GRSPW_RXBD_LEN_BIT 0
#define GRSPW_RXBD_LEN  (0x1ffffff<<GRSPW_RXBD_LEN_BIT)
#define GRSPW_RXBD_EN   (1<<25)
#define GRSPW_RXBD_WR   (1<<26)
#define GRSPW_RXBD_IE   (1<<27)
#define GRSPW_RXBD_EP   (1<<28)
#define GRSPW_RXBD_HC   (1<<29)
#define GRSPW_RXBD_DC   (1<<30)
#define GRSPW_RXBD_TR   (1<<31)

#define GRSPW_TXBD_HLEN (0xff<<0)
#define GRSPW_TXBD_NCL  (0xf<<8)
#define GRSPW_TXBD_EN   (1<<12)
#define GRSPW_TXBD_WR   (1<<13)
#define GRSPW_TXBD_IE   (1<<14)
#define GRSPW_TXBD_LE   (1<<15)
#define GRSPW_TXBD_HC   (1<<16)
#define GRSPW_TXBD_DC   (1<<17)

#define GRSPW_DMAADR_MASK_BIT   8
#define GRSPW_DMAADR_ADDR       (0xff<<GRSPW_DMAADR_ADDR_BIT)
#define GRSPW_DMAADR_MASK       (0xff<<GRSPW_DMAADR_MASK_BIT)


/* GRSPW Error Condition */
#define GRSPW_STAT_ERROR        (GRSPW_STS_EE | GRSPW_STS_IA | GRSPW_STS_WE | GRSPW_STS_PE | GRSPW_STS_DE | GRSPW_STS_ER | GRSPW_STS_CE)
#define GRSPW_DMA_STATUS_ERROR  (GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA)
/* GRSPW Link configuration options */
#define GRSPW_LINK_CFG          (GRSPW_CTRL_LI | GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS)
#define GRSPW_LINKSTATE(status) ((status & GRSPW_CTRL_LS) >> GRSPW_CTRL_LS_BIT)

/* Software Defaults */
#define DEFAULT_RXMAX 1024      /* 1 KBytes Max RX Packet Size */

/* GRSPW Constants */
#define GRSPW_TXBD_NR 64        /* Maximum number of TX Descriptors */
#define GRSPW_RXBD_NR 128       /* Maximum number of RX Descriptors */
#define GRSPW_TXBD_SIZE 16      /* Size in bytes of one TX descriptor */
#define GRSPW_RXBD_SIZE 8       /* Size in bytes of one RX descriptor */
#define BDTAB_SIZE 0x400        /* BD Table Size (RX or TX) */
#define BDTAB_ALIGN 0x400       /* BD Table Alignment Requirement */
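
/* Note (added for clarity): BDTAB_SIZE is exactly one full ring in either
 * direction:
 *      GRSPW_RXBD_NR * GRSPW_RXBD_SIZE = 128 * 8  = 0x400 bytes
 *      GRSPW_TXBD_NR * GRSPW_TXBD_SIZE =  64 * 16 = 0x400 bytes
 */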

/* Memory and HW Registers Access routines. All 32-bit access routines */
#define BD_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
/*#define BD_READ(addr) (*(volatile unsigned int *)(addr))*/
#define BD_READ(addr) leon_r32_no_cache((unsigned long)(addr))
#define REG_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
#define REG_READ(addr) (*(volatile unsigned int *)(addr))

struct grspw_ring {
        struct grspw_ring *next;        /* Next Descriptor */
        union {
                struct grspw_txbd *tx;  /* Descriptor Address */
                struct grspw_rxbd *rx;  /* Descriptor Address */
        } bd;
        struct grspw_pkt *pkt;          /* Packet description associated. NULL if none */
};

/* An entry in the TX descriptor Ring */
struct grspw_txring {
        struct grspw_txring *next;      /* Next Descriptor */
        struct grspw_txbd *bd;          /* Descriptor Address */
        struct grspw_pkt *pkt;          /* Packet description associated. NULL if none */
};

/* An entry in the RX descriptor Ring */
struct grspw_rxring {
        struct grspw_rxring *next;      /* Next Descriptor */
        struct grspw_rxbd *bd;          /* Descriptor Address */
        struct grspw_pkt *pkt;          /* Packet description associated. NULL if none */
};


struct grspw_dma_priv {
        struct grspw_priv *core;        /* GRSPW Core */
        struct grspw_dma_regs *regs;    /* DMA Channel Registers */
        int index;                      /* DMA Channel Index @ GRSPW core */
        int open;                       /* DMA Channel opened by user */
        int started;                    /* DMA Channel activity (start|stop) */
        rtems_id sem_dma;               /* DMA Channel Semaphore */
        struct grspw_dma_stats stats;   /* DMA Channel Statistics */
        struct grspw_dma_config cfg;    /* DMA Channel Configuration */

        /*** RX ***/

        /* RX Descriptor Ring */
        struct grspw_rxbd *rx_bds;              /* Descriptor Address */
        struct grspw_rxbd *rx_bds_hwa;          /* Descriptor HW Address */
        struct grspw_rxring *rx_ring_base;
        struct grspw_rxring *rx_ring_head;      /* Next descriptor to enable */
        struct grspw_rxring *rx_ring_tail;      /* Oldest enabled Descriptor */
        int rx_irq_en_cnt_curr;
        struct {
                int waiting;
                int ready_cnt;
                int op;
                int recv_cnt;
                rtems_id sem_wait;              /* RX Semaphore used to implement RX blocking */
        } rx_wait;

        /* Queue of Packets READY to be scheduled */
        struct grspw_list ready;
        int ready_cnt;

        /* Scheduled RX Packets Queue */
        struct grspw_list rx_sched;
        int rx_sched_cnt;

        /* Queue of Packets that have been RECEIVED */
        struct grspw_list recv;
        int recv_cnt;


        /*** TX ***/

        /* TX Descriptor Ring */
        struct grspw_txbd *tx_bds;              /* Descriptor Address */
        struct grspw_txbd *tx_bds_hwa;          /* Descriptor HW Address */
        struct grspw_txring *tx_ring_base;
        struct grspw_txring *tx_ring_head;
        struct grspw_txring *tx_ring_tail;
        int tx_irq_en_cnt_curr;
        struct {
                int waiting;
                int send_cnt;
                int op;
                int sent_cnt;
                rtems_id sem_wait;              /* TX Semaphore used to implement TX blocking */
        } tx_wait;

        /* Queue of Packets ready to be scheduled for transmission */
        struct grspw_list send;
        int send_cnt;

        /* Scheduled TX Packets Queue */
        struct grspw_list tx_sched;
        int tx_sched_cnt;

        /* Queue of Packets that have been SENT */
        struct grspw_list sent;
        int sent_cnt;
};

struct grspw_priv {
        char devname[8];                /* Device name "grspw%d" */
        struct drvmgr_dev *dev;         /* Device */
        struct grspw_regs *regs;        /* Virtual Address of APB Registers */
        int irq;                        /* AMBA IRQ number of core */
        int index;                      /* Index in order it was probed */
        int core_index;                 /* Core Bus Index */
        int open;                       /* If Device is already opened (=1) or not (=0) */
        void *data;                     /* User private Data for this device instance, set by grspw_initialize_user */

        /* Features supported by Hardware */
        struct grspw_hw_sup hwsup;

        /* Pointer to an array of Maximally 4 DMA Channels */
        struct grspw_dma_priv *dma;

        /* Spin-lock ISR protection */
        SPIN_DECLARE(devlock);

        /* Descriptor Memory Area for TX & RX and all DMA channels */
        unsigned int bd_mem;
        unsigned int bd_mem_alloced;

        /*** Time Code Handling ***/
        void (*tcisr)(void *data, int timecode);
        void *tcisr_arg;

        /*** Interrupt-code Handling ***/
        spwpkt_ic_isr_t icisr;
        void *icisr_arg;

        /* Bit mask representing events which shall cause link disable. */
        unsigned int dis_link_on_err;

        /* Bit mask for link status bits to clear by ISR */
        unsigned int stscfg;

        /* "Core Global" Statistics gathered, not dependent on DMA channel */
        struct grspw_core_stats stats;
};

int grspw_initialized = 0;
int grspw_count = 0;
struct workqueue_struct *grspw_workq = NULL;
rtems_id grspw_sem;
static struct grspw_priv *priv_tab[GRSPW_MAX];

/* callback to upper layer when devices are discovered/removed */
void *(*grspw_dev_add)(int) = NULL;
void (*grspw_dev_del)(int,void*) = NULL;

/* USER OVERRIDABLE - The work task priority. Set to -1 to disable creating
 * the work-task and work-queue to save space.
 */
int grspw_work_task_priority __attribute__((weak)) = 100;
int grspw_task_stop = 0;
rtems_id grspw_work_task;
rtems_id grspw_work_queue = 0;
#define WORK_NONE         0
#define WORK_SHUTDOWN     0x100
#define WORK_DMA(channel) (0x1 << (channel))
#define WORK_DMA_MASK     0xf /* max 4 channels */
#define WORK_CORE_BIT     16
#define WORK_CORE_MASK    0xffff
#define WORK_CORE(device) ((device) << WORK_CORE_BIT)
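
/* Example (added, illustrative): a work message for DMA channel 2 of
 * device 1 is composed and decoded as
 *
 *      unsigned int msg = WORK_CORE(1) | WORK_DMA(2);
 *      int device    = (msg >> WORK_CORE_BIT) & WORK_CORE_MASK;
 *      int dma_chans = msg & WORK_DMA_MASK;    (one bit per channel)
 */
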
STATIC void grspw_hw_stop(struct grspw_priv *priv);
STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma);
STATIC void grspw_dma_reset(struct grspw_dma_priv *dma);
STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma);
STATIC void grspw_isr(void *data);

void *grspw_open(int dev_no)
{
        struct grspw_priv *priv;
        unsigned int bdtabsize, hwa;
        int i;
        union drvmgr_key_value *value;

        if (grspw_initialized != 1 || (dev_no >= grspw_count))
                return NULL;

        priv = priv_tab[dev_no];

        /* Take GRSPW lock - Wait until we get semaphore */
        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return NULL;

        if (priv->open) {
                priv = NULL;
                goto out;
        }

        /* Initialize Spin-lock for GRSPW Device. This is to protect
         * CTRL and DMACTRL registers from ISR.
         */
        SPIN_INIT(&priv->devlock);

        priv->tcisr = NULL;
        priv->tcisr_arg = NULL;
        priv->icisr = NULL;
        priv->icisr_arg = NULL;
        priv->stscfg = LINKSTS_MASK;

        grspw_stats_clr(priv);

        /* Allocate TX & RX Descriptor memory area for all DMA
         * channels. Max-size descriptor area is allocated (or user assigned):
         *  - 128 RX descriptors per DMA Channel
         *  - 64 TX descriptors per DMA Channel
         * Specified address must be in CPU RAM.
         */
        bdtabsize = 2 * BDTAB_SIZE * priv->hwsup.ndma_chans;
        value = drvmgr_dev_key_get(priv->dev, "bdDmaArea", DRVMGR_KT_INT);
        if (value) {
                priv->bd_mem = value->i;
                priv->bd_mem_alloced = 0;
                if (priv->bd_mem & (BDTAB_ALIGN-1)) {
                        GRSPW_DBG("GRSPW[%d]: user-def DMA-area not aligned",
                                  priv->index);
                        priv = NULL;
                        goto out;
                }
        } else {
                priv->bd_mem_alloced = (unsigned int)malloc(bdtabsize + BDTAB_ALIGN - 1);
                if (priv->bd_mem_alloced == 0) {
                        priv = NULL;
                        goto out;
                }
                /* Align memory */
                priv->bd_mem = (priv->bd_mem_alloced + (BDTAB_ALIGN - 1)) &
                               ~(BDTAB_ALIGN-1);
        }

        /* Translate into DMA address that HW can use to access DMA
         * descriptors
         */
        drvmgr_translate_check(
                priv->dev,
                CPUMEM_TO_DMA,
                (void *)priv->bd_mem,
                (void **)&hwa,
                bdtabsize);

        GRSPW_DBG("GRSPW%d DMA descriptor table setup: (alloced:%p, bd_mem:%p, size: %d)\n",
                priv->index, priv->bd_mem_alloced, priv->bd_mem, bdtabsize + BDTAB_ALIGN - 1);
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                /* Do DMA Channel Init, other variables etc. are initialized
                 * when the respective DMA channel is opened.
                 *
                 * index & core are initialized by probe function.
                 */
                priv->dma[i].open = 0;
                priv->dma[i].rx_bds = (struct grspw_rxbd *)
                        (priv->bd_mem + i*BDTAB_SIZE*2);
                priv->dma[i].rx_bds_hwa = (struct grspw_rxbd *)
                        (hwa + BDTAB_SIZE*(2*i));
                priv->dma[i].tx_bds = (struct grspw_txbd *)
                        (priv->bd_mem + BDTAB_SIZE*(2*i+1));
                priv->dma[i].tx_bds_hwa = (struct grspw_txbd *)
                        (hwa + BDTAB_SIZE*(2*i+1));
                GRSPW_DBG("  DMA[%i]: RX %p - %p (%p - %p)   TX %p - %p (%p - %p)\n",
                        i,
                        priv->dma[i].rx_bds, (void *)priv->dma[i].rx_bds + BDTAB_SIZE - 1,
                        priv->dma[i].rx_bds_hwa, (void *)priv->dma[i].rx_bds_hwa + BDTAB_SIZE - 1,
                        priv->dma[i].tx_bds, (void *)priv->dma[i].tx_bds + BDTAB_SIZE - 1,
                        priv->dma[i].tx_bds_hwa, (void *)priv->dma[i].tx_bds_hwa + BDTAB_SIZE - 1);
        }

        /* Basic initialization of hardware, clear some registers but
         * keep Link/RMAP/Node-Address registers intact.
         */
        grspw_hw_stop(priv);

        /* Register Interrupt handler and enable IRQ at IRQ ctrl */
        drvmgr_interrupt_register(priv->dev, 0, priv->devname, grspw_isr, priv);

        /* Take the device */
        priv->open = 1;
out:
        rtems_semaphore_release(grspw_sem);
        return priv;
}
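
/* Usage sketch (added, not upstream):
 *
 *      void *dev = grspw_open(0);
 *      if (dev == NULL)
 *              ;  (no such device, or device already opened)
 *      ...
 *      if (grspw_close(dev) != 0)
 *              ;  (DMA channels still open: close them first, see below)
 */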

int grspw_close(void *d)
{
        struct grspw_priv *priv = d;
        int i;

        /* Take GRSPW lock - Wait until we get semaphore */
        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return -1;

        /* Check that user has stopped and closed all DMA channels
         * appropriately. At this point the Hardware shall not be doing DMA
         * or generating Interrupts. We want HW in a "startup-state".
         */
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                if (priv->dma[i].open) {
                        rtems_semaphore_release(grspw_sem);
                        return 1;
                }
        }
        grspw_hw_stop(priv);

        /* Mark not open */
        priv->open = 0;
        rtems_semaphore_release(grspw_sem);
        return 0;
}

void grspw_hw_support(void *d, struct grspw_hw_sup *hw)
{
        struct grspw_priv *priv = d;

        *hw = priv->hwsup;
}

void grspw_addr_ctrl(void *d, struct grspw_addr_config *cfg)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs;
        unsigned int ctrl, nodeaddr;
        IRQFLAGS_TYPE irqflags;
        int i;

        if (!priv || !cfg)
                return;

        regs = priv->regs; /* do not dereference priv before the NULL check */

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);

        if (cfg->promiscuous != -1) {
                /* Set Configuration */
                ctrl = REG_READ(&regs->ctrl);
                if (cfg->promiscuous)
                        ctrl |= GRSPW_CTRL_PM;
                else
                        ctrl &= ~GRSPW_CTRL_PM;
                REG_WRITE(&regs->ctrl, ctrl);
                REG_WRITE(&regs->nodeaddr, (cfg->def_mask<<8) | cfg->def_addr);

                for (i=0; i<priv->hwsup.ndma_chans; i++) {
                        ctrl = REG_READ(&regs->dma[i].ctrl);
                        ctrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
                        if (cfg->dma_nacfg[i].node_en) {
                                ctrl |= GRSPW_DMACTRL_EN;
                                REG_WRITE(&regs->dma[i].addr,
                                          (cfg->dma_nacfg[i].node_addr & 0xff) |
                                          ((cfg->dma_nacfg[i].node_mask & 0xff)<<8));
                        } else {
                                ctrl &= ~GRSPW_DMACTRL_EN;
                        }
                        REG_WRITE(&regs->dma[i].ctrl, ctrl);
                }
        }

        /* Read Current Configuration */
        cfg->promiscuous = REG_READ(&regs->ctrl) & GRSPW_CTRL_PM;
        nodeaddr = REG_READ(&regs->nodeaddr);
        cfg->def_addr = (nodeaddr & GRSPW_DEF_ADDR) >> GRSPW_DEF_ADDR_BIT;
        cfg->def_mask = (nodeaddr & GRSPW_DEF_MASK) >> GRSPW_DEF_MASK_BIT;
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                cfg->dma_nacfg[i].node_en = REG_READ(&regs->dma[i].ctrl) &
                                                GRSPW_DMACTRL_EN;
                ctrl = REG_READ(&regs->dma[i].addr);
                cfg->dma_nacfg[i].node_addr = (ctrl & GRSPW_DMAADR_ADDR) >>
                                                GRSPW_DMAADR_ADDR_BIT;
                cfg->dma_nacfg[i].node_mask = (ctrl & GRSPW_DMAADR_MASK) >>
                                                GRSPW_DMAADR_MASK_BIT;
        }
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        for (; i<4; i++) {
                cfg->dma_nacfg[i].node_en = 0;
                cfg->dma_nacfg[i].node_addr = 0;
                cfg->dma_nacfg[i].node_mask = 0;
        }
}

/* Return Current Status Register */
unsigned int grspw_link_status(void *d)
{
        struct grspw_priv *priv = d;

        return REG_READ(&priv->regs->status);
}

/* Clear Status Register bits */
void grspw_link_status_clr(void *d, unsigned int mask)
{
        struct grspw_priv *priv = d;

        REG_WRITE(&priv->regs->status, mask);
}

/* Return Current Link State */
spw_link_state_t grspw_link_state(void *d)
{
        struct grspw_priv *priv = d;
        unsigned int status = REG_READ(&priv->regs->status);

        return (status & GRSPW_STS_LS) >> GRSPW_STS_LS_BIT;
}

/* Enable Global IRQ only if some irq source is set */
static inline int grspw_is_irqsource_set(unsigned int ctrl, unsigned int icctrl)
{
        return (ctrl & GRSPW_CTRL_IRQSRC_MASK) ||
                (icctrl & GRSPW_ICCTRL_IRQSRC_MASK);
}


/* options and clkdiv [in/out]: set to -1 to only read current config */
void grspw_link_ctrl(void *d, int *options, int *stscfg, int *clkdiv)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        /* Write? */
        if (clkdiv) {
                if (*clkdiv != -1)
                        REG_WRITE(&regs->clkdiv, *clkdiv & GRSPW_CLKDIV_MASK);
                *clkdiv = REG_READ(&regs->clkdiv) & GRSPW_CLKDIV_MASK;
        }
        if (options) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                if (*options != -1) {
                        ctrl = (ctrl & ~GRSPW_LINK_CFG) |
                                (*options & GRSPW_LINK_CFG);

                        /* Enable Global IRQ only if some irq source is set */
                        if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
                                ctrl |= GRSPW_CTRL_IE;
                        else
                                ctrl &= ~GRSPW_CTRL_IE;

                        REG_WRITE(&regs->ctrl, ctrl);
                        /* Store the link disable events for use in the
                         * ISR. The LINKOPTS_DIS_ON_* options are actually
                         * the corresponding bits in the status register,
                         * shifted by 16.
                         */
                        priv->dis_link_on_err = *options &
                                (LINKOPTS_MASK_DIS_ON | LINKOPTS_DIS_ONERR);
                }
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
                *options = (ctrl & GRSPW_LINK_CFG) | priv->dis_link_on_err;
        }
        if (stscfg) {
                if (*stscfg != -1) {
                        priv->stscfg = *stscfg & LINKSTS_MASK;
                }
                *stscfg = priv->stscfg;
        }
}
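
/* Usage sketch (added, not upstream): passing -1 reads the current link
 * configuration without modifying it:
 *
 *      int options = -1, stscfg = -1, clkdiv = -1;
 *      grspw_link_ctrl(dev, &options, &stscfg, &clkdiv);
 *      run_div   = clkdiv & GRSPW_CLKDIV_RUN;
 *      start_div = (clkdiv & GRSPW_CLKDIV_START) >> GRSPW_CLKDIV_START_BIT;
 */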

/* Generate Tick-In (increment Time Counter, Send Time Code) */
void grspw_tc_tx(void *d)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        IRQFLAGS_TYPE irqflags;

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_TI);
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
}

void grspw_tc_ctrl(void *d, int *options)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        if (options == NULL)
                return;

        /* Write? */
        if (*options != -1) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~(GRSPW_CTRL_TR|GRSPW_CTRL_TT|GRSPW_CTRL_TQ);
                ctrl |= (*options & 0xd) << GRSPW_CTRL_TQ_BIT;

                /* Enable Global IRQ only if some irq source is set */
                if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
                        ctrl |= GRSPW_CTRL_IE;
                else
                        ctrl &= ~GRSPW_CTRL_IE;

                REG_WRITE(&regs->ctrl, ctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        } else
                ctrl = REG_READ(&regs->ctrl);
        *options = (ctrl >> GRSPW_CTRL_TQ_BIT) & 0xd;
}

/* Assign ISR Function to TimeCode RX IRQ */
void grspw_tc_isr(void *d, void (*tcisr)(void *data, int tc), void *data)
{
        struct grspw_priv *priv = d;

        priv->tcisr_arg = data;
        priv->tcisr = tcisr;
}

/* Read/Write TCTRL and TIMECNT. Write if not -1, always read current value
 * TCTRL   = bits 7 and 6
 * TIMECNT = bits 5 to 0
 */
void grspw_tc_time(void *d, int *time)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        if (time == NULL)
                return;
        if (*time != -1)
                REG_WRITE(&regs->time, *time & (GRSPW_TIME_TCNT | GRSPW_TIME_CTRL));
        *time = REG_READ(&regs->time) & (GRSPW_TIME_TCNT | GRSPW_TIME_CTRL);
}
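
/* Usage sketch (added, not upstream): read the time register without
 * modifying it:
 *
 *      int time = -1;
 *      grspw_tc_time(dev, &time);
 *      tctrl   = (time & GRSPW_TIME_CTRL) >> GRSPW_TIME_CTRL_BIT;
 *      timecnt = (time & GRSPW_TIME_TCNT) >> GRSPW_TIME_CNT_BIT;
 */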

/* Generate Tick-In for the given Interrupt-code and check for generation
 * error.
 *
 * Returns zero on success and non-zero on failure
 */
int grspw_ic_tickin(void *d, int ic)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        IRQFLAGS_TYPE irqflags;
        unsigned int icctrl, mask;

        /* Prepare before turning off IRQ */
        mask = 0x3f << GRSPW_ICCTRL_TXIRQ_BIT;
        ic = ((ic << GRSPW_ICCTRL_TXIRQ_BIT) & mask) |
             GRSPW_ICCTRL_II | GRSPW_ICCTRL_ID;

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        icctrl = REG_READ(&regs->icctrl);
        icctrl &= ~mask;
        icctrl |= ic;
        REG_WRITE(&regs->icctrl, icctrl); /* Generate SpW Interrupt Tick-In */
        /* the ID bit is valid after two clocks, so we do not need to wait here */
        icctrl = REG_READ(&regs->icctrl); /* Check SpW-Int generation error */
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);

        return icctrl & GRSPW_ICCTRL_ID;
}

#define ICOPTS_CTRL_MASK ICOPTS_EN_FLAGFILTER
#define ICOPTS_ICCTRL_MASK                                              \
        (ICOPTS_INTNUM | ICOPTS_EN_SPWIRQ_ON_EE  | ICOPTS_EN_SPWIRQ_ON_IA | \
         ICOPTS_EN_PRIO | ICOPTS_EN_TIMEOUTIRQ | ICOPTS_EN_ACKIRQ | \
         ICOPTS_EN_TICKOUTIRQ | ICOPTS_EN_RX | ICOPTS_EN_TX | \
         ICOPTS_BASEIRQ)

/* Control Interrupt-code settings of core
 * Write if not pointing to -1, always read current value
 *
 * TODO: A lot of code duplication with grspw_tc_ctrl
 */
void grspw_ic_ctrl(void *d, unsigned int *options)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        unsigned int icctrl;
        IRQFLAGS_TYPE irqflags;

        if (options == NULL)
                return;

        if (*options != -1) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);

                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~GRSPW_CTRL_TF; /* Depends on one to one relation between
                                         * irqopts bits and ctrl bits */
                ctrl |= (*options & ICOPTS_CTRL_MASK) <<
                        (GRSPW_CTRL_TF_BIT - 0);

                icctrl = REG_READ(&regs->icctrl);
                icctrl &= ~ICOPTS_ICCTRL_MASK; /* Depends on one to one relation between
                                                * irqopts bits and icctrl bits */
                icctrl |= *options & ICOPTS_ICCTRL_MASK;

                /* Enable Global IRQ only if some irq source is set */
                if (grspw_is_irqsource_set(ctrl, icctrl))
                        ctrl |= GRSPW_CTRL_IE;
                else
                        ctrl &= ~GRSPW_CTRL_IE;

                REG_WRITE(&regs->ctrl, ctrl);
                REG_WRITE(&regs->icctrl, icctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        }
        *options = ((REG_READ(&regs->ctrl) & ICOPTS_CTRL_MASK) |
                    (REG_READ(&regs->icctrl) & ICOPTS_ICCTRL_MASK));
}

void grspw_ic_config(void *d, int rw, struct spwpkt_ic_config *cfg)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        if (!cfg)
                return;

        if (rw & 1) {
                REG_WRITE(&regs->ictickomask, cfg->tomask);
                REG_WRITE(&regs->icaamask, cfg->aamask);
                REG_WRITE(&regs->icrlpresc, cfg->scaler);
                REG_WRITE(&regs->icrlisr, cfg->isr_reload);
                REG_WRITE(&regs->icrlintack, cfg->ack_reload);
        }
        if (rw & 2) {
                cfg->tomask = REG_READ(&regs->ictickomask);
                cfg->aamask = REG_READ(&regs->icaamask);
                cfg->scaler = REG_READ(&regs->icrlpresc);
                cfg->isr_reload = REG_READ(&regs->icrlisr);
                cfg->ack_reload = REG_READ(&regs->icrlintack);
        }
}

/* Read or Write Interrupt-code status registers */
void grspw_ic_sts(void *d, unsigned int *rxirq, unsigned int *rxack, unsigned int *intto)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        /* No locking needed since the status bits are clear-on-write */

        if (rxirq) {
                if (*rxirq != 0)
                        REG_WRITE(&regs->icrx, *rxirq);
                else
                        *rxirq = REG_READ(&regs->icrx);
        }

        if (rxack) {
                if (*rxack != 0)
                        REG_WRITE(&regs->icack, *rxack);
                else
                        *rxack = REG_READ(&regs->icack);
        }

        if (intto) {
                if (*intto != 0)
                        REG_WRITE(&regs->ictimeout, *intto);
                else
                        *intto = REG_READ(&regs->ictimeout);
        }
}

/* Assign handler function to Interrupt-code tick out IRQ */
void grspw_ic_isr(void *d, spwpkt_ic_isr_t handler, void *data)
{
        struct grspw_priv *priv = d;

        priv->icisr_arg = data;
        priv->icisr = handler;
}

/* Set (not -1) and/or read RMAP options. */
int grspw_rmap_ctrl(void *d, int *options, int *dstkey)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        if (dstkey) {
                if (*dstkey != -1)
                        REG_WRITE(&regs->destkey, *dstkey & GRSPW_DK_DESTKEY);
                *dstkey = REG_READ(&regs->destkey) & GRSPW_DK_DESTKEY;
        }
        if (options) {
                if (*options != -1) {
                        if ((*options & RMAPOPTS_EN_RMAP) && !priv->hwsup.rmap)
                                return -1;

                        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                        ctrl = REG_READ(&regs->ctrl);
                        ctrl &= ~(GRSPW_CTRL_RE|GRSPW_CTRL_RD);
                        ctrl |= (*options & 0x3) << GRSPW_CTRL_RE_BIT;
                        REG_WRITE(&regs->ctrl, ctrl);
                        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
                }
                *options = (REG_READ(&regs->ctrl) >> GRSPW_CTRL_RE_BIT) & 0x3;
        }

        return 0;
}

void grspw_rmap_support(void *d, char *rmap, char *rmap_crc)
{
        struct grspw_priv *priv = d;

        if (rmap)
                *rmap = priv->hwsup.rmap;
        if (rmap_crc)
                *rmap_crc = priv->hwsup.rmap_crc;
}

/* Select port:
 *  -1     = The currently selected port is returned
 *   0     = Port 0
 *   1     = Port 1
 *  Others = Both Port0 and Port1
 */
int grspw_port_ctrl(void *d, int *port)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        if (port == NULL)
                return -1;

        if ((*port == 1) || (*port == 0)) {
                /* Select port user selected */
                if ((*port == 1) && (priv->hwsup.nports < 2))
                        return -1; /* Changing to Port 1, but only one port available */
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~(GRSPW_CTRL_NP | GRSPW_CTRL_PS);
                ctrl |= (*port & 1) << GRSPW_CTRL_PS_BIT;
                REG_WRITE(&regs->ctrl, ctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        } else if (*port > 1) {
                /* Select both ports */
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_NP);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        }

        /* Get current settings */
        ctrl = REG_READ(&regs->ctrl);
        if (ctrl & GRSPW_CTRL_NP) {
                /* Any port, selected by hardware */
                if (priv->hwsup.nports > 1)
                        *port = 3;
                else
                        *port = 0; /* Port0 the only port available */
        } else {
                *port = (ctrl & GRSPW_CTRL_PS) >> GRSPW_CTRL_PS_BIT;
        }

        return 0;
}
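
/* Usage sketch (added, not upstream): query the port configuration without
 * changing it:
 *
 *      int port = -1;
 *      if (grspw_port_ctrl(dev, &port) == 0)
 *              ;  (port is now 0, 1 or 3 = hardware selects port)
 */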
1101
1102/* Returns Number ports available in hardware */
1103int grspw_port_count(void *d)
1104{
1105        struct grspw_priv *priv = d;
1106
1107        return priv->hwsup.nports;
1108}
1109
1110/* Current active port: 0 or 1 */
1111int grspw_port_active(void *d)
1112{
1113        struct grspw_priv *priv = d;
1114        unsigned int status;
1115
1116        status = REG_READ(&priv->regs->status);
1117
1118        return (status & GRSPW_STS_AP) >> GRSPW_STS_AP_BIT;
1119}
1120
1121void grspw_stats_read(void *d, struct grspw_core_stats *sts)
1122{
1123        struct grspw_priv *priv = d;
1124
1125        if (sts == NULL)
1126                return;
1127        memcpy(sts, &priv->stats, sizeof(priv->stats));
1128}
1129
1130void grspw_stats_clr(void *d)
1131{
1132        struct grspw_priv *priv = d;
1133
1134        /* Clear most of the statistics */     
1135        memset(&priv->stats, 0, sizeof(priv->stats));
1136}
1137
1138/*** DMA Interface ***/
1139
1140/* Initialize the RX and TX Descriptor Ring, empty of packets */
1141STATIC void grspw_bdrings_init(struct grspw_dma_priv *dma)
1142{
1143        struct grspw_ring *r;
1144        int i;
1145
1146        /* Empty BD rings */
1147        dma->rx_ring_head = dma->rx_ring_base;
1148        dma->rx_ring_tail = dma->rx_ring_base;
1149        dma->tx_ring_head = dma->tx_ring_base;
1150        dma->tx_ring_tail = dma->tx_ring_base;
1151
1152        /* Init RX Descriptors */
1153        r = (struct grspw_ring *)dma->rx_ring_base;
1154        for (i=0; i<GRSPW_RXBD_NR; i++) {
1155
1156                /* Init Ring Entry */
1157                r[i].next = &r[i+1];
1158                r[i].bd.rx = &dma->rx_bds[i];
1159                r[i].pkt = NULL;
1160
1161                /* Init HW Descriptor */
1162                BD_WRITE(&r[i].bd.rx->ctrl, 0);
1163                BD_WRITE(&r[i].bd.rx->addr, 0);
1164        }
1165        r[GRSPW_RXBD_NR-1].next = &r[0];
1166
1167        /* Init TX Descriptors */
1168        r = (struct grspw_ring *)dma->tx_ring_base;
1169        for (i=0; i<GRSPW_TXBD_NR; i++) {
1170
1171                /* Init Ring Entry */
1172                r[i].next = &r[i+1];
1173                r[i].bd.tx = &dma->tx_bds[i];
1174                r[i].pkt = NULL;
1175
1176                /* Init HW Descriptor */
1177                BD_WRITE(&r[i].bd.tx->ctrl, 0);
1178                BD_WRITE(&r[i].bd.tx->haddr, 0);
1179                BD_WRITE(&r[i].bd.tx->dlen, 0);
1180                BD_WRITE(&r[i].bd.tx->daddr, 0);
1181        }
1182        r[GRSPW_TXBD_NR-1].next = &r[0];
1183}
1184
1185/* Try to populate descriptor ring with as many as possible READY unused packet
1186 * buffers. The packets assigned with to a descriptor are put in the end of
1187 * the scheduled list.
1188 *
1189 * The number of Packets scheduled is returned.
1190 *
1191 *  - READY List -> RX-SCHED List
1192 *  - Descriptors are initialized and enabled for reception
1193 */
1194STATIC int grspw_rx_schedule_ready(struct grspw_dma_priv *dma)
1195{
1196        int cnt;
1197        unsigned int ctrl, dmactrl;
1198        void *hwaddr;
1199        struct grspw_rxring *curr_bd;
1200        struct grspw_pkt *curr_pkt, *last_pkt;
1201        struct grspw_list lst;
1202        IRQFLAGS_TYPE irqflags;
1203
1204        /* Is Ready Q empty? */
1205        if (grspw_list_is_empty(&dma->ready))
1206                return 0;
1207
1208        cnt = 0;
1209        lst.head = curr_pkt = dma->ready.head;
1210        curr_bd = dma->rx_ring_head;
1211        while (!curr_bd->pkt) {
1212
1213                /* Assign Packet to descriptor */
1214                curr_bd->pkt = curr_pkt;
1215
1216                /* Prepare descriptor address. */
1217                hwaddr = curr_pkt->data;
1218                if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
1219                        drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
1220                                         hwaddr, &hwaddr);
1221                        if (curr_pkt->data == hwaddr) /* translation needed? */
1222                                curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
1223                }
1224                BD_WRITE(&curr_bd->bd->addr, hwaddr);
1225
1226                ctrl = GRSPW_RXBD_EN;
1227                if (curr_bd->next == dma->rx_ring_base) {
1228                        /* Wrap around (only needed when smaller descriptor
1229                         * table)
1230                         */
1231                        ctrl |= GRSPW_RXBD_WR;
1232                }
1233
1234                /* Is this Packet going to be an interrupt Packet? */
1235                if ((--dma->rx_irq_en_cnt_curr) <= 0) {
1236                        if (dma->cfg.rx_irq_en_cnt == 0) {
1237                                /* IRQ is disabled. A big number to avoid
1238                                 * equal to zero too often
1239                                 */
1240                                dma->rx_irq_en_cnt_curr = 0x3fffffff;
1241                        } else {
1242                                dma->rx_irq_en_cnt_curr = dma->cfg.rx_irq_en_cnt;
1243                                ctrl |= GRSPW_RXBD_IE;
1244                        }
1245                }
1246
1247                if (curr_pkt->flags & RXPKT_FLAG_IE)
1248                        ctrl |= GRSPW_RXBD_IE;
1249
1250                /* Enable descriptor */
1251                BD_WRITE(&curr_bd->bd->ctrl, ctrl);
1252
1253                last_pkt = curr_pkt;
1254                curr_bd = curr_bd->next;
1255                cnt++;
1256
1257                /* Get Next Packet from Ready Queue */
1258                if (curr_pkt == dma->ready.tail) {
1259                        /* Handled all in ready queue. */
1260                        curr_pkt = NULL;
1261                        break;
1262                }
1263                curr_pkt = curr_pkt->next;
1264        }
1265
1266        /* Has Packets been scheduled? */
1267        if (cnt > 0) {
1268                /* Prepare list for insertion/deleation */
1269                lst.tail = last_pkt;
1270
1271                /* Remove scheduled packets from ready queue */
1272                grspw_list_remove_head_list(&dma->ready, &lst);
1273                dma->ready_cnt -= cnt;
1274                if (dma->stats.ready_cnt_min > dma->ready_cnt)
1275                        dma->stats.ready_cnt_min = dma->ready_cnt;
1276
1277                /* Insert scheduled packets into scheduled queue */
1278                grspw_list_append_list(&dma->rx_sched, &lst);
1279                dma->rx_sched_cnt += cnt;
1280                if (dma->stats.rx_sched_cnt_max < dma->rx_sched_cnt)
1281                        dma->stats.rx_sched_cnt_max = dma->rx_sched_cnt;
1282
1283                /* Update TX ring posistion */
1284                dma->rx_ring_head = curr_bd;
1285
1286                /* Make hardware aware of the newly enabled descriptors
1287                 * We must protect from ISR which writes RI|TI
1288                 */
1289                SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
1290                dmactrl = REG_READ(&dma->regs->ctrl);
1291                dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
1292                dmactrl |= GRSPW_DMACTRL_RE | GRSPW_DMACTRL_RD;
1293                REG_WRITE(&dma->regs->ctrl, dmactrl);
1294                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
1295        }
1296
1297        return cnt;
1298}
1299
1300/* Scans the RX desciptor table for scheduled Packet that has been received,
1301 * and moves these Packet from the head of the scheduled queue to the
1302 * tail of the recv queue.
1303 *
1304 * Also, for all packets the status is updated.
1305 *
1306 *  - SCHED List -> SENT List
1307 *
1308 * Return Value
1309 * Number of packets moved
1310 */
1311STATIC int grspw_rx_process_scheduled(struct grspw_dma_priv *dma)
1312{
1313        struct grspw_rxring *curr;
1314        struct grspw_pkt *last_pkt;
1315        int recv_pkt_cnt = 0;
1316        unsigned int ctrl;
1317        struct grspw_list lst;
1318
1319        curr = dma->rx_ring_tail;
1320
1321        /* Step into RX ring to find if packets have been scheduled for
1322         * reception.
1323         */
1324        if (!curr->pkt)
1325                return 0; /* No scheduled packets, thus no received, abort */
1326
1327        /* There has been Packets scheduled ==> scheduled Packets may have been
1328         * received and needs to be collected into RECV List.
1329         *
1330         * A temporary list "lst" with all received packets is created.
1331         */
1332        lst.head = curr->pkt;
1333
1334        /* Loop until first enabled "unrecveived" SpW Packet is found.
1335         * An unused descriptor is indicated by an unassigned pkt field.
1336         */
1337        while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_RXBD_EN)) {
1338                /* Handle one received Packet */
1339
1340                /* Remember last handled Packet so that insertion/removal from
1341                 * Packet lists go fast.
1342                 */
1343                last_pkt = curr->pkt;
1344
1345                /* Get Length of Packet in bytes, and reception options */
1346                last_pkt->dlen = (ctrl & GRSPW_RXBD_LEN) >> GRSPW_RXBD_LEN_BIT;
1347
1348                /* Set flags to indicate error(s) and CRC information,
1349                 * and Mark Received.
1350                 */
1351                last_pkt->flags = (last_pkt->flags & ~RXPKT_FLAG_OUTPUT_MASK) |
1352                                  ((ctrl >> 20) & RXPKT_FLAG_OUTPUT_MASK) |
1353                                  RXPKT_FLAG_RX;
1354
1355                /* Packet was Truncated? */
1356                if (ctrl & GRSPW_RXBD_TR)
1357                        dma->stats.rx_err_trunk++;
1358
1359                /* Error End-Of-Packet? */
1360                if (ctrl & GRSPW_RXBD_EP)
1361                        dma->stats.rx_err_endpkt++;
1362                curr->pkt = NULL; /* Mark descriptor unused */
1363
1364                /* Increment */
1365                curr = curr->next;
1366                recv_pkt_cnt++;
1367        }
1368
1369        /* 1. Remove all handled packets from scheduled queue
1370         * 2. Put all handled packets into recv queue
1371         */
1372        if (recv_pkt_cnt > 0) {
1373
1374                /* Update Stats, Number of Received Packets */
1375                dma->stats.rx_pkts += recv_pkt_cnt;
1376
1377                /* Save RX ring posistion */
1378                dma->rx_ring_tail = curr;
1379
1380                /* Prepare list for insertion/deleation */
1381                lst.tail = last_pkt;
1382
1383                /* Remove received Packets from RX-SCHED queue */
1384                grspw_list_remove_head_list(&dma->rx_sched, &lst);
1385                dma->rx_sched_cnt -= recv_pkt_cnt;
1386                if (dma->stats.rx_sched_cnt_min > dma->rx_sched_cnt)
1387                        dma->stats.rx_sched_cnt_min = dma->rx_sched_cnt;
1388
1389                /* Insert received Packets into RECV queue */
1390                grspw_list_append_list(&dma->recv, &lst);
1391                dma->recv_cnt += recv_pkt_cnt;
1392                if (dma->stats.recv_cnt_max < dma->recv_cnt)
1393                        dma->stats.recv_cnt_max = dma->recv_cnt;
1394        }
1395
1396        return recv_pkt_cnt;
1397}

/* Try to populate the descriptor ring with as many SEND packets as possible.
 * The packets assigned to a descriptor are put at the end of
 * the scheduled list.
 *
 * The number of Packets scheduled is returned.
 *
 *  - SEND List -> TX-SCHED List
 *  - Descriptors are initialized and enabled for transmission
 */
STATIC int grspw_tx_schedule_send(struct grspw_dma_priv *dma)
{
        int cnt;
        unsigned int ctrl, dmactrl;
        void *hwaddr;
        struct grspw_txring *curr_bd;
        struct grspw_pkt *curr_pkt, *last_pkt;
        struct grspw_list lst;
        IRQFLAGS_TYPE irqflags;

        /* Is SEND Q empty? */
        if (grspw_list_is_empty(&dma->send))
                return 0;

        cnt = 0;
        lst.head = curr_pkt = dma->send.head;
        curr_bd = dma->tx_ring_head;
        while (!curr_bd->pkt) {

                /* Assign Packet to descriptor */
                curr_bd->pkt = curr_pkt;

                /* Set up header transmission */
                if (curr_pkt->hdr && curr_pkt->hlen) {
                        hwaddr = curr_pkt->hdr;
                        if (curr_pkt->flags & PKT_FLAG_TR_HDR) {
                                drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
                                                 hwaddr, &hwaddr);
                                /* translation needed? */
                                if (curr_pkt->hdr == hwaddr)
                                        curr_pkt->flags &= ~PKT_FLAG_TR_HDR;
                        }
                        BD_WRITE(&curr_bd->bd->haddr, hwaddr);
                        ctrl = GRSPW_TXBD_EN | curr_pkt->hlen;
                } else {
                        ctrl = GRSPW_TXBD_EN;
                }
                /* Enable IRQ generation and CRC options as specified
                 * by user.
                 */
                ctrl |= (curr_pkt->flags & TXPKT_FLAG_INPUT_MASK) << 8;

                if (curr_bd->next == dma->tx_ring_base) {
                        /* Wrap around (only needed when the descriptor table
                         * is smaller than the maximum)
                         */
                        ctrl |= GRSPW_TXBD_WR;
                }

                /* Is this Packet going to be an interrupt Packet? */
                if ((--dma->tx_irq_en_cnt_curr) <= 0) {
                        if (dma->cfg.tx_irq_en_cnt == 0) {
                                /* IRQ is disabled. Use a large number so that
                                 * the counter rarely reaches zero.
                                 */
                                dma->tx_irq_en_cnt_curr = 0x3fffffff;
                        } else {
                                dma->tx_irq_en_cnt_curr = dma->cfg.tx_irq_en_cnt;
                                ctrl |= GRSPW_TXBD_IE;
                        }
                }

                /* Prepare descriptor address. Parts of CTRL are written to
                 * DLEN for debugging only (CTRL is cleared by HW).
                 */
                if (curr_pkt->data && curr_pkt->dlen) {
                        hwaddr = curr_pkt->data;
                        if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
                                drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
                                                 hwaddr, &hwaddr);
                                /* translation needed? */
                                if (curr_pkt->data == hwaddr)
                                        curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
                        }
                        BD_WRITE(&curr_bd->bd->daddr, hwaddr);
                        BD_WRITE(&curr_bd->bd->dlen, curr_pkt->dlen |
                                                     ((ctrl & 0x3f000) << 12));
                } else {
                        BD_WRITE(&curr_bd->bd->daddr, 0);
                        BD_WRITE(&curr_bd->bd->dlen, ((ctrl & 0x3f000) << 12));
                }

                /* Enable descriptor */
                BD_WRITE(&curr_bd->bd->ctrl, ctrl);

                last_pkt = curr_pkt;
                curr_bd = curr_bd->next;
                cnt++;

                /* Get Next Packet from SEND Queue */
                if (curr_pkt == dma->send.tail) {
                        /* Handled all in SEND queue. */
                        curr_pkt = NULL;
                        break;
                }
                curr_pkt = curr_pkt->next;
        }

        /* Have Packets been scheduled? */
        if (cnt > 0) {
                /* Prepare list for insertion/deletion */
                lst.tail = last_pkt;

                /* Remove scheduled packets from SEND queue */
                grspw_list_remove_head_list(&dma->send, &lst);
                dma->send_cnt -= cnt;
                if (dma->stats.send_cnt_min > dma->send_cnt)
                        dma->stats.send_cnt_min = dma->send_cnt;

                /* Insert scheduled packets into scheduled queue */
                grspw_list_append_list(&dma->tx_sched, &lst);
                dma->tx_sched_cnt += cnt;
                if (dma->stats.tx_sched_cnt_max < dma->tx_sched_cnt)
                        dma->stats.tx_sched_cnt_max = dma->tx_sched_cnt;

                /* Update TX ring position */
                dma->tx_ring_head = curr_bd;

                /* Make hardware aware of the newly enabled descriptors */
                SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
                dmactrl = REG_READ(&dma->regs->ctrl);
                dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
                dmactrl |= GRSPW_DMACTRL_TE;
                REG_WRITE(&dma->regs->ctrl, dmactrl);
                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
        }
        return cnt;
}

/* Scans the TX descriptor table for transmitted packets, and moves these
 * packets from the head of the scheduled queue to the tail of the sent queue.
 *
 * Also, for all packets the status is updated.
 *
 *  - SCHED List -> SENT List
 *
 * Return Value
 * Number of packets moved
 */
STATIC int grspw_tx_process_scheduled(struct grspw_dma_priv *dma)
{
        struct grspw_txring *curr;
        struct grspw_pkt *last_pkt;
        int sent_pkt_cnt = 0;
        unsigned int ctrl;
        struct grspw_list lst;

        curr = dma->tx_ring_tail;

        /* Step into TX ring to find out if packets have been scheduled for
         * transmission.
         */
        if (!curr->pkt)
                return 0; /* No scheduled packets, thus none sent, abort */

        /* Packets have been scheduled ==> scheduled Packets may have been
         * transmitted and need to be collected into the SENT List.
         *
         * A temporary list "lst" with all sent packets is created.
         */
        lst.head = curr->pkt;

        /* Loop until the first enabled "un-transmitted" SpW Packet is found.
         * An unused descriptor is indicated by an unassigned pkt field.
         */
        while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_TXBD_EN)) {
                /* Handle one sent Packet */

                /* Remember last handled Packet so that insertion/removal from
                 * packet lists go fast.
                 */
                last_pkt = curr->pkt;

                /* Set flags to indicate error(s) and Mark Sent. */
                last_pkt->flags = (last_pkt->flags & ~TXPKT_FLAG_OUTPUT_MASK) |
                                        (ctrl & TXPKT_FLAG_LINKERR) |
                                        TXPKT_FLAG_TX;

                /* Sent packet experienced link error? */
                if (ctrl & GRSPW_TXBD_LE)
                        dma->stats.tx_err_link++;

                curr->pkt = NULL; /* Mark descriptor unused */

                /* Increment */
                curr = curr->next;
                sent_pkt_cnt++;
        }

        /* 1. Remove all handled packets from TX-SCHED queue
         * 2. Put all handled packets into SENT queue
         */
        if (sent_pkt_cnt > 0) {
                /* Update Stats, Number of Transmitted Packets */
                dma->stats.tx_pkts += sent_pkt_cnt;

                /* Save TX ring position */
                dma->tx_ring_tail = curr;

                /* Prepare list for insertion/deletion */
                lst.tail = last_pkt;

                /* Remove sent packets from TX-SCHED queue */
                grspw_list_remove_head_list(&dma->tx_sched, &lst);
                dma->tx_sched_cnt -= sent_pkt_cnt;
                if (dma->stats.tx_sched_cnt_min > dma->tx_sched_cnt)
                        dma->stats.tx_sched_cnt_min = dma->tx_sched_cnt;

                /* Insert sent packets into SENT queue */
                grspw_list_append_list(&dma->sent, &lst);
                dma->sent_cnt += sent_pkt_cnt;
                if (dma->stats.sent_cnt_max < dma->sent_cnt)
                        dma->stats.sent_cnt_max = dma->sent_cnt;
        }

        return sent_pkt_cnt;
}

void *grspw_dma_open(void *d, int chan_no)
{
        struct grspw_priv *priv = d;
        struct grspw_dma_priv *dma;
        int size;

        if ((chan_no < 0) || (priv->hwsup.ndma_chans <= chan_no))
                return NULL;

        dma = &priv->dma[chan_no];

        /* Take GRSPW lock */
        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return NULL;

        if (dma->open) {
                dma = NULL;
                goto out;
        }

        dma->started = 0;

        /* Set Default Configuration:
         *
         *  - MAX RX Packet Length = DEFAULT_RXMAX
         *  - Disable IRQ generation
         *  - Disable packet spilling (NO-SPILL)
         */
        dma->cfg.rxmaxlen = DEFAULT_RXMAX;
        dma->cfg.rx_irq_en_cnt = 0;
        dma->cfg.tx_irq_en_cnt = 0;
        dma->cfg.flags = DMAFLAG_NO_SPILL;

        /* set to NULL so that error exit works correctly */
        dma->sem_dma = RTEMS_ID_NONE;
        dma->rx_wait.sem_wait = RTEMS_ID_NONE;
        dma->tx_wait.sem_wait = RTEMS_ID_NONE;
        dma->rx_ring_base = NULL;

        /* DMA Channel Semaphore created with count = 1 */
        if (rtems_semaphore_create(
            rtems_build_name('S', 'D', '0' + priv->index, '0' + chan_no), 1,
            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
            RTEMS_NO_PRIORITY_CEILING, 0, &dma->sem_dma) != RTEMS_SUCCESSFUL) {
                dma->sem_dma = RTEMS_ID_NONE;
                goto err;
        }

        /* Allocate memory for the two descriptor rings */
        size = sizeof(struct grspw_ring) * (GRSPW_RXBD_NR + GRSPW_TXBD_NR);
        dma->rx_ring_base = (struct grspw_rxring *)malloc(size);
        dma->tx_ring_base = (struct grspw_txring *)&dma->rx_ring_base[GRSPW_RXBD_NR];
        if (dma->rx_ring_base == NULL)
                goto err;

        /* Create DMA RX and TX Channel semaphores with count = 0 */
        if (rtems_semaphore_create(
            rtems_build_name('S', 'R', '0' + priv->index, '0' + chan_no), 0,
            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
            RTEMS_NO_PRIORITY_CEILING, 0, &dma->rx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
                dma->rx_wait.sem_wait = RTEMS_ID_NONE;
                goto err;
        }
        if (rtems_semaphore_create(
            rtems_build_name('S', 'T', '0' + priv->index, '0' + chan_no), 0,
            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
            RTEMS_NO_PRIORITY_CEILING, 0, &dma->tx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
                dma->tx_wait.sem_wait = RTEMS_ID_NONE;
                goto err;
        }

        /* Reset software structures */
        grspw_dma_reset(dma);

        /* Take the device */
        dma->open = 1;
out:
        /* Return GRSPW Lock */
        rtems_semaphore_release(grspw_sem);

        return dma;

        /* initialization error happened */
err:
        if (dma->sem_dma != RTEMS_ID_NONE)
                rtems_semaphore_delete(dma->sem_dma);
        if (dma->rx_wait.sem_wait != RTEMS_ID_NONE)
                rtems_semaphore_delete(dma->rx_wait.sem_wait);
        if (dma->tx_wait.sem_wait != RTEMS_ID_NONE)
                rtems_semaphore_delete(dma->tx_wait.sem_wait);
        if (dma->rx_ring_base)
                free(dma->rx_ring_base);
        dma = NULL;
        goto out;
}

/* Initialize Software Structures:
 *  - Clear all Queues
 *  - init BD ring
 *  - init IRQ counter
 *  - clear statistics counters
 *  - init wait structures and semaphores
 */
STATIC void grspw_dma_reset(struct grspw_dma_priv *dma)
{
        /* Empty RX and TX queues */
        grspw_list_clr(&dma->ready);
        grspw_list_clr(&dma->rx_sched);
        grspw_list_clr(&dma->recv);
        grspw_list_clr(&dma->send);
        grspw_list_clr(&dma->tx_sched);
        grspw_list_clr(&dma->sent);
        dma->ready_cnt = 0;
        dma->rx_sched_cnt = 0;
        dma->recv_cnt = 0;
        dma->send_cnt = 0;
        dma->tx_sched_cnt = 0;
        dma->sent_cnt = 0;

        dma->rx_irq_en_cnt_curr = 0;
        dma->tx_irq_en_cnt_curr = 0;

        grspw_bdrings_init(dma);

        dma->rx_wait.waiting = 0;
        dma->tx_wait.waiting = 0;

        grspw_dma_stats_clr(dma);
}

int grspw_dma_close(void *c)
{
        struct grspw_dma_priv *dma = c;

        if (!dma->open)
                return 0;

        /* Take device lock - Wait until we get semaphore */
        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return -1;

        /* Cannot close an active DMA channel. User must stop DMA and make
         * sure no threads are active/blocked within driver.
         */
        if (dma->started || dma->rx_wait.waiting || dma->tx_wait.waiting) {
                rtems_semaphore_release(dma->sem_dma);
                return 1;
        }

        /* Free resources */
        rtems_semaphore_delete(dma->rx_wait.sem_wait);
        rtems_semaphore_delete(dma->tx_wait.sem_wait);
        /* Release and delete lock. Operations requiring lock will fail */
        rtems_semaphore_delete(dma->sem_dma);
        dma->sem_dma = RTEMS_ID_NONE;

        /* Free memory */
        if (dma->rx_ring_base)
                free(dma->rx_ring_base);
        dma->rx_ring_base = NULL;
        dma->tx_ring_base = NULL;

        dma->open = 0;
        return 0;
}
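
/* Illustrative sketch (not part of the driver): the typical open/configure/
 * start/stop/close lifecycle of a DMA channel as seen from a user task. The
 * device handle "dev" is assumed to come from the driver's open function
 * (grspw_open()), and the rxmaxlen/rx_irq_en_cnt values are example choices.
 * Compiled out.
 */
#if 0
static void example_dma_lifecycle(void *dev, int chan_no)
{
        struct grspw_dma_config cfg;
        void *dma;

        dma = grspw_dma_open(dev, chan_no);
        if (dma == NULL)
                return;

        /* Configuration may only be changed while the channel is stopped */
        grspw_dma_config_read(dma, &cfg);
        cfg.rxmaxlen = 4096;            /* hypothetical max RX packet size */
        cfg.rx_irq_en_cnt = 8;          /* IRQ on every 8th RX descriptor */
        if (grspw_dma_config(dma, &cfg) != 0)
                goto close;

        if (grspw_dma_start(dma) == 0) {
                /* ... grspw_dma_rx_prepare()/rx_recv()/tx_send() here ... */
                grspw_dma_stop(dma);
        }
close:
        grspw_dma_close(dma);
}
#endif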

/* Schedule a list of packets for transmission at some point in the
 * future.
 *
 * 1. Move transmitted packets to SENT List (SCHED->SENT)
 * 2. Add the requested packets to the SEND List (USER->SEND)
 * 3. Schedule as many packets as possible (SEND->SCHED)
 */
int grspw_dma_tx_send(void *c, int opts, struct grspw_list *pkts, int count)
{
        struct grspw_dma_priv *dma = c;
        int ret;

        /* Take DMA channel lock */
        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return -1;

        if (dma->started == 0) {
                ret = 1; /* signal DMA has been stopped */
                goto out;
        }
        ret = 0;

        /* 1. Move transmitted packets to SENT List (SCHED->SENT) */
        if ((opts & 1) == 0)
                grspw_tx_process_scheduled(dma);

        /* 2. Add the requested packets to the SEND List (USER->SEND) */
        if (pkts) {
                grspw_list_append_list(&dma->send, pkts);
                dma->send_cnt += count;
                if (dma->stats.send_cnt_max < dma->send_cnt)
                        dma->stats.send_cnt_max = dma->send_cnt;
        }

        /* 3. Schedule as many packets as possible (SEND->SCHED) */
        if ((opts & 2) == 0)
                grspw_tx_schedule_send(dma);

out:
        /* Unlock DMA channel */
        rtems_semaphore_release(dma->sem_dma);

        return ret;
}
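
/* Illustrative sketch (not part of the driver): queueing a single packet for
 * transmission with grspw_dma_tx_send(). The buffer "buf", its length and
 * the caller-owned grspw_pkt structure are example assumptions; a real
 * application typically keeps a pool of packet structures. Compiled out.
 */
#if 0
static int example_tx_send_one(void *dma, struct grspw_pkt *pkt,
                               void *buf, int len)
{
        struct grspw_list lst;

        pkt->next = NULL;
        pkt->flags = PKT_FLAG_TR_DATA;  /* translate CPU address for DMA */
        pkt->hdr = NULL;                /* no separate header part */
        pkt->hlen = 0;
        pkt->data = buf;
        pkt->dlen = len;

        /* Single-entry list: head == tail */
        lst.head = lst.tail = pkt;

        /* opts=0: also process sent packets and schedule descriptors now */
        return grspw_dma_tx_send(dma, 0, &lst, 1);
}
#endif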

int grspw_dma_tx_reclaim(void *c, int opts, struct grspw_list *pkts, int *count)
{
        struct grspw_dma_priv *dma = c;
        struct grspw_pkt *pkt, *lastpkt;
        int cnt, started;

        /* Take DMA channel lock */
        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return -1;

        /* 1. Move transmitted packets to SENT List (SCHED->SENT) */
        started = dma->started;
        if ((started > 0) && ((opts & 1) == 0))
                grspw_tx_process_scheduled(dma);

        /* 2. Move all or *count SENT packets to the caller's list (SENT->USER) */
        if (pkts) {
                if ((count == NULL) || (*count == -1) ||
                    (*count >= dma->sent_cnt)) {
                        /* Move all SENT Packets */
                        *pkts = dma->sent;
                        grspw_list_clr(&dma->sent);
                        if (count)
                                *count = dma->sent_cnt;
                        dma->sent_cnt = 0;
                } else {
                        /* Move a number of SENT Packets */
                        pkts->head = pkt = lastpkt = dma->sent.head;
                        cnt = 0;
                        while (cnt < *count) {
                                lastpkt = pkt;
                                pkt = pkt->next;
                                cnt++;
                        }
                        if (cnt > 0) {
                                pkts->tail = lastpkt;
                                grspw_list_remove_head_list(&dma->sent, pkts);
                                dma->sent_cnt -= cnt;
                        } else {
                                grspw_list_clr(pkts);
                        }
                }
        } else if (count) {
                *count = 0;
        }

        /* 3. Schedule as many packets as possible (SEND->SCHED) */
        if ((started > 0) && ((opts & 2) == 0))
                grspw_tx_schedule_send(dma);

        /* Unlock DMA channel */
        rtems_semaphore_release(dma->sem_dma);

        return (~started) & 1; /* signal DMA has been stopped */
}
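
/* Illustrative sketch (not part of the driver): reclaiming sent packet
 * buffers with grspw_dma_tx_reclaim(). Checking TXPKT_FLAG_TX tells whether
 * a packet was actually transmitted, as described in grspw_dma_stop_locked()
 * below. Traversal is bounded by the returned count. Compiled out.
 */
#if 0
static void example_tx_reclaim(void *dma)
{
        struct grspw_list lst;
        struct grspw_pkt *pkt;
        int count = -1; /* -1: take all packets in the SENT queue */

        if (grspw_dma_tx_reclaim(dma, 0, &lst, &count) < 0)
                return;

        for (pkt = lst.head; count > 0; pkt = pkt->next, count--) {
                if (pkt->flags & TXPKT_FLAG_TX) {
                        /* transmitted - buffer may be reused */
                }
        }
}
#endif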

void grspw_dma_tx_count(void *c, int *send, int *sched, int *sent, int *hw)
{
        struct grspw_dma_priv *dma = c;
        int sched_cnt, diff;
        unsigned int hwbd;
        struct grspw_txbd *tailbd;

        /* Take device lock - Wait until we get semaphore.
         * The lock is taken so that the counters are in sync with each other
         * and that DMA descriptor table and tx_ring_tail is not being updated
         * during HW counter processing in this function.
         */
        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return;

        if (send)
                *send = dma->send_cnt;
        sched_cnt = dma->tx_sched_cnt;
        if (sched)
                *sched = sched_cnt;
        if (sent)
                *sent = dma->sent_cnt;
        if (hw) {
                /* Calculate number of descriptors (processed by HW) between
                 * HW pointer and oldest SW pointer.
                 */
                hwbd = REG_READ(&dma->regs->txdesc);
                tailbd = dma->tx_ring_tail->bd;
                diff = ((hwbd - (unsigned int)tailbd) / GRSPW_TXBD_SIZE) &
                        (GRSPW_TXBD_NR - 1);
                /* Handle special case when HW and SW pointers are equal
                 * because all TX descriptors have been processed by HW.
                 */
                if ((diff == 0) && (sched_cnt == GRSPW_TXBD_NR) &&
                    ((BD_READ(&tailbd->ctrl) & GRSPW_TXBD_EN) == 0)) {
                        diff = GRSPW_TXBD_NR;
                }
                *hw = diff;
        }

        /* Unlock DMA channel */
        rtems_semaphore_release(dma->sem_dma);
}
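
/* Illustrative sketch (not part of the driver): polling queue levels with
 * grspw_dma_tx_count() (grspw_dma_rx_count() is used the same way). Any of
 * the output pointers may be NULL when that counter is not of interest.
 * Compiled out.
 */
#if 0
static int example_tx_backlog(void *dma)
{
        int send, sched;

        /* Only the SEND and SCHED levels are requested here */
        grspw_dma_tx_count(dma, &send, &sched, NULL, NULL);
        return send + sched; /* packets not yet transmitted */
}
#endif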

static inline int grspw_tx_wait_eval(struct grspw_dma_priv *dma)
{
        int send_val, sent_val;

        if (dma->tx_wait.send_cnt >= (dma->send_cnt + dma->tx_sched_cnt))
                send_val = 1;
        else
                send_val = 0;

        if (dma->tx_wait.sent_cnt <= dma->sent_cnt)
                sent_val = 1;
        else
                sent_val = 0;

        /* AND or OR ? */
        if (dma->tx_wait.op == 0)
                return send_val & sent_val; /* AND */
        else
                return send_val | sent_val; /* OR */
}

/* Block until the wait condition is met: send_cnt or fewer packets are
 * queued in the "Send and Scheduled" queues, op (AND or OR), sent_cnt or
 * more packets have been sent (SENT queue).
 * If a link error occurs and Stop on Link error is defined, this function
 * will also return to the caller.
 */
int grspw_dma_tx_wait(void *c, int send_cnt, int op, int sent_cnt, int timeout)
{
        struct grspw_dma_priv *dma = c;
        int ret, rc, initialized = 0;

        if (timeout == 0)
                timeout = RTEMS_NO_TIMEOUT;

check_condition:

        /* Take DMA channel lock */
        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return -1;

        /* Check that no other thread is waiting, this driver only supports
         * one waiter at a time.
         */
        if (initialized == 0 && dma->tx_wait.waiting) {
                ret = 3;
                goto out_release;
        }

        /* Stop if link error or similar (DMA stopped), abort */
        if (dma->started == 0) {
                ret = 1;
                goto out_release;
        }

        /* Set up Condition */
        dma->tx_wait.send_cnt = send_cnt;
        dma->tx_wait.op = op;
        dma->tx_wait.sent_cnt = sent_cnt;

        if (grspw_tx_wait_eval(dma) == 0) {
                /* Prepare Wait */
                initialized = 1;
                dma->tx_wait.waiting = 1;

                /* Release DMA channel lock */
                rtems_semaphore_release(dma->sem_dma);

                /* Try to take Wait lock, if this fails the link may have
                 * gone down or the user stopped this DMA channel
                 */
                rc = rtems_semaphore_obtain(dma->tx_wait.sem_wait, RTEMS_WAIT,
                                                timeout);
                if (rc == RTEMS_TIMEOUT) {
                        ret = 2;
                        goto out;
                } else if (rc == RTEMS_UNSATISFIED ||
                           rc == RTEMS_OBJECT_WAS_DELETED) {
                        ret = 1; /* sem was flushed/deleted, means DMA stop */
                        goto out;
                } else if (rc != RTEMS_SUCCESSFUL) {
                        /* Unknown Error */
                        ret = -1;
                        goto out;
                } else if (dma->started == 0) {
                        ret = 1;
                        goto out;
                }

                /* Check condition once more */
                goto check_condition;
        }

        ret = 0;

out_release:
        /* Unlock DMA channel */
        rtems_semaphore_release(dma->sem_dma);

out:
        if (initialized)
                dma->tx_wait.waiting = 0;
        return ret;
}
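
/* Illustrative sketch (not part of the driver): blocking until the TX
 * backlog has drained using grspw_dma_tx_wait(). The condition below is met
 * when zero packets remain in the SEND+SCHED queues, OR at least one packet
 * has reached the SENT queue; the timeout value is an example. Return value
 * 0 means the condition was met; 1 DMA stopped; 2 timeout. Compiled out.
 */
#if 0
static int example_tx_drain(void *dma)
{
        /* send_cnt=0, op=1 (OR), sent_cnt=1, timeout=100 ticks */
        return grspw_dma_tx_wait(dma, 0, 1, 1, 100);
}
#endif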

int grspw_dma_rx_recv(void *c, int opts, struct grspw_list *pkts, int *count)
{
        struct grspw_dma_priv *dma = c;
        struct grspw_pkt *pkt, *lastpkt;
        int cnt, started;

        /* Take DMA channel lock */
        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return -1;

        /* 1. Move Scheduled packets to RECV List (SCHED->RECV) */
        started = dma->started;
        if (((opts & 1) == 0) && (started > 0))
                grspw_rx_process_scheduled(dma);

        /* 2. Move all or *count RECV packets to the caller's list (RECV->USER) */
        if (pkts) {
                if ((count == NULL) || (*count == -1) ||
                    (*count >= dma->recv_cnt)) {
                        /* Move all Received packets */
                        *pkts = dma->recv;
                        grspw_list_clr(&dma->recv);
                        if (count)
                                *count = dma->recv_cnt;
                        dma->recv_cnt = 0;
                } else {
                        /* Move a number of RECV Packets */
                        pkts->head = pkt = lastpkt = dma->recv.head;
                        cnt = 0;
                        while (cnt < *count) {
                                lastpkt = pkt;
                                pkt = pkt->next;
                                cnt++;
                        }
                        if (cnt > 0) {
                                pkts->tail = lastpkt;
                                grspw_list_remove_head_list(&dma->recv, pkts);
                                dma->recv_cnt -= cnt;
                        } else {
                                grspw_list_clr(pkts);
                        }
                }
        } else if (count) {
                *count = 0;
        }

        /* 3. Schedule as many free packet buffers as possible (READY->SCHED) */
        if (((opts & 2) == 0) && (started > 0))
                grspw_rx_schedule_ready(dma);

        /* Unlock DMA channel */
        rtems_semaphore_release(dma->sem_dma);

        return (~started) & 1;
}
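
/* Illustrative sketch (not part of the driver): a receive poll using
 * grspw_dma_rx_recv(). RXPKT_FLAG_RX marks buffers that actually hold a
 * received packet; what "process" means is application-specific and left
 * out. Compiled out.
 */
#if 0
static void example_rx_poll(void *dma)
{
        struct grspw_list lst;
        struct grspw_pkt *pkt;
        int count = -1; /* take all packets in the RECV queue */

        if (grspw_dma_rx_recv(dma, 0, &lst, &count) < 0)
                return;

        for (pkt = lst.head; count > 0; pkt = pkt->next, count--) {
                if (pkt->flags & RXPKT_FLAG_RX) {
                        /* process received data at pkt->data */
                }
        }
}
#endif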

int grspw_dma_rx_prepare(void *c, int opts, struct grspw_list *pkts, int count)
{
        struct grspw_dma_priv *dma = c;
        int ret;

        /* Take DMA channel lock */
        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return -1;

        if (dma->started == 0) {
                ret = 1;
                goto out;
        }

        /* 1. Move Received packets to RECV List (SCHED->RECV) */
        if ((opts & 1) == 0)
                grspw_rx_process_scheduled(dma);

        /* 2. Add the "free/ready" packet buffers to the READY List (USER->READY) */
        if (pkts && (count > 0)) {
                grspw_list_append_list(&dma->ready, pkts);
                dma->ready_cnt += count;
                if (dma->stats.ready_cnt_max < dma->ready_cnt)
                        dma->stats.ready_cnt_max = dma->ready_cnt;
        }

        /* 3. Schedule as many packets as possible (READY->SCHED) */
        if ((opts & 2) == 0)
                grspw_rx_schedule_ready(dma);

        ret = 0;
out:
        /* Unlock DMA channel */
        rtems_semaphore_release(dma->sem_dma);

        return ret;
}
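
/* Illustrative sketch (not part of the driver): handing a chain of empty
 * buffers to the driver with grspw_dma_rx_prepare(). The caller-owned packet
 * array and buffer pool are example assumptions; the maximum RX length is
 * set globally by the rxmaxlen configuration, not per buffer. Compiled out.
 */
#if 0
static int example_rx_prepare(void *dma, struct grspw_pkt pkts[],
                              void *bufs[], int n)
{
        struct grspw_list lst;
        int i;

        if (n < 1)
                return -1;

        /* Build a linked list of empty receive buffers */
        for (i = 0; i < n; i++) {
                pkts[i].flags = PKT_FLAG_TR_DATA; /* translate CPU address */
                pkts[i].data = bufs[i];
                pkts[i].next = &pkts[i + 1];
        }
        pkts[n - 1].next = NULL;
        lst.head = &pkts[0];
        lst.tail = &pkts[n - 1];

        return grspw_dma_rx_prepare(dma, 0, &lst, n);
}
#endif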

void grspw_dma_rx_count(void *c, int *ready, int *sched, int *recv, int *hw)
{
        struct grspw_dma_priv *dma = c;
        int sched_cnt, diff;
        unsigned int hwbd;
        struct grspw_rxbd *tailbd;

        /* Take device lock - Wait until we get semaphore.
         * The lock is taken so that the counters are in sync with each other
         * and that DMA descriptor table and rx_ring_tail is not being updated
         * during HW counter processing in this function.
         */
        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return;

        if (ready)
                *ready = dma->ready_cnt;
        sched_cnt = dma->rx_sched_cnt;
        if (sched)
                *sched = sched_cnt;
        if (recv)
                *recv = dma->recv_cnt;
        if (hw) {
                /* Calculate number of descriptors (processed by HW) between
                 * HW pointer and oldest SW pointer.
                 */
                hwbd = REG_READ(&dma->regs->rxdesc);
                tailbd = dma->rx_ring_tail->bd;
                diff = ((hwbd - (unsigned int)tailbd) / GRSPW_RXBD_SIZE) &
                        (GRSPW_RXBD_NR - 1);
                /* Handle special case when HW and SW pointers are equal
                 * because all RX descriptors have been processed by HW.
                 */
                if ((diff == 0) && (sched_cnt == GRSPW_RXBD_NR) &&
                    ((BD_READ(&tailbd->ctrl) & GRSPW_RXBD_EN) == 0)) {
                        diff = GRSPW_RXBD_NR;
                }
                *hw = diff;
        }

        /* Unlock DMA channel */
        rtems_semaphore_release(dma->sem_dma);
}

static inline int grspw_rx_wait_eval(struct grspw_dma_priv *dma)
{
        int ready_val, recv_val;

        if (dma->rx_wait.ready_cnt >= (dma->ready_cnt + dma->rx_sched_cnt))
                ready_val = 1;
        else
                ready_val = 0;

        if (dma->rx_wait.recv_cnt <= dma->recv_cnt)
                recv_val = 1;
        else
                recv_val = 0;

        /* AND or OR ? */
        if (dma->rx_wait.op == 0)
                return ready_val & recv_val; /* AND */
        else
                return ready_val | recv_val; /* OR */
}

/* Block until the wait condition is met: recv_cnt or more packets are queued
 * in the RECV queue, op (AND or OR), ready_cnt or fewer packet buffers are
 * available in the "READY and Scheduled" queues.
 * If a link error occurs and Stop on Link error is defined, this function
 * will also return to the caller, however with an error.
 */
int grspw_dma_rx_wait(void *c, int recv_cnt, int op, int ready_cnt, int timeout)
{
        struct grspw_dma_priv *dma = c;
        int ret, rc, initialized = 0;

        if (timeout == 0)
                timeout = RTEMS_NO_TIMEOUT;

check_condition:

        /* Take DMA channel lock */
        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return -1;

        /* Check that no other thread is waiting, this driver only supports
         * one waiter at a time.
         */
        if (initialized == 0 && dma->rx_wait.waiting) {
                ret = 3;
                goto out_release;
        }

        /* Stop if link error or similar (DMA stopped), abort */
        if (dma->started == 0) {
                ret = 1;
                goto out_release;
        }

        /* Set up Condition */
        dma->rx_wait.recv_cnt = recv_cnt;
        dma->rx_wait.op = op;
        dma->rx_wait.ready_cnt = ready_cnt;

        if (grspw_rx_wait_eval(dma) == 0) {
                /* Prepare Wait */
                initialized = 1;
                dma->rx_wait.waiting = 1;

                /* Release channel lock */
                rtems_semaphore_release(dma->sem_dma);

                /* Try to take Wait lock, if this fails the link may have
                 * gone down or the user stopped this DMA channel
                 */
                rc = rtems_semaphore_obtain(dma->rx_wait.sem_wait, RTEMS_WAIT,
                                           timeout);
                if (rc == RTEMS_TIMEOUT) {
                        ret = 2;
                        goto out;
                } else if (rc == RTEMS_UNSATISFIED ||
                           rc == RTEMS_OBJECT_WAS_DELETED) {
                        ret = 1; /* sem was flushed/deleted, means DMA stop */
                        goto out;
                } else if (rc != RTEMS_SUCCESSFUL) {
                        /* Unknown Error */
                        ret = -1;
                        goto out;
                } else if (dma->started == 0) {
                        ret = 1;
                        goto out;
                }

                /* Check condition once more */
                goto check_condition;
        }

        ret = 0;

out_release:
        /* Unlock DMA channel */
        rtems_semaphore_release(dma->sem_dma);

out:
        if (initialized)
                dma->rx_wait.waiting = 0;
        return ret;
}
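
/* Illustrative sketch (not part of the driver): blocking until at least one
 * packet has been received using grspw_dma_rx_wait(). Return value 0 means
 * the condition was met; 1 DMA stopped; 2 timeout; 3 another thread already
 * waiting. The timeout value is an example. Compiled out.
 */
#if 0
static int example_rx_wait_one(void *dma)
{
        /* recv_cnt=1, op=1 (OR), ready_cnt=0, timeout=100 ticks:
         * wake when one packet is in RECV, or when READY+SCHED is empty.
         */
        return grspw_dma_rx_wait(dma, 1, 1, 0, 100);
}
#endif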

int grspw_dma_config(void *c, struct grspw_dma_config *cfg)
{
        struct grspw_dma_priv *dma = c;

        if (dma->started || !cfg)
                return -1;

        if (cfg->flags & ~(DMAFLAG_MASK | DMAFLAG2_MASK))
                return -1;

        /* Update Configuration */
        memcpy(&dma->cfg, cfg, sizeof(*cfg));

        return 0;
}
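
/* Illustrative sketch (not part of the driver): enabling RX/TX DMA
 * interrupts unconditionally via the DMAFLAG2_RXIE/DMAFLAG2_TXIE bits in
 * the configuration flags, instead of via the rx/tx_irq_en_cnt counters
 * (see grspw_dma_start() below). Must be done while the channel is stopped.
 * Compiled out.
 */
#if 0
static int example_enable_dma_irqs(void *dma)
{
        struct grspw_dma_config cfg;

        grspw_dma_config_read(dma, &cfg);
        cfg.flags |= DMAFLAG2_RXIE | DMAFLAG2_TXIE;
        return grspw_dma_config(dma, &cfg);
}
#endif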

void grspw_dma_config_read(void *c, struct grspw_dma_config *cfg)
{
        struct grspw_dma_priv *dma = c;

        /* Copy Current Configuration */
        memcpy(cfg, &dma->cfg, sizeof(*cfg));
}

void grspw_dma_stats_read(void *c, struct grspw_dma_stats *sts)
{
        struct grspw_dma_priv *dma = c;

        memcpy(sts, &dma->stats, sizeof(dma->stats));
}

void grspw_dma_stats_clr(void *c)
{
        struct grspw_dma_priv *dma = c;

        /* Clear most of the statistics */
        memset(&dma->stats, 0, sizeof(dma->stats));

        /* Init proper default values so that comparisons will work the
         * first time.
         */
        dma->stats.send_cnt_min = 0x3fffffff;
        dma->stats.tx_sched_cnt_min = 0x3fffffff;
        dma->stats.ready_cnt_min = 0x3fffffff;
        dma->stats.rx_sched_cnt_min = 0x3fffffff;
}
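
/* Illustrative sketch (not part of the driver): sampling and printing a few
 * DMA statistics counters. The field names are those maintained by this
 * driver; the printf() output format is an example assumption. Compiled out.
 */
#if 0
static void example_print_dma_stats(void *dma)
{
        struct grspw_dma_stats sts;

        grspw_dma_stats_read(dma, &sts);
        printf("RX packets: %d, TX packets: %d, TX link errors: %d\n",
               sts.rx_pkts, sts.tx_pkts, sts.tx_err_link);
        grspw_dma_stats_clr(dma); /* restart min/max tracking */
}
#endif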

int grspw_dma_start(void *c)
{
        struct grspw_dma_priv *dma = c;
        struct grspw_dma_regs *dregs = dma->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        if (dma->started)
                return 0;

        /* Initialize Software Structures:
         *  - Clear all Queues
         *  - init BD ring
         *  - init IRQ counter
         *  - clear statistics counters
         *  - init wait structures and semaphores
         */
        grspw_dma_reset(dma);

        /* RX (RE and RD) and TX are not enabled until the user fills the
         * SEND and READY Queues with SpaceWire Packet buffers. So we do not
         * have to worry about IRQs for this channel just yet. However other
         * DMA channels may be active.
         *
         * Some functionality that is not changed during started mode is set up
         * once and for all here:
         *
         *   - RX MAX Packet length
         *   - TX Descriptor base address to first BD in TX ring (not enabled)
         *   - RX Descriptor base address to first BD in RX ring (not enabled)
         *   - IRQs (TX DMA, RX DMA, DMA ERROR)
         *   - Strip PID
         *   - Strip Address
         *   - No Spill
         *   - Receiver Enable
         *   - disable on link error (LE)
         *
         * Note that the address register and the address enable bit in DMACTRL
         * register must be left untouched, they are configured on a GRSPW
         * core level.
         *
         * Note that the receiver is enabled here, but since descriptors are
         * not enabled the GRSPW core may stop/pause RX (if NS bit set) until
         * descriptors are enabled or it may ignore RX packets (NS=0) until
         * descriptors are enabled (writing RD bit).
         */
        REG_WRITE(&dregs->txdesc, dma->tx_bds_hwa);
        REG_WRITE(&dregs->rxdesc, dma->rx_bds_hwa);

        /* MAX Packet length */
        REG_WRITE(&dma->regs->rxmax, dma->cfg.rxmaxlen);

        ctrl =  GRSPW_DMACTRL_AI | GRSPW_DMACTRL_PS | GRSPW_DMACTRL_PR |
                GRSPW_DMACTRL_TA | GRSPW_DMACTRL_RA | GRSPW_DMACTRL_RE |
                (dma->cfg.flags & DMAFLAG_MASK) << GRSPW_DMACTRL_NS_BIT;
        if (dma->core->dis_link_on_err & LINKOPTS_DIS_ONERR)
                ctrl |= GRSPW_DMACTRL_LE;
        if (dma->cfg.rx_irq_en_cnt != 0 || dma->cfg.flags & DMAFLAG2_RXIE)
                ctrl |= GRSPW_DMACTRL_RI;
        if (dma->cfg.tx_irq_en_cnt != 0 || dma->cfg.flags & DMAFLAG2_TXIE)
                ctrl |= GRSPW_DMACTRL_TI;
        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
        ctrl |= REG_READ(&dma->regs->ctrl) & GRSPW_DMACTRL_EN;
        REG_WRITE(&dregs->ctrl, ctrl);
        SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);

        dma->started = 1; /* open up other DMA interfaces */

        return 0;
}

STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma)
{
        IRQFLAGS_TYPE irqflags;

        if (dma->started == 0)
                return;
        dma->started = 0;

        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
        grspw_hw_dma_stop(dma);
        SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);

        /* From here no more packets will be sent, however
         * there may still exist scheduled packets that have been
         * sent, and packets in the SEND Queue waiting for free
         * descriptors. All packets are moved to the SENT Queue
         * so that the user may get their buffers back, the user
         * must look at the TXPKT_FLAG_TX in order to determine
         * if the packet was sent or not.
         */

        /* Retrieve all sent packets from the scheduled queue */
        grspw_tx_process_scheduled(dma);

        /* Move un-sent packets in SEND and SCHED queue to the
         * SENT Queue. (never marked sent)
         */
        if (!grspw_list_is_empty(&dma->tx_sched)) {
                grspw_list_append_list(&dma->sent, &dma->tx_sched);
                grspw_list_clr(&dma->tx_sched);
                dma->sent_cnt += dma->tx_sched_cnt;
                dma->tx_sched_cnt = 0;
        }
        if (!grspw_list_is_empty(&dma->send)) {
                grspw_list_append_list(&dma->sent, &dma->send);
                grspw_list_clr(&dma->send);
                dma->sent_cnt += dma->send_cnt;
                dma->send_cnt = 0;
        }

        /* Similar for RX */
        grspw_rx_process_scheduled(dma);
        if (!grspw_list_is_empty(&dma->rx_sched)) {
                grspw_list_append_list(&dma->recv, &dma->rx_sched);
                grspw_list_clr(&dma->rx_sched);
                dma->recv_cnt += dma->rx_sched_cnt;
                dma->rx_sched_cnt = 0;
        }
        if (!grspw_list_is_empty(&dma->ready)) {
                grspw_list_append_list(&dma->recv, &dma->ready);
                grspw_list_clr(&dma->ready);
                dma->recv_cnt += dma->ready_cnt;
                dma->ready_cnt = 0;
        }

        /* Throw out blocked threads */
        rtems_semaphore_flush(dma->rx_wait.sem_wait);
        rtems_semaphore_flush(dma->tx_wait.sem_wait);
}

void grspw_dma_stop(void *c)
{
        struct grspw_dma_priv *dma = c;

        /* If DMA channel is closed we should not access the semaphore */
        if (!dma->open)
                return;

        /* Take DMA Channel lock */
        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return;

        grspw_dma_stop_locked(dma);

        rtems_semaphore_release(dma->sem_dma);
}

/* Do general work, invoked indirectly from ISR */
static void grspw_work_shutdown_func(struct grspw_priv *priv)
{
        int i;

        /* Link is down for some reason, and the user has configured
         * that we stop all (open) DMA channels and throw out all their
         * blocked threads.
         */
        for (i=0; i<priv->hwsup.ndma_chans; i++)
                grspw_dma_stop(&priv->dma[i]);
        grspw_hw_stop(priv);
}

/* Do DMA work on one channel, invoked indirectly from ISR */
static void grspw_work_dma_func(struct grspw_dma_priv *dma)
{
        int tx_cond_true, rx_cond_true;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        /* If DMA channel is closed we should not access the semaphore */
        if (dma->open == 0)
                return;

        rx_cond_true = 0;
        tx_cond_true = 0;
        dma->stats.irq_cnt++;

        /* Take DMA channel lock */
        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return;

        /* If closing DMA channel or just shut down */
        if (dma->started == 0)
                goto out;

        /* Look at the cause we were woken up and clear source */
        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
        ctrl = REG_READ(&dma->regs->ctrl);

        /* Read/Write DMA error ? */
        if (ctrl & GRSPW_DMA_STATUS_ERROR) {
                /* DMA error -> Stop DMA channel (both RX and TX) */
                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
                grspw_dma_stop_locked(dma);
        } else if (ctrl & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS)) {
                /* DMA has finished a TX/RX packet */
                ctrl &= ~GRSPW_DMACTRL_AT;
                if (dma->cfg.rx_irq_en_cnt != 0 ||
                    (dma->cfg.flags & DMAFLAG2_RXIE))
                        ctrl |= GRSPW_DMACTRL_RI;
                if (dma->cfg.tx_irq_en_cnt != 0 ||
                    (dma->cfg.flags & DMAFLAG2_TXIE))
                        ctrl |= GRSPW_DMACTRL_TI;
                REG_WRITE(&dma->regs->ctrl, ctrl);
                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
                if (ctrl & GRSPW_DMACTRL_PR) {
                        /* Do RX Work */
                        dma->stats.rx_work_cnt++;
                        grspw_rx_process_scheduled(dma);
                        dma->stats.rx_work_enabled += grspw_rx_schedule_ready(dma);
                        /* Check to see if the condition for waking a blocked
                         * USER task is fulfilled.
                         */
                        if (dma->rx_wait.waiting)
                                rx_cond_true = grspw_rx_wait_eval(dma);
                }
                if (ctrl & GRSPW_DMACTRL_PS) {
                        /* Do TX Work */
                        dma->stats.tx_work_cnt++;
                        grspw_tx_process_scheduled(dma);
                        dma->stats.tx_work_enabled += grspw_tx_schedule_send(dma);
                        if (dma->tx_wait.waiting)
                                tx_cond_true = grspw_tx_wait_eval(dma);
                }
        } else
                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);

out:
        /* Release lock */
        rtems_semaphore_release(dma->sem_dma);

        if (rx_cond_true)
                rtems_semaphore_release(dma->rx_wait.sem_wait);

        if (tx_cond_true)
                rtems_semaphore_release(dma->tx_wait.sem_wait);
}

/* The work task receives work from the work message queue posted by
 * the ISR.
 */
static void grspw_work_func(rtems_task_argument unused)
{
        rtems_status_code status;
        unsigned int message;
        size_t size;
        struct grspw_priv *priv;
        int i;

        while (grspw_task_stop == 0) {
                /* Wait for ISR to schedule work */
                status = rtems_message_queue_receive(
                        grspw_work_queue, &message,
                        &size, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
                if (status != RTEMS_SUCCESSFUL)
                        break;

                /* Handle work */
                priv = priv_tab[message >> WORK_CORE_BIT];
                if (message & WORK_SHUTDOWN)
                        grspw_work_shutdown_func(priv);
                else if (message & WORK_DMA_MASK) {
                        for (i = 0; i < 4; i++) {
                                if (message & WORK_DMA(i))
                                        grspw_work_dma_func(&priv->dma[i]);
                        }
                }
        }
        rtems_task_delete(RTEMS_SELF);
}

STATIC void grspw_isr(void *data)
{
        struct grspw_priv *priv = data;
        unsigned int dma_stat, stat, stat_clrmsk, ctrl, icctrl, timecode;
        unsigned int rxirq, rxack, intto;
        int i, handled = 0, message = WORK_NONE, call_user_int_isr;
#ifdef RTEMS_HAS_SMP
        IRQFLAGS_TYPE irqflags;
#endif

        /* Get Status from Hardware */
        stat = REG_READ(&priv->regs->status);
        stat_clrmsk = stat & (GRSPW_STS_TO | GRSPW_STAT_ERROR) & priv->stscfg;

        /* Make sure to put the timecode handling first in order to get the
         * smallest possible interrupt latency
         */
        if ((stat & GRSPW_STS_TO) && (priv->tcisr != NULL)) {
                ctrl = REG_READ(&priv->regs->ctrl);
                if (ctrl & GRSPW_CTRL_TQ) {
                        /* Timecode received. Let custom function handle this */
                        timecode = REG_READ(&priv->regs->time) &
                                        (GRSPW_TIME_CTRL | GRSPW_TIME_TCNT);
                        (priv->tcisr)(priv->tcisr_arg, timecode);
                }
        }

        /* Get Interrupt status from hardware */
        icctrl = REG_READ(&priv->regs->icctrl);
        if ((icctrl & GRSPW_ICCTRL_IRQSRC_MASK) && (priv->icisr != NULL)) {
                call_user_int_isr = 0;
                rxirq = rxack = intto = 0;

                if ((icctrl & GRSPW_ICCTRL_IQ) &&
                    (rxirq = REG_READ(&priv->regs->icrx)) != 0)
                        call_user_int_isr = 1;

                if ((icctrl & GRSPW_ICCTRL_AQ) &&
                    (rxack = REG_READ(&priv->regs->icack)) != 0)
                        call_user_int_isr = 1;

                if ((icctrl & GRSPW_ICCTRL_TQ) &&
                    (intto = REG_READ(&priv->regs->ictimeout)) != 0)
                        call_user_int_isr = 1;

                /* Let custom functions handle this POTENTIAL SPW interrupt. The
                 * user function is called even if no such IRQ has happened!
                 * User must make sure to clear all interrupts that have been
                 * handled from the three registers by writing a one.
                 */
                if (call_user_int_isr)
                        priv->icisr(priv->icisr_arg, rxirq, rxack, intto);
        }

        /* An Error occurred? */
        if (stat & GRSPW_STAT_ERROR) {
                /* Wake Global WorkQ */
                handled = 1;

                if (stat & GRSPW_STS_EE)
                        priv->stats.err_eeop++;

                if (stat & GRSPW_STS_IA)
                        priv->stats.err_addr++;

                if (stat & GRSPW_STS_PE)
                        priv->stats.err_parity++;

                if (stat & GRSPW_STS_DE)
                        priv->stats.err_disconnect++;

                if (stat & GRSPW_STS_ER)
                        priv->stats.err_escape++;

                if (stat & GRSPW_STS_CE)
                        priv->stats.err_credit++;

                if (stat & GRSPW_STS_WE)
                        priv->stats.err_wsync++;

                if ((priv->dis_link_on_err >> 16) & stat) {
                        /* Disable the link, no more transfers are expected
                         * on any DMA channel.
                         */
                        SPIN_LOCK(&priv->devlock, irqflags);
                        ctrl = REG_READ(&priv->regs->ctrl);
                        REG_WRITE(&priv->regs->ctrl, GRSPW_CTRL_LD |
                                (ctrl & ~(GRSPW_CTRL_IE|GRSPW_CTRL_LS)));
                        SPIN_UNLOCK(&priv->devlock, irqflags);
                        /* Signal to work-thread to stop DMA and clean up */
                        message = WORK_SHUTDOWN;
                }
        }

        /* Clear Status Flags */
        if (stat_clrmsk) {
                handled = 1;
                REG_WRITE(&priv->regs->status, stat_clrmsk);
        }

        /* A DMA transfer or Error occurred? In that case disable more IRQs
         * from the DMA channel, then invoke the workQ.
         *
         * Also note that the GI interrupt flag may not be available on older
         * designs (it was added together with multiple DMA channel support).
         */
        SPIN_LOCK(&priv->devlock, irqflags);
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                dma_stat = REG_READ(&priv->regs->dma[i].ctrl);
                /* Check for Errors and if Packets have been sent or received,
                 * if the respective IRQs are enabled
                 */
#ifdef HW_WITH_GI
                if ( dma_stat & (GRSPW_DMA_STATUS_ERROR | GRSPW_DMACTRL_GI) ) {
#else
                if ( (((dma_stat << 3) & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS))
                     | GRSPW_DMA_STATUS_ERROR) & dma_stat ) {
#endif
                        /* Disable Further IRQs (until enabled again)
                         * from this DMA channel. Let the status
                         * bits remain so that they can be handled by
                         * the work function.
                         */
                        REG_WRITE(&priv->regs->dma[i].ctrl, dma_stat &
                                ~(GRSPW_DMACTRL_RI|GRSPW_DMACTRL_TI|
                                GRSPW_DMACTRL_PR|GRSPW_DMACTRL_PS|
                                GRSPW_DMACTRL_RA|GRSPW_DMACTRL_TA|
                                GRSPW_DMACTRL_AT));
                        message |= WORK_DMA(i);
                        handled = 1;
                }
        }
        SPIN_UNLOCK(&priv->devlock, irqflags);

        if (handled != 0)
                priv->stats.irq_cnt++;

        /* Schedule work by sending message to work thread */
        if ((message != WORK_NONE) && grspw_work_queue) {
                message |= WORK_CORE(priv->index);
                stat = rtems_message_queue_send(grspw_work_queue, &message, 4);
                if (stat != RTEMS_SUCCESSFUL)
                        printk("grspw_isr(%d): message fail %d (0x%x)\n",
                                priv->index, stat, message);
        }
}

STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma)
{
        unsigned int ctrl;
        struct grspw_dma_regs *dregs = dma->regs;

        ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN |
               GRSPW_DMACTRL_SP | GRSPW_DMACTRL_SA | GRSPW_DMACTRL_NS);
        ctrl |= GRSPW_DMACTRL_AT;
        REG_WRITE(&dregs->ctrl, ctrl);
}

STATIC void grspw_hw_dma_softreset(struct grspw_dma_priv *dma)
{
        unsigned int ctrl;
        struct grspw_dma_regs *dregs = dma->regs;

        ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN);
        REG_WRITE(&dregs->ctrl, ctrl);

        REG_WRITE(&dregs->rxmax, DEFAULT_RXMAX);
        REG_WRITE(&dregs->txdesc, 0);
        REG_WRITE(&dregs->rxdesc, 0);
}

/* Hardware Action:
 *  - stop DMA
 *  - do not bring down the link (RMAP may be active)
 *  - RMAP settings untouched (RMAP may be active)
 *  - port select untouched (RMAP may be active)
 *  - timecodes are disabled
 *  - IRQ generation disabled
 *  - status not cleared (let user analyze it if requested later on)
 *  - Node address / First DMA channel's Node address
 *    is untouched (RMAP may be active)
 */
STATIC void grspw_hw_stop(struct grspw_priv *priv)
{
        int i;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);

        for (i=0; i<priv->hwsup.ndma_chans; i++)
                grspw_hw_dma_stop(&priv->dma[i]);

        ctrl = REG_READ(&priv->regs->ctrl);
        REG_WRITE(&priv->regs->ctrl, ctrl & (
                GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS |
                GRSPW_CTRL_RE | GRSPW_CTRL_RD |
                GRSPW_CTRL_NP | GRSPW_CTRL_PS));

        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
}

/* Soft reset of GRSPW core registers */
STATIC void grspw_hw_softreset(struct grspw_priv *priv)
{
        int i;
        unsigned int tmp;

        for (i=0; i<priv->hwsup.ndma_chans; i++)
                grspw_hw_dma_softreset(&priv->dma[i]);

        REG_WRITE(&priv->regs->status, 0xffffffff);
        REG_WRITE(&priv->regs->time, 0);
        /* Clear all but the valuable reset values of ICCTRL */
        tmp = REG_READ(&priv->regs->icctrl);
        tmp &= GRSPW_ICCTRL_INUM | GRSPW_ICCTRL_BIRQ | GRSPW_ICCTRL_TXIRQ;
        tmp |= GRSPW_ICCTRL_ID;
        REG_WRITE(&priv->regs->icctrl, tmp);
        REG_WRITE(&priv->regs->icrx, 0xffffffff);
        REG_WRITE(&priv->regs->icack, 0xffffffff);
        REG_WRITE(&priv->regs->ictimeout, 0xffffffff);
}

int grspw_dev_count(void)
{
        return grspw_count;
}

void grspw_initialize_user(void *(*devfound)(int), void (*devremove)(int,void*))
{
        int i;
        struct grspw_priv *priv;

        /* Set new Device Found Handler */
        grspw_dev_add = devfound;
        grspw_dev_del = devremove;

        if (grspw_initialized == 1 && grspw_dev_add) {
                /* Call callback for every previously found device */
                for (i=0; i<grspw_count; i++) {
                        priv = priv_tab[i];
                        if (priv)
                                priv->data = grspw_dev_add(i);
                }
        }
}

/******************* Driver manager interface ***********************/

/* Driver prototypes */
static int grspw_common_init(void);
static int grspw2_init3(struct drvmgr_dev *dev);

static struct drvmgr_drv_ops grspw2_ops =
{
        .init = {NULL,  NULL, grspw2_init3, NULL},
        .remove = NULL,
        .info = NULL
};

static struct amba_dev_id grspw2_ids[] =
{
        {VENDOR_GAISLER, GAISLER_SPW}, /* not yet supported */
        {VENDOR_GAISLER, GAISLER_SPW2},
        {VENDOR_GAISLER, GAISLER_SPW2_DMA},
        {0, 0}          /* Mark end of table */
};

static struct amba_drv_info grspw2_drv_info =
{
        {
                DRVMGR_OBJ_DRV,                 /* Driver */
                NULL,                           /* Next driver */
                NULL,                           /* Device list */
                DRIVER_AMBAPP_GAISLER_GRSPW2_ID,/* Driver ID */
                "GRSPW_PKT_DRV",                /* Driver Name */
                DRVMGR_BUS_TYPE_AMBAPP,         /* Bus Type */
                &grspw2_ops,
                NULL,                           /* Funcs */
                0,                              /* No devices yet */
                sizeof(struct grspw_priv),      /* Let DrvMgr alloc priv */
        },
        &grspw2_ids[0]
};

void grspw2_register_drv(void)
{
        GRSPW_DBG("Registering GRSPW2 packet driver\n");
        drvmgr_drv_register(&grspw2_drv_info.general);
}
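
/* Example (illustrative sketch only): when the BSP does not register this
 * driver automatically, the application can pull it in by calling the hook
 * above early on, before driver manager initialization probes the AMBA bus:
 *
 *   grspw2_register_drv();
 *   (then let the driver manager initialize and probe the bus as usual)
 */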

static int grspw2_init3(struct drvmgr_dev *dev)
{
        struct grspw_priv *priv;
        struct amba_dev_info *ambadev;
        struct ambapp_core *pnpinfo;
        int i, size;
        unsigned int ctrl, icctrl, numi;
        union drvmgr_key_value *value;

        GRSPW_DBG("GRSPW[%d] on bus %s\n", dev->minor_drv,
                dev->parent->dev->name);

        /* Bounds check so that the priv_tab[] indexing below stays valid */
        if (grspw_count >= GRSPW_MAX)
                return DRVMGR_ENORES;

        priv = dev->priv;
        if (priv == NULL)
                return DRVMGR_NOMEM;
        priv->dev = dev;

        /* If this is the first device, init common part of driver */
        if (grspw_common_init())
                return DRVMGR_FAIL;

        /*** Now we take care of device initialization ***/

        /* Get device information from AMBA PnP information */
        ambadev = (struct amba_dev_info *)dev->businfo;
        if (ambadev == NULL)
                return DRVMGR_FAIL;
        pnpinfo = &ambadev->info;
        priv->irq = pnpinfo->irq;
        priv->regs = (struct grspw_regs *)pnpinfo->apb_slv->start;

        /* Read Hardware Support from Control Register */
        ctrl = REG_READ(&priv->regs->ctrl);
        priv->hwsup.rmap = (ctrl & GRSPW_CTRL_RA) >> GRSPW_CTRL_RA_BIT;
        priv->hwsup.rmap_crc = (ctrl & GRSPW_CTRL_RC) >> GRSPW_CTRL_RC_BIT;
        priv->hwsup.rx_unalign = (ctrl & GRSPW_CTRL_RX) >> GRSPW_CTRL_RX_BIT;
        priv->hwsup.nports = 1 + ((ctrl & GRSPW_CTRL_PO) >> GRSPW_CTRL_PO_BIT);
        priv->hwsup.ndma_chans = 1 + ((ctrl & GRSPW_CTRL_NCH) >> GRSPW_CTRL_NCH_BIT);
        priv->hwsup.irq = ((ctrl & GRSPW_CTRL_ID) >> GRSPW_CTRL_ID_BIT);
        icctrl = REG_READ(&priv->regs->icctrl);
        numi = (icctrl & GRSPW_ICCTRL_NUMI) >> GRSPW_ICCTRL_NUMI_BIT;
        if (numi > 0)
                priv->hwsup.irq_num = 1 << (numi - 1);
        else
                priv->hwsup.irq_num = 0;
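
        /* Worked example of the NUMI decoding above (the encoding is taken
         * from the code itself, not verified against the GRSPW2 manual here):
         * NUMI=0 means no multi-interrupt support (irq_num = 0), NUMI=1
         * gives 1 << 0 = 1 interrupt, NUMI=3 gives 1 << 2 = 4 interrupts and
         * NUMI=6 gives 1 << 5 = 32 interrupts.
         */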

        /* Construct hardware version identification */
        priv->hwsup.hw_version = pnpinfo->device << 16 | pnpinfo->apb_slv->ver;

        if ((pnpinfo->device == GAISLER_SPW2) ||
            (pnpinfo->device == GAISLER_SPW2_DMA)) {
                priv->hwsup.strip_adr = 1; /* All GRSPW2 can strip Address */
                priv->hwsup.strip_pid = 1; /* All GRSPW2 can strip PID */
        } else {
                /* Autodetect GRSPW1 features? */
                priv->hwsup.strip_adr = 0;
                priv->hwsup.strip_pid = 0;
        }

        /* Probe the width of the SpaceWire Interrupt ISR timers. All timers
         * have the same width, so only the first one is probed; if there is
         * no timer the result will be zero.
         */
        REG_WRITE(&priv->regs->icrlpresc, 0x7fffffff);
        ctrl = REG_READ(&priv->regs->icrlpresc);
        REG_WRITE(&priv->regs->icrlpresc, 0);
        priv->hwsup.itmr_width = 0;
        while (ctrl & 1) {
                priv->hwsup.itmr_width++;
                ctrl = ctrl >> 1;
        }
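
        /* Worked example of the probe above: all ones are written to the
         * prescaler and only the implemented low-order bits stick. If the
         * read-back value is 0x00ffffff, the loop counts 24 consecutive set
         * bits starting from bit 0, so itmr_width becomes 24.
         */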

        /* Let user limit the number of DMA channels on this core to save
         * space. Only the first nDMA channels will be available.
         */
        value = drvmgr_dev_key_get(priv->dev, "nDMA", DRVMGR_KT_INT);
        if (value && (value->i < priv->hwsup.ndma_chans))
                priv->hwsup.ndma_chans = value->i;
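
        /* Example (illustrative sketch only) of providing the "nDMA" key
         * through a driver manager resource table; the array name is
         * hypothetical and the table must be hooked into the bus resources
         * of the AMBA bus for this device instance:
         *
         *   struct drvmgr_key grlib_grspw_0_res[] = {
         *       {"nDMA", DRVMGR_KT_INT, {(unsigned int)1}},
         *       DRVMGR_KEY_EMPTY
         *   };
         */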

        /* Allocate and initialize memory for all DMA channels */
        size = sizeof(struct grspw_dma_priv) * priv->hwsup.ndma_chans;
        priv->dma = (struct grspw_dma_priv *) malloc(size);
        if (priv->dma == NULL)
                return DRVMGR_NOMEM;
        memset(priv->dma, 0, size);
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                priv->dma[i].core = priv;
                priv->dma[i].index = i;
                priv->dma[i].regs = &priv->regs->dma[i];
        }

        /* Startup Action:
         *  - stop DMA
         *  - do not bring down the link (RMAP may be active)
         *  - RMAP settings untouched (RMAP may be active)
         *  - port select untouched (RMAP may be active)
         *  - timecodes are disabled
         *  - IRQ generation disabled
         *  - status cleared
         *  - Node address / the first DMA channel's node address
         *    is untouched (RMAP may be active)
         */
        grspw_hw_stop(priv);
        grspw_hw_softreset(priv);

        /* Register device in the driver's device table */
        priv->index = grspw_count;
        priv_tab[priv->index] = priv;
        grspw_count++;

        /* Device name */
        sprintf(priv->devname, "grspw%d", priv->index);

        /* Tell above layer about new device */
        if (grspw_dev_add)
                priv->data = grspw_dev_add(priv->index);

        return DRVMGR_OK;
}

/******************* Driver Implementation ***********************/

static int grspw_common_init(void)
{
        if (grspw_initialized == 1)
                return 0;
        if (grspw_initialized == -1)
                return -1;
        grspw_initialized = -1;

        /* Device Semaphore created with count = 1 */
        if (rtems_semaphore_create(rtems_build_name('S', 'G', 'L', 'S'), 1,
            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE |
            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL |
            RTEMS_NO_PRIORITY_CEILING, 0, &grspw_sem) != RTEMS_SUCCESSFUL)
                return -1;

        /* Work queue and work task. Not created if the user has disabled
         * them; they can be disabled to save resources when interrupts are
         * not used.
         */
        if (grspw_work_task_priority != -1) {
                if (rtems_message_queue_create(
                    rtems_build_name('S', 'G', 'L', 'Q'), 32, 4, RTEMS_FIFO,
                    &grspw_work_queue) != RTEMS_SUCCESSFUL)
                        return -1;

                if (rtems_task_create(rtems_build_name('S', 'G', 'L', 'T'),
                    grspw_work_task_priority, RTEMS_MINIMUM_STACK_SIZE,
                    RTEMS_PREEMPT | RTEMS_NO_ASR, RTEMS_NO_FLOATING_POINT,
                    &grspw_work_task) != RTEMS_SUCCESSFUL)
                        return -1;

                if (rtems_task_start(grspw_work_task, grspw_work_func, 0) !=
                    RTEMS_SUCCESSFUL)
                        return -1;
        }

        grspw_initialized = 1;
        return 0;
}
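
/* Example (illustrative sketch only): an application that never enables DMA
 * RX/TX interrupts can keep the message queue and worker task from being
 * created by overriding the work-task priority before the driver initializes,
 * assuming grspw_work_task_priority is a user-overridable global as the
 * check above suggests:
 *
 *   grspw_work_task_priority = -1;
 */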