source: rtems/c/src/lib/libbsp/sparc/shared/spw/grspw_pkt.c @ 49cf776e

Last change on this file since 49cf776e was 49cf776e, checked in by Daniel Hellstrom <daniel@…>, on 03/22/16 at 14:37:36

leon, grspw_pkt: added link_ctrl options

Improved the link error handling options. It's now possible to
disable the link on individual link errors/warnings instead of
always on all or none.

Changed the name of LINKOPTS_IRQ to LINKOPTS_EIRQ to match the
Linux and VxWorks SpW drivers.

  • Property mode set to 100644
File size: 79.9 KB
/*
 * Cobham Gaisler GRSPW/GRSPW2 SpaceWire Kernel Library Interface for RTEMS.
 *
 * This driver can be used to implement a standard I/O system "char"-driver
 * or used directly. NOTE: SMP support has not been tested.
 *
 * COPYRIGHT (c) 2011
 * Cobham Gaisler AB
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#include <rtems.h>
#include <bsp.h>
#include <rtems/libio.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <ctype.h>
#include <malloc.h>
#include <rtems/bspIo.h>

#include <drvmgr/drvmgr.h>
#include <ambapp.h>
#include <drvmgr/ambapp_bus.h>
#include <bsp/grspw_pkt.h>

/* This driver has been prepared for SMP operation, however it has never been
 * tested on an SMP system - use at your own risk.
 */
#ifdef RTEMS_HAS_SMP

#include <rtems/score/smplock.h> /* spin-lock */

/* SPIN_LOCK() and SPIN_UNLOCK() are not implemented by RTEMS; the _IRQ
 * versions are used to implement them.
 */
#define SPIN_DECLARE(name) SMP_lock_spinlock_simple_Control name
#define SPIN_INIT(lock) _SMP_lock_spinlock_simple_Initialize(lock)
#define SPIN_LOCK(lock, level) SPIN_LOCK_IRQ(lock, level)
#define SPIN_LOCK_IRQ(lock, level) (level) = _SMP_lock_spinlock_simple_Obtain(lock)
#define SPIN_UNLOCK(lock, level) SPIN_UNLOCK_IRQ(lock, level)
#define SPIN_UNLOCK_IRQ(lock, level) _SMP_lock_spinlock_simple_Release(lock, level)
#define IRQFLAGS_TYPE ISR_Level

#else

#define SPIN_DECLARE(name)
#define SPIN_INIT(lock)
#define SPIN_LOCK(lock, level)
#define SPIN_LOCK_IRQ(lock, level) rtems_interrupt_disable(level)
#define SPIN_UNLOCK(lock, level)
#define SPIN_UNLOCK_IRQ(lock, level) rtems_interrupt_enable(level)
#define IRQFLAGS_TYPE rtems_interrupt_level

#endif

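/* Usage sketch (not part of the driver, never compiled): the intended
 * pattern for the spin-lock macros above. A read-modify-write of a register
 * shared with the ISR is wrapped in SPIN_LOCK_IRQ()/SPIN_UNLOCK_IRQ(),
 * which fall back to plain interrupt disable/enable on non-SMP builds.
 * struct grspw_priv, REG_READ/REG_WRITE and GRSPW_CTRL_TI are defined
 * further down in this file.
 */
#if 0
void spin_lock_usage_sketch(struct grspw_priv *priv)
{
        IRQFLAGS_TYPE irqflags;
        unsigned int ctrl;

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        ctrl = REG_READ(&priv->regs->ctrl);
        REG_WRITE(&priv->regs->ctrl, ctrl | GRSPW_CTRL_TI);
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
}
#endif
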
/*#define STATIC*/
#define STATIC static

/*#define GRSPW_DBG(args...) printk(args)*/
#define GRSPW_DBG(args...)

struct grspw_dma_regs {
        volatile unsigned int ctrl;     /* DMA Channel Control */
        volatile unsigned int rxmax;    /* RX Max Packet Length */
        volatile unsigned int txdesc;   /* TX Descriptor Base/Current */
        volatile unsigned int rxdesc;   /* RX Descriptor Base/Current */
        volatile unsigned int addr;     /* Address Register */
        volatile unsigned int resv[3];
};

struct grspw_regs {
        volatile unsigned int ctrl;
        volatile unsigned int status;
        volatile unsigned int nodeaddr;
        volatile unsigned int clkdiv;
        volatile unsigned int destkey;
        volatile unsigned int time;
        volatile unsigned int timer;    /* Used only in GRSPW1 */
        volatile unsigned int resv1;

        /* DMA Registers, ctrl.NCH determines the number of DMA channels,
         * up to 4 channels are supported
         */
        struct grspw_dma_regs dma[4];

        volatile unsigned int icctrl;
        volatile unsigned int icrx;
        volatile unsigned int icack;
        volatile unsigned int ictimeout;
        volatile unsigned int ictickomask;
        volatile unsigned int icaamask;
        volatile unsigned int icrlpresc;
        volatile unsigned int icrlisr;
        volatile unsigned int icrlintack;
        volatile unsigned int resv2;
        volatile unsigned int icisr;
        volatile unsigned int resv3;
};

/* GRSPW - Control Register - 0x00 */
#define GRSPW_CTRL_RA_BIT       31
#define GRSPW_CTRL_RX_BIT       30
#define GRSPW_CTRL_RC_BIT       29
#define GRSPW_CTRL_NCH_BIT      27
#define GRSPW_CTRL_PO_BIT       26
#define GRSPW_CTRL_ID_BIT       24
#define GRSPW_CTRL_LE_BIT       22
#define GRSPW_CTRL_PS_BIT       21
#define GRSPW_CTRL_NP_BIT       20
#define GRSPW_CTRL_RD_BIT       17
#define GRSPW_CTRL_RE_BIT       16
#define GRSPW_CTRL_TF_BIT       12
#define GRSPW_CTRL_TR_BIT       11
#define GRSPW_CTRL_TT_BIT       10
#define GRSPW_CTRL_LI_BIT       9
#define GRSPW_CTRL_TQ_BIT       8
#define GRSPW_CTRL_RS_BIT       6
#define GRSPW_CTRL_PM_BIT       5
#define GRSPW_CTRL_TI_BIT       4
#define GRSPW_CTRL_IE_BIT       3
#define GRSPW_CTRL_AS_BIT       2
#define GRSPW_CTRL_LS_BIT       1
#define GRSPW_CTRL_LD_BIT       0

#define GRSPW_CTRL_RA   (1<<GRSPW_CTRL_RA_BIT)
#define GRSPW_CTRL_RX   (1<<GRSPW_CTRL_RX_BIT)
#define GRSPW_CTRL_RC   (1<<GRSPW_CTRL_RC_BIT)
#define GRSPW_CTRL_NCH  (0x3<<GRSPW_CTRL_NCH_BIT)
#define GRSPW_CTRL_PO   (1<<GRSPW_CTRL_PO_BIT)
#define GRSPW_CTRL_ID   (1<<GRSPW_CTRL_ID_BIT)
#define GRSPW_CTRL_LE   (1<<GRSPW_CTRL_LE_BIT)
#define GRSPW_CTRL_PS   (1<<GRSPW_CTRL_PS_BIT)
#define GRSPW_CTRL_NP   (1<<GRSPW_CTRL_NP_BIT)
#define GRSPW_CTRL_RD   (1<<GRSPW_CTRL_RD_BIT)
#define GRSPW_CTRL_RE   (1<<GRSPW_CTRL_RE_BIT)
#define GRSPW_CTRL_TF   (1<<GRSPW_CTRL_TF_BIT)
#define GRSPW_CTRL_TR   (1<<GRSPW_CTRL_TR_BIT)
#define GRSPW_CTRL_TT   (1<<GRSPW_CTRL_TT_BIT)
#define GRSPW_CTRL_LI   (1<<GRSPW_CTRL_LI_BIT)
#define GRSPW_CTRL_TQ   (1<<GRSPW_CTRL_TQ_BIT)
#define GRSPW_CTRL_RS   (1<<GRSPW_CTRL_RS_BIT)
#define GRSPW_CTRL_PM   (1<<GRSPW_CTRL_PM_BIT)
#define GRSPW_CTRL_TI   (1<<GRSPW_CTRL_TI_BIT)
#define GRSPW_CTRL_IE   (1<<GRSPW_CTRL_IE_BIT)
#define GRSPW_CTRL_AS   (1<<GRSPW_CTRL_AS_BIT)
#define GRSPW_CTRL_LS   (1<<GRSPW_CTRL_LS_BIT)
#define GRSPW_CTRL_LD   (1<<GRSPW_CTRL_LD_BIT)

#define GRSPW_CTRL_IRQSRC_MASK \
        (GRSPW_CTRL_LI | GRSPW_CTRL_TQ)
#define GRSPW_ICCTRL_IRQSRC_MASK \
        (GRSPW_ICCTRL_TQ | GRSPW_ICCTRL_AQ | GRSPW_ICCTRL_IQ)


/* GRSPW - Status Register - 0x04 */
#define GRSPW_STS_LS_BIT        21
#define GRSPW_STS_AP_BIT        9
#define GRSPW_STS_EE_BIT        8
#define GRSPW_STS_IA_BIT        7
#define GRSPW_STS_WE_BIT        6       /* GRSPW1 */
#define GRSPW_STS_PE_BIT        4
#define GRSPW_STS_DE_BIT        3
#define GRSPW_STS_ER_BIT        2
#define GRSPW_STS_CE_BIT        1
#define GRSPW_STS_TO_BIT        0

#define GRSPW_STS_LS    (0x7<<GRSPW_STS_LS_BIT)
#define GRSPW_STS_AP    (1<<GRSPW_STS_AP_BIT)
#define GRSPW_STS_EE    (1<<GRSPW_STS_EE_BIT)
#define GRSPW_STS_IA    (1<<GRSPW_STS_IA_BIT)
#define GRSPW_STS_WE    (1<<GRSPW_STS_WE_BIT)   /* GRSPW1 */
#define GRSPW_STS_PE    (1<<GRSPW_STS_PE_BIT)
#define GRSPW_STS_DE    (1<<GRSPW_STS_DE_BIT)
#define GRSPW_STS_ER    (1<<GRSPW_STS_ER_BIT)
#define GRSPW_STS_CE    (1<<GRSPW_STS_CE_BIT)
#define GRSPW_STS_TO    (1<<GRSPW_STS_TO_BIT)

/* GRSPW - Default Address Register - 0x08 */
#define GRSPW_DEF_ADDR_BIT      0
#define GRSPW_DEF_MASK_BIT      8
#define GRSPW_DEF_ADDR  (0xff<<GRSPW_DEF_ADDR_BIT)
#define GRSPW_DEF_MASK  (0xff<<GRSPW_DEF_MASK_BIT)

/* GRSPW - Clock Divisor Register - 0x0C */
#define GRSPW_CLKDIV_START_BIT  8
#define GRSPW_CLKDIV_RUN_BIT    0
#define GRSPW_CLKDIV_START      (0xff<<GRSPW_CLKDIV_START_BIT)
#define GRSPW_CLKDIV_RUN        (0xff<<GRSPW_CLKDIV_RUN_BIT)
#define GRSPW_CLKDIV_MASK       (GRSPW_CLKDIV_START|GRSPW_CLKDIV_RUN)

/* GRSPW - Destination key Register - 0x10 */
#define GRSPW_DK_DESTKEY_BIT    0
#define GRSPW_DK_DESTKEY        (0xff<<GRSPW_DK_DESTKEY_BIT)

/* GRSPW - Time Register - 0x14 */
#define GRSPW_TIME_CTRL_BIT     6
#define GRSPW_TIME_CNT_BIT      0
#define GRSPW_TIME_CTRL         (0x3<<GRSPW_TIME_CTRL_BIT)
#define GRSPW_TIME_TCNT         (0x3f<<GRSPW_TIME_CNT_BIT)

/* GRSPW - DMA Control Register - 0x20*N */
#define GRSPW_DMACTRL_LE_BIT    16
#define GRSPW_DMACTRL_SP_BIT    15
#define GRSPW_DMACTRL_SA_BIT    14
#define GRSPW_DMACTRL_EN_BIT    13
#define GRSPW_DMACTRL_NS_BIT    12
#define GRSPW_DMACTRL_RD_BIT    11
#define GRSPW_DMACTRL_RX_BIT    10
#define GRSPW_DMACTRL_AT_BIT    9
#define GRSPW_DMACTRL_RA_BIT    8
#define GRSPW_DMACTRL_TA_BIT    7
#define GRSPW_DMACTRL_PR_BIT    6
#define GRSPW_DMACTRL_PS_BIT    5
#define GRSPW_DMACTRL_AI_BIT    4
#define GRSPW_DMACTRL_RI_BIT    3
#define GRSPW_DMACTRL_TI_BIT    2
#define GRSPW_DMACTRL_RE_BIT    1
#define GRSPW_DMACTRL_TE_BIT    0

#define GRSPW_DMACTRL_LE        (1<<GRSPW_DMACTRL_LE_BIT)
#define GRSPW_DMACTRL_SP        (1<<GRSPW_DMACTRL_SP_BIT)
#define GRSPW_DMACTRL_SA        (1<<GRSPW_DMACTRL_SA_BIT)
#define GRSPW_DMACTRL_EN        (1<<GRSPW_DMACTRL_EN_BIT)
#define GRSPW_DMACTRL_NS        (1<<GRSPW_DMACTRL_NS_BIT)
#define GRSPW_DMACTRL_RD        (1<<GRSPW_DMACTRL_RD_BIT)
#define GRSPW_DMACTRL_RX        (1<<GRSPW_DMACTRL_RX_BIT)
#define GRSPW_DMACTRL_AT        (1<<GRSPW_DMACTRL_AT_BIT)
#define GRSPW_DMACTRL_RA        (1<<GRSPW_DMACTRL_RA_BIT)
#define GRSPW_DMACTRL_TA        (1<<GRSPW_DMACTRL_TA_BIT)
#define GRSPW_DMACTRL_PR        (1<<GRSPW_DMACTRL_PR_BIT)
#define GRSPW_DMACTRL_PS        (1<<GRSPW_DMACTRL_PS_BIT)
#define GRSPW_DMACTRL_AI        (1<<GRSPW_DMACTRL_AI_BIT)
#define GRSPW_DMACTRL_RI        (1<<GRSPW_DMACTRL_RI_BIT)
#define GRSPW_DMACTRL_TI        (1<<GRSPW_DMACTRL_TI_BIT)
#define GRSPW_DMACTRL_RE        (1<<GRSPW_DMACTRL_RE_BIT)
#define GRSPW_DMACTRL_TE        (1<<GRSPW_DMACTRL_TE_BIT)

/* GRSPW - DMA Channel Max Packet Length Register - (0x20*N + 0x04) */
#define GRSPW_DMARXLEN_MAX_BIT  0
#define GRSPW_DMARXLEN_MAX      (0xffffff<<GRSPW_DMARXLEN_MAX_BIT)

/* GRSPW - DMA Channel Address Register - (0x20*N + 0x10) */
#define GRSPW_DMAADR_ADDR_BIT   0
#define GRSPW_DMAADR_MASK_BIT   8
#define GRSPW_DMAADR_ADDR       (0xff<<GRSPW_DMAADR_ADDR_BIT)
#define GRSPW_DMAADR_MASK       (0xff<<GRSPW_DMAADR_MASK_BIT)

/* GRSPW - Interrupt code receive register - 0xa4 */
#define GRSPW_ICCTRL_INUM_BIT   27
#define GRSPW_ICCTRL_IA_BIT     24
#define GRSPW_ICCTRL_LE_BIT     23
#define GRSPW_ICCTRL_PR_BIT     22
#define GRSPW_ICCTRL_DQ_BIT     21 /* never used */
#define GRSPW_ICCTRL_TQ_BIT     20
#define GRSPW_ICCTRL_AQ_BIT     19
#define GRSPW_ICCTRL_IQ_BIT     18
#define GRSPW_ICCTRL_IR_BIT     17
#define GRSPW_ICCTRL_IT_BIT     16
#define GRSPW_ICCTRL_NUMI_BIT   13
#define GRSPW_ICCTRL_BIRQ_BIT   8
#define GRSPW_ICCTRL_ID_BIT     7
#define GRSPW_ICCTRL_II_BIT     6
#define GRSPW_ICCTRL_TXIRQ_BIT  0
#define GRSPW_ICCTRL_INUM       (0x3f << GRSPW_ICCTRL_INUM_BIT)
#define GRSPW_ICCTRL_IA         (1 << GRSPW_ICCTRL_IA_BIT)
#define GRSPW_ICCTRL_LE         (1 << GRSPW_ICCTRL_LE_BIT)
#define GRSPW_ICCTRL_PR         (1 << GRSPW_ICCTRL_PR_BIT)
#define GRSPW_ICCTRL_DQ         (1 << GRSPW_ICCTRL_DQ_BIT)
#define GRSPW_ICCTRL_TQ         (1 << GRSPW_ICCTRL_TQ_BIT)
#define GRSPW_ICCTRL_AQ         (1 << GRSPW_ICCTRL_AQ_BIT)
#define GRSPW_ICCTRL_IQ         (1 << GRSPW_ICCTRL_IQ_BIT)
#define GRSPW_ICCTRL_IR         (1 << GRSPW_ICCTRL_IR_BIT)
#define GRSPW_ICCTRL_IT         (1 << GRSPW_ICCTRL_IT_BIT)
#define GRSPW_ICCTRL_NUMI       (0x7 << GRSPW_ICCTRL_NUMI_BIT)
#define GRSPW_ICCTRL_BIRQ       (0x1f << GRSPW_ICCTRL_BIRQ_BIT)
#define GRSPW_ICCTRL_ID         (1 << GRSPW_ICCTRL_ID_BIT)
#define GRSPW_ICCTRL_II         (1 << GRSPW_ICCTRL_II_BIT)
#define GRSPW_ICCTRL_TXIRQ      (0x3f << GRSPW_ICCTRL_TXIRQ_BIT)

/* RX Buffer Descriptor */
struct grspw_rxbd {
   volatile unsigned int ctrl;
   volatile unsigned int addr;
};

/* TX Buffer Descriptor */
struct grspw_txbd {
   volatile unsigned int ctrl;
   volatile unsigned int haddr;
   volatile unsigned int dlen;
   volatile unsigned int daddr;
};

/* GRSPW - DMA RXBD Ctrl */
#define GRSPW_RXBD_LEN_BIT 0
#define GRSPW_RXBD_LEN  (0x1ffffff<<GRSPW_RXBD_LEN_BIT)
#define GRSPW_RXBD_EN   (1<<25)
#define GRSPW_RXBD_WR   (1<<26)
#define GRSPW_RXBD_IE   (1<<27)
#define GRSPW_RXBD_EP   (1<<28)
#define GRSPW_RXBD_HC   (1<<29)
#define GRSPW_RXBD_DC   (1<<30)
#define GRSPW_RXBD_TR   (1<<31)

/* GRSPW - DMA TXBD Ctrl */
#define GRSPW_TXBD_HLEN (0xff<<0)
#define GRSPW_TXBD_NCL  (0xf<<8)
#define GRSPW_TXBD_EN   (1<<12)
#define GRSPW_TXBD_WR   (1<<13)
#define GRSPW_TXBD_IE   (1<<14)
#define GRSPW_TXBD_LE   (1<<15)
#define GRSPW_TXBD_HC   (1<<16)
#define GRSPW_TXBD_DC   (1<<17)


/* GRSPW Error Condition */
#define GRSPW_STAT_ERROR        (GRSPW_STS_EE | GRSPW_STS_IA | GRSPW_STS_WE | GRSPW_STS_PE | GRSPW_STS_DE | GRSPW_STS_ER | GRSPW_STS_CE)
#define GRSPW_DMA_STATUS_ERROR  (GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA)
/* GRSPW Link configuration options */
#define GRSPW_LINK_CFG          (GRSPW_CTRL_LI | GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS)
#define GRSPW_LINKSTATE(status) ((status & GRSPW_CTRL_LS) >> GRSPW_CTRL_LS_BIT)

/* Software Defaults */
#define DEFAULT_RXMAX 1024      /* 1 KByte Max RX Packet Size */

/* GRSPW Constants */
#define GRSPW_TXBD_NR 64        /* Maximum number of TX Descriptors */
#define GRSPW_RXBD_NR 128       /* Maximum number of RX Descriptors */
#define BDTAB_SIZE 0x400        /* BD Table Size (RX or TX) */
#define BDTAB_ALIGN 0x400       /* BD Table Alignment Requirement */

/* Memory and HW register access routines. All accesses are 32-bit. */
#define BD_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
/*#define BD_READ(addr) (*(volatile unsigned int *)(addr))*/
#define BD_READ(addr) leon_r32_no_cache((unsigned long)(addr))
#define REG_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
#define REG_READ(addr) (*(volatile unsigned int *)(addr))

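/* Sketch (never compiled): why BD_READ() differs from REG_READ(). Descriptor
 * words are updated by the GRSPW DMA engine behind the CPU's back, so they
 * are read through leon_r32_no_cache() to bypass a potentially stale
 * data-cache line; registers are read through plain volatile pointers.
 */
#if 0
        /* poll an RX descriptor until hardware has disabled it */
        while (BD_READ(&dma->rx_bds[0].ctrl) & GRSPW_RXBD_EN)
                ;
#endif
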
struct grspw_ring {
        struct grspw_ring *next;        /* Next Descriptor */
        union {
                struct grspw_txbd *tx;  /* Descriptor Address */
                struct grspw_rxbd *rx;  /* Descriptor Address */
        } bd;
        struct grspw_pkt *pkt;          /* Associated packet description, NULL if none */
};

/* An entry in the TX descriptor Ring */
struct grspw_txring {
        struct grspw_txring *next;      /* Next Descriptor */
        struct grspw_txbd *bd;          /* Descriptor Address */
        struct grspw_pkt *pkt;          /* Associated packet description, NULL if none */
};

/* An entry in the RX descriptor Ring */
struct grspw_rxring {
        struct grspw_rxring *next;      /* Next Descriptor */
        struct grspw_rxbd *bd;          /* Descriptor Address */
        struct grspw_pkt *pkt;          /* Associated packet description, NULL if none */
};


struct grspw_dma_priv {
        struct grspw_priv *core;        /* GRSPW Core */
        struct grspw_dma_regs *regs;    /* DMA Channel Registers */
        int index;                      /* DMA Channel Index @ GRSPW core */
        int open;                       /* DMA Channel opened by user */
        int started;                    /* DMA Channel activity (start|stop) */
        rtems_id sem_dma;               /* DMA Channel Semaphore */
        struct grspw_dma_stats stats;   /* DMA Channel Statistics */
        struct grspw_dma_config cfg;    /* DMA Channel Configuration */

        /*** RX ***/

        /* RX Descriptor Ring */
        struct grspw_rxbd *rx_bds;              /* Descriptor Address */
        struct grspw_rxbd *rx_bds_hwa;          /* Descriptor HW Address */
        struct grspw_rxring *rx_ring_base;
        struct grspw_rxring *rx_ring_head;      /* Next descriptor to enable */
        struct grspw_rxring *rx_ring_tail;      /* Oldest enabled Descriptor */
        int rx_irq_en_cnt_curr;
        struct {
                int waiting;
                int ready_cnt;
                int op;
                int recv_cnt;
                rtems_id sem_wait;              /* RX Semaphore used to implement RX blocking */
        } rx_wait;

        /* Queue of Packets READY to be scheduled */
        struct grspw_list ready;
        int ready_cnt;

        /* Scheduled RX Packets Queue */
        struct grspw_list rx_sched;
        int rx_sched_cnt;

        /* Queue of Packets that have been RECEIVED */
        struct grspw_list recv;
        int recv_cnt;


        /*** TX ***/

        /* TX Descriptor Ring */
        struct grspw_txbd *tx_bds;              /* Descriptor Address */
        struct grspw_txbd *tx_bds_hwa;          /* Descriptor HW Address */
        struct grspw_txring *tx_ring_base;
        struct grspw_txring *tx_ring_head;
        struct grspw_txring *tx_ring_tail;
        int tx_irq_en_cnt_curr;
        struct {
                int waiting;
                int send_cnt;
                int op;
                int sent_cnt;
                rtems_id sem_wait;              /* TX Semaphore used to implement TX blocking */
        } tx_wait;

        /* Queue of Packets ready to be scheduled for transmission */
        struct grspw_list send;
        int send_cnt;

        /* Scheduled TX Packets Queue */
        struct grspw_list tx_sched;
        int tx_sched_cnt;

        /* Queue of Packets that have been SENT */
        struct grspw_list sent;
        int sent_cnt;
};

struct grspw_priv {
        char devname[8];                /* Device name "grspw%d" */
        struct drvmgr_dev *dev;         /* Device */
        struct grspw_regs *regs;        /* Virtual Address of APB Registers */
        int irq;                        /* AMBA IRQ number of core */
        int index;                      /* Index in order it was probed */
        int core_index;                 /* Core Bus Index */
        int open;                       /* If Device is already opened (=1) or not (=0) */
        void *data;                     /* User private Data for this device instance, set by grspw_initialize_user */

        /* Features supported by Hardware */
        struct grspw_hw_sup hwsup;

        /* Pointer to an array of maximally 4 DMA Channels */
        struct grspw_dma_priv *dma;

        /* Spin-lock ISR protection */
        SPIN_DECLARE(devlock);

        /* Descriptor Memory Area for TX & RX and all DMA channels */
        unsigned int bd_mem;
        unsigned int bd_mem_alloced;

        /*** Time Code Handling ***/
        void (*tcisr)(void *data, int timecode);
        void *tcisr_arg;

        /*** Interrupt-code Handling ***/
        spwpkt_ic_isr_t icisr;
        void *icisr_arg;

        /* Bit mask representing events which shall cause link disable. */
        unsigned int dis_link_on_err;

        /* "Core Global" Statistics gathered, not dependent on DMA channel */
        struct grspw_core_stats stats;
};

int grspw_initialized = 0;
int grspw_count = 0;
struct workqueue_struct *grspw_workq = NULL;
rtems_id grspw_sem;
static struct grspw_priv *priv_tab[GRSPW_MAX];

/* callback to upper layer when devices are discovered/removed */
void *(*grspw_dev_add)(int) = NULL;
void (*grspw_dev_del)(int,void*) = NULL;

/* USER OVERRIDABLE - The work task priority. Set to -1 to disable creating
 * the work-task and work-queue to save space.
 */
int grspw_work_task_priority __attribute__((weak)) = 100;
int grspw_task_stop = 0;
rtems_id grspw_work_task;
rtems_id grspw_work_queue = 0;
#define WORK_NONE         0
#define WORK_SHUTDOWN     0x100
#define WORK_DMA(channel) (0x1 << (channel))
#define WORK_DMA_MASK     0xf /* max 4 channels */
#define WORK_CORE_BIT     16
#define WORK_CORE_MASK    0xffff
#define WORK_CORE(device) ((device) << WORK_CORE_BIT)

STATIC void grspw_hw_stop(struct grspw_priv *priv);
STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma);
STATIC void grspw_dma_reset(struct grspw_dma_priv *dma);
STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma);
STATIC void grspw_isr(void *data);
void *grspw_open(int dev_no)
{
        struct grspw_priv *priv;
        unsigned int bdtabsize, hwa;
        int i;
        union drvmgr_key_value *value;

        if (grspw_initialized != 1 || (dev_no >= grspw_count))
                return NULL;

        priv = priv_tab[dev_no];

        /* Take GRSPW lock - Wait until we get semaphore */
        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return NULL;

        if (priv->open) {
                priv = NULL;
                goto out;
        }

        /* Initialize Spin-lock for GRSPW Device. This is to protect
         * CTRL and DMACTRL registers from ISR.
         */
        SPIN_INIT(&priv->devlock);

        priv->tcisr = NULL;
        priv->tcisr_arg = NULL;
        priv->icisr = NULL;
        priv->icisr_arg = NULL;

        grspw_stats_clr(priv);

        /* Allocate TX & RX Descriptor memory area for all DMA
         * channels. Max-size descriptor area is allocated (or user assigned):
         *  - 128 RX descriptors per DMA Channel
         *  - 64 TX descriptors per DMA Channel
         * Specified address must be in CPU RAM.
         */
        bdtabsize = 2 * BDTAB_SIZE * priv->hwsup.ndma_chans;
        value = drvmgr_dev_key_get(priv->dev, "bdDmaArea", DRVMGR_KT_INT);
        if (value) {
                priv->bd_mem = value->i;
                priv->bd_mem_alloced = 0;
                if (priv->bd_mem & (BDTAB_ALIGN-1)) {
                        GRSPW_DBG("GRSPW[%d]: user-def DMA-area not aligned",
                                  priv->index);
                        priv = NULL;
                        goto out;
                }
        } else {
                priv->bd_mem_alloced = (unsigned int)malloc(bdtabsize + BDTAB_ALIGN - 1);
                if (priv->bd_mem_alloced == 0) {
                        priv = NULL;
                        goto out;
                }
                /* Align memory */
                priv->bd_mem = (priv->bd_mem_alloced + (BDTAB_ALIGN - 1)) &
                               ~(BDTAB_ALIGN-1);
        }

        /* Translate into DMA address that HW can use to access DMA
         * descriptors
         */
        drvmgr_translate_check(
                priv->dev,
                CPUMEM_TO_DMA,
                (void *)priv->bd_mem,
                (void **)&hwa,
                bdtabsize);

        GRSPW_DBG("GRSPW%d DMA descriptor table setup: (alloced:%p, bd_mem:%p, size: %d)\n",
                priv->index, priv->bd_mem_alloced, priv->bd_mem, bdtabsize + BDTAB_ALIGN - 1);
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                /* Do DMA Channel Init, other variables etc. are initialized
                 * when the respective DMA channel is opened.
                 *
                 * index & core are initialized by probe function.
                 */
                priv->dma[i].open = 0;
                priv->dma[i].rx_bds = (struct grspw_rxbd *)
                        (priv->bd_mem + i*BDTAB_SIZE*2);
                priv->dma[i].rx_bds_hwa = (struct grspw_rxbd *)
                        (hwa + BDTAB_SIZE*(2*i));
                priv->dma[i].tx_bds = (struct grspw_txbd *)
                        (priv->bd_mem + BDTAB_SIZE*(2*i+1));
                priv->dma[i].tx_bds_hwa = (struct grspw_txbd *)
                        (hwa + BDTAB_SIZE*(2*i+1));
                GRSPW_DBG("  DMA[%i]: RX %p - %p (%p - %p)   TX %p - %p (%p - %p)\n",
                        i,
                        priv->dma[i].rx_bds, (void *)priv->dma[i].rx_bds + BDTAB_SIZE - 1,
                        priv->dma[i].rx_bds_hwa, (void *)priv->dma[i].rx_bds_hwa + BDTAB_SIZE - 1,
                        priv->dma[i].tx_bds, (void *)priv->dma[i].tx_bds + BDTAB_SIZE - 1,
                        priv->dma[i].tx_bds_hwa, (void *)priv->dma[i].tx_bds_hwa + BDTAB_SIZE - 1);
        }

        /* Basic initialization of hardware, clear some registers but
         * keep Link/RMAP/Node-Address registers intact.
         */
        grspw_hw_stop(priv);

        /* Register Interrupt handler and enable IRQ at IRQ ctrl */
        drvmgr_interrupt_register(priv->dev, 0, priv->devname, grspw_isr, priv);

        /* Take the device */
        priv->open = 1;
out:
        rtems_semaphore_release(grspw_sem);
        return priv;
}

void grspw_close(void *d)
{
        struct grspw_priv *priv = d;
        int i;

        /* Take GRSPW lock - Wait until we get semaphore */
        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return;

        /* Stop Hardware from doing DMA, put HW into "startup-state",
         * Stop hardware from generating IRQ.
         */
        for (i=0; i<priv->hwsup.ndma_chans; i++)
                grspw_dma_close(&priv->dma[i]);
        grspw_hw_stop(priv);

        /* Mark not open */
        priv->open = 0;

        rtems_semaphore_release(grspw_sem);

        /* Check that all threads are out? */
}
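
/* Usage sketch (not part of the driver, never compiled): a typical
 * open/close sequence. grspw_open() returns an opaque device handle, or
 * NULL if the device is missing or already opened; the handle is passed
 * to all other calls.
 */
#if 0
void open_close_sketch(void)
{
        void *dev;
        struct grspw_hw_sup hw;

        dev = grspw_open(0);            /* first GRSPW core */
        if (dev == NULL)
                return;
        grspw_hw_support(dev, &hw);     /* query hardware features */
        printk("GRSPW0: %d DMA channel(s), %d port(s)\n",
               hw.ndma_chans, hw.nports);
        grspw_close(dev);
}
#endif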

void grspw_hw_support(void *d, struct grspw_hw_sup *hw)
{
        struct grspw_priv *priv = d;

        *hw = priv->hwsup;
}

void grspw_addr_ctrl(void *d, struct grspw_addr_config *cfg)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl, nodeaddr;
        IRQFLAGS_TYPE irqflags;
        int i;

        if (!priv || !cfg)
                return;

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);

        if (cfg->promiscuous != -1) {
                /* Set Configuration */
                ctrl = REG_READ(&regs->ctrl);
                if (cfg->promiscuous)
                        ctrl |= GRSPW_CTRL_PM;
                else
                        ctrl &= ~GRSPW_CTRL_PM;
                REG_WRITE(&regs->ctrl, ctrl);
                REG_WRITE(&regs->nodeaddr, (cfg->def_mask<<8) | cfg->def_addr);

                for (i=0; i<priv->hwsup.ndma_chans; i++) {
                        ctrl = REG_READ(&regs->dma[i].ctrl);
                        ctrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
                        if (cfg->dma_nacfg[i].node_en) {
                                ctrl |= GRSPW_DMACTRL_EN;
                                REG_WRITE(&regs->dma[i].addr,
                                          (cfg->dma_nacfg[i].node_addr & 0xff) |
                                          ((cfg->dma_nacfg[i].node_mask & 0xff)<<8));
                        } else {
                                ctrl &= ~GRSPW_DMACTRL_EN;
                        }
                        REG_WRITE(&regs->dma[i].ctrl, ctrl);
                }
        }

        /* Read Current Configuration */
        cfg->promiscuous = REG_READ(&regs->ctrl) & GRSPW_CTRL_PM;
        nodeaddr = REG_READ(&regs->nodeaddr);
        cfg->def_addr = (nodeaddr & GRSPW_DEF_ADDR) >> GRSPW_DEF_ADDR_BIT;
        cfg->def_mask = (nodeaddr & GRSPW_DEF_MASK) >> GRSPW_DEF_MASK_BIT;
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                cfg->dma_nacfg[i].node_en = REG_READ(&regs->dma[i].ctrl) &
                                                GRSPW_DMACTRL_EN;
                ctrl = REG_READ(&regs->dma[i].addr);
                cfg->dma_nacfg[i].node_addr = (ctrl & GRSPW_DMAADR_ADDR) >>
                                                GRSPW_DMAADR_ADDR_BIT;
                cfg->dma_nacfg[i].node_mask = (ctrl & GRSPW_DMAADR_MASK) >>
                                                GRSPW_DMAADR_MASK_BIT;
        }
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        for (; i<4; i++) {
                cfg->dma_nacfg[i].node_en = 0;
                cfg->dma_nacfg[i].node_addr = 0;
                cfg->dma_nacfg[i].node_mask = 0;
        }
}
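
/* Sketch (never compiled): enabling promiscuous mode while preserving the
 * address setup. A read-only probe (promiscuous = -1) fills cfg with the
 * current configuration, which is then written back with only the
 * promiscuous flag changed.
 */
#if 0
        struct grspw_addr_config cfg;

        cfg.promiscuous = -1;           /* read-only probe */
        grspw_addr_ctrl(dev, &cfg);
        cfg.promiscuous = 1;            /* accept all node addresses, */
        grspw_addr_ctrl(dev, &cfg);     /* keep addresses just read back */
#endif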

/* Return Current Status Register */
unsigned int grspw_link_status(void *d)
{
        struct grspw_priv *priv = d;

        return REG_READ(&priv->regs->status);
}

/* Clear Status Register bits */
void grspw_link_status_clr(void *d, unsigned int mask)
{
        struct grspw_priv *priv = d;

        REG_WRITE(&priv->regs->status, mask);
}

/* Return Current Link State */
spw_link_state_t grspw_link_state(void *d)
{
        struct grspw_priv *priv = d;
        unsigned int status = REG_READ(&priv->regs->status);

        return (status & GRSPW_STS_LS) >> GRSPW_STS_LS_BIT;
}

/* Enable Global IRQ only if some irq source is set */
static inline int grspw_is_irqsource_set(unsigned int ctrl, unsigned int icctrl)
{
        return (ctrl & GRSPW_CTRL_IRQSRC_MASK) ||
                (icctrl & GRSPW_ICCTRL_IRQSRC_MASK);
}


/* options and clkdiv [in/out]: set to -1 to only read current config */
void grspw_link_ctrl(void *d, int *options, int *clkdiv)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        /* Write? */
        if (clkdiv) {
                if (*clkdiv != -1)
                        REG_WRITE(&regs->clkdiv, *clkdiv & GRSPW_CLKDIV_MASK);
                *clkdiv = REG_READ(&regs->clkdiv) & GRSPW_CLKDIV_MASK;
        }
        if (options) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                if (*options != -1) {
                        ctrl = (ctrl & ~GRSPW_LINK_CFG) |
                                (*options & GRSPW_LINK_CFG);

                        /* Enable Global IRQ only if some irq source is set */
                        if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
                                ctrl |= GRSPW_CTRL_IE;
                        else
                                ctrl &= ~GRSPW_CTRL_IE;

                        REG_WRITE(&regs->ctrl, ctrl);
                        /* Store the link disable events for use in the
                         * ISR. The LINKOPTS_DIS_ON_* options are actually
                         * the corresponding bits in the status register,
                         * shifted by 16.
                         */
                        priv->dis_link_on_err = *options &
                                (LINKOPTS_MASK_DIS_ON | LINKOPTS_DIS_ONERR);
                }
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
                *options = (ctrl & GRSPW_LINK_CFG) | priv->dis_link_on_err;
        }
}
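
/* Sketch (never compiled): querying the current link configuration without
 * modifying it. Passing -1 for both values makes grspw_link_ctrl() read-only;
 * a write would instead OR in LINKOPTS_* flags from bsp/grspw_pkt.h (for
 * example LINKOPTS_DIS_ONERR, used above).
 */
#if 0
        int options = -1, clkdiv = -1;

        grspw_link_ctrl(dev, &options, &clkdiv);
        printk("link options: 0x%x, clkdiv: 0x%x\n", options, clkdiv);
#endif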

/* Generate Tick-In (increment Time Counter, Send Time Code) */
void grspw_tc_tx(void *d)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        IRQFLAGS_TYPE irqflags;

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_TI);
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
}

void grspw_tc_ctrl(void *d, int *options)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        if (options == NULL)
                return;

        /* Write? */
        if (*options != -1) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~(GRSPW_CTRL_TR|GRSPW_CTRL_TT|GRSPW_CTRL_TQ);
                ctrl |= (*options & 0xd) << GRSPW_CTRL_TQ_BIT;

                /* Enable Global IRQ only if some irq source is set */
                if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
                        ctrl |= GRSPW_CTRL_IE;
                else
                        ctrl &= ~GRSPW_CTRL_IE;

                REG_WRITE(&regs->ctrl, ctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        } else
                ctrl = REG_READ(&regs->ctrl);
        *options = (ctrl >> GRSPW_CTRL_TQ_BIT) & 0xd;
}

/* Assign ISR Function to TimeCode RX IRQ */
void grspw_tc_isr(void *d, void (*tcisr)(void *data, int tc), void *data)
{
        struct grspw_priv *priv = d;

        priv->tcisr_arg = data;
        priv->tcisr = tcisr;
}

/* Read/Write TCTRL and TIMECNT. Write if not -1, always read current value
 * TCTRL   = bits 7 and 6
 * TIMECNT = bits 5 to 0
 */
void grspw_tc_time(void *d, int *time)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        if (time == NULL)
                return;
        if (*time != -1)
                REG_WRITE(&regs->time, *time & (GRSPW_TIME_TCNT | GRSPW_TIME_CTRL));
        *time = REG_READ(&regs->time) & (GRSPW_TIME_TCNT | GRSPW_TIME_CTRL);
}
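
/* Sketch (never compiled) of time-code handling: a user ISR is registered
 * for Tick-Out, a Tick-In is generated, and the time counter is read back.
 * my_tc_isr and tc_sketch are hypothetical user functions; only driver
 * entry points defined in this file are called.
 */
#if 0
void my_tc_isr(void *data, int tc)
{
        printk("Time code received: %d\n", tc & 0x3f);
}

void tc_sketch(void *dev)
{
        int time = -1;

        grspw_tc_isr(dev, my_tc_isr, NULL); /* called on Tick-Out IRQ */
        grspw_tc_tx(dev);                   /* generate a Tick-In */
        grspw_tc_time(dev, &time);          /* -1: read-only */
        printk("TCTRL/TIMECNT: 0x%02x\n", time);
}
#endif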

/* Generate Tick-In for the given Interrupt-code and check for generation
 * error.
 *
 * Returns zero on success and non-zero on failure
 */
int grspw_ic_tickin(void *d, int ic)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        IRQFLAGS_TYPE irqflags;
        unsigned int icctrl, mask;

        /* Prepare before turning off IRQ */
        mask = 0x3f << GRSPW_ICCTRL_TXIRQ_BIT;
        ic = ((ic << GRSPW_ICCTRL_TXIRQ_BIT) & mask) |
             GRSPW_ICCTRL_II | GRSPW_ICCTRL_ID;

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        icctrl = REG_READ(&regs->icctrl);
        icctrl &= ~mask;
        icctrl |= ic;
        REG_WRITE(&regs->icctrl, icctrl); /* Generate SpW Interrupt Tick-In */
        /* the ID bit is valid after two clocks, so we do not need to wait here */
        icctrl = REG_READ(&regs->icctrl); /* Check SpW-Int generation error */
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);

        return icctrl & GRSPW_ICCTRL_ID;
}
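
/* Sketch (never compiled): generating an Interrupt-code Tick-In and
 * checking the returned error indication (non-zero ID bit means the
 * interrupt code could not be generated).
 */
#if 0
        if (grspw_ic_tickin(dev, 10) != 0)
                printk("GRSPW: interrupt-code generation error\n");
#endif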

#define ICOPTS_CTRL_MASK ICOPTS_EN_FLAGFILTER
#define ICOPTS_ICCTRL_MASK                                              \
        (ICOPTS_INTNUM | ICOPTS_EN_SPWIRQ_ON_EE  | ICOPTS_EN_SPWIRQ_ON_IA | \
         ICOPTS_EN_PRIO | ICOPTS_EN_TIMEOUTIRQ | ICOPTS_EN_ACKIRQ | \
         ICOPTS_EN_TICKOUTIRQ | ICOPTS_EN_RX | ICOPTS_EN_TX | \
         ICOPTS_BASEIRQ)

/* Control Interrupt-code settings of core
 * Write if not pointing to -1, always read current value
 *
 * TODO: A lot of code duplication with grspw_tc_ctrl
 */
void grspw_ic_ctrl(void *d, unsigned int *options)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        unsigned int icctrl;
        IRQFLAGS_TYPE irqflags;

        if (options == NULL)
                return;

        if (*options != -1) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);

                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~GRSPW_CTRL_TF; /* Depends on one-to-one relation between
                                         * irqopts bits and ctrl bits */
                ctrl |= (*options & ICOPTS_CTRL_MASK) <<
                        (GRSPW_CTRL_TF_BIT - 0);

                icctrl = REG_READ(&regs->icctrl);
                icctrl &= ~ICOPTS_ICCTRL_MASK; /* Depends on one-to-one relation between
                                                * irqopts bits and icctrl bits */
                icctrl |= *options & ICOPTS_ICCTRL_MASK;

                /* Enable Global IRQ only if some irq source is set */
                if (grspw_is_irqsource_set(ctrl, icctrl))
                        ctrl |= GRSPW_CTRL_IE;
                else
                        ctrl &= ~GRSPW_CTRL_IE;

                REG_WRITE(&regs->ctrl, ctrl);
                REG_WRITE(&regs->icctrl, icctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        }
        *options = ((REG_READ(&regs->ctrl) & ICOPTS_CTRL_MASK) |
                    (REG_READ(&regs->icctrl) & ICOPTS_ICCTRL_MASK));
}

void grspw_ic_config(void *d, int rw, struct spwpkt_ic_config *cfg)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        if (!cfg)
                return;

        if (rw & 1) {
                REG_WRITE(&regs->ictickomask, cfg->tomask);
                REG_WRITE(&regs->icaamask, cfg->aamask);
                REG_WRITE(&regs->icrlpresc, cfg->scaler);
                REG_WRITE(&regs->icrlisr, cfg->isr_reload);
                REG_WRITE(&regs->icrlintack, cfg->ack_reload);
        }
        if (rw & 2) {
                cfg->tomask = REG_READ(&regs->ictickomask);
                cfg->aamask = REG_READ(&regs->icaamask);
                cfg->scaler = REG_READ(&regs->icrlpresc);
                cfg->isr_reload = REG_READ(&regs->icrlisr);
                cfg->ack_reload = REG_READ(&regs->icrlintack);
        }
}
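
/* Sketch (never compiled): the rw argument above is a bit mask, bit 0
 * writes the five interrupt-code registers from *cfg and bit 1 reads them
 * back into *cfg. A read-modify-write therefore takes two calls.
 */
#if 0
        struct spwpkt_ic_config cfg;

        grspw_ic_config(dev, 2, &cfg);  /* read current setup */
        cfg.tomask = 0xffffffff;        /* adjust tick-out mask */
        grspw_ic_config(dev, 1, &cfg);  /* write it back */
#endif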

/* Read or Write Interrupt-code status registers */
void grspw_ic_sts(void *d, unsigned int *rxirq, unsigned int *rxack, unsigned int *intto)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        /* No locking needed since the status bits are clear-on-write */

        if (rxirq) {
                if (*rxirq != 0)
                        REG_WRITE(&regs->icrx, *rxirq);
                else
                        *rxirq = REG_READ(&regs->icrx);
        }

        if (rxack) {
                if (*rxack != 0)
                        REG_WRITE(&regs->icack, *rxack);
                else
                        *rxack = REG_READ(&regs->icack);
        }

        if (intto) {
                if (*intto != 0)
                        REG_WRITE(&regs->ictimeout, *intto);
                else
                        *intto = REG_READ(&regs->ictimeout);
        }
}
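
/* Sketch (never compiled): grspw_ic_sts() reads a status register when the
 * referenced value is zero and writes (clears) the given bits otherwise,
 * relying on the clear-on-write semantics noted above. NULL skips a
 * register entirely.
 */
#if 0
        unsigned int rxirq = 0, rxack = 0, intto = 0;

        grspw_ic_sts(dev, &rxirq, &rxack, &intto);      /* read all three */
        if (rxirq != 0)
                grspw_ic_sts(dev, &rxirq, NULL, NULL);  /* ack those bits */
#endif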

/* Assign handler function to Interrupt-code tick out IRQ */
void grspw_ic_isr(void *d, spwpkt_ic_isr_t handler, void *data)
{
        struct grspw_priv *priv = d;

        priv->icisr_arg = data;
        priv->icisr = handler;
}

/* Set (not -1) and/or read RMAP options. */
int grspw_rmap_ctrl(void *d, int *options, int *dstkey)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        if (dstkey) {
                if (*dstkey != -1)
                        REG_WRITE(&regs->destkey, *dstkey & GRSPW_DK_DESTKEY);
                *dstkey = REG_READ(&regs->destkey) & GRSPW_DK_DESTKEY;
        }
        if (options) {
                if (*options != -1) {
                        if ((*options & RMAPOPTS_EN_RMAP) && !priv->hwsup.rmap)
                                return -1;

                        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                        ctrl = REG_READ(&regs->ctrl);
                        ctrl &= ~(GRSPW_CTRL_RE|GRSPW_CTRL_RD);
                        ctrl |= (*options & 0x3) << GRSPW_CTRL_RE_BIT;
                        REG_WRITE(&regs->ctrl, ctrl);
                        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
                }
                *options = (REG_READ(&regs->ctrl) >> GRSPW_CTRL_RE_BIT) & 0x3;
        }

        return 0;
}
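
/* Sketch (never compiled): enabling the hardware RMAP target, guarded by
 * the support flag since grspw_rmap_ctrl() rejects RMAPOPTS_EN_RMAP on
 * cores without RMAP. The destination key 0x20 is an arbitrary example
 * value.
 */
#if 0
        char rmap, rmap_crc;
        int opts = RMAPOPTS_EN_RMAP, key = 0x20;

        grspw_rmap_support(dev, &rmap, &rmap_crc);
        if (rmap && grspw_rmap_ctrl(dev, &opts, &key) == 0)
                printk("RMAP enabled, destination key 0x%02x\n", key);
#endif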

void grspw_rmap_support(void *d, char *rmap, char *rmap_crc)
{
        struct grspw_priv *priv = d;

        if (rmap)
                *rmap = priv->hwsup.rmap;
        if (rmap_crc)
                *rmap_crc = priv->hwsup.rmap_crc;
}

/* Select port:
 *  -1     = The currently selected port is returned
 *   0     = Port 0
 *   1     = Port 1
 *  Others = Both Port 0 and Port 1
 */
int grspw_port_ctrl(void *d, int *port)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        if (port == NULL)
                return -1;

        if ((*port == 1) || (*port == 0)) {
                /* Select the port the user requested */
                if ((*port == 1) && (priv->hwsup.nports < 2))
                        return -1; /* Changing to Port 1, but only one port available */
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~(GRSPW_CTRL_NP | GRSPW_CTRL_PS);
                ctrl |= (*port & 1) << GRSPW_CTRL_PS_BIT;
                REG_WRITE(&regs->ctrl, ctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        } else if (*port > 1) {
                /* Select both ports */
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_NP);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        }

        /* Get current settings */
        ctrl = REG_READ(&regs->ctrl);
        if (ctrl & GRSPW_CTRL_NP) {
                /* Any port, selected by hardware */
                if (priv->hwsup.nports > 1)
                        *port = 3;
                else
                        *port = 0; /* Port0 the only port available */
        } else {
                *port = (ctrl & GRSPW_CTRL_PS) >> GRSPW_CTRL_PS_BIT;
        }

        return 0;
}
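
/* Sketch (never compiled) of port redundancy handling: a value > 1 selects
 * both ports (the hardware picks the active one) and the call always
 * returns the current selection in *port.
 */
#if 0
        int port = 3;   /* both ports, if available */

        if (grspw_port_count(dev) > 1 && grspw_port_ctrl(dev, &port) == 0)
                printk("active port: %d\n", grspw_port_active(dev));
#endif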

/* Returns the number of ports available in hardware */
int grspw_port_count(void *d)
{
        struct grspw_priv *priv = d;

        return priv->hwsup.nports;
}

/* Current active port: 0 or 1 */
int grspw_port_active(void *d)
{
        struct grspw_priv *priv = d;
        unsigned int status;

        status = REG_READ(&priv->regs->status);

        return (status & GRSPW_STS_AP) >> GRSPW_STS_AP_BIT;
}

void grspw_stats_read(void *d, struct grspw_core_stats *sts)
{
        struct grspw_priv *priv = d;

        if (sts == NULL)
                return;
        memcpy(sts, &priv->stats, sizeof(priv->stats));
}

void grspw_stats_clr(void *d)
{
        struct grspw_priv *priv = d;

        /* Clear most of the statistics */
        memset(&priv->stats, 0, sizeof(priv->stats));
}
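
/* Sketch (never compiled): snapshotting and then resetting the core-global
 * statistics counters.
 */
#if 0
        struct grspw_core_stats sts;

        grspw_stats_read(dev, &sts);
        grspw_stats_clr(dev);
#endif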

/*** DMA Interface ***/

/* Initialize the RX and TX Descriptor Ring, empty of packets */
STATIC void grspw_bdrings_init(struct grspw_dma_priv *dma)
{
        struct grspw_ring *r;
        int i;

        /* Empty BD rings */
        dma->rx_ring_head = dma->rx_ring_base;
        dma->rx_ring_tail = dma->rx_ring_base;
        dma->tx_ring_head = dma->tx_ring_base;
        dma->tx_ring_tail = dma->tx_ring_base;

        /* Init RX Descriptors */
        r = (struct grspw_ring *)dma->rx_ring_base;
        for (i=0; i<GRSPW_RXBD_NR; i++) {

                /* Init Ring Entry */
                r[i].next = &r[i+1];
                r[i].bd.rx = &dma->rx_bds[i];
                r[i].pkt = NULL;

                /* Init HW Descriptor */
                BD_WRITE(&r[i].bd.rx->ctrl, 0);
                BD_WRITE(&r[i].bd.rx->addr, 0);
        }
        r[GRSPW_RXBD_NR-1].next = &r[0];

        /* Init TX Descriptors */
        r = (struct grspw_ring *)dma->tx_ring_base;
        for (i=0; i<GRSPW_TXBD_NR; i++) {

                /* Init Ring Entry */
                r[i].next = &r[i+1];
                r[i].bd.tx = &dma->tx_bds[i];
                r[i].pkt = NULL;

                /* Init HW Descriptor */
                BD_WRITE(&r[i].bd.tx->ctrl, 0);
                BD_WRITE(&r[i].bd.tx->haddr, 0);
                BD_WRITE(&r[i].bd.tx->dlen, 0);
                BD_WRITE(&r[i].bd.tx->daddr, 0);
        }
        r[GRSPW_TXBD_NR-1].next = &r[0];
}

/* Try to populate the descriptor ring with as many packets from the READY
 * queue as possible. The packets assigned to a descriptor are put at the
 * end of the scheduled list.
 *
 * The number of Packets scheduled is returned.
 *
 *  - READY List -> RX-SCHED List
 *  - Descriptors are initialized and enabled for reception
 */
STATIC int grspw_rx_schedule_ready(struct grspw_dma_priv *dma)
{
        int cnt;
        unsigned int ctrl, dmactrl;
        void *hwaddr;
        struct grspw_rxring *curr_bd;
        struct grspw_pkt *curr_pkt, *last_pkt;
        struct grspw_list lst;
        IRQFLAGS_TYPE irqflags;

        /* Is Ready Q empty? */
        if (grspw_list_is_empty(&dma->ready))
                return 0;

        cnt = 0;
        lst.head = curr_pkt = dma->ready.head;
        curr_bd = dma->rx_ring_head;
        while (!curr_bd->pkt) {

                /* Assign Packet to descriptor */
                curr_bd->pkt = curr_pkt;

                /* Prepare descriptor address. */
                hwaddr = curr_pkt->data;
                if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
                        drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
                                         hwaddr, &hwaddr);
                        if (curr_pkt->data == hwaddr) /* translation needed? */
                                curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
                }
                BD_WRITE(&curr_bd->bd->addr, hwaddr);

                ctrl = GRSPW_RXBD_EN;
                if (curr_bd->next == dma->rx_ring_base) {
                        /* Wrap around (only needed when smaller descriptor
                         * table)
                         */
                        ctrl |= GRSPW_RXBD_WR;
                }

                /* Is this Packet going to be an interrupt Packet? */
                if ((--dma->rx_irq_en_cnt_curr) <= 0) {
                        if (dma->cfg.rx_irq_en_cnt == 0) {
                                /* IRQ is disabled. A big number to avoid
                                 * equal to zero too often
                                 */
                                dma->rx_irq_en_cnt_curr = 0x3fffffff;
                        } else {
                                dma->rx_irq_en_cnt_curr = dma->cfg.rx_irq_en_cnt;
                                ctrl |= GRSPW_RXBD_IE;
                        }
                }

                if (curr_pkt->flags & RXPKT_FLAG_IE)
                        ctrl |= GRSPW_RXBD_IE;

                /* Enable descriptor */
                BD_WRITE(&curr_bd->bd->ctrl, ctrl);

                last_pkt = curr_pkt;
                curr_bd = curr_bd->next;
                cnt++;

                /* Get Next Packet from Ready Queue */
                if (curr_pkt == dma->ready.tail) {
                        /* Handled all in ready queue. */
                        curr_pkt = NULL;
                        break;
                }
                curr_pkt = curr_pkt->next;
        }

        /* Have Packets been scheduled? */
        if (cnt > 0) {
                /* Prepare list for insertion/deletion */
                lst.tail = last_pkt;

                /* Remove scheduled packets from ready queue */
                grspw_list_remove_head_list(&dma->ready, &lst);
                dma->ready_cnt -= cnt;
                if (dma->stats.ready_cnt_min > dma->ready_cnt)
                        dma->stats.ready_cnt_min = dma->ready_cnt;

                /* Insert scheduled packets into scheduled queue */
                grspw_list_append_list(&dma->rx_sched, &lst);
                dma->rx_sched_cnt += cnt;
                if (dma->stats.rx_sched_cnt_max < dma->rx_sched_cnt)
                        dma->stats.rx_sched_cnt_max = dma->rx_sched_cnt;

                /* Update RX ring position */
                dma->rx_ring_head = curr_bd;

                /* Make hardware aware of the newly enabled descriptors
                 * We must protect from ISR which writes RI|TI
                 */
                SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
                dmactrl = REG_READ(&dma->regs->ctrl);
                dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
                dmactrl |= GRSPW_DMACTRL_RE | GRSPW_DMACTRL_RD;
                REG_WRITE(&dma->regs->ctrl, dmactrl);
                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
        }

        return cnt;
}

/* Scans the RX descriptor table for scheduled packets that have been
 * received, and moves these packets from the head of the scheduled queue
 * to the tail of the recv queue.
 *
 * Also, for all packets the status is updated.
 *
 *  - RX-SCHED List -> RECV List
 *
 * Return Value
 * Number of packets moved
 */
STATIC int grspw_rx_process_scheduled(struct grspw_dma_priv *dma)
{
        struct grspw_rxring *curr;
        struct grspw_pkt *last_pkt;
        int recv_pkt_cnt = 0;
        unsigned int ctrl;
        struct grspw_list lst;

        curr = dma->rx_ring_tail;

        /* Step into RX ring to find if packets have been scheduled for
         * reception.
         */
        if (!curr->pkt)
                return 0; /* No scheduled packets, thus no received, abort */

        /* There have been Packets scheduled ==> scheduled Packets may have
         * been received and need to be collected into the RECV List.
         *
         * A temporary list "lst" with all received packets is created.
         */
        lst.head = curr->pkt;

        /* Loop until first enabled "unreceived" SpW Packet is found.
         * An unused descriptor is indicated by an unassigned pkt field.
         */
        while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_RXBD_EN)) {
                /* Handle one received Packet */

                /* Remember last handled Packet so that insertion/removal from
                 * Packet lists go fast.
                 */
                last_pkt = curr->pkt;

                /* Get Length of Packet in bytes, and reception options */
                last_pkt->dlen = (ctrl & GRSPW_RXBD_LEN) >> GRSPW_RXBD_LEN_BIT;

                /* Set flags to indicate error(s) and CRC information,
                 * and Mark Received.
                 */
                last_pkt->flags = (last_pkt->flags & ~RXPKT_FLAG_OUTPUT_MASK) |
                                  ((ctrl >> 20) & RXPKT_FLAG_OUTPUT_MASK) |
                                  RXPKT_FLAG_RX;

                /* Packet was Truncated? */
                if (ctrl & GRSPW_RXBD_TR)
                        dma->stats.rx_err_trunk++;

                /* Error End-Of-Packet? */
                if (ctrl & GRSPW_RXBD_EP)
                        dma->stats.rx_err_endpkt++;
                curr->pkt = NULL; /* Mark descriptor unused */

                /* Increment */
                curr = curr->next;
                recv_pkt_cnt++;
        }

        /* 1. Remove all handled packets from scheduled queue
         * 2. Put all handled packets into recv queue
         */
        if (recv_pkt_cnt > 0) {

                /* Update Stats, Number of Received Packets */
                dma->stats.rx_pkts += recv_pkt_cnt;

                /* Save RX ring position */
                dma->rx_ring_tail = curr;

                /* Prepare list for insertion/deletion */
                lst.tail = last_pkt;

                /* Remove received Packets from RX-SCHED queue */
                grspw_list_remove_head_list(&dma->rx_sched, &lst);
                dma->rx_sched_cnt -= recv_pkt_cnt;
                if (dma->stats.rx_sched_cnt_min > dma->rx_sched_cnt)
                        dma->stats.rx_sched_cnt_min = dma->rx_sched_cnt;

                /* Insert received Packets into RECV queue */
                grspw_list_append_list(&dma->recv, &lst);
                dma->recv_cnt += recv_pkt_cnt;
                if (dma->stats.recv_cnt_max < dma->recv_cnt)
                        dma->stats.recv_cnt_max = dma->recv_cnt;
        }

        return recv_pkt_cnt;
}
1383
1384/* Try to populate descriptor ring with as many SEND packets as possible. The
1385 * packets assigned with to a descriptor are put in the end of
1386 * the scheduled list.
1387 *
1388 * The number of Packets scheduled is returned.
1389 *
1390 *  - SEND List -> TX-SCHED List
1391 *  - Descriptors are initialized and enabled for transmission
1392 */
1393STATIC int grspw_tx_schedule_send(struct grspw_dma_priv *dma)
1394{
1395        int cnt;
1396        unsigned int ctrl, dmactrl;
1397        void *hwaddr;
1398        struct grspw_txring *curr_bd;
1399        struct grspw_pkt *curr_pkt, *last_pkt;
1400        struct grspw_list lst;
1401        IRQFLAGS_TYPE irqflags;
1402
1403        /* Is Ready Q empty? */
1404        if (grspw_list_is_empty(&dma->send))
1405                return 0;
1406
1407        cnt = 0;
1408        lst.head = curr_pkt = dma->send.head;
1409        curr_bd = dma->tx_ring_head;
1410        while (!curr_bd->pkt) {
1411
1412                /* Assign Packet to descriptor */
1413                curr_bd->pkt = curr_pkt;
1414
1415                /* Set up header transmission */
1416                if (curr_pkt->hdr && curr_pkt->hlen) {
1417                        hwaddr = curr_pkt->hdr;
1418                        if (curr_pkt->flags & PKT_FLAG_TR_HDR) {
1419                                drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
1420                                                 hwaddr, &hwaddr);
1421                                /* translation needed? */
1422                                if (curr_pkt->hdr == hwaddr)
1423                                        curr_pkt->flags &= ~PKT_FLAG_TR_HDR;
1424                        }
1425                        BD_WRITE(&curr_bd->bd->haddr, hwaddr);
1426                        ctrl = GRSPW_TXBD_EN | curr_pkt->hlen;
1427                } else {
1428                        ctrl = GRSPW_TXBD_EN;
1429                }
1430                /* Enable IRQ generation and CRC options as specified
1431                 * by user.
1432                 */
1433                ctrl |= (curr_pkt->flags & TXPKT_FLAG_INPUT_MASK) << 8;
1434
1435                if (curr_bd->next == dma->tx_ring_base) {
1436                        /* Wrap around (only needed when the descriptor table is smaller than max) */
1437                        ctrl |= GRSPW_TXBD_WR;
1438                }
1439
1440                /* Is this Packet going to be an interrupt Packet? */
1441                if ((--dma->tx_irq_en_cnt_curr) <= 0) {
1442                        if (dma->cfg.tx_irq_en_cnt == 0) {
1443                                /* IRQ generation is disabled. Use a big
1444                                 * number so the counter rarely reaches zero
1445                                 */
1446                                dma->tx_irq_en_cnt_curr = 0x3fffffff;
1447                        } else {
1448                                dma->tx_irq_en_cnt_curr = dma->cfg.tx_irq_en_cnt;
1449                                ctrl |= GRSPW_TXBD_IE;
1450                        }
1451                }
1452
1453                /* Prepare descriptor address. Parts of CTRL are written to
1454                 * DLEN for debugging only (CTRL is cleared by HW).
1455                 */
1456                if (curr_pkt->data && curr_pkt->dlen) {
1457                        hwaddr = curr_pkt->data;
1458                        if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
1459                                drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
1460                                                 hwaddr, &hwaddr);
1461                                /* translation needed? */
1462                                if (curr_pkt->data == hwaddr)
1463                                        curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
1464                        }
1465                        BD_WRITE(&curr_bd->bd->daddr, hwaddr);
1466                        BD_WRITE(&curr_bd->bd->dlen, curr_pkt->dlen |
1467                                                     ((ctrl & 0x3f000) << 12));
1468                } else {
1469                        BD_WRITE(&curr_bd->bd->daddr, 0);
1470                        BD_WRITE(&curr_bd->bd->dlen, ((ctrl & 0x3f000) << 12));
1471                }
1472
1473                /* Enable descriptor */
1474                BD_WRITE(&curr_bd->bd->ctrl, ctrl);
1475
1476                last_pkt = curr_pkt;
1477                curr_bd = curr_bd->next;
1478                cnt++;
1479
1480                /* Get Next Packet from SEND Queue */
1481                if (curr_pkt == dma->send.tail) {
1482                        /* Handled all in SEND queue. */
1483                        curr_pkt = NULL;
1484                        break;
1485                }
1486                curr_pkt = curr_pkt->next;
1487        }
1488
1489        /* Have Packets been scheduled? */
1490        if (cnt > 0) {
1491                /* Prepare list for insertion/deletion */
1492                lst.tail = last_pkt;
1493
1494                /* Remove scheduled packets from SEND queue */
1495                grspw_list_remove_head_list(&dma->send, &lst);
1496                dma->send_cnt -= cnt;
1497                if (dma->stats.send_cnt_min > dma->send_cnt)
1498                        dma->stats.send_cnt_min = dma->send_cnt;
1499
1500                /* Insert scheduled packets into scheduled queue */
1501                grspw_list_append_list(&dma->tx_sched, &lst);
1502                dma->tx_sched_cnt += cnt;
1503                if (dma->stats.tx_sched_cnt_max < dma->tx_sched_cnt)
1504                        dma->stats.tx_sched_cnt_max = dma->tx_sched_cnt;
1505
1506                /* Update TX ring position */
1507                dma->tx_ring_head = curr_bd;
1508
1509                /* Make hardware aware of the newly enabled descriptors */
1510                SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
1511                dmactrl = REG_READ(&dma->regs->ctrl);
1512                dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
1513                dmactrl |= GRSPW_DMACTRL_TE;
1514                REG_WRITE(&dma->regs->ctrl, dmactrl);
1515                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
1516        }
1517        return cnt;
1518}
1519
1520/* Scans the TX descriptor table for transmitted packets, and moves these
1521 * packets from the head of the scheduled queue to the tail of the sent queue.
1522 *
1523 * Also, for all packets the status is updated.
1524 *
1525 *  - SCHED List -> SENT List
1526 *
1527 * Return Value
1528 * Number of packet moved
1529 */
1530STATIC int grspw_tx_process_scheduled(struct grspw_dma_priv *dma)
1531{
1532        struct grspw_txring *curr;
1533        struct grspw_pkt *last_pkt;
1534        int sent_pkt_cnt = 0;
1535        unsigned int ctrl;
1536        struct grspw_list lst;
1537
1538        curr = dma->tx_ring_tail;
1539
1540        /* Step into TX ring to find if packets have been scheduled for
1541         * transmission.
1542         */
1543        if (!curr->pkt)
1544                return 0; /* No scheduled packets, thus no sent, abort */
1545
1546        /* Packets have been scheduled ==> scheduled packets may have been
1547         * transmitted and need to be collected into the SENT List.
1548         *
1549         * A temporary list "lst" with all sent packets is created.
1550         */
1551        lst.head = curr->pkt;
1552
1553        /* Loop until first enabled "un-transmitted" SpW Packet is found.
1554         * An unused descriptor is indicated by an unassigned pkt field.
1555         */
1556        while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_TXBD_EN)) {
1557                /* Handle one sent Packet */
1558
1559                /* Remember last handled Packet so that insertion/removal from
1560                 * packet lists go fast.
1561                 */
1562                last_pkt = curr->pkt;
1563
1564                /* Set flags to indicate error(s) and Mark Sent.
1565                 */
1566                last_pkt->flags = (last_pkt->flags & ~TXPKT_FLAG_OUTPUT_MASK) |
1567                                        (ctrl & TXPKT_FLAG_LINKERR) |
1568                                        TXPKT_FLAG_TX;
1569
1570                /* Sent packet experienced link error? */
1571                if (ctrl & GRSPW_TXBD_LE)
1572                        dma->stats.tx_err_link++;
1573
1574                curr->pkt = NULL; /* Mark descriptor unused */
1575
1576                /* Increment */
1577                curr = curr->next;
1578                sent_pkt_cnt++;
1579        }
1580
1581        /* 1. Remove all handled packets from TX-SCHED queue
1582         * 2. Put all handled packets into SENT queue
1583         */
1584        if (sent_pkt_cnt > 0) {
1585                /* Update Stats, Number of Transmitted Packets */
1586                dma->stats.tx_pkts += sent_pkt_cnt;
1587
1588                /* Save TX ring position */
1589                dma->tx_ring_tail = curr;
1590
1591                /* Prepare list for insertion/deletion */
1592                lst.tail = last_pkt;
1593
1594                /* Remove sent packets from TX-SCHED queue */
1595                grspw_list_remove_head_list(&dma->tx_sched, &lst);
1596                dma->tx_sched_cnt -= sent_pkt_cnt;
1597                if (dma->stats.tx_sched_cnt_min > dma->tx_sched_cnt)
1598                        dma->stats.tx_sched_cnt_min = dma->tx_sched_cnt;
1599
1600                /* Insert sent packets into SENT queue */
1601                grspw_list_append_list(&dma->sent, &lst);
1602                dma->sent_cnt += sent_pkt_cnt;
1603                if (dma->stats.sent_cnt_max < dma->sent_cnt)
1604                        dma->stats.sent_cnt_max = dma->sent_cnt;
1605        }
1606
1607        return sent_pkt_cnt;
1608}
1609
1610void *grspw_dma_open(void *d, int chan_no)
1611{
1612        struct grspw_priv *priv = d;
1613        struct grspw_dma_priv *dma;
1614        int size;
1615
1616        if ((chan_no < 0) || (priv->hwsup.ndma_chans <= chan_no))
1617                return NULL;
1618
1619        dma = &priv->dma[chan_no];
1620
1621        /* Take GRSPW lock */
1622        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1623            != RTEMS_SUCCESSFUL)
1624                return NULL;
1625
1626        if (dma->open) {
1627                dma = NULL;
1628                goto out;
1629        }
1630
1631        dma->started = 0;
1632
1633        /* Set Default Configuration:
1634         *
1635         *  - Max RX Packet Length = DEFAULT_RXMAX
1636         *  - IRQ generation disabled
1637         *  - NO-SPILL mode enabled
1638         */
1639        dma->cfg.rxmaxlen = DEFAULT_RXMAX;
1640        dma->cfg.rx_irq_en_cnt = 0;
1641        dma->cfg.tx_irq_en_cnt = 0;
1642        dma->cfg.flags = DMAFLAG_NO_SPILL;
1643
1644        /* DMA Channel Semaphore created with count = 1 */
1645        if (rtems_semaphore_create(
1646            rtems_build_name('S', 'D', '0' + priv->index, '0' + chan_no), 1,
1647            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1648            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1649            RTEMS_NO_PRIORITY_CEILING, 0, &dma->sem_dma) != RTEMS_SUCCESSFUL) {
1650                dma = NULL;
1651                goto out;
1652        }
1653
1654        /* Allocate memory for the two descriptor rings */
1655        size = sizeof(struct grspw_ring) * (GRSPW_RXBD_NR + GRSPW_TXBD_NR);
1656        dma->rx_ring_base = (struct grspw_rxring *)malloc(size);
1657        dma->tx_ring_base = (struct grspw_txring *)&dma->rx_ring_base[GRSPW_RXBD_NR];
1658        if (dma->rx_ring_base == NULL) {
1659                dma = NULL;
1660                goto out;
1661        }
1662
1663        /* Create DMA RX and TX Channel semaphores with count = 0 */
1664        if (rtems_semaphore_create(
1665            rtems_build_name('S', 'R', '0' + priv->index, '0' + chan_no), 0,
1666            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1667            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1668            RTEMS_NO_PRIORITY_CEILING, 0, &dma->rx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
1669                dma = NULL;
1670                goto out;
1671        }
1672        if (rtems_semaphore_create(
1673            rtems_build_name('S', 'T', '0' + priv->index, '0' + chan_no), 0,
1674            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1675            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1676            RTEMS_NO_PRIORITY_CEILING, 0, &dma->tx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
1677                dma = NULL;
1678                goto out;
1679        }
1680
1681        /* Reset software structures */
1682        grspw_dma_reset(dma);
1683
1684        /* Take the device */
1685        dma->open = 1;
1686out:
1687        /* Return GRSPW Lock */
1688        rtems_semaphore_release(grspw_sem);
1689
1690        return dma;
1691}
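
/* Usage sketch (not part of the driver): opening DMA channel 0 of the first
 * GRSPW device. The device handle is assumed to come from the device-level
 * open function declared in grspw_pkt.h; grspw_open() below is that
 * assumption.
 *
 *   void *dev, *dma;
 *
 *   if (grspw_dev_count() < 1)
 *           return;               // no GRSPW hardware present
 *   dev = grspw_open(0);
 *   if (dev == NULL)
 *           return;               // device busy or does not exist
 *   dma = grspw_dma_open(dev, 0);
 *   if (dma == NULL)
 *           return;               // channel busy, bad index or no memory
 */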
1692
1693/* Initialize Software Structures:
1694 *  - Clear all Queues
1695 *  - init BD ring
1696 *  - init IRQ counter
1697 *  - clear statistics counters
1698 *  - init wait structures and semaphores
1699 */
1700STATIC void grspw_dma_reset(struct grspw_dma_priv *dma)
1701{
1702        /* Empty RX and TX queues */
1703        grspw_list_clr(&dma->ready);
1704        grspw_list_clr(&dma->rx_sched);
1705        grspw_list_clr(&dma->recv);
1706        grspw_list_clr(&dma->send);
1707        grspw_list_clr(&dma->tx_sched);
1708        grspw_list_clr(&dma->sent);
1709        dma->ready_cnt = 0;
1710        dma->rx_sched_cnt = 0;
1711        dma->recv_cnt = 0;
1712        dma->send_cnt = 0;
1713        dma->tx_sched_cnt = 0;
1714        dma->sent_cnt = 0;
1715
1716        dma->rx_irq_en_cnt_curr = 0;
1717        dma->tx_irq_en_cnt_curr = 0;
1718
1719        grspw_bdrings_init(dma);
1720
1721        dma->rx_wait.waiting = 0;
1722        dma->tx_wait.waiting = 0;
1723
1724        grspw_dma_stats_clr(dma);
1725}
1726
1727void grspw_dma_close(void *c)
1728{
1729        struct grspw_dma_priv *dma = c;
1730
1731        if (!dma->open)
1732                return;
1733
1734        /* Take device lock - Wait until we get semaphore */
1735        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1736            != RTEMS_SUCCESSFUL)
1737                return;
1738
1739        grspw_dma_stop_locked(dma);
1740
1741        /* Free resources */
1742        rtems_semaphore_delete(dma->rx_wait.sem_wait);
1743        rtems_semaphore_delete(dma->tx_wait.sem_wait);
1744        rtems_semaphore_delete(dma->sem_dma); /* Release and delete lock */
1745
1746        /* Free memory */
1747        if (dma->rx_ring_base)
1748                free(dma->rx_ring_base);
1749        dma->rx_ring_base = NULL;
1750        dma->tx_ring_base = NULL;
1751
1752        dma->open = 0;
1753}
1754
1755/* Schedule List of packets for transmission at some point in
1756 * future.
1757 *
1758 * 1. Move transmitted packets to SENT List (SCHED->SENT)
1759 * 2. Add the requested packets to the SEND List (USER->SEND)
1760 * 3. Schedule as many packets as possible (SEND->SCHED)
1761 */
1762int grspw_dma_tx_send(void *c, int opts, struct grspw_list *pkts, int count)
1763{
1764        struct grspw_dma_priv *dma = c;
1765        int ret;
1766
1767        /* Take DMA channel lock */
1768        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1769            != RTEMS_SUCCESSFUL)
1770                return -1;
1771
1772        if (dma->started == 0) {
1773                ret = 1; /* signal DMA has been stopped */
1774                goto out;
1775        }
1776        ret = 0;
1777
1778        /* 1. Move transmitted packets to SENT List (SCHED->SENT) */
1779        if ((opts & 1) == 0)
1780                grspw_tx_process_scheduled(dma);
1781
1782        /* 2. Add the requested packets to the SEND List (USER->SEND) */
1783        if (pkts) {
1784                grspw_list_append_list(&dma->send, pkts);
1785                dma->send_cnt += count;
1786                if (dma->stats.send_cnt_max < dma->send_cnt)
1787                        dma->stats.send_cnt_max = dma->send_cnt;
1788        }
1789
1790        /* 3. Schedule as many packets as possible (SEND->SCHED) */
1791        if ((opts & 2) == 0)
1792                grspw_tx_schedule_send(dma);
1793
1794out:
1795        /* Unlock DMA channel */
1796        rtems_semaphore_release(dma->sem_dma);
1797
1798        return ret;
1799}
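
/* Usage sketch (not part of the driver): queuing one packet for transmission
 * on an open and started DMA channel "dma". pkt_hdr/pkt_data are hypothetical
 * caller-owned buffers. opts = 0 performs both step 1 and step 3 above; bit 0
 * set skips SCHED->SENT processing, bit 1 set skips SEND->SCHED scheduling.
 *
 *   struct grspw_pkt pkt;
 *   struct grspw_list lst;
 *
 *   memset(&pkt, 0, sizeof(pkt));
 *   pkt.hdr = pkt_hdr;     // optional header buffer (NULL for none)
 *   pkt.hlen = 4;
 *   pkt.data = pkt_data;   // payload buffer
 *   pkt.dlen = 128;
 *   lst.head = lst.tail = &pkt;
 *   if (grspw_dma_tx_send(dma, 0, &lst, 1) != 0)
 *           ...;           // -1 on lock error, 1 if channel is stopped
 */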
1800
1801int grspw_dma_tx_reclaim(void *c, int opts, struct grspw_list *pkts, int *count)
1802{
1803        struct grspw_dma_priv *dma = c;
1804        struct grspw_pkt *pkt, *lastpkt;
1805        int cnt, started;
1806
1807        /* Take DMA channel lock */
1808        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1809            != RTEMS_SUCCESSFUL)
1810                return -1;
1811
1812        /* 1. Move transmitted packets to SENT List (SCHED->SENT) */
1813        started = dma->started;
1814        if ((started > 0) && ((opts & 1) == 0))
1815                grspw_tx_process_scheduled(dma);
1816
1817        /* 2. Move all/count SENT packets to the caller's list (SENT->USER) */
1818        if (pkts) {
1819                if ((count == NULL) || (*count == -1) ||
1820                    (*count >= dma->sent_cnt)) {
1821                        /* Move all SENT Packets */
1822                        *pkts = dma->sent;
1823                        grspw_list_clr(&dma->sent);
1824                        if (count)
1825                                *count = dma->sent_cnt;
1826                        dma->sent_cnt = 0;
1827                } else {
1828                        /* Move a number of SENT Packets */
1829                        pkts->head = pkt = lastpkt = dma->sent.head;
1830                        cnt = 0;
1831                        while (cnt < *count) {
1832                                lastpkt = pkt;
1833                                pkt = pkt->next;
1834                                cnt++;
1835                        }
1836                        if (cnt > 0) {
1837                                pkts->tail = lastpkt;
1838                                grspw_list_remove_head_list(&dma->sent, pkts);
1839                                dma->sent_cnt -= cnt;
1840                        } else {
1841                                grspw_list_clr(pkts);
1842                        }
1843                }
1844        } else if (count) {
1845                *count = 0;
1846        }
1847
1848        /* 3. Schedule as many packets as possible (SEND->SCHED) */
1849        if ((started > 0) && ((opts & 2) == 0))
1850                grspw_tx_schedule_send(dma);
1851
1852        /* Unlock DMA channel */
1853        rtems_semaphore_release(dma->sem_dma);
1854
1855        return (~started) & 1; /* signal DMA has been stopped */
1856}
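
/* Usage sketch (not part of the driver): reclaiming transmitted buffers. With
 * count == NULL or *count == -1 the whole SENT queue is moved to the caller.
 *
 *   struct grspw_list done;
 *   struct grspw_pkt *pkt;
 *   int cnt = -1;
 *
 *   if (grspw_dma_tx_reclaim(dma, 0, &done, &cnt) < 0)
 *           return;
 *   for (pkt = done.head; cnt > 0; cnt--, pkt = pkt->next) {
 *           if ((pkt->flags & TXPKT_FLAG_TX) == 0)
 *                   ...;   // never transmitted (e.g. channel was stopped)
 *           // the buffer is now owned by the caller again
 *   }
 */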
1857
1858void grspw_dma_tx_count(void *c, int *send, int *sched, int *sent)
1859{
1860        struct grspw_dma_priv *dma = c;
1861
1862        if (send)
1863                *send = dma->send_cnt;
1864        if (sched)
1865                *sched = dma->tx_sched_cnt;
1866        if (sent)
1867                *sent = dma->sent_cnt;
1868}
1869
1870static inline int grspw_tx_wait_eval(struct grspw_dma_priv *dma)
1871{
1872        int send_val, sent_val;
1873
1874        if (dma->tx_wait.send_cnt >= (dma->send_cnt + dma->tx_sched_cnt))
1875                send_val = 1;
1876        else
1877                send_val = 0;
1878
1879        if (dma->tx_wait.sent_cnt <= dma->sent_cnt)
1880                sent_val = 1;
1881        else
1882                sent_val = 0;
1883
1884        /* AND or OR ? */
1885        if (dma->tx_wait.op == 0)
1886                return send_val & sent_val; /* AND */
1887        else
1888                return send_val | sent_val; /* OR */
1889}
1890
1891/* Block until the wait condition is met: send_cnt or fewer packets are
1892 * queued in the "Send and Scheduled" queues, op (AND or OR), sent_cnt or
1893 * more packets have been sent (Sent queue).
1894 * If a link error occurs and Stop on Link error is configured, this function
1895 * also returns to the caller.
1896 */
1897int grspw_dma_tx_wait(void *c, int send_cnt, int op, int sent_cnt, int timeout)
1898{
1899        struct grspw_dma_priv *dma = c;
1900        int ret, rc;
1901
1902        if (timeout == 0)
1903                timeout = RTEMS_NO_TIMEOUT;
1904
1905check_condition:
1906
1907        /* Take DMA channel lock */
1908        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1909            != RTEMS_SUCCESSFUL)
1910                return -1;
1911
1912        /* Check that no other thread is waiting; this driver supports only
1913         * one waiter at a time.
1914         */
1915        if (dma->tx_wait.waiting) {
1916                ret = -1;
1917                goto out;
1918        }
1919
1920        /* Stop if link error or similar, abort */
1921        if (dma->started == 0) {
1922                ret = 1;
1923                goto out;
1924        }
1925
1926        /* Set up Condition */
1927        dma->tx_wait.send_cnt = send_cnt;
1928        dma->tx_wait.op = op;
1929        dma->tx_wait.sent_cnt = sent_cnt;
1930
1931        if (grspw_tx_wait_eval(dma) == 0) {
1932                /* Prepare Wait */
1933                dma->tx_wait.waiting = 1;
1934
1935                /* Release DMA channel lock */
1936                rtems_semaphore_release(dma->sem_dma);
1937
1938                /* Try to take the Wait lock; if this fails the link may have
1939                 * gone down or the user stopped this DMA channel
1940                 */
1941                rc = rtems_semaphore_obtain(dma->tx_wait.sem_wait, RTEMS_WAIT,
1942                                                timeout);
1943                if (rc == RTEMS_TIMEOUT) {
1944                        dma->tx_wait.waiting = 0;
1945                        return 2;
1946                } else if (rc == RTEMS_UNSATISFIED ||
1947                           rc == RTEMS_OBJECT_WAS_DELETED) {
1948                        dma->tx_wait.waiting = 0;
1949                        return 1; /* sem was flushed/deleted, means DMA stop */
1950                } else if (rc != RTEMS_SUCCESSFUL)
1951                        return -1;
1952
1953                /* Check condition once more */
1954                goto check_condition;
1955        } else {
1956                /* No Wait needed */
1957                dma->tx_wait.waiting = 0;
1958        }
1959
1960        ret = 0;
1961out:
1962        /* Unlock DMA channel */
1963        rtems_semaphore_release(dma->sem_dma);
1964
1965        return ret;
1966}
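
/* Usage sketch (not part of the driver): wait until the TX queues are fully
 * drained. With send_cnt = 0, op = 0 (AND) and sent_cnt = 0 the condition
 * becomes "no packets left in SEND+SCHED"; timeout = 0 waits forever.
 *
 *   switch (grspw_dma_tx_wait(dma, 0, 0, 0, 0)) {
 *   case 0:  break;        // condition met, all packets transmitted
 *   case 1:  ...; break;   // DMA channel stopped (e.g. link error)
 *   case 2:  ...; break;   // timeout (cannot happen with timeout = 0)
 *   default: ...; break;   // error, e.g. another task is already waiting
 *   }
 */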
1967
1968int grspw_dma_rx_recv(void *c, int opts, struct grspw_list *pkts, int *count)
1969{
1970        struct grspw_dma_priv *dma = c;
1971        struct grspw_pkt *pkt, *lastpkt;
1972        int cnt, started;
1973
1974        /* Take DMA channel lock */
1975        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1976            != RTEMS_SUCCESSFUL)
1977                return -1;
1978
1979        /* 1. Move Scheduled packets to RECV List (SCHED->RECV) */
1980        started = dma->started;
1981        if (((opts & 1) == 0) && (started > 0))
1982                grspw_rx_process_scheduled(dma);
1983
1984        /* 2. Move all/count RECV packets to the caller's list (RECV->USER) */
1985        if (pkts) {
1986                if ((count == NULL) || (*count == -1) ||
1987                    (*count >= dma->recv_cnt)) {
1988                        /* Move all Received packets */
1989                        *pkts = dma->recv;
1990                        grspw_list_clr(&dma->recv);
1991                        if (count)
1992                                *count = dma->recv_cnt;
1993                        dma->recv_cnt = 0;
1994                } else {
1995                        /* Move a number of RECV Packets */
1996                        pkts->head = pkt = lastpkt = dma->recv.head;
1997                        cnt = 0;
1998                        while (cnt < *count) {
1999                                lastpkt = pkt;
2000                                pkt = pkt->next;
2001                                cnt++;
2002                        }
2003                        if (cnt > 0) {
2004                                pkts->tail = lastpkt;
2005                                grspw_list_remove_head_list(&dma->recv, pkts);
2006                                dma->recv_cnt -= cnt;
2007                        } else {
2008                                grspw_list_clr(pkts);
2009                        }
2010                }
2011        } else if (count) {
2012                *count = 0;
2013        }
2014
2015        /* 3. Schedule as many free packet buffers as possible (READY->SCHED) */
2016        if (((opts & 2) == 0) && (started > 0))
2017                grspw_rx_schedule_ready(dma);
2018
2019        /* Unlock DMA channel */
2020        rtems_semaphore_release(dma->sem_dma);
2021
2022        return (~started) & 1;
2023}
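
/* Usage sketch (not part of the driver): fetching all received packets. Per
 * the RX descriptor handling above, the driver fills in each packet's dlen
 * and flags; handle_packet() is a hypothetical application function.
 *
 *   struct grspw_list recvd;
 *   struct grspw_pkt *pkt;
 *   int cnt = -1;          // -1: take everything available
 *   int rc;
 *
 *   rc = grspw_dma_rx_recv(dma, 0, &recvd, &cnt);
 *   if (rc < 0)
 *           return;        // rc == 1 means stopped, buffers still returned
 *   for (pkt = recvd.head; cnt > 0; cnt--, pkt = pkt->next)
 *           handle_packet(pkt);
 */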
2024
2025int grspw_dma_rx_prepare(void *c, int opts, struct grspw_list *pkts, int count)
2026{
2027        struct grspw_dma_priv *dma = c;
2028        int ret;
2029
2030        /* Take DMA channel lock */
2031        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2032            != RTEMS_SUCCESSFUL)
2033                return -1;
2034
2035        if (dma->started == 0) {
2036                ret = 1;
2037                goto out;
2038        }
2039
2040        /* 1. Move Received packets to RECV List (SCHED->RECV) */
2041        if ((opts & 1) == 0)
2042                grspw_rx_process_scheduled(dma);
2043
2044        /* 2. Add the "free/ready" packet buffers to the READY List (USER->READY) */
2045        if (pkts && (count > 0)) {
2046                grspw_list_append_list(&dma->ready, pkts);
2047                dma->ready_cnt += count;
2048                if (dma->stats.ready_cnt_max < dma->ready_cnt)
2049                        dma->stats.ready_cnt_max = dma->ready_cnt;
2050        }
2051
2052        /* 3. Schedule as many packets as possible (READY->SCHED) */
2053        if ((opts & 2) == 0)
2054                grspw_rx_schedule_ready(dma);
2055
2056        ret = 0;
2057out:
2058        /* Unlock DMA channel */
2059        rtems_semaphore_release(dma->sem_dma);
2060
2061        return ret;
2062}
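
/* Usage sketch (not part of the driver): handing a pool of empty packet
 * buffers to the driver for reception. RXBUF_SIZE and the pool size are
 * hypothetical; buffers must remain valid until grspw_dma_rx_recv() returns
 * them.
 *
 *   static struct grspw_pkt rx_pool[16];
 *   static char rx_bufs[16][RXBUF_SIZE]; // RXBUF_SIZE >= cfg.rxmaxlen
 *   struct grspw_list lst;
 *   int i;
 *
 *   for (i = 0; i < 16; i++) {
 *           memset(&rx_pool[i], 0, sizeof(rx_pool[i]));
 *           rx_pool[i].data = rx_bufs[i];
 *           rx_pool[i].next = (i == 15) ? NULL : &rx_pool[i + 1];
 *   }
 *   lst.head = &rx_pool[0];
 *   lst.tail = &rx_pool[15];
 *   grspw_dma_rx_prepare(dma, 0, &lst, 16);
 */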
2063
2064void grspw_dma_rx_count(void *c, int *ready, int *sched, int *recv)
2065{
2066        struct grspw_dma_priv *dma = c;
2067
2068        if (ready)
2069                *ready = dma->ready_cnt;
2070        if (sched)
2071                *sched = dma->rx_sched_cnt;
2072        if (recv)
2073                *recv = dma->recv_cnt;
2074}
2075
2076static inline int grspw_rx_wait_eval(struct grspw_dma_priv *dma)
2077{
2078        int ready_val, recv_val;
2079
2080        if (dma->rx_wait.ready_cnt >= (dma->ready_cnt + dma->rx_sched_cnt))
2081                ready_val = 1;
2082        else
2083                ready_val = 0;
2084
2085        if (dma->rx_wait.recv_cnt <= dma->recv_cnt)
2086                recv_val = 1;
2087        else
2088                recv_val = 0;
2089
2090        /* AND or OR ? */
2091        if (dma->rx_wait.op == 0)
2092                return ready_val & recv_val; /* AND */
2093        else
2094                return ready_val | recv_val; /* OR */
2095}
2096
2097/* Block until the wait condition is met: recv_cnt or more packets are queued
2098 * in the RECV queue, op (AND or OR), ready_cnt or fewer packet buffers are
2099 * available in the "READY and Scheduled" queues.
2100 * If a link error occurs and Stop on Link error is configured, this function
2101 * also returns to the caller, however with an error.
2102 */
2103int grspw_dma_rx_wait(void *c, int recv_cnt, int op, int ready_cnt, int timeout)
2104{
2105        struct grspw_dma_priv *dma = c;
2106        int ret, rc;
2107
2108        if (timeout == 0)
2109                timeout = RTEMS_NO_TIMEOUT;
2110
2111check_condition:
2112
2113        /* Take DMA channel lock */
2114        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2115            != RTEMS_SUCCESSFUL)
2116                return -1;
2117
2118        /* Check that no other thread is waiting; this driver supports only
2119         * one waiter at a time.
2120         */
2121        if (dma->rx_wait.waiting) {
2122                ret = -1;
2123                goto out;
2124        }
2125
2126        /* Stop if link error or similar (DMA stopped) */
2127        if (dma->started == 0) {
2128                ret = 1;
2129                goto out;
2130        }
2131
2132        /* Set up Condition */
2133        dma->rx_wait.recv_cnt = recv_cnt;
2134        dma->rx_wait.op = op;
2135        dma->rx_wait.ready_cnt = ready_cnt;
2136
2137        if (grspw_rx_wait_eval(dma) == 0) {
2138                /* Prepare Wait */
2139                dma->rx_wait.waiting = 1;
2140
2141                /* Release channel lock */
2142                rtems_semaphore_release(dma->sem_dma);
2143
2144                /* Try to take the Wait lock; if this fails the link may have
2145                 * gone down or the user stopped this DMA channel
2146                 */
2147                rc = rtems_semaphore_obtain(dma->rx_wait.sem_wait, RTEMS_WAIT,
2148                                           timeout);
2149                if (rc == RTEMS_TIMEOUT) {
2150                        dma->rx_wait.waiting = 0;
2151                        return 2;
2152                } else if (rc == RTEMS_UNSATISFIED ||
2153                           rc == RTEMS_OBJECT_WAS_DELETED) {
2154                        dma->rx_wait.waiting = 0;
2155                        return 1; /* sem was flushed/deleted, means DMA stop */
2156                } else if (rc != RTEMS_SUCCESSFUL)
2157                        return -1;
2158
2159                /* Check condition once more */
2160                goto check_condition;
2161        } else {
2162                /* No Wait needed */
2163                dma->rx_wait.waiting = 0;
2164        }
2165        ret = 0;
2166
2167out:
2168        /* Unlock DMA channel */
2169        rtems_semaphore_release(dma->sem_dma);
2170
2171        return ret;
2172}
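
/* Usage sketch (not part of the driver): block until at least 8 packets have
 * been received OR at most 4 prepared buffers remain in READY+SCHED. op = 1
 * selects OR (op = 0 would demand both conditions); the timeout is given in
 * clock ticks.
 *
 *   int rc = grspw_dma_rx_wait(dma, 8, 1, 4, 1000);
 *   if (rc == 0)
 *           ...;   // condition met, fetch with grspw_dma_rx_recv()
 *   else if (rc == 2)
 *           ...;   // timed out
 *   else if (rc == 1)
 *           ...;   // DMA channel stopped
 */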
2173
2174int grspw_dma_config(void *c, struct grspw_dma_config *cfg)
2175{
2176        struct grspw_dma_priv *dma = c;
2177
2178        if (dma->started || !cfg)
2179                return -1;
2180
2181        if (cfg->flags & ~DMAFLAG_MASK)
2182                return -1;
2183
2184        /* Update Configuration */
2185        memcpy(&dma->cfg, cfg, sizeof(*cfg));
2186
2187        return 0;
2188}
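
/* Usage sketch (not part of the driver): the channel must be stopped while
 * configuring. The fields below are the same ones grspw_dma_open() gives
 * defaults for above.
 *
 *   struct grspw_dma_config cfg;
 *
 *   grspw_dma_config_read(dma, &cfg);  // start from the current settings
 *   cfg.rxmaxlen = 1024;               // max RX packet length in bytes
 *   cfg.rx_irq_en_cnt = 1;             // IRQ on every received packet
 *   cfg.tx_irq_en_cnt = 1;             // IRQ on every transmitted packet
 *   cfg.flags = DMAFLAG_NO_SPILL;
 *   if (grspw_dma_config(dma, &cfg) != 0)
 *           ...;                       // channel started or invalid flags
 */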
2189
2190void grspw_dma_config_read(void *c, struct grspw_dma_config *cfg)
2191{
2192        struct grspw_dma_priv *dma = c;
2193
2194        /* Copy Current Configuration */
2195        memcpy(cfg, &dma->cfg, sizeof(*cfg));
2196}
2197
2198void grspw_dma_stats_read(void *c, struct grspw_dma_stats *sts)
2199{
2200        struct grspw_dma_priv *dma = c;
2201
2202        memcpy(sts, &dma->stats, sizeof(dma->stats));
2203}
2204
2205void grspw_dma_stats_clr(void *c)
2206{
2207        struct grspw_dma_priv *dma = c;
2208
2209        /* Clear most of the statistics */     
2210        memset(&dma->stats, 0, sizeof(dma->stats));
2211
2212        /* Init proper default values so that comparisons will work the
2213         * first time.
2214         */
2215        dma->stats.send_cnt_min = 0x3fffffff;
2216        dma->stats.tx_sched_cnt_min = 0x3fffffff;
2217        dma->stats.ready_cnt_min = 0x3fffffff;
2218        dma->stats.rx_sched_cnt_min = 0x3fffffff;
2219}
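
/* Usage sketch (not part of the driver): sampling and resetting the DMA
 * statistics. rx_pkts, tx_pkts and irq_cnt are counters updated by the
 * RX/TX processing and work functions in this file.
 *
 *   struct grspw_dma_stats sts;
 *
 *   grspw_dma_stats_read(dma, &sts);
 *   printf("RX %d pkts, TX %d pkts, %d IRQs\n",
 *          sts.rx_pkts, sts.tx_pkts, sts.irq_cnt);
 *   grspw_dma_stats_clr(dma);          // restart min/max tracking
 */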
2220
2221int grspw_dma_start(void *c)
2222{
2223        struct grspw_dma_priv *dma = c;
2224        struct grspw_dma_regs *dregs = dma->regs;
2225        unsigned int ctrl;
2226        IRQFLAGS_TYPE irqflags;
2227
2228        if (dma->started)
2229                return 0;
2230
2231        /* Initialize Software Structures:
2232         *  - Clear all Queues
2233         *  - init BD ring
2234         *  - init IRQ counter
2235         *  - clear statistics counters
2236         *  - init wait structures and semaphores
2237         */
2238        grspw_dma_reset(dma);
2239
2240        /* RX (RD bit) and TX are not enabled until the user fills the SEND
2241         * and READY queues with SpaceWire Packet buffers. So we do not have
2242         * to worry about IRQs for this channel just yet. However, other DMA
2243         * channels may be active.
2244         *
2245         * Some functionality that is not changed during started mode is set up
2246         * once and for all here:
2247         *
2248         *   - RX MAX Packet length
2249         *   - TX Descriptor base address to first BD in TX ring (not enabled)
2250         *   - RX Descriptor base address to first BD in RX ring (not enabled)
2251         *   - IRQs (TX DMA, RX DMA, DMA ERROR)
2252         *   - Strip PID
2253         *   - Strip Address
2254         *   - No Spill
2255         *   - Receiver Enable
2256         *   - disable on link error (LE)
2257         *
2258         * Note that the address register and the address enable bit in DMACTRL
2259         * register must be left untouched, they are configured on a GRSPW
2260         * core level.
2261         *
2262         * Note that the receiver is enabled here, but since descriptors are
2263         * not enabled the GRSPW core may stop/pause RX (if NS bit set) until
2264         * descriptors are enabled or it may ignore RX packets (NS=0) until
2265         * descriptors are enabled (writing RD bit).
2266         */
2267        REG_WRITE(&dregs->txdesc, dma->tx_bds_hwa);
2268        REG_WRITE(&dregs->rxdesc, dma->rx_bds_hwa);
2269
2270        /* MAX Packet length */
2271        REG_WRITE(&dma->regs->rxmax, dma->cfg.rxmaxlen);
2272
2273        ctrl =  GRSPW_DMACTRL_AI | GRSPW_DMACTRL_PS | GRSPW_DMACTRL_PR |
2274                GRSPW_DMACTRL_TA | GRSPW_DMACTRL_RA | GRSPW_DMACTRL_RE |
2275                (dma->cfg.flags & DMAFLAG_MASK) << GRSPW_DMACTRL_NS_BIT;
2276        if (dma->core->dis_link_on_err & LINKOPTS_DIS_ONERR)
2277                ctrl |= GRSPW_DMACTRL_LE;
2278        if (dma->cfg.rx_irq_en_cnt != 0)
2279                ctrl |= GRSPW_DMACTRL_RI;
2280        if (dma->cfg.tx_irq_en_cnt != 0)
2281                ctrl |= GRSPW_DMACTRL_TI;
2282        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
2283        ctrl |= REG_READ(&dma->regs->ctrl) & GRSPW_DMACTRL_EN;
2284        REG_WRITE(&dregs->ctrl, ctrl);
2285        SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2286
2287        dma->started = 1; /* open up other DMA interfaces */
2288
2289        return 0;
2290}
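
/* Usage sketch (not part of the driver): typical channel life cycle. Starting
 * only enables the receiver; transfers begin once buffers are prepared and
 * packets are sent.
 *
 *   grspw_dma_start(dma);
 *   ...                      // rx_prepare/tx_send/rx_recv/tx_reclaim loop
 *   grspw_dma_stop(dma);     // queued packets are moved to RECV/SENT
 *   grspw_dma_close(dma);    // frees rings and semaphores
 */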
2291
2292STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma)
2293{
2294        IRQFLAGS_TYPE irqflags;
2295
2296        if (dma->started == 0)
2297                return;
2298        dma->started = 0;
2299
2300        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
2301        grspw_hw_dma_stop(dma);
2302        SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2303
2304        /* From here no more packets will be sent, however
2305         * there may still exist scheduled packets that have been
2306         * sent, and packets in the SEND Queue waiting for free
2307         * descriptors. All packets are moved to the SENT Queue
2308         * so that the user can get the buffers back; the user
2309         * must check TXPKT_FLAG_TX in order to determine
2310         * whether a packet was sent or not.
2311         */
2312
2313        /* Retrieve all sent packets from the scheduled queue */
2314        grspw_tx_process_scheduled(dma);
2315
2316        /* Move un-sent packets in SEND and SCHED queue to the
2317         * SENT Queue. (never marked sent)
2318         */
2319        if (!grspw_list_is_empty(&dma->tx_sched)) {
2320                grspw_list_append_list(&dma->sent, &dma->tx_sched);
2321                grspw_list_clr(&dma->tx_sched);
2322                dma->sent_cnt += dma->tx_sched_cnt;
2323                dma->tx_sched_cnt = 0;
2324        }
2325        if (!grspw_list_is_empty(&dma->send)) {
2326                grspw_list_append_list(&dma->sent, &dma->send);
2327                grspw_list_clr(&dma->send);
2328                dma->sent_cnt += dma->send_cnt;
2329                dma->send_cnt = 0;
2330        }
2331
2332        /* Similar for RX */
2333        grspw_rx_process_scheduled(dma);
2334        if (!grspw_list_is_empty(&dma->rx_sched)) {
2335                grspw_list_append_list(&dma->recv, &dma->rx_sched);
2336                grspw_list_clr(&dma->rx_sched);
2337                dma->recv_cnt += dma->rx_sched_cnt;
2338                dma->rx_sched_cnt = 0;
2339        }
2340        if (!grspw_list_is_empty(&dma->ready)) {
2341                grspw_list_append_list(&dma->recv, &dma->ready);
2342                grspw_list_clr(&dma->ready);
2343                dma->recv_cnt += dma->ready_cnt;
2344                dma->ready_cnt = 0;
2345        }
2346
2347        /* Throw out blocked threads */
2348        rtems_semaphore_flush(dma->rx_wait.sem_wait);
2349        rtems_semaphore_flush(dma->tx_wait.sem_wait);
2350}
2351
2352void grspw_dma_stop(void *c)
2353{
2354        struct grspw_dma_priv *dma = c;
2355
2356        /* Take DMA Channel lock */
2357        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2358            != RTEMS_SUCCESSFUL)
2359                return;
2360
2361        grspw_dma_stop_locked(dma);
2362
2363        rtems_semaphore_release(dma->sem_dma);
2364}
2365
2366/* Do general work, invoked indirectly from ISR */
2367static void grspw_work_shutdown_func(struct grspw_priv *priv)
2368{
2369        int i;
2370
2371        /* Link is down for some reason, and the user has configured
2372         * that we stop all DMA channels and throw out all blocked
2373         * threads.
2374         */
2375        for (i=0; i<priv->hwsup.ndma_chans; i++)
2376                grspw_dma_stop(&priv->dma[i]);
2377        grspw_hw_stop(priv);
2378}
2379
2380/* Do DMA work on one channel, invoked indirectly from ISR */
2381static void grspw_work_dma_func(struct grspw_dma_priv *dma)
2382{
2383        int tx_cond_true, rx_cond_true;
2384        unsigned int ctrl;
2385        IRQFLAGS_TYPE irqflags;
2386
2387        rx_cond_true = 0;
2388        tx_cond_true = 0;
2389        dma->stats.irq_cnt++;
2390
2391        /* Take DMA channel lock */
2392        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2393            != RTEMS_SUCCESSFUL)
2394                return;
2395
2396        /* Look at cause we were woken up and clear source */
2397        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
2398        ctrl = REG_READ(&dma->regs->ctrl);
2399
2400        /* Read/Write DMA error ? */
2401        if (ctrl & GRSPW_DMA_STATUS_ERROR) {
2402                /* DMA error -> Stop DMA channel (both RX and TX) */
2403                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2404                grspw_dma_stop_locked(dma);
2405        } else if (ctrl & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS)) {
2406                /* DMA has finished a TX/RX packet */
2407                ctrl &= ~GRSPW_DMACTRL_AT;
2408                if (dma->cfg.rx_irq_en_cnt != 0)
2409                        ctrl |= GRSPW_DMACTRL_RI;
2410                if (dma->cfg.tx_irq_en_cnt != 0)
2411                        ctrl |= GRSPW_DMACTRL_TI;
2412                REG_WRITE(&dma->regs->ctrl, ctrl);
2413                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2414                if (ctrl & GRSPW_DMACTRL_PR) {
2415                        /* Do RX Work */
2416                        dma->stats.rx_work_cnt++;
2417                        grspw_rx_process_scheduled(dma);
2418                        dma->stats.rx_work_enabled += grspw_rx_schedule_ready(dma);
2419                        /* Check to see if condition for waking blocked USER
2420                         * task is fullfilled.
2421                         */
2422                        if (dma->rx_wait.waiting) {
2423                                rx_cond_true = grspw_rx_wait_eval(dma);
2424                                if (rx_cond_true)
2425                                        dma->rx_wait.waiting = 0;
2426                        }
2427                }
2428                if (ctrl & GRSPW_DMACTRL_PS) {
2429                        /* Do TX Work */
2430                        dma->stats.tx_work_cnt++;
2431                        grspw_tx_process_scheduled(dma);
2432                        dma->stats.tx_work_enabled += grspw_tx_schedule_send(dma);
2433                        if (dma->tx_wait.waiting) {
2434                                tx_cond_true = grspw_tx_wait_eval(dma);
2435                                if (tx_cond_true)
2436                                        dma->tx_wait.waiting = 0;
2437                        }
2438                }
2439        } else
2440                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2441
2442        /* Release lock */
2443        rtems_semaphore_release(dma->sem_dma);
2444
2445        if (rx_cond_true)
2446                rtems_semaphore_release(dma->rx_wait.sem_wait);
2447
2448        if (tx_cond_true)
2449                rtems_semaphore_release(dma->tx_wait.sem_wait);
2450}
2451
2452/* The work task receives work from the work message queue, which is posted
2453 * from the ISR.
2454 */
2455static void grspw_work_func(rtems_task_argument unused)
2456{
2457        rtems_status_code status;
2458        unsigned int message;
2459        size_t size;
2460        struct grspw_priv *priv;
2461        int i;
2462
2463        while (grspw_task_stop == 0) {
2464                /* Wait for ISR to schedule work */
2465                status = rtems_message_queue_receive(
2466                        grspw_work_queue, &message,
2467                        &size, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
2468                if (status != RTEMS_SUCCESSFUL)
2469                        break;
2470
2471                /* Handle work */
2472                priv = priv_tab[message >> WORK_CORE_BIT];
2473                if (message & WORK_SHUTDOWN)
2474                        grspw_work_shutdown_func(priv);
2475                else if (message & WORK_DMA_MASK) {
2476                        for (i = 0; i < 4; i++) {
2477                                if (message & WORK_DMA(i))
2478                                        grspw_work_dma_func(&priv->dma[i]);
2479                        }
2480                }
2481        }
2482        rtems_task_delete(RTEMS_SELF);
2483}
2484
2485STATIC void grspw_isr(void *data)
2486{
2487        struct grspw_priv *priv = data;
2488        unsigned int dma_stat, stat, stat_clrmsk, ctrl, icctrl, timecode;
2489        unsigned int rxirq, rxack, intto;
2490        int i, handled = 0, message = WORK_NONE, call_user_int_isr;
2491#ifdef RTEMS_HAS_SMP
2492        IRQFLAGS_TYPE irqflags;
2493#endif
2494
2495        /* Get Status from Hardware */
2496        stat = REG_READ(&priv->regs->status);
2497        stat_clrmsk = stat & (GRSPW_STS_TO | GRSPW_STAT_ERROR);
2498
2499        /* Make sure to put the timecode handling first in order to get the
2500         * smallest possible interrupt latency
2501         */
2502        if ((stat & GRSPW_STS_TO) && (priv->tcisr != NULL)) {
2503                ctrl = REG_READ(&priv->regs->ctrl);
2504                if (ctrl & GRSPW_CTRL_TQ) {
2505                        /* Timecode received. Let custom function handle this */
2506                        timecode = REG_READ(&priv->regs->time) &
2507                                        (GRSPW_TIME_CTRL | GRSPW_TIME_TCNT);
2508                        (priv->tcisr)(priv->tcisr_arg, timecode);
2509                }
2510        }
2511
2512        /* Get Interrupt status from hardware */
2513        icctrl = REG_READ(&priv->regs->icctrl);
2514        if ((icctrl & GRSPW_ICCTRL_IRQSRC_MASK) && (priv->icisr != NULL)) {
2515                call_user_int_isr = 0;
2516                rxirq = rxack = intto = 0;
2517
2518                if ((icctrl & GRSPW_ICCTRL_IQ) &&
2519                    (rxirq = REG_READ(&priv->regs->icrx)) != 0)
2520                        call_user_int_isr = 1;
2521
2522                if ((icctrl & GRSPW_ICCTRL_AQ) &&
2523                    (rxack = REG_READ(&priv->regs->icack)) != 0)
2524                        call_user_int_isr = 1;
2525
2526                if ((icctrl & GRSPW_ICCTRL_TQ) &&
2527                    (intto = REG_READ(&priv->regs->ictimeout)) != 0)
2528                        call_user_int_isr = 1;                 
2529
2530                /* Let custom functions handle this POTENTIAL SPW interrupt. The
2531                 * user function is called even if no such IRQ has happened!
2532                 * User must make sure to clear all interrupts that have been
2533                 * handled from the three registers by writing a one.
2534                 */
2535                if (call_user_int_isr)
2536                        priv->icisr(priv->icisr_arg, rxirq, rxack, intto);
2537        }
2538
2539        /* An Error occurred? */
2540        if (stat & GRSPW_STAT_ERROR) {
2541                /* Wake Global WorkQ */
2542                handled = 1;
2543
2544                if (stat & GRSPW_STS_EE)
2545                        priv->stats.err_eeop++;
2546
2547                if (stat & GRSPW_STS_IA)
2548                        priv->stats.err_addr++;
2549
2550                if (stat & GRSPW_STS_PE)
2551                        priv->stats.err_parity++;
2552
2553                if (stat & GRSPW_STS_ER)
2554                        priv->stats.err_escape++;
2555
2556                if (stat & GRSPW_STS_CE)
2557                        priv->stats.err_credit++;
2558
2559                if (stat & GRSPW_STS_WE)
2560                        priv->stats.err_wsync++;
2561
2562                if ((priv->dis_link_on_err >> 16) & stat) {
2563                        /* Disable the link, no more transfers are expected
2564                         * on any DMA channel.
2565                         */
2566                        SPIN_LOCK(&priv->devlock, irqflags);
2567                        ctrl = REG_READ(&priv->regs->ctrl);
2568                        REG_WRITE(&priv->regs->ctrl, GRSPW_CTRL_LD |
2569                                (ctrl & ~(GRSPW_CTRL_IE|GRSPW_CTRL_LS)));
2570                        SPIN_UNLOCK(&priv->devlock, irqflags);
2571                        /* Signal to work-thread to stop DMA and clean up */
2572                        message = WORK_SHUTDOWN;
2573                }
2574        }
2575
2576        /* Clear Status Flags */
2577        if (stat_clrmsk) {
2578                handled = 1;
2579                REG_WRITE(&priv->regs->status, stat_clrmsk);
2580        }
2581
2582        /* A DMA transfer or Error occurred? In that case disable further IRQs
2583         * from the DMA channel, then invoke the workQ.
2584         *
2585         * Also, the GI interrupt flag may not be available on older
2586         * designs (it was added together with multiple DMA channel support).
2587         */
2588        SPIN_LOCK(&priv->devlock, irqflags);
2589        for (i=0; i<priv->hwsup.ndma_chans; i++) {
2590                dma_stat = REG_READ(&priv->regs->dma[i].ctrl);
2591                /* Check for Errors and if Packets been sent or received if
2592                 * respective IRQ are enabled
2593                 */
2594#ifdef HW_WITH_GI
2595                if ( dma_stat & (GRSPW_DMA_STATUS_ERROR | GRSPW_DMACTRL_GI) ) {
2596#else
2597                if ( (((dma_stat << 3) & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS))
2598                     | GRSPW_DMA_STATUS_ERROR) & dma_stat ) {
2599#endif
2600                        /* Disable Further IRQs (until enabled again)
2601                         * from this DMA channel. Let the status
2602                         * bit remain so that they can be handled by
2603                         * work function.
2604                         */
2605                        REG_WRITE(&priv->regs->dma[i].ctrl, dma_stat &
2606                                ~(GRSPW_DMACTRL_RI|GRSPW_DMACTRL_TI|
2607                                GRSPW_DMACTRL_PR|GRSPW_DMACTRL_PS|
2608                                GRSPW_DMACTRL_RA|GRSPW_DMACTRL_TA|
2609                                GRSPW_DMACTRL_AT));
2610                        message |= WORK_DMA(i);
2611                        handled = 1;
2612                }
2613        }
2614        SPIN_UNLOCK(&priv->devlock, irqflags);
2615
2616        if (handled != 0)
2617                priv->stats.irq_cnt++;
2618
2619        /* Schedule work by sending message to work thread */
2620        if ((message != WORK_NONE) && grspw_work_queue) {
2621                message |= WORK_CORE(priv->index);
2622                stat = rtems_message_queue_send(grspw_work_queue, &message, 4);
2623                if (stat != RTEMS_SUCCESSFUL)
2624                        printk("grspw_isr(%d): message fail %d (0x%x)\n",
2625                                priv->index, stat, message);
2626        }
2627}
2628
2629STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma)
2630{
2631        unsigned int ctrl;
2632        struct grspw_dma_regs *dregs = dma->regs;
2633
2634        ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN |
2635               GRSPW_DMACTRL_SP | GRSPW_DMACTRL_SA | GRSPW_DMACTRL_NS);
2636        ctrl |= GRSPW_DMACTRL_AT;
2637        REG_WRITE(&dregs->ctrl, ctrl);
2638}
2639
2640STATIC void grspw_hw_dma_softreset(struct grspw_dma_priv *dma)
2641{
2642        unsigned int ctrl;
2643        struct grspw_dma_regs *dregs = dma->regs;
2644
2645        ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN);
2646        REG_WRITE(&dregs->ctrl, ctrl);
2647
2648        REG_WRITE(&dregs->rxmax, DEFAULT_RXMAX);
2649        REG_WRITE(&dregs->txdesc, 0);
2650        REG_WRITE(&dregs->rxdesc, 0);
2651}
2652
2653/* Hardware Action:
2654 *  - stop DMA
2655 *  - do not bring down the link (RMAP may be active)
2656 *  - RMAP settings untouched (RMAP may be active)
2657 *  - port select untouched (RMAP may be active)
2658 *  - timecodes are disabled
2659 *  - IRQ generation disabled
2660 *  - status not cleared (let user analyze it if requested later on)
2661 *  - Node address / First DMA channels Node address
2662 *    is untouched (RMAP may be active)
2663 */
2664STATIC void grspw_hw_stop(struct grspw_priv *priv)
2665{
2666        int i;
2667        unsigned int ctrl;
2668        IRQFLAGS_TYPE irqflags;
2669
2670        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
2671
2672        for (i=0; i<priv->hwsup.ndma_chans; i++)
2673                grspw_hw_dma_stop(&priv->dma[i]);
2674
2675        ctrl = REG_READ(&priv->regs->ctrl);
2676        REG_WRITE(&priv->regs->ctrl, ctrl & (
2677                GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS |
2678                GRSPW_CTRL_RE | GRSPW_CTRL_RD |
2679                GRSPW_CTRL_NP | GRSPW_CTRL_PS));
2680
2681        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
2682}
2683
2684/* Soft reset of GRSPW core registers */
2685STATIC void grspw_hw_softreset(struct grspw_priv *priv)
2686{
2687        int i;
2688        unsigned int tmp;
2689
2690        for (i=0; i<priv->hwsup.ndma_chans; i++)
2691                grspw_hw_dma_softreset(&priv->dma[i]);
2692
2693        REG_WRITE(&priv->regs->status, 0xffffffff);
2694        REG_WRITE(&priv->regs->time, 0);
2695        /* Clear all but valuable reset values of ICCTRL */
2696        tmp = REG_READ(&priv->regs->icctrl);
2697        tmp &= GRSPW_ICCTRL_INUM | GRSPW_ICCTRL_BIRQ | GRSPW_ICCTRL_TXIRQ;
2698        tmp |= GRSPW_ICCTRL_ID;
2699        REG_WRITE(&priv->regs->icctrl, tmp);
2700        REG_WRITE(&priv->regs->icrx, 0xffffffff);
2701        REG_WRITE(&priv->regs->icack, 0xffffffff);
2702        REG_WRITE(&priv->regs->ictimeout, 0xffffffff);
2703}
2704
2705int grspw_dev_count(void)
2706{
2707        return grspw_count;
2708}
2709
2710void grspw_initialize_user(void *(*devfound)(int), void (*devremove)(int,void*))
2711{
2712        int i;
2713        struct grspw_priv *priv;
2714
2715        /* Set new Device Found Handler */
2716        grspw_dev_add = devfound;
2717        grspw_dev_del = devremove;
2718
2719        if (grspw_initialized == 1 && grspw_dev_add) {
2720                /* Call callback for every previously found device */
2721                for (i=0; i<grspw_count; i++) {
2722                        priv = priv_tab[i];
2723                        if (priv)
2724                                priv->data = grspw_dev_add(i);
2725                }
2726        }
2727}
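
/* Usage sketch (not part of the driver): hooking into device discovery. The
 * callback bodies are hypothetical; whatever devfound returns is stored in
 * priv->data and handed back to devremove on removal.
 *
 *   void *my_dev_found(int index)
 *   {
 *           printk("GRSPW%d found\n", index);
 *           return NULL;     // or an application context pointer
 *   }
 *
 *   void my_dev_removed(int index, void *data)
 *   {
 *           printk("GRSPW%d removed\n", index);
 *   }
 *
 *   grspw_initialize_user(my_dev_found, my_dev_removed);
 */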
2728
2729/******************* Driver manager interface ***********************/
2730
2731/* Driver prototypes */
2732static int grspw_common_init(void);
2733static int grspw2_init3(struct drvmgr_dev *dev);
2734
2735static struct drvmgr_drv_ops grspw2_ops =
2736{
2737        .init = {NULL,  NULL, grspw2_init3, NULL},
2738        .remove = NULL,
2739        .info = NULL
2740};
2741
2742static struct amba_dev_id grspw2_ids[] =
2743{
2744        {VENDOR_GAISLER, GAISLER_SPW}, /* not yet supported */
2745        {VENDOR_GAISLER, GAISLER_SPW2},
2746        {VENDOR_GAISLER, GAISLER_SPW2_DMA},
2747        {0, 0}          /* Mark end of table */
2748};
2749
2750static struct amba_drv_info grspw2_drv_info =
2751{
2752        {
2753                DRVMGR_OBJ_DRV,                 /* Driver */
2754                NULL,                           /* Next driver */
2755                NULL,                           /* Device list */
2756                DRIVER_AMBAPP_GAISLER_GRSPW2_ID,/* Driver ID */
2757                "GRSPW_PKT_DRV",                /* Driver Name */
2758                DRVMGR_BUS_TYPE_AMBAPP,         /* Bus Type */
2759                &grspw2_ops,
2760                NULL,                           /* Funcs */
2761                0,                              /* No devices yet */
2762                sizeof(struct grspw_priv),      /* Let DrvMgr alloc priv */
2763        },
2764        &grspw2_ids[0]
2765};
2766
2767void grspw2_register_drv (void)
2768{
2769        GRSPW_DBG("Registering GRSPW2 packet driver\n");
2770        drvmgr_drv_register(&grspw2_drv_info.general);
2771}
2772
2773static int grspw2_init3(struct drvmgr_dev *dev)
2774{
2775        struct grspw_priv *priv;
2776        struct amba_dev_info *ambadev;
2777        struct ambapp_core *pnpinfo;
2778        int i, size;
2779        unsigned int ctrl, icctrl, numi;
2780        union drvmgr_key_value *value;
2781
2782        GRSPW_DBG("GRSPW[%d] on bus %s\n", dev->minor_drv,
2783                dev->parent->dev->name);
2784
2785        if (grspw_count >= GRSPW_MAX)
2786                return DRVMGR_ENORES;
2787
2788        priv = dev->priv;
2789        if (priv == NULL)
2790                return DRVMGR_NOMEM;
2791        priv->dev = dev;
2792
2793        /* If first device init common part of driver */
2794        if (grspw_common_init())
2795                return DRVMGR_FAIL;
2796
2797        /*** Now we take care of device initialization ***/
2798
2799        /* Get device information from AMBA PnP information */
2800        ambadev = (struct amba_dev_info *)dev->businfo;
2801        if (ambadev == NULL)
2802                return -1;
2803        pnpinfo = &ambadev->info;
2804        priv->irq = pnpinfo->irq;
2805        priv->regs = (struct grspw_regs *)pnpinfo->apb_slv->start;
2806
2807        /* Read Hardware Support from Control Register */
2808        ctrl = REG_READ(&priv->regs->ctrl);
2809        priv->hwsup.rmap = (ctrl & GRSPW_CTRL_RA) >> GRSPW_CTRL_RA_BIT;
2810        priv->hwsup.rmap_crc = (ctrl & GRSPW_CTRL_RC) >> GRSPW_CTRL_RC_BIT;
2811        priv->hwsup.rx_unalign = (ctrl & GRSPW_CTRL_RX) >> GRSPW_CTRL_RX_BIT;
2812        priv->hwsup.nports = 1 + ((ctrl & GRSPW_CTRL_PO) >> GRSPW_CTRL_PO_BIT);
2813        priv->hwsup.ndma_chans = 1 + ((ctrl & GRSPW_CTRL_NCH) >> GRSPW_CTRL_NCH_BIT);
2814        priv->hwsup.irq = ((ctrl & GRSPW_CTRL_ID) >> GRSPW_CTRL_ID_BIT);
2815        icctrl = REG_READ(&priv->regs->icctrl);
2816        numi = (icctrl & GRSPW_ICCTRL_NUMI) >> GRSPW_ICCTRL_NUMI_BIT;
2817        if (numi > 0)
2818                priv->hwsup.irq_num = 1 << (numi - 1);
2819        else
2820                priv->hwsup.irq_num = 0;
2821
2822        /* Construct hardware version identification */
2823        priv->hwsup.hw_version = pnpinfo->device << 16 | pnpinfo->apb_slv->ver;
2824
2825        if ((pnpinfo->device == GAISLER_SPW2) ||
2826            (pnpinfo->device == GAISLER_SPW2_DMA)) {
2827                priv->hwsup.strip_adr = 1; /* All GRSPW2 can strip Address */
2828                priv->hwsup.strip_pid = 1; /* All GRSPW2 can strip PID */
2829        } else {
2830                /* Autodetect GRSPW1 features? */
2831                priv->hwsup.strip_adr = 0;
2832                priv->hwsup.strip_pid = 0;
2833        }
2834
2835        /* Probe width of SpaceWire Interrupt ISR timers. All have the same
2836         * width, so only the first is probed; if there is no timer the
2837         * result will be zero.
2838         */
2839        REG_WRITE(&priv->regs->icrlpresc, 0x7fffffff);
2840        ctrl = REG_READ(&priv->regs->icrlpresc);
2841        REG_WRITE(&priv->regs->icrlpresc, 0);
2842        priv->hwsup.itmr_width = 0;
2843        while (ctrl & 1) {
2844                priv->hwsup.itmr_width++;
2845                ctrl = ctrl >> 1;
2846        }
2847
2848        /* Let user limit the number of DMA channels on this core to save
2849         * space. Only the first nDMA channels will be available.
2850         */
2851        value = drvmgr_dev_key_get(priv->dev, "nDMA", DRVMGR_KT_INT);
2852        if (value && (value->i < priv->hwsup.ndma_chans))
2853                priv->hwsup.ndma_chans = value->i;
2854
2855        /* Allocate and init Memory for all DMA channels */
2856        size = sizeof(struct grspw_dma_priv) * priv->hwsup.ndma_chans;
2857        priv->dma = (struct grspw_dma_priv *) malloc(size);
2858        if (priv->dma == NULL)
2859                return DRVMGR_NOMEM;
2860        memset(priv->dma, 0, size);
2861        for (i=0; i<priv->hwsup.ndma_chans; i++) {
2862                priv->dma[i].core = priv;
2863                priv->dma[i].index = i;
2864                priv->dma[i].regs = &priv->regs->dma[i];
2865        }
2866
2867        /* Startup Action:
2868         *  - stop DMA
2869         *  - do not bring down the link (RMAP may be active)
2870         *  - RMAP settings untouched (RMAP may be active)
2871         *  - port select untouched (RMAP may be active)
2872         *  - timecodes are disabled
2873         *  - IRQ generation disabled
2874         *  - status cleared
2875         *  - Node address / First DMA channels Node address
2876         *    is untouched (RMAP may be active)
2877         */
2878        grspw_hw_stop(priv);
2879        grspw_hw_softreset(priv);
2880
2881        /* Register device in the driver's device table */
2882        priv->index = grspw_count;
2883        priv_tab[priv->index] = priv;
2884        grspw_count++;
2885
2886        /* Device name */
2887        sprintf(priv->devname, "grspw%d", priv->index);
2888
2889        /* Tell above layer about new device */
2890        if (grspw_dev_add)
2891                priv->data = grspw_dev_add(priv->index);
2892
2893        return DRVMGR_OK;
2894}
2895
2896/******************* Driver Implementation ***********************/
2897
2898static int grspw_common_init(void)
2899{
2900        if (grspw_initialized == 1)
2901                return 0;
2902        if (grspw_initialized == -1)
2903                return -1;
2904        grspw_initialized = -1;
2905
2906        /* Device Semaphore created with count = 1 */
2907        if (rtems_semaphore_create(rtems_build_name('S', 'G', 'L', 'S'), 1,
2908            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
2909            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
2910            RTEMS_NO_PRIORITY_CEILING, 0, &grspw_sem) != RTEMS_SUCCESSFUL)
2911                return -1;
2912
2913        /* Work queue and work thread. Not created if the user disables it;
2914         * the user can disable it to save resources when interrupts are not used.
2915         */
2916        if (grspw_work_task_priority != -1) {
2917                if (rtems_message_queue_create(
2918                    rtems_build_name('S', 'G', 'L', 'Q'), 32, 4, RTEMS_FIFO,
2919                    &grspw_work_queue) != RTEMS_SUCCESSFUL)
2920                        return -1;
2921
2922                if (rtems_task_create(rtems_build_name('S', 'G', 'L', 'T'),
2923                    grspw_work_task_priority, RTEMS_MINIMUM_STACK_SIZE,
2924                    RTEMS_PREEMPT | RTEMS_NO_ASR, RTEMS_NO_FLOATING_POINT,
2925                    &grspw_work_task) != RTEMS_SUCCESSFUL)
2926                        return -1;
2927
2928                if (rtems_task_start(grspw_work_task, grspw_work_func, 0) !=
2929                    RTEMS_SUCCESSFUL)
2930                        return -1;
2931        }
2932
2933        grspw_initialized = 1;
2934        return 0;
2935}