source: rtems/c/src/lib/libbsp/sparc/shared/spw/grspw_pkt.c @ 5823bae8
Last change on this file since 5823bae8 was 5823bae8, checked in by Daniel Hellstrom <daniel@…>, on 02/27/15 at 13:03:15:
"LEON: move driver headers to bsp/ directory"
/*
 * Cobham Gaisler GRSPW/GRSPW2 SpaceWire Kernel Library Interface for RTEMS.
 *
 * This driver can be used to implement a standard I/O system "char"-driver
 * or used directly. NOTE: SMP support has not been tested.
 *
 * COPYRIGHT (c) 2011
 * Cobham Gaisler AB
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.com/license/LICENSE.
 */

#include <rtems.h>
#include <bsp.h>
#include <rtems/libio.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <ctype.h>
#include <malloc.h>
#include <rtems/bspIo.h>

#include <drvmgr/drvmgr.h>
#include <ambapp.h>
#include <drvmgr/ambapp_bus.h>
#include <bsp/grspw_pkt.h>

/* This driver has been prepared for SMP operation, however it has never been
 * tested on an SMP system - use at your own risk.
 */
#ifdef RTEMS_HAS_SMP

#include <rtems/score/smplock.h> /* spin-lock */

/* SPIN_LOCK() and SPIN_UNLOCK() NOT_IMPLEMENTED_BY_RTEMS. Use _IRQ version
 * to implement.
 */
#define SPIN_DECLARE(name) SMP_lock_spinlock_simple_Control name
#define SPIN_INIT(lock) _SMP_lock_spinlock_simple_Initialize(lock)
#define SPIN_LOCK(lock, level) SPIN_LOCK_IRQ(lock, level)
#define SPIN_LOCK_IRQ(lock, level) (level) = _SMP_lock_spinlock_simple_Obtain(lock)
#define SPIN_UNLOCK(lock, level) SPIN_UNLOCK_IRQ(lock, level)
#define SPIN_UNLOCK_IRQ(lock, level) _SMP_lock_spinlock_simple_Release(lock, level)
#define IRQFLAGS_TYPE ISR_Level

#else

#define SPIN_DECLARE(name)
#define SPIN_INIT(lock)
#define SPIN_LOCK(lock, level)
#define SPIN_LOCK_IRQ(lock, level) rtems_interrupt_disable(level)
#define SPIN_UNLOCK(lock, level)
#define SPIN_UNLOCK_IRQ(lock, level) rtems_interrupt_enable(level)
#define IRQFLAGS_TYPE rtems_interrupt_level

#endif
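
/* Illustrative sketch (editor's addition, not part of the original source):
 * the macros above protect short register read-modify-write sequences from
 * the ISR. On uni-processor builds they reduce to plain interrupt
 * disable/enable, as used throughout the driver below:
 *
 *   IRQFLAGS_TYPE irqflags;
 *
 *   SPIN_LOCK_IRQ(&priv->devlock, irqflags);
 *   REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_TI);
 *   SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
 */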

/*#define STATIC*/
#define STATIC static

/*#define GRSPW_DBG(args...) printk(args)*/
#define GRSPW_DBG(args...)

struct grspw_dma_regs {
        volatile unsigned int ctrl;     /* DMA Channel Control */
        volatile unsigned int rxmax;    /* RX Max Packet Length */
        volatile unsigned int txdesc;   /* TX Descriptor Base/Current */
        volatile unsigned int rxdesc;   /* RX Descriptor Base/Current */
        volatile unsigned int addr;     /* Address Register */
        volatile unsigned int resv[3];
};

struct grspw_regs {
        volatile unsigned int ctrl;
        volatile unsigned int status;
        volatile unsigned int nodeaddr;
        volatile unsigned int clkdiv;
        volatile unsigned int destkey;
        volatile unsigned int time;
        volatile unsigned int timer;    /* Used only in GRSPW1 */
        volatile unsigned int resv1;

        /* DMA Registers. ctrl.NCH determines the number of DMA channels;
         * up to 4 channels are supported.
         */
        struct grspw_dma_regs dma[4];

        volatile unsigned int icctrl;
        volatile unsigned int icrx;
        volatile unsigned int icack;
        volatile unsigned int ictimeout;
        volatile unsigned int ictickomask;
        volatile unsigned int icaamask;
        volatile unsigned int icrlpresc;
        volatile unsigned int icrlisr;
        volatile unsigned int icrlintack;
        volatile unsigned int resv2;
        volatile unsigned int icisr;
        volatile unsigned int resv3;
};

/* GRSPW - Control Register - 0x00 */
#define GRSPW_CTRL_RA_BIT       31
#define GRSPW_CTRL_RX_BIT       30
#define GRSPW_CTRL_RC_BIT       29
#define GRSPW_CTRL_NCH_BIT      27
#define GRSPW_CTRL_PO_BIT       26
#define GRSPW_CTRL_ID_BIT       24
#define GRSPW_CTRL_LE_BIT       22
#define GRSPW_CTRL_PS_BIT       21
#define GRSPW_CTRL_NP_BIT       20
#define GRSPW_CTRL_RD_BIT       17
#define GRSPW_CTRL_RE_BIT       16
#define GRSPW_CTRL_TF_BIT       12
#define GRSPW_CTRL_TR_BIT       11
#define GRSPW_CTRL_TT_BIT       10
#define GRSPW_CTRL_LI_BIT       9
#define GRSPW_CTRL_TQ_BIT       8
#define GRSPW_CTRL_RS_BIT       6
#define GRSPW_CTRL_PM_BIT       5
#define GRSPW_CTRL_TI_BIT       4
#define GRSPW_CTRL_IE_BIT       3
#define GRSPW_CTRL_AS_BIT       2
#define GRSPW_CTRL_LS_BIT       1
#define GRSPW_CTRL_LD_BIT       0

#define GRSPW_CTRL_RA   (1<<GRSPW_CTRL_RA_BIT)
#define GRSPW_CTRL_RX   (1<<GRSPW_CTRL_RX_BIT)
#define GRSPW_CTRL_RC   (1<<GRSPW_CTRL_RC_BIT)
#define GRSPW_CTRL_NCH  (0x3<<GRSPW_CTRL_NCH_BIT)
#define GRSPW_CTRL_PO   (1<<GRSPW_CTRL_PO_BIT)
#define GRSPW_CTRL_ID   (1<<GRSPW_CTRL_ID_BIT)
#define GRSPW_CTRL_LE   (1<<GRSPW_CTRL_LE_BIT)
#define GRSPW_CTRL_PS   (1<<GRSPW_CTRL_PS_BIT)
#define GRSPW_CTRL_NP   (1<<GRSPW_CTRL_NP_BIT)
#define GRSPW_CTRL_RD   (1<<GRSPW_CTRL_RD_BIT)
#define GRSPW_CTRL_RE   (1<<GRSPW_CTRL_RE_BIT)
#define GRSPW_CTRL_TF   (1<<GRSPW_CTRL_TF_BIT)
#define GRSPW_CTRL_TR   (1<<GRSPW_CTRL_TR_BIT)
#define GRSPW_CTRL_TT   (1<<GRSPW_CTRL_TT_BIT)
#define GRSPW_CTRL_LI   (1<<GRSPW_CTRL_LI_BIT)
#define GRSPW_CTRL_TQ   (1<<GRSPW_CTRL_TQ_BIT)
#define GRSPW_CTRL_RS   (1<<GRSPW_CTRL_RS_BIT)
#define GRSPW_CTRL_PM   (1<<GRSPW_CTRL_PM_BIT)
#define GRSPW_CTRL_TI   (1<<GRSPW_CTRL_TI_BIT)
#define GRSPW_CTRL_IE   (1<<GRSPW_CTRL_IE_BIT)
#define GRSPW_CTRL_AS   (1<<GRSPW_CTRL_AS_BIT)
#define GRSPW_CTRL_LS   (1<<GRSPW_CTRL_LS_BIT)
#define GRSPW_CTRL_LD   (1<<GRSPW_CTRL_LD_BIT)

#define GRSPW_CTRL_IRQSRC_MASK \
        (GRSPW_CTRL_LI | GRSPW_CTRL_TQ)
#define GRSPW_ICCTRL_IRQSRC_MASK \
        (GRSPW_ICCTRL_TQ | GRSPW_ICCTRL_AQ | GRSPW_ICCTRL_IQ)

/* GRSPW - Status Register - 0x04 */
#define GRSPW_STS_LS_BIT        21
#define GRSPW_STS_AP_BIT        9
#define GRSPW_STS_EE_BIT        8
#define GRSPW_STS_IA_BIT        7
#define GRSPW_STS_WE_BIT        6       /* GRSPW1 */
#define GRSPW_STS_PE_BIT        4
#define GRSPW_STS_DE_BIT        3
#define GRSPW_STS_ER_BIT        2
#define GRSPW_STS_CE_BIT        1
#define GRSPW_STS_TO_BIT        0

#define GRSPW_STS_LS    (0x7<<GRSPW_STS_LS_BIT)
#define GRSPW_STS_AP    (1<<GRSPW_STS_AP_BIT)
#define GRSPW_STS_EE    (1<<GRSPW_STS_EE_BIT)
#define GRSPW_STS_IA    (1<<GRSPW_STS_IA_BIT)
#define GRSPW_STS_WE    (1<<GRSPW_STS_WE_BIT)   /* GRSPW1 */
#define GRSPW_STS_PE    (1<<GRSPW_STS_PE_BIT)
#define GRSPW_STS_DE    (1<<GRSPW_STS_DE_BIT)
#define GRSPW_STS_ER    (1<<GRSPW_STS_ER_BIT)
#define GRSPW_STS_CE    (1<<GRSPW_STS_CE_BIT)
#define GRSPW_STS_TO    (1<<GRSPW_STS_TO_BIT)

/* GRSPW - Default Address Register - 0x08 */
#define GRSPW_DEF_ADDR_BIT      0
#define GRSPW_DEF_MASK_BIT      8
#define GRSPW_DEF_ADDR  (0xff<<GRSPW_DEF_ADDR_BIT)
#define GRSPW_DEF_MASK  (0xff<<GRSPW_DEF_MASK_BIT)

/* GRSPW - Clock Divisor Register - 0x0C */
#define GRSPW_CLKDIV_START_BIT  8
#define GRSPW_CLKDIV_RUN_BIT    0
#define GRSPW_CLKDIV_START      (0xff<<GRSPW_CLKDIV_START_BIT)
#define GRSPW_CLKDIV_RUN        (0xff<<GRSPW_CLKDIV_RUN_BIT)
#define GRSPW_CLKDIV_MASK       (GRSPW_CLKDIV_START|GRSPW_CLKDIV_RUN)

/* GRSPW - Destination Key Register - 0x10 */
#define GRSPW_DK_DESTKEY_BIT    0
#define GRSPW_DK_DESTKEY        (0xff<<GRSPW_DK_DESTKEY_BIT)

/* GRSPW - Time Register - 0x14 */
#define GRSPW_TIME_CTRL_BIT     6
#define GRSPW_TIME_CNT_BIT      0
#define GRSPW_TIME_CTRL         (0x3<<GRSPW_TIME_CTRL_BIT)
#define GRSPW_TIME_TCNT         (0x3f<<GRSPW_TIME_CNT_BIT)

/* GRSPW - DMA Control Register - 0x20*N */
#define GRSPW_DMACTRL_LE_BIT    16
#define GRSPW_DMACTRL_SP_BIT    15
#define GRSPW_DMACTRL_SA_BIT    14
#define GRSPW_DMACTRL_EN_BIT    13
#define GRSPW_DMACTRL_NS_BIT    12
#define GRSPW_DMACTRL_RD_BIT    11
#define GRSPW_DMACTRL_RX_BIT    10
#define GRSPW_DMACTRL_AT_BIT    9
#define GRSPW_DMACTRL_RA_BIT    8
#define GRSPW_DMACTRL_TA_BIT    7
#define GRSPW_DMACTRL_PR_BIT    6
#define GRSPW_DMACTRL_PS_BIT    5
#define GRSPW_DMACTRL_AI_BIT    4
#define GRSPW_DMACTRL_RI_BIT    3
#define GRSPW_DMACTRL_TI_BIT    2
#define GRSPW_DMACTRL_RE_BIT    1
#define GRSPW_DMACTRL_TE_BIT    0

#define GRSPW_DMACTRL_LE        (1<<GRSPW_DMACTRL_LE_BIT)
#define GRSPW_DMACTRL_SP        (1<<GRSPW_DMACTRL_SP_BIT)
#define GRSPW_DMACTRL_SA        (1<<GRSPW_DMACTRL_SA_BIT)
#define GRSPW_DMACTRL_EN        (1<<GRSPW_DMACTRL_EN_BIT)
#define GRSPW_DMACTRL_NS        (1<<GRSPW_DMACTRL_NS_BIT)
#define GRSPW_DMACTRL_RD        (1<<GRSPW_DMACTRL_RD_BIT)
#define GRSPW_DMACTRL_RX        (1<<GRSPW_DMACTRL_RX_BIT)
#define GRSPW_DMACTRL_AT        (1<<GRSPW_DMACTRL_AT_BIT)
#define GRSPW_DMACTRL_RA        (1<<GRSPW_DMACTRL_RA_BIT)
#define GRSPW_DMACTRL_TA        (1<<GRSPW_DMACTRL_TA_BIT)
#define GRSPW_DMACTRL_PR        (1<<GRSPW_DMACTRL_PR_BIT)
#define GRSPW_DMACTRL_PS        (1<<GRSPW_DMACTRL_PS_BIT)
#define GRSPW_DMACTRL_AI        (1<<GRSPW_DMACTRL_AI_BIT)
#define GRSPW_DMACTRL_RI        (1<<GRSPW_DMACTRL_RI_BIT)
#define GRSPW_DMACTRL_TI        (1<<GRSPW_DMACTRL_TI_BIT)
#define GRSPW_DMACTRL_RE        (1<<GRSPW_DMACTRL_RE_BIT)
#define GRSPW_DMACTRL_TE        (1<<GRSPW_DMACTRL_TE_BIT)

/* GRSPW - DMA Channel Max Packet Length Register - (0x20*N + 0x04) */
#define GRSPW_DMARXLEN_MAX_BIT  0
#define GRSPW_DMARXLEN_MAX      (0xffffff<<GRSPW_DMARXLEN_MAX_BIT)

/* GRSPW - DMA Channel Address Register - (0x20*N + 0x10) */
#define GRSPW_DMAADR_ADDR_BIT   0
#define GRSPW_DMAADR_MASK_BIT   8
#define GRSPW_DMAADR_ADDR       (0xff<<GRSPW_DMAADR_ADDR_BIT)
#define GRSPW_DMAADR_MASK       (0xff<<GRSPW_DMAADR_MASK_BIT)

/* GRSPW - Interrupt-code Receive Register - 0xa4 */
#define GRSPW_ICCTRL_INUM_BIT   27
#define GRSPW_ICCTRL_IA_BIT     24
#define GRSPW_ICCTRL_LE_BIT     23
#define GRSPW_ICCTRL_PR_BIT     22
#define GRSPW_ICCTRL_DQ_BIT     21 /* never used */
#define GRSPW_ICCTRL_TQ_BIT     20
#define GRSPW_ICCTRL_AQ_BIT     19
#define GRSPW_ICCTRL_IQ_BIT     18
#define GRSPW_ICCTRL_IR_BIT     17
#define GRSPW_ICCTRL_IT_BIT     16
#define GRSPW_ICCTRL_NUMI_BIT   13
#define GRSPW_ICCTRL_BIRQ_BIT   8
#define GRSPW_ICCTRL_ID_BIT     7
#define GRSPW_ICCTRL_II_BIT     6
#define GRSPW_ICCTRL_TXIRQ_BIT  0
#define GRSPW_ICCTRL_INUM       (0x3f << GRSPW_ICCTRL_INUM_BIT)
#define GRSPW_ICCTRL_IA         (1 << GRSPW_ICCTRL_IA_BIT)
#define GRSPW_ICCTRL_LE         (1 << GRSPW_ICCTRL_LE_BIT)
#define GRSPW_ICCTRL_PR         (1 << GRSPW_ICCTRL_PR_BIT)
#define GRSPW_ICCTRL_DQ         (1 << GRSPW_ICCTRL_DQ_BIT)
#define GRSPW_ICCTRL_TQ         (1 << GRSPW_ICCTRL_TQ_BIT)
#define GRSPW_ICCTRL_AQ         (1 << GRSPW_ICCTRL_AQ_BIT)
#define GRSPW_ICCTRL_IQ         (1 << GRSPW_ICCTRL_IQ_BIT)
#define GRSPW_ICCTRL_IR         (1 << GRSPW_ICCTRL_IR_BIT)
#define GRSPW_ICCTRL_IT         (1 << GRSPW_ICCTRL_IT_BIT)
#define GRSPW_ICCTRL_NUMI       (0x7 << GRSPW_ICCTRL_NUMI_BIT)
#define GRSPW_ICCTRL_BIRQ       (0x1f << GRSPW_ICCTRL_BIRQ_BIT)
#define GRSPW_ICCTRL_ID         (1 << GRSPW_ICCTRL_ID_BIT)
#define GRSPW_ICCTRL_II         (1 << GRSPW_ICCTRL_II_BIT)
#define GRSPW_ICCTRL_TXIRQ      (0x3f << GRSPW_ICCTRL_TXIRQ_BIT)

/* RX Buffer Descriptor */
struct grspw_rxbd {
        volatile unsigned int ctrl;
        volatile unsigned int addr;
};

/* TX Buffer Descriptor */
struct grspw_txbd {
        volatile unsigned int ctrl;
        volatile unsigned int haddr;
        volatile unsigned int dlen;
        volatile unsigned int daddr;
};

/* GRSPW - DMA RXBD Ctrl */
#define GRSPW_RXBD_LEN_BIT 0
#define GRSPW_RXBD_LEN  (0x1ffffff<<GRSPW_RXBD_LEN_BIT)
#define GRSPW_RXBD_EN   (1<<25)
#define GRSPW_RXBD_WR   (1<<26)
#define GRSPW_RXBD_IE   (1<<27)
#define GRSPW_RXBD_EP   (1<<28)
#define GRSPW_RXBD_HC   (1<<29)
#define GRSPW_RXBD_DC   (1<<30)
#define GRSPW_RXBD_TR   (1<<31)

#define GRSPW_TXBD_HLEN (0xff<<0)
#define GRSPW_TXBD_NCL  (0xf<<8)
#define GRSPW_TXBD_EN   (1<<12)
#define GRSPW_TXBD_WR   (1<<13)
#define GRSPW_TXBD_IE   (1<<14)
#define GRSPW_TXBD_LE   (1<<15)
#define GRSPW_TXBD_HC   (1<<16)
#define GRSPW_TXBD_DC   (1<<17)

/* GRSPW Error Condition */
#define GRSPW_STAT_ERROR        (GRSPW_STS_EE | GRSPW_STS_IA | GRSPW_STS_WE | \
                                 GRSPW_STS_PE | GRSPW_STS_DE | GRSPW_STS_ER | \
                                 GRSPW_STS_CE)
#define GRSPW_DMA_STATUS_ERROR  (GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA)
/* GRSPW Link configuration options */
#define GRSPW_LINK_CFG          (GRSPW_CTRL_LI | GRSPW_CTRL_LD | \
                                 GRSPW_CTRL_LS | GRSPW_CTRL_AS)
#define GRSPW_LINKSTATE(status) (((status) & GRSPW_CTRL_LS) >> GRSPW_CTRL_LS_BIT)

/* Software Defaults */
#define DEFAULT_RXMAX 1024      /* 1 KByte max RX packet size */

/* GRSPW Constants */
#define GRSPW_TXBD_NR 64        /* Maximum number of TX Descriptors */
#define GRSPW_RXBD_NR 128       /* Maximum number of RX Descriptors */
#define BDTAB_SIZE 0x400        /* BD Table Size (RX or TX) */
#define BDTAB_ALIGN 0x400       /* BD Table Alignment Requirement */
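
/* Sanity check of the constants above (editor's note, not in the original
 * source): one BD table of 0x400 bytes holds exactly one full descriptor
 * ring in either direction, since
 *   GRSPW_RXBD_NR * sizeof(struct grspw_rxbd) = 128 * 8  = 1024 = 0x400
 *   GRSPW_TXBD_NR * sizeof(struct grspw_txbd) = 64  * 16 = 1024 = 0x400
 * which is also why grspw_open() below allocates 2 * BDTAB_SIZE per DMA
 * channel (one RX table plus one TX table).
 */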

/* Memory and HW Registers Access routines. All 32-bit access routines */
#define BD_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
/*#define BD_READ(addr) (*(volatile unsigned int *)(addr))*/
#define BD_READ(addr) leon_r32_no_cache((unsigned long)(addr))
#define REG_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
#define REG_READ(addr) (*(volatile unsigned int *)(addr))
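
/* Editor's note (assumption, not stated in the original source): BD_READ()
 * goes through leon_r32_no_cache() so that descriptor words written by the
 * DMA engine are loaded with a forced cache miss on LEON systems, where the
 * descriptor table may reside in cacheable memory. Plain loads suffice for
 * REG_READ() since the APB register area is not cached.
 */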

struct grspw_ring {
        struct grspw_ring *next;        /* Next Descriptor */
        union {
                struct grspw_txbd *tx;  /* Descriptor Address */
                struct grspw_rxbd *rx;  /* Descriptor Address */
        } bd;
        struct grspw_pkt *pkt;          /* Associated packet description, NULL if none */
};

/* An entry in the TX descriptor Ring */
struct grspw_txring {
        struct grspw_txring *next;      /* Next Descriptor */
        struct grspw_txbd *bd;          /* Descriptor Address */
        struct grspw_pkt *pkt;          /* Associated packet description, NULL if none */
};

/* An entry in the RX descriptor Ring */
struct grspw_rxring {
        struct grspw_rxring *next;      /* Next Descriptor */
        struct grspw_rxbd *bd;          /* Descriptor Address */
        struct grspw_pkt *pkt;          /* Associated packet description, NULL if none */
};


struct grspw_dma_priv {
        struct grspw_priv *core;        /* GRSPW Core */
        struct grspw_dma_regs *regs;    /* DMA Channel Registers */
        int index;                      /* DMA Channel Index @ GRSPW core */
        int open;                       /* DMA Channel opened by user */
        int started;                    /* DMA Channel activity (start|stop) */
        rtems_id sem_dma;               /* DMA Channel Semaphore */
        struct grspw_dma_stats stats;   /* DMA Channel Statistics */
        struct grspw_dma_config cfg;    /* DMA Channel Configuration */

        /*** RX ***/

        /* RX Descriptor Ring */
        struct grspw_rxbd *rx_bds;              /* Descriptor Address */
        struct grspw_rxbd *rx_bds_hwa;          /* Descriptor HW Address */
        struct grspw_rxring *rx_ring_base;
        struct grspw_rxring *rx_ring_head;      /* Next descriptor to enable */
        struct grspw_rxring *rx_ring_tail;      /* Oldest enabled Descriptor */
        int rx_irq_en_cnt_curr;
        struct {
                int waiting;
                int ready_cnt;
                int op;
                int recv_cnt;
                rtems_id sem_wait;              /* RX Semaphore used to implement RX blocking */
        } rx_wait;

        /* Queue of Packets READY to be scheduled */
        struct grspw_list ready;
        int ready_cnt;

        /* Scheduled RX Packets Queue */
        struct grspw_list rx_sched;
        int rx_sched_cnt;

        /* Queue of Packets that have been RECEIVED */
        struct grspw_list recv;
        int recv_cnt;


        /*** TX ***/

        /* TX Descriptor Ring */
        struct grspw_txbd *tx_bds;              /* Descriptor Address */
        struct grspw_txbd *tx_bds_hwa;          /* Descriptor HW Address */
        struct grspw_txring *tx_ring_base;
        struct grspw_txring *tx_ring_head;
        struct grspw_txring *tx_ring_tail;
        int tx_irq_en_cnt_curr;
        struct {
                int waiting;
                int send_cnt;
                int op;
                int sent_cnt;
                rtems_id sem_wait;              /* TX Semaphore used to implement TX blocking */
        } tx_wait;

        /* Queue of Packets ready to be scheduled for transmission */
        struct grspw_list send;
        int send_cnt;

        /* Scheduled TX Packets Queue */
        struct grspw_list tx_sched;
        int tx_sched_cnt;

        /* Queue of Packets that have been SENT */
        struct grspw_list sent;
        int sent_cnt;
};

struct grspw_priv {
        char devname[8];                /* Device name "grspw%d" */
        struct drvmgr_dev *dev;         /* Device */
        struct grspw_regs *regs;        /* Virtual Address of APB Registers */
        int irq;                        /* AMBA IRQ number of core */
        int index;                      /* Index in order it was probed */
        int core_index;                 /* Core Bus Index */
        int open;                       /* If Device is already opened (=1) or not (=0) */
        void *data;                     /* User private Data for this device instance, set by grspw_initialize_user */

        /* Features supported by Hardware */
        struct grspw_hw_sup hwsup;

        /* Pointer to an array of maximally 4 DMA Channels */
        struct grspw_dma_priv *dma;

        /* Spin-lock ISR protection */
        SPIN_DECLARE(devlock);

        /* Descriptor Memory Area for TX & RX and all DMA channels */
        unsigned int bd_mem;
        unsigned int bd_mem_alloced;

        /*** Time Code Handling ***/
        void (*tcisr)(void *data, int timecode);
        void *tcisr_arg;

        /*** Interrupt-code Handling ***/
        spwpkt_ic_isr_t icisr;
        void *icisr_arg;

        /* Disable Link on SpW Link error */
        int dis_link_on_err;

        /* "Core Global" Statistics gathered, not dependent on DMA channel */
        struct grspw_core_stats stats;
};

int grspw_initialized = 0;
int grspw_count = 0;
struct workqueue_struct *grspw_workq = NULL;
rtems_id grspw_sem;
static struct grspw_priv *priv_tab[GRSPW_MAX];

/* callback to upper layer when devices are discovered/removed */
void *(*grspw_dev_add)(int) = NULL;
void (*grspw_dev_del)(int,void*) = NULL;

/* USER OVERRIDABLE - The work task priority. Set to -1 to disable creating
 * the work-task and work-queue to save space.
 */
int grspw_work_task_priority __attribute__((weak)) = 100;
int grspw_task_stop = 0;
rtems_id grspw_work_task;
rtems_id grspw_work_queue = 0;
#define WORK_NONE         0
#define WORK_SHUTDOWN     0x100
#define WORK_DMA(channel) (0x1 << (channel))
#define WORK_DMA_MASK     0xf /* max 4 channels */
#define WORK_CORE_BIT     16
#define WORK_CORE_MASK    0xffff
#define WORK_CORE(device) ((device) << WORK_CORE_BIT)

STATIC void grspw_hw_stop(struct grspw_priv *priv);
STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma);
STATIC void grspw_dma_reset(struct grspw_dma_priv *dma);
STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma);
STATIC void grspw_isr(void *data);

void *grspw_open(int dev_no)
{
        struct grspw_priv *priv;
        unsigned int bdtabsize, hwa;
        int i;
        union drvmgr_key_value *value;

        if (grspw_initialized != 1 || (dev_no >= grspw_count))
                return NULL;

        priv = priv_tab[dev_no];

        /* Take GRSPW lock - Wait until we get semaphore */
        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return NULL;

        if (priv->open) {
                priv = NULL;
                goto out;
        }

        /* Initialize Spin-lock for GRSPW Device. This is to protect
         * CTRL and DMACTRL registers from ISR.
         */
        SPIN_INIT(&priv->devlock);

        priv->tcisr = NULL;
        priv->tcisr_arg = NULL;
        priv->icisr = NULL;
        priv->icisr_arg = NULL;

        grspw_stats_clr(priv);

        /* Allocate TX & RX Descriptor memory area for all DMA
         * channels. Max-size descriptor area is allocated (or user assigned):
         *  - 128 RX descriptors per DMA Channel
         *  - 64 TX descriptors per DMA Channel
         */
        bdtabsize = 2 * BDTAB_SIZE * priv->hwsup.ndma_chans;
        value = drvmgr_dev_key_get(priv->dev, "bdDmaArea", KEY_TYPE_INT);
        if (value) {
                priv->bd_mem = value->i;
                priv->bd_mem_alloced = 0;
                if (priv->bd_mem & (BDTAB_ALIGN-1)) {
                        GRSPW_DBG("GRSPW[%d]: user-def DMA-area not aligned",
                                  priv->index);
                        priv = NULL;
                        goto out;
                }
        } else {
                priv->bd_mem_alloced = (unsigned int)malloc(bdtabsize + BDTAB_ALIGN - 1);
                if (priv->bd_mem_alloced == 0) {
                        priv = NULL;
                        goto out;
                }
                /* Align memory */
                priv->bd_mem = (priv->bd_mem_alloced + (BDTAB_ALIGN - 1)) &
                               ~(BDTAB_ALIGN-1);
        }

        /* Translate into DMA address that HW can use to access DMA
         * descriptors
         */
        drvmgr_translate_check(
                priv->dev,
                CPUMEM_TO_DMA,
                (void *)priv->bd_mem,
                (void **)&hwa,
                bdtabsize);

        GRSPW_DBG("GRSPW%d DMA descriptor table setup: (alloced:%p, bd_mem:%p, size: %d)\n",
                priv->index, priv->bd_mem_alloced, priv->bd_mem, bdtabsize + BDTAB_ALIGN - 1);
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                /* Do DMA Channel Init. Other variables etc. are initialized
                 * when the respective DMA channel is opened.
                 *
                 * index & core are initialized by the probe function.
                 */
                priv->dma[i].open = 0;
                priv->dma[i].rx_bds = (struct grspw_rxbd *)
                        (priv->bd_mem + i*BDTAB_SIZE*2);
                priv->dma[i].rx_bds_hwa = (struct grspw_rxbd *)
                        (hwa + BDTAB_SIZE*(2*i));
                priv->dma[i].tx_bds = (struct grspw_txbd *)
                        (priv->bd_mem + BDTAB_SIZE*(2*i+1));
                priv->dma[i].tx_bds_hwa = (struct grspw_txbd *)
                        (hwa + BDTAB_SIZE*(2*i+1));
                GRSPW_DBG("  DMA[%i]: RX %p - %p (%p - %p)   TX %p - %p (%p - %p)\n",
                        i,
                        priv->dma[i].rx_bds, (void *)priv->dma[i].rx_bds + BDTAB_SIZE - 1,
                        priv->dma[i].rx_bds_hwa, (void *)priv->dma[i].rx_bds_hwa + BDTAB_SIZE - 1,
                        priv->dma[i].tx_bds, (void *)priv->dma[i].tx_bds + BDTAB_SIZE - 1,
                        priv->dma[i].tx_bds_hwa, (void *)priv->dma[i].tx_bds_hwa + BDTAB_SIZE - 1);
        }

        /* Basic initialization of hardware, clear some registers but
         * keep Link/RMAP/Node-Address registers intact.
         */
        grspw_hw_stop(priv);

        /* Register Interrupt handler and enable IRQ at IRQ ctrl */
        drvmgr_interrupt_register(priv->dev, 0, priv->devname, grspw_isr, priv);

        /* Take the device */
        priv->open = 1;
out:
        rtems_semaphore_release(grspw_sem);
        return priv;
}
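
/* Illustrative usage sketch (editor's addition, not from the original file):
 * a typical application opens a device by index and treats NULL as an error:
 *
 *   void *dev = grspw_open(0);
 *   if (dev == NULL)
 *           handle_error();   - NULL means the driver is not initialized,
 *                               dev_no is out of range, or the device is
 *                               already open
 *   ... configure link, open DMA channels, transfer packets ...
 *   grspw_close(dev);
 */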

void grspw_close(void *d)
{
        struct grspw_priv *priv = d;
        int i;

        /* Take GRSPW lock - Wait until we get semaphore */
        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return;

        /* Stop hardware from doing DMA and from generating IRQs, and put
         * the hardware into its "startup-state".
         */
        for (i=0; i<priv->hwsup.ndma_chans; i++)
                grspw_dma_close(&priv->dma[i]);
        grspw_hw_stop(priv);

        /* Mark not open */
        priv->open = 0;

        rtems_semaphore_release(grspw_sem);

        /* Check that all threads are out? */
}

void grspw_hw_support(void *d, struct grspw_hw_sup *hw)
{
        struct grspw_priv *priv = d;

        *hw = priv->hwsup;
}

void grspw_addr_ctrl(void *d, struct grspw_addr_config *cfg)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs;
        unsigned int ctrl, nodeaddr;
        IRQFLAGS_TYPE irqflags;
        int i;

        /* Check arguments before dereferencing priv */
        if (!priv || !cfg)
                return;
        regs = priv->regs;

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);

        if (cfg->promiscuous != -1) {
                /* Set Configuration */
                ctrl = REG_READ(&regs->ctrl);
                if (cfg->promiscuous)
                        ctrl |= GRSPW_CTRL_PM;
                else
                        ctrl &= ~GRSPW_CTRL_PM;
                REG_WRITE(&regs->ctrl, ctrl);
                REG_WRITE(&regs->nodeaddr, (cfg->def_mask<<8) | cfg->def_addr);

                for (i=0; i<priv->hwsup.ndma_chans; i++) {
                        ctrl = REG_READ(&regs->dma[i].ctrl);
                        ctrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
                        if (cfg->dma_nacfg[i].node_en) {
                                ctrl |= GRSPW_DMACTRL_EN;
                                REG_WRITE(&regs->dma[i].addr,
                                          (cfg->dma_nacfg[i].node_addr & 0xff) |
                                          ((cfg->dma_nacfg[i].node_mask & 0xff)<<8));
                        } else {
                                ctrl &= ~GRSPW_DMACTRL_EN;
                        }
                        REG_WRITE(&regs->dma[i].ctrl, ctrl);
                }
        }

        /* Read Current Configuration */
        cfg->promiscuous = REG_READ(&regs->ctrl) & GRSPW_CTRL_PM;
        nodeaddr = REG_READ(&regs->nodeaddr);
        cfg->def_addr = (nodeaddr & GRSPW_DEF_ADDR) >> GRSPW_DEF_ADDR_BIT;
        cfg->def_mask = (nodeaddr & GRSPW_DEF_MASK) >> GRSPW_DEF_MASK_BIT;
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                cfg->dma_nacfg[i].node_en = REG_READ(&regs->dma[i].ctrl) &
                                                GRSPW_DMACTRL_EN;
                ctrl = REG_READ(&regs->dma[i].addr);
                cfg->dma_nacfg[i].node_addr = (ctrl & GRSPW_DMAADR_ADDR) >>
                                                GRSPW_DMAADR_ADDR_BIT;
                cfg->dma_nacfg[i].node_mask = (ctrl & GRSPW_DMAADR_MASK) >>
                                                GRSPW_DMAADR_MASK_BIT;
        }
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        for (; i<4; i++) {
                cfg->dma_nacfg[i].node_en = 0;
                cfg->dma_nacfg[i].node_addr = 0;
                cfg->dma_nacfg[i].node_mask = 0;
        }
}

/* Return Current Status Register */
unsigned int grspw_link_status(void *d)
{
        struct grspw_priv *priv = d;

        return REG_READ(&priv->regs->status);
}

/* Return Current Link State */
spw_link_state_t grspw_link_state(void *d)
{
        struct grspw_priv *priv = d;
        unsigned int status = REG_READ(&priv->regs->status);

        return (status & GRSPW_STS_LS) >> GRSPW_STS_LS_BIT;
}

/* Enable Global IRQ only if some IRQ source is set */
static inline int grspw_is_irqsource_set(unsigned int ctrl, unsigned int icctrl)
{
        return (ctrl & GRSPW_CTRL_IRQSRC_MASK) ||
                (icctrl & GRSPW_ICCTRL_IRQSRC_MASK);
}

/* options and clkdiv [in/out]: set to -1 to only read current config */
void grspw_link_ctrl(void *d, int *options, int *clkdiv)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        /* Write? */
        if (clkdiv) {
                if (*clkdiv != -1)
                        REG_WRITE(&regs->clkdiv, *clkdiv & GRSPW_CLKDIV_MASK);
                *clkdiv = REG_READ(&regs->clkdiv) & GRSPW_CLKDIV_MASK;
        }
        if (options) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                if (*options != -1) {
                        ctrl = (ctrl & ~GRSPW_LINK_CFG) |
                                (*options & GRSPW_LINK_CFG);

                        /* Enable Global IRQ only if some IRQ source is set */
                        if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
                                ctrl |= GRSPW_CTRL_IE;
                        else
                                ctrl &= ~GRSPW_CTRL_IE;

                        REG_WRITE(&regs->ctrl, ctrl);
                        priv->dis_link_on_err = (*options & LINKOPTS_DIS_ONERR) >> 3;
                }
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
                *options = (ctrl & GRSPW_LINK_CFG) | (priv->dis_link_on_err << 3);
        }
}
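
/* Illustrative read-only query (an added example, not from the original
 * source): passing -1 reads the current configuration without changing it:
 *
 *   int options = -1, clkdiv = -1;
 *   grspw_link_ctrl(dev, &options, &clkdiv);
 *   - options now holds the GRSPW_LINK_CFG bits plus the dis-link-on-error
 *     flag, clkdiv the start/run clock divisor fields
 */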

/* Generate Tick-In (increment Time Counter, Send Time Code) */
void grspw_tc_tx(void *d)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        IRQFLAGS_TYPE irqflags;

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_TI);
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
}

void grspw_tc_ctrl(void *d, int *options)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        if (options == NULL)
                return;

        /* Write? */
        if (*options != -1) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~(GRSPW_CTRL_TR|GRSPW_CTRL_TT|GRSPW_CTRL_TQ);
                ctrl |= (*options & 0xd) << GRSPW_CTRL_TQ_BIT;

                /* Enable Global IRQ only if some IRQ source is set */
                if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
                        ctrl |= GRSPW_CTRL_IE;
                else
                        ctrl &= ~GRSPW_CTRL_IE;

                REG_WRITE(&regs->ctrl, ctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        } else
                ctrl = REG_READ(&regs->ctrl);
        *options = (ctrl >> GRSPW_CTRL_TQ_BIT) & 0xd;
}

/* Assign ISR Function to TimeCode RX IRQ */
void grspw_tc_isr(void *d, void (*tcisr)(void *data, int tc), void *data)
{
        struct grspw_priv *priv = d;

        priv->tcisr_arg = data;
        priv->tcisr = tcisr;
}

/* Read/Write TCTRL and TIMECNT. Write if not -1, always read current value.
 * TCTRL   = bits 7 and 6
 * TIMECNT = bits 5 to 0
 */
void grspw_tc_time(void *d, int *time)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        if (time == NULL)
                return;
        if (*time != -1)
                REG_WRITE(&regs->time, *time & (GRSPW_TIME_TCNT | GRSPW_TIME_CTRL));
        *time = REG_READ(&regs->time) & (GRSPW_TIME_TCNT | GRSPW_TIME_CTRL);
}
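
/* Illustrative read-only query (an added example, not from the original
 * source): passing -1 reads the time register without modifying it:
 *
 *   int time = -1;
 *   grspw_tc_time(dev, &time);
 *   - (time & GRSPW_TIME_TCNT) is now the current time counter and
 *     (time & GRSPW_TIME_CTRL) >> GRSPW_TIME_CTRL_BIT the TCTRL bits
 */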

/* Generate Tick-In for the given Interrupt-code and check for generation
 * error.
 *
 * Returns zero on success and non-zero on failure
 */
int grspw_ic_tickin(void *d, int ic)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        IRQFLAGS_TYPE irqflags;
        unsigned int icctrl, mask;

        /* Prepare before turning off IRQ */
        mask = 0x3f << GRSPW_ICCTRL_TXIRQ_BIT;
        ic = ((ic << GRSPW_ICCTRL_TXIRQ_BIT) & mask) |
             GRSPW_ICCTRL_II | GRSPW_ICCTRL_ID;

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        icctrl = REG_READ(&regs->icctrl);
        icctrl &= ~mask;
        icctrl |= ic;
        REG_WRITE(&regs->icctrl, icctrl); /* Generate SpW Interrupt Tick-In */
        /* the ID bit is valid after two clocks, so we do not need to wait here */
        icctrl = REG_READ(&regs->icctrl); /* Check SpW-Int generation error */
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);

        return icctrl & GRSPW_ICCTRL_ID;
}
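
/* Illustrative usage (an added example, not from the original source):
 * send interrupt-code 5 and check for a generation error:
 *
 *   if (grspw_ic_tickin(dev, 5) != 0)
 *           ... Tick-In could not be generated (ID bit still set) ...
 */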

#define ICOPTS_CTRL_MASK ICOPTS_EN_FLAGFILTER
#define ICOPTS_ICCTRL_MASK                                              \
        (ICOPTS_INTNUM | ICOPTS_EN_SPWIRQ_ON_EE | ICOPTS_EN_SPWIRQ_ON_IA | \
         ICOPTS_EN_PRIO | ICOPTS_EN_TIMEOUTIRQ | ICOPTS_EN_ACKIRQ | \
         ICOPTS_EN_TICKOUTIRQ | ICOPTS_EN_RX | ICOPTS_EN_TX | \
         ICOPTS_BASEIRQ)

/* Control Interrupt-code settings of core.
 * Write if not pointing to -1, always read current value.
 *
 * TODO: A lot of code duplication with grspw_tc_ctrl
 */
void grspw_ic_ctrl(void *d, unsigned int *options)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        unsigned int icctrl;
        IRQFLAGS_TYPE irqflags;

        if (options == NULL)
                return;

        if (*options != -1) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);

                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~GRSPW_CTRL_TF; /* Depends on a one-to-one relation
                                         * between irqopts bits and ctrl bits */
                ctrl |= (*options & ICOPTS_CTRL_MASK) <<
                        (GRSPW_CTRL_TF_BIT - 0);

                icctrl = REG_READ(&regs->icctrl);
                icctrl &= ~ICOPTS_ICCTRL_MASK; /* Depends on a one-to-one relation
                                                * between irqopts bits and icctrl bits */
                icctrl |= *options & ICOPTS_ICCTRL_MASK;

                /* Enable Global IRQ only if some IRQ source is set */
                if (grspw_is_irqsource_set(ctrl, icctrl))
                        ctrl |= GRSPW_CTRL_IE;
                else
                        ctrl &= ~GRSPW_CTRL_IE;

                REG_WRITE(&regs->ctrl, ctrl);
                REG_WRITE(&regs->icctrl, icctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        }
        *options = ((REG_READ(&regs->ctrl) & ICOPTS_CTRL_MASK) |
                    (REG_READ(&regs->icctrl) & ICOPTS_ICCTRL_MASK));
}

void grspw_ic_config(void *d, int rw, struct spwpkt_ic_config *cfg)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        if (!cfg)
                return;

        if (rw & 1) {
                REG_WRITE(&regs->ictickomask, cfg->tomask);
                REG_WRITE(&regs->icaamask, cfg->aamask);
                REG_WRITE(&regs->icrlpresc, cfg->scaler);
                REG_WRITE(&regs->icrlisr, cfg->isr_reload);
                REG_WRITE(&regs->icrlintack, cfg->ack_reload);
        }
        if (rw & 2) {
                cfg->tomask = REG_READ(&regs->ictickomask);
                cfg->aamask = REG_READ(&regs->icaamask);
                cfg->scaler = REG_READ(&regs->icrlpresc);
                cfg->isr_reload = REG_READ(&regs->icrlisr);
                cfg->ack_reload = REG_READ(&regs->icrlintack);
        }
}
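
/* Editor's note (describing the behavior implemented above, wording added):
 * the rw argument is a bit mask - bit 0 (rw & 1) writes the configuration
 * from cfg to the registers, bit 1 (rw & 2) reads the registers back into
 * cfg. For example, grspw_ic_config(dev, 3, &cfg) writes and then reads back.
 */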

/* Read or Write Interrupt-code status registers */
void grspw_ic_sts(void *d, unsigned int *rxirq, unsigned int *rxack, unsigned int *intto)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        /* No locking needed since the status bits are clear-on-write */

        if (rxirq) {
                if (*rxirq != 0)
                        REG_WRITE(&regs->icrx, *rxirq);
                else
                        *rxirq = REG_READ(&regs->icrx);
        }

        if (rxack) {
                if (*rxack != 0)
                        REG_WRITE(&regs->icack, *rxack);
                else
                        *rxack = REG_READ(&regs->icack);
        }

        if (intto) {
                if (*intto != 0)
                        REG_WRITE(&regs->ictimeout, *intto);
                else
                        *intto = REG_READ(&regs->ictimeout);
        }
}

/* Assign handler function to Interrupt-code tick out IRQ */
void grspw_ic_isr(void *d, spwpkt_ic_isr_t handler, void *data)
{
        struct grspw_priv *priv = d;

        priv->icisr_arg = data;
        priv->icisr = handler;
}

/* Set (not -1) and/or read RMAP options. */
int grspw_rmap_ctrl(void *d, int *options, int *dstkey)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        if (dstkey) {
                if (*dstkey != -1)
                        REG_WRITE(&regs->destkey, *dstkey & GRSPW_DK_DESTKEY);
                *dstkey = REG_READ(&regs->destkey) & GRSPW_DK_DESTKEY;
        }
        if (options) {
                if (*options != -1) {
                        if ((*options & RMAPOPTS_EN_RMAP) && !priv->hwsup.rmap)
                                return -1;

                        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                        ctrl = REG_READ(&regs->ctrl);
                        ctrl &= ~(GRSPW_CTRL_RE|GRSPW_CTRL_RD);
                        ctrl |= (*options & 0x3) << GRSPW_CTRL_RE_BIT;
                        REG_WRITE(&regs->ctrl, ctrl);
                        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
                }
                *options = (REG_READ(&regs->ctrl) >> GRSPW_CTRL_RE_BIT) & 0x3;
        }

        return 0;
}

void grspw_rmap_support(void *d, char *rmap, char *rmap_crc)
{
        struct grspw_priv *priv = d;

        if (rmap)
                *rmap = priv->hwsup.rmap;
        if (rmap_crc)
                *rmap_crc = priv->hwsup.rmap_crc;
}

/* Select port:
 *  -1     = only read back the currently selected port
 *   0     = Port 0
 *   1     = Port 1
 *  others = both Port 0 and Port 1
 */
int grspw_port_ctrl(void *d, int *port)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        if (port == NULL)
                return -1;

        if ((*port == 1) || (*port == 0)) {
                /* Select the port the user requested */
                if ((*port == 1) && (priv->hwsup.nports < 2))
                        return -1; /* Changing to Port 1, but only one port available */
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~(GRSPW_CTRL_NP | GRSPW_CTRL_PS);
                ctrl |= (*port & 1) << GRSPW_CTRL_PS_BIT;
                REG_WRITE(&regs->ctrl, ctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        } else if (*port > 1) {
                /* Select both ports */
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_NP);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        }

        /* Get current settings */
        ctrl = REG_READ(&regs->ctrl);
        if (ctrl & GRSPW_CTRL_NP) {
                /* Any port, selected by hardware */
                if (priv->hwsup.nports > 1)
                        *port = 3;
                else
                        *port = 0; /* Port 0 is the only port available */
        } else {
                *port = (ctrl & GRSPW_CTRL_PS) >> GRSPW_CTRL_PS_BIT;
        }

        return 0;
}
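
/* Illustrative usage (an added example, not from the original source):
 * read back the current port selection without changing it:
 *
 *   int port = -1;
 *   if (grspw_port_ctrl(dev, &port) == 0)
 *           ... port is now 0, 1 or 3 (both ports, hardware selected) ...
 */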

/* Returns the number of ports available in hardware */
int grspw_port_count(void *d)
{
        struct grspw_priv *priv = d;

        return priv->hwsup.nports;
}

/* Current active port: 0 or 1 */
int grspw_port_active(void *d)
{
        struct grspw_priv *priv = d;
        unsigned int status;

        status = REG_READ(&priv->regs->status);

        return (status & GRSPW_STS_AP) >> GRSPW_STS_AP_BIT;
}

void grspw_stats_read(void *d, struct grspw_core_stats *sts)
{
        struct grspw_priv *priv = d;

        if (sts == NULL)
                return;
        memcpy(sts, &priv->stats, sizeof(priv->stats));
}

void grspw_stats_clr(void *d)
{
        struct grspw_priv *priv = d;

        /* Clear most of the statistics */
        memset(&priv->stats, 0, sizeof(priv->stats));
}

/*** DMA Interface ***/

/* Initialize the RX and TX Descriptor Rings, empty of packets */
STATIC void grspw_bdrings_init(struct grspw_dma_priv *dma)
{
        struct grspw_ring *r;
        int i;

        /* Empty BD rings */
        dma->rx_ring_head = dma->rx_ring_base;
        dma->rx_ring_tail = dma->rx_ring_base;
        dma->tx_ring_head = dma->tx_ring_base;
        dma->tx_ring_tail = dma->tx_ring_base;

        /* Init RX Descriptors */
        r = (struct grspw_ring *)dma->rx_ring_base;
        for (i=0; i<GRSPW_RXBD_NR; i++) {

                /* Init Ring Entry */
                r[i].next = &r[i+1];
                r[i].bd.rx = &dma->rx_bds[i];
                r[i].pkt = NULL;

                /* Init HW Descriptor */
                BD_WRITE(&r[i].bd.rx->ctrl, 0);
                BD_WRITE(&r[i].bd.rx->addr, 0);
        }
        r[GRSPW_RXBD_NR-1].next = &r[0];

        /* Init TX Descriptors */
        r = (struct grspw_ring *)dma->tx_ring_base;
        for (i=0; i<GRSPW_TXBD_NR; i++) {

                /* Init Ring Entry */
                r[i].next = &r[i+1];
                r[i].bd.tx = &dma->tx_bds[i];
                r[i].pkt = NULL;

                /* Init HW Descriptor */
                BD_WRITE(&r[i].bd.tx->ctrl, 0);
                BD_WRITE(&r[i].bd.tx->haddr, 0);
                BD_WRITE(&r[i].bd.tx->dlen, 0);
                BD_WRITE(&r[i].bd.tx->daddr, 0);
        }
        r[GRSPW_TXBD_NR-1].next = &r[0];
}

/* Try to populate the descriptor ring with as many READY packets as
 * possible. The packets assigned to a descriptor are put at the end of
 * the scheduled list.
 *
 * The number of Packets scheduled is returned.
 *
 *  - READY List -> RX-SCHED List
 *  - Descriptors are initialized and enabled for reception
 */
STATIC int grspw_rx_schedule_ready(struct grspw_dma_priv *dma)
{
        int cnt;
        unsigned int ctrl, dmactrl;
        void *hwaddr;
        struct grspw_rxring *curr_bd;
        struct grspw_pkt *curr_pkt, *last_pkt;
        struct grspw_list lst;
        IRQFLAGS_TYPE irqflags;

        /* Is Ready Q empty? */
        if (grspw_list_is_empty(&dma->ready))
                return 0;

        cnt = 0;
        lst.head = curr_pkt = dma->ready.head;
        curr_bd = dma->rx_ring_head;
        while (!curr_bd->pkt) {

                /* Assign Packet to descriptor */
                curr_bd->pkt = curr_pkt;

                /* Prepare descriptor address. */
                hwaddr = curr_pkt->data;
                if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
                        drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
                                         hwaddr, &hwaddr);
                        if (curr_pkt->data == hwaddr) /* translation needed? */
                                curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
                }
                BD_WRITE(&curr_bd->bd->addr, hwaddr);

                ctrl = GRSPW_RXBD_EN;
                if (curr_bd->next == dma->rx_ring_base) {
                        /* Wrap around (only needed when smaller descriptor
                         * table)
                         */
                        ctrl |= GRSPW_RXBD_WR;
                }

                /* Is this Packet going to be an interrupt Packet? */
                if ((--dma->rx_irq_en_cnt_curr) <= 0) {
                        if (dma->cfg.rx_irq_en_cnt == 0) {
                                /* IRQ is disabled. Use a big number to avoid
                                 * hitting zero too often
                                 */
                                dma->rx_irq_en_cnt_curr = 0x3fffffff;
                        } else {
                                dma->rx_irq_en_cnt_curr = dma->cfg.rx_irq_en_cnt;
                                ctrl |= GRSPW_RXBD_IE;
                        }
                }

                if (curr_pkt->flags & RXPKT_FLAG_IE)
                        ctrl |= GRSPW_RXBD_IE;

                /* Enable descriptor */
                BD_WRITE(&curr_bd->bd->ctrl, ctrl);

                last_pkt = curr_pkt;
                curr_bd = curr_bd->next;
                cnt++;

                /* Get Next Packet from Ready Queue */
                if (curr_pkt == dma->ready.tail) {
                        /* Handled all in ready queue. */
                        curr_pkt = NULL;
                        break;
                }
                curr_pkt = curr_pkt->next;
        }

        /* Have Packets been scheduled? */
        if (cnt > 0) {
                /* Prepare list for insertion/deletion */
                lst.tail = last_pkt;

                /* Remove scheduled packets from ready queue */
                grspw_list_remove_head_list(&dma->ready, &lst);
                dma->ready_cnt -= cnt;
                if (dma->stats.ready_cnt_min > dma->ready_cnt)
                        dma->stats.ready_cnt_min = dma->ready_cnt;

                /* Insert scheduled packets into scheduled queue */
                grspw_list_append_list(&dma->rx_sched, &lst);
                dma->rx_sched_cnt += cnt;
                if (dma->stats.rx_sched_cnt_max < dma->rx_sched_cnt)
                        dma->stats.rx_sched_cnt_max = dma->rx_sched_cnt;

                /* Update RX ring position */
                dma->rx_ring_head = curr_bd;

                /* Make hardware aware of the newly enabled descriptors.
                 * We must protect from the ISR which writes RI|TI.
                 */
                SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
                dmactrl = REG_READ(&dma->regs->ctrl);
                dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
                dmactrl |= GRSPW_DMACTRL_RE | GRSPW_DMACTRL_RD;
                REG_WRITE(&dma->regs->ctrl, dmactrl);
                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
        }

        return cnt;
}
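
/* Editor's note on the rx_irq_en_cnt logic above (explanation added, not in
 * the original source): the counter implements RX IRQ coalescing. With
 * cfg.rx_irq_en_cnt == 8, only every 8th enabled descriptor gets the IE bit
 * set, so roughly one receive interrupt is raised per 8 packets; a value of
 * 0 disables the counter-driven IRQs entirely (a packet may still request an
 * IRQ individually via RXPKT_FLAG_IE).
 */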

/* Scans the RX descriptor table for scheduled Packets that have been
 * received, and moves these Packets from the head of the scheduled queue
 * to the tail of the recv queue.
 *
 * Also, for all packets the status is updated.
 *
 *  - RX-SCHED List -> RECV List
 *
 * Return Value
 * Number of packets moved
 */
STATIC int grspw_rx_process_scheduled(struct grspw_dma_priv *dma)
{
        struct grspw_rxring *curr;
        struct grspw_pkt *last_pkt;
        int recv_pkt_cnt = 0;
        unsigned int ctrl;
        struct grspw_list lst;

        curr = dma->rx_ring_tail;

        /* Step into RX ring to find out if packets have been scheduled for
         * reception.
         */
        if (!curr->pkt)
                return 0; /* No scheduled packets, thus none received, abort */

        /* Packets have been scheduled ==> scheduled Packets may have been
         * received and need to be collected into the RECV List.
         *
         * A temporary list "lst" with all received packets is created.
         */
        lst.head = curr->pkt;

        /* Loop until the first enabled "unreceived" SpW Packet is found.
         * An unused descriptor is indicated by an unassigned pkt field.
         */
        while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_RXBD_EN)) {
                /* Handle one received Packet */

                /* Remember last handled Packet so that insertion/removal from
                 * Packet lists goes fast.
                 */
                last_pkt = curr->pkt;

                /* Get Length of Packet in bytes, and reception options */
                last_pkt->dlen = (ctrl & GRSPW_RXBD_LEN) >> GRSPW_RXBD_LEN_BIT;

                /* Set flags to indicate error(s) and CRC information,
                 * and Mark Received.
                 */
                last_pkt->flags = (last_pkt->flags & ~RXPKT_FLAG_OUTPUT_MASK) |
                                  ((ctrl >> 20) & RXPKT_FLAG_OUTPUT_MASK) |
                                  RXPKT_FLAG_RX;

                /* Packet was Truncated? */
                if (ctrl & GRSPW_RXBD_TR)
                        dma->stats.rx_err_trunk++;

                /* Error End-Of-Packet? */
                if (ctrl & GRSPW_RXBD_EP)
                        dma->stats.rx_err_endpkt++;
                curr->pkt = NULL; /* Mark descriptor unused */

                /* Increment */
                curr = curr->next;
                recv_pkt_cnt++;
        }

        /* 1. Remove all handled packets from scheduled queue
         * 2. Put all handled packets into recv queue
         */
        if (recv_pkt_cnt > 0) {

                /* Update Stats, Number of Received Packets */
                dma->stats.rx_pkts += recv_pkt_cnt;

                /* Save RX ring position */
                dma->rx_ring_tail = curr;

                /* Prepare list for insertion/deletion */
                lst.tail = last_pkt;

                /* Remove received Packets from RX-SCHED queue */
                grspw_list_remove_head_list(&dma->rx_sched, &lst);
                dma->rx_sched_cnt -= recv_pkt_cnt;
                if (dma->stats.rx_sched_cnt_min > dma->rx_sched_cnt)
                        dma->stats.rx_sched_cnt_min = dma->rx_sched_cnt;

                /* Insert received Packets into RECV queue */
                grspw_list_append_list(&dma->recv, &lst);
                dma->recv_cnt += recv_pkt_cnt;
                if (dma->stats.recv_cnt_max < dma->recv_cnt)
                        dma->stats.recv_cnt_max = dma->recv_cnt;
        }

        return recv_pkt_cnt;
}
1369
1370/* Try to populate descriptor ring with as many SEND packets as possible. The
1371 * packets assigned with to a descriptor are put in the end of
1372 * the scheduled list.
1373 *
1374 * The number of Packets scheduled is returned.
1375 *
1376 *  - SEND List -> TX-SCHED List
1377 *  - Descriptors are initialized and enabled for transmission
1378 */
1379STATIC int grspw_tx_schedule_send(struct grspw_dma_priv *dma)
1380{
1381        int cnt;
1382        unsigned int ctrl, dmactrl;
1383        void *hwaddr;
1384        struct grspw_txring *curr_bd;
1385        struct grspw_pkt *curr_pkt, *last_pkt;
1386        struct grspw_list lst;
1387        IRQFLAGS_TYPE irqflags;
1388
1389        /* Is Ready Q empty? */
1390        if (grspw_list_is_empty(&dma->send))
1391                return 0;
1392
1393        cnt = 0;
1394        lst.head = curr_pkt = dma->send.head;
1395        curr_bd = dma->tx_ring_head;
1396        while (!curr_bd->pkt) {
1397
1398                /* Assign Packet to descriptor */
1399                curr_bd->pkt = curr_pkt;
1400
1401                /* Set up header transmission */
1402                if (curr_pkt->hdr && curr_pkt->hlen) {
1403                        hwaddr = curr_pkt->hdr;
1404                        if (curr_pkt->flags & PKT_FLAG_TR_HDR) {
1405                                drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
1406                                                 hwaddr, &hwaddr);
1407                                /* translation needed? */
1408                                if (curr_pkt->hdr == hwaddr)
1409                                        curr_pkt->flags &= ~PKT_FLAG_TR_HDR;
1410                        }
1411                        BD_WRITE(&curr_bd->bd->haddr, hwaddr);
1412                        ctrl = GRSPW_TXBD_EN | curr_pkt->hlen;
1413                } else {
1414                        ctrl = GRSPW_TXBD_EN;
1415                }
1416                /* Enable IRQ generation and CRC options as specified
1417                 * by user.
1418                 */
1419                ctrl |= (curr_pkt->flags & TXPKT_FLAG_INPUT_MASK) << 8;
1420
1421                if (curr_bd->next == dma->tx_ring_base) {
1422                        /* Wrap around (only needed when smaller descriptor table) */
1423                        ctrl |= GRSPW_TXBD_WR;
1424                }
1425
1426                /* Is this Packet going to be an interrupt Packet? */
1427                if ((--dma->tx_irq_en_cnt_curr) <= 0) {
1428                        if (dma->cfg.tx_irq_en_cnt == 0) {
1429                                /* IRQ is disabled.
1430                                 * A big number to avoid equal to zero too often
1431                                 */
1432                                dma->tx_irq_en_cnt_curr = 0x3fffffff;
1433                        } else {
1434                                dma->tx_irq_en_cnt_curr = dma->cfg.tx_irq_en_cnt;
1435                                ctrl |= GRSPW_TXBD_IE;
1436                        }
1437                }
1438
1439                /* Prepare descriptor data address. Parts of CTRL are
1440                 * written to DLEN for debugging only (CTRL is cleared by HW).
1441                 */
1442                if (curr_pkt->data && curr_pkt->dlen) {
1443                        hwaddr = curr_pkt->data;
1444                        if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
1445                                drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
1446                                                 hwaddr, &hwaddr);
1447                                /* no translation was needed - clear flag */
1448                                if (curr_pkt->data == hwaddr)
1449                                        curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
1450                        }
1451                        BD_WRITE(&curr_bd->bd->daddr, hwaddr);
1452                        BD_WRITE(&curr_bd->bd->dlen, curr_pkt->dlen |
1453                                                     ((ctrl & 0x3f000) << 12));
1454                } else {
1455                        BD_WRITE(&curr_bd->bd->daddr, 0);
1456                        BD_WRITE(&curr_bd->bd->dlen, ((ctrl & 0x3f000) << 12));
1457                }
1458
1459                /* Enable descriptor */
1460                BD_WRITE(&curr_bd->bd->ctrl, ctrl);
1461
1462                last_pkt = curr_pkt;
1463                curr_bd = curr_bd->next;
1464                cnt++;
1465
1466                /* Get Next Packet from Ready Queue */
1467                if (curr_pkt == dma->send.tail) {
1468                        /* Handled all in ready queue. */
1469                        curr_pkt = NULL;
1470                        break;
1471                }
1472                curr_pkt = curr_pkt->next;
1473        }
1474
1475        /* Have Packets been scheduled? */
1476        if (cnt > 0) {
1477                /* Prepare list for insertion/deletion */
1478                lst.tail = last_pkt;
1479
1480                /* Remove scheduled packets from ready queue */
1481                grspw_list_remove_head_list(&dma->send, &lst);
1482                dma->send_cnt -= cnt;
1483                if (dma->stats.send_cnt_min > dma->send_cnt)
1484                        dma->stats.send_cnt_min = dma->send_cnt;
1485
1486                /* Insert scheduled packets into scheduled queue */
1487                grspw_list_append_list(&dma->tx_sched, &lst);
1488                dma->tx_sched_cnt += cnt;
1489                if (dma->stats.tx_sched_cnt_max < dma->tx_sched_cnt)
1490                        dma->stats.tx_sched_cnt_max = dma->tx_sched_cnt;
1491
1492                /* Update TX ring position */
1493                dma->tx_ring_head = curr_bd;
1494
1495                /* Make hardware aware of the newly enabled descriptors */
1496                SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
1497                dmactrl = REG_READ(&dma->regs->ctrl);
1498                dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
1499                dmactrl |= GRSPW_DMACTRL_TE;
1500                REG_WRITE(&dma->regs->ctrl, dmactrl);
1501                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
1502        }
1503        return cnt;
1504}
1505
1506/* Scans the TX descriptor table for transmitted packets, and moves these
1507 * packets from the head of the scheduled queue to the tail of the sent queue.
1508 *
1509 * Also, for all packets the status is updated.
1510 *
1511 *  - SCHED List -> SENT List
1512 *
1513 * Return Value
1514 * Number of packets moved
1515 */
1516STATIC int grspw_tx_process_scheduled(struct grspw_dma_priv *dma)
1517{
1518        struct grspw_txring *curr;
1519        struct grspw_pkt *last_pkt;
1520        int sent_pkt_cnt = 0;
1521        unsigned int ctrl;
1522        struct grspw_list lst;
1523
1524        curr = dma->tx_ring_tail;
1525
1526        /* Step into TX ring to find if packets have been scheduled for
1527         * transmission.
1528         */
1529        if (!curr->pkt)
1530                return 0; /* No scheduled packets, thus no sent, abort */
1531
1532        /* Packets have been scheduled ==> scheduled packets may have been
1533         * transmitted and need to be collected into the SENT List.
1534         *
1535         * A temporary list "lst" with all sent packets is created.
1536         */
1537        lst.head = curr->pkt;
1538
1539        /* Loop until first enabled "un-transmitted" SpW Packet is found.
1540         * An unused descriptor is indicated by an unassigned pkt field.
1541         */
1542        while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_TXBD_EN)) {
1543                /* Handle one sent Packet */
1544
1545                /* Remember last handled Packet so that insertion/removal from
1546                 * packet lists go fast.
1547                 */
1548                last_pkt = curr->pkt;
1549
1550                /* Set flags to indicate error(s) and Mark Sent.
1551                 */
1552                last_pkt->flags = (last_pkt->flags & ~TXPKT_FLAG_OUTPUT_MASK) |
1553                                        (ctrl & TXPKT_FLAG_LINKERR) |
1554                                        TXPKT_FLAG_TX;
1555
1556                /* Sent packet experienced link error? */
1557                if (ctrl & GRSPW_TXBD_LE)
1558                        dma->stats.tx_err_link++;
1559
1560                curr->pkt = NULL; /* Mark descriptor unused */
1561
1562                /* Increment */
1563                curr = curr->next;
1564                sent_pkt_cnt++;
1565        }
1566
1567        /* 1. Remove all handled packets from TX-SCHED queue
1568         * 2. Put all handled packets into SENT queue
1569         */
1570        if (sent_pkt_cnt > 0) {
1571                /* Update Stats, Number of Transmitted Packets */
1572                dma->stats.tx_pkts += sent_pkt_cnt;
1573
1574                /* Save TX ring position */
1575                dma->tx_ring_tail = curr;
1576
1577                /* Prepare list for insertion/deletion */
1578                lst.tail = last_pkt;
1579
1580                /* Remove sent packets from TX-SCHED queue */
1581                grspw_list_remove_head_list(&dma->tx_sched, &lst);
1582                dma->tx_sched_cnt -= sent_pkt_cnt;
1583                if (dma->stats.tx_sched_cnt_min > dma->tx_sched_cnt)
1584                        dma->stats.tx_sched_cnt_min = dma->tx_sched_cnt;
1585
1586                /* Insert sent packets into SENT queue */
1587                grspw_list_append_list(&dma->sent, &lst);
1588                dma->sent_cnt += sent_pkt_cnt;
1589                if (dma->stats.sent_cnt_max < dma->sent_cnt)
1590                        dma->stats.sent_cnt_max = dma->sent_cnt;
1591        }
1592
1593        return sent_pkt_cnt;
1594}
1595
1596void *grspw_dma_open(void *d, int chan_no)
1597{
1598        struct grspw_priv *priv = d;
1599        struct grspw_dma_priv *dma;
1600        int size;
1601
1602        if ((chan_no < 0) || (priv->hwsup.ndma_chans <= chan_no))
1603                return NULL;
1604
1605        dma = &priv->dma[chan_no];
1606
1607        /* Take GRSPW lock */
1608        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1609            != RTEMS_SUCCESSFUL)
1610                return NULL;
1611
1612        if (dma->open) {
1613                dma = NULL;
1614                goto out;
1615        }
1616
1617        dma->started = 0;
1618
1619        /* Set Default Configuration:
1620         *
1621         *  - MAX RX Packet Length = DEFAULT_RXMAX
1622         *  - Disable RX and TX IRQ generation
1623         *  - NO-SPILL mode enabled
1624         */
1625        dma->cfg.rxmaxlen = DEFAULT_RXMAX;
1626        dma->cfg.rx_irq_en_cnt = 0;
1627        dma->cfg.tx_irq_en_cnt = 0;
1628        dma->cfg.flags = DMAFLAG_NO_SPILL;
1629
1630        /* DMA Channel Semaphore created with count = 1 */
1631        if (rtems_semaphore_create(
1632            rtems_build_name('S', 'D', '0' + priv->index, '0' + chan_no), 1,
1633            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1634            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1635            RTEMS_NO_PRIORITY_CEILING, 0, &dma->sem_dma) != RTEMS_SUCCESSFUL) {
1636                dma = NULL;
1637                goto out;
1638        }
1639
1640        /* Allocate memory for the two descriptor rings */
1641        size = sizeof(struct grspw_ring) * (GRSPW_RXBD_NR + GRSPW_TXBD_NR);
1642        dma->rx_ring_base = (struct grspw_rxring *)malloc(size);
1643        dma->tx_ring_base = (struct grspw_txring *)&dma->rx_ring_base[GRSPW_RXBD_NR];
1644        if (dma->rx_ring_base == NULL) {
1645                dma = NULL;
1646                goto out;
1647        }
1648
1649        /* Create DMA RX and TX channel semaphores with count = 0 */
1650        if (rtems_semaphore_create(
1651            rtems_build_name('S', 'R', '0' + priv->index, '0' + chan_no), 0,
1652            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1653            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1654            RTEMS_NO_PRIORITY_CEILING, 0, &dma->rx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
1655                dma = NULL;
1656                goto out;
1657        }
1658        if (rtems_semaphore_create(
1659            rtems_build_name('S', 'T', '0' + priv->index, '0' + chan_no), 0,
1660            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1661            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1662            RTEMS_NO_PRIORITY_CEILING, 0, &dma->tx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
1663                dma = NULL;
1664                goto out;
1665        }
1666
1667        /* Reset software structures */
1668        grspw_dma_reset(dma);
1669
1670        /* Take the device */
1671        dma->open = 1;
1672out:
1673        /* Return GRSPW Lock */
1674        rtems_semaphore_release(grspw_sem);
1675
1676        return dma;
1677}
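
/* Example (documentation only): a minimal sketch of opening a DMA channel.
 * The device handle is assumed to come from grspw_open(), the device open
 * function of this driver's public API (bsp/grspw_pkt.h).
 */
#if 0
static void *example_open_chan0(void)
{
        void *dev, *chan;

        dev = grspw_open(0);            /* first GRSPW core */
        if (dev == NULL)
                return NULL;            /* no such device */

        chan = grspw_dma_open(dev, 0);  /* DMA channel 0 */
        if (chan == NULL)
                return NULL;            /* already open or invalid channel */

        return chan;
}
#endif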
1678
1679/* Initialize Software Structures:
1680 *  - Clear all Queues
1681 *  - init BD ring
1682 *  - init IRQ counter
1683 *  - clear statistics counters
1684 *  - init wait structures and semaphores
1685 */
1686STATIC void grspw_dma_reset(struct grspw_dma_priv *dma)
1687{
1688        /* Empty RX and TX queues */
1689        grspw_list_clr(&dma->ready);
1690        grspw_list_clr(&dma->rx_sched);
1691        grspw_list_clr(&dma->recv);
1692        grspw_list_clr(&dma->send);
1693        grspw_list_clr(&dma->tx_sched);
1694        grspw_list_clr(&dma->sent);
1695        dma->ready_cnt = 0;
1696        dma->rx_sched_cnt = 0;
1697        dma->recv_cnt = 0;
1698        dma->send_cnt = 0;
1699        dma->tx_sched_cnt = 0;
1700        dma->sent_cnt = 0;
1701
1702        dma->rx_irq_en_cnt_curr = 0;
1703        dma->tx_irq_en_cnt_curr = 0;
1704
1705        grspw_bdrings_init(dma);
1706
1707        dma->rx_wait.waiting = 0;
1708        dma->tx_wait.waiting = 0;
1709
1710        grspw_dma_stats_clr(dma);
1711}
1712
1713void grspw_dma_close(void *c)
1714{
1715        struct grspw_dma_priv *dma = c;
1716
1717        if (!dma->open)
1718                return;
1719
1720        /* Take device lock - Wait until we get semaphore */
1721        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1722            != RTEMS_SUCCESSFUL)
1723                return;
1724
1725        grspw_dma_stop_locked(dma);
1726
1727        /* Free resources */
1728        rtems_semaphore_delete(dma->rx_wait.sem_wait);
1729        rtems_semaphore_delete(dma->tx_wait.sem_wait);
1730        rtems_semaphore_delete(dma->sem_dma); /* Release and delete lock */
1731
1732        /* Free memory */
1733        if (dma->rx_ring_base)
1734                free(dma->rx_ring_base);
1735        dma->rx_ring_base = NULL;
1736        dma->tx_ring_base = NULL;
1737
1738        dma->open = 0;
1739}
1740
1741/* Schedule a list of packets for transmission at some point in the
1742 * future.
1743 *
1744 * 1. Move transmitted packets to SENT List (SCHED->SENT)
1745 * 2. Add the requested packets to the SEND List (USER->SEND)
1746 * 3. Schedule as many packets as possible (SEND->SCHED)
1747 */
1748int grspw_dma_tx_send(void *c, int opts, struct grspw_list *pkts, int count)
1749{
1750        struct grspw_dma_priv *dma = c;
1751        int ret;
1752
1753        /* Take DMA channel lock */
1754        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1755            != RTEMS_SUCCESSFUL)
1756                return -1;
1757
1758        if (dma->started == 0) {
1759                ret = 1; /* signal DMA has been stopped */
1760                goto out;
1761        }
1762        ret = 0;
1763
1764        /* 1. Move transmitted packets to SENT List (SCHED->SENT) */
1765        if ((opts & 1) == 0)
1766                grspw_tx_process_scheduled(dma);
1767
1768        /* 2. Add the requested packets to the SEND List (USER->SEND) */
1769        if (pkts) {
1770                grspw_list_append_list(&dma->send, pkts);
1771                dma->send_cnt += count;
1772                if (dma->stats.send_cnt_max < dma->send_cnt)
1773                        dma->stats.send_cnt_max = dma->send_cnt;
1774        }
1775
1776        /* 3. Schedule as many packets as possible (SEND->SCHED) */
1777        if ((opts & 2) == 0)
1778                grspw_tx_schedule_send(dma);
1779
1780out:
1781        /* Unlock DMA channel */
1782        rtems_semaphore_release(dma->sem_dma);
1783
1784        return ret;
1785}
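
/* Example (documentation only): a sketch of queueing one packet for
 * transmission on a started DMA channel "chan". The static packet buffer is
 * a simplification; a real application keeps a pool of struct grspw_pkt
 * buffers and recycles them with grspw_dma_tx_reclaim().
 */
#if 0
static int example_send_one(void *chan, void *data, int len)
{
        static struct grspw_pkt pkt;    /* must stay valid until reclaimed */
        struct grspw_list lst;

        memset(&pkt, 0, sizeof(pkt));
        pkt.data = data;                /* SpW address + cargo */
        pkt.dlen = len;
        pkt.hdr = NULL;                 /* no separate header buffer */
        pkt.hlen = 0;
        pkt.flags = PKT_FLAG_TR_DATA;   /* translate CPU address if needed */

        /* single-entry list: head == tail */
        pkt.next = NULL;
        lst.head = lst.tail = &pkt;

        /* opts=0: also run SCHED->SENT and SEND->SCHED in the same call */
        return grspw_dma_tx_send(chan, 0, &lst, 1);
}
#endif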
1786
1787int grspw_dma_tx_reclaim(void *c, int opts, struct grspw_list *pkts, int *count)
1788{
1789        struct grspw_dma_priv *dma = c;
1790        struct grspw_pkt *pkt, *lastpkt;
1791        int cnt, started;
1792
1793        /* Take DMA channel lock */
1794        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1795            != RTEMS_SUCCESSFUL)
1796                return -1;
1797
1798        /* 1. Move transmitted packets to SENT List (SCHED->SENT) */
1799        started = dma->started;
1800        if ((started > 0) && ((opts & 1) == 0))
1801                grspw_tx_process_scheduled(dma);
1802
1803        /* 2. Move all/count SENT packets to the caller's list (SENT->USER) */
1804        if (pkts) {
1805                if ((count == NULL) || (*count == -1) ||
1806                    (*count >= dma->sent_cnt)) {
1807                        /* Move all SENT Packets */
1808                        *pkts = dma->sent;
1809                        grspw_list_clr(&dma->sent);
1810                        if (count)
1811                                *count = dma->sent_cnt;
1812                        dma->sent_cnt = 0;
1813                } else {
1814                        /* Move a number of SENT Packets */
1815                        pkts->head = pkt = lastpkt = dma->sent.head;
1816                        cnt = 0;
1817                        while (cnt < *count) {
1818                                lastpkt = pkt;
1819                                pkt = pkt->next;
1820                                cnt++;
1821                        }
1822                        if (cnt > 0) {
1823                                pkts->tail = lastpkt;
1824                                grspw_list_remove_head_list(&dma->sent, pkts);
1825                                dma->sent_cnt -= cnt;
1826                        } else {
1827                                grspw_list_clr(pkts);
1828                        }
1829                }
1830        } else if (count) {
1831                *count = 0;
1832        }
1833
1834        /* 3. Schedule as many packets as possible (SEND->SCHED) */
1835        if ((started > 0) && ((opts & 2) == 0))
1836                grspw_tx_schedule_send(dma);
1837
1838        /* Unlock DMA channel */
1839        rtems_semaphore_release(dma->sem_dma);
1840
1841        return (~started) & 1; /* signal DMA has been stopped */
1842}
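
/* Example (documentation only): a sketch of reclaiming sent packet buffers.
 * TXPKT_FLAG_TX in pkt->flags tells whether a reclaimed packet was actually
 * transmitted; it is never set for packets flushed out by a DMA stop.
 */
#if 0
static void example_reclaim(void *chan)
{
        struct grspw_list lst;
        struct grspw_pkt *pkt;
        int cnt = -1;                   /* -1: take all SENT packets */

        if (grspw_dma_tx_reclaim(chan, 0, &lst, &cnt) < 0)
                return;                 /* failed to take the channel lock */

        for (pkt = lst.head; cnt > 0; cnt--, pkt = pkt->next) {
                if (pkt->flags & TXPKT_FLAG_TX) {
                        /* transmitted - the buffer may be reused */
                } else {
                        /* never sent, e.g. flushed by a DMA stop */
                }
        }
}
#endif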
1843
1844void grspw_dma_tx_count(void *c, int *send, int *sched, int *sent)
1845{
1846        struct grspw_dma_priv *dma = c;
1847
1848        if (send)
1849                *send = dma->send_cnt;
1850        if (sched)
1851                *sched = dma->tx_sched_cnt;
1852        if (sent)
1853                *sent = dma->sent_cnt;
1854}
1855
1856static inline int grspw_tx_wait_eval(struct grspw_dma_priv *dma)
1857{
1858        int send_val, sent_val;
1859
1860        if (dma->tx_wait.send_cnt >= (dma->send_cnt + dma->tx_sched_cnt))
1861                send_val = 1;
1862        else
1863                send_val = 0;
1864
1865        if (dma->tx_wait.sent_cnt <= dma->sent_cnt)
1866                sent_val = 1;
1867        else
1868                sent_val = 0;
1869
1870        /* AND or OR ? */
1871        if (dma->tx_wait.op == 0)
1872                return send_val & sent_val; /* AND */
1873        else
1874                return send_val | sent_val; /* OR */
1875}
1876
1877/* Block until the condition is met: send_cnt or fewer packets are queued
1878 * in the "Send and Scheduled" queues, op (AND or OR), sent_cnt or more
1879 * packets have been sent (Sent queue).
1880 * If a link error occurs and Stop-on-Link-error is enabled, this function
1881 * also returns to the caller.
1882 */
1883int grspw_dma_tx_wait(void *c, int send_cnt, int op, int sent_cnt, int timeout)
1884{
1885        struct grspw_dma_priv *dma = c;
1886        int ret, rc;
1887
1888        if (timeout == 0)
1889                timeout = RTEMS_NO_TIMEOUT;
1890
1891check_condition:
1892
1893        /* Take DMA channel lock */
1894        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1895            != RTEMS_SUCCESSFUL)
1896                return -1;
1897
1898        /* Check that no other task is waiting; this driver only supports
1899         * one waiter at a time.
1900         */
1901        if (dma->tx_wait.waiting) {
1902                ret = -1;
1903                goto out;
1904        }
1905
1906        /* Stop if link error or similar, abort */
1907        if (dma->started == 0) {
1908                ret = 1;
1909                goto out;
1910        }
1911
1912        /* Set up Condition */
1913        dma->tx_wait.send_cnt = send_cnt;
1914        dma->tx_wait.op = op;
1915        dma->tx_wait.sent_cnt = sent_cnt;
1916
1917        if (grspw_tx_wait_eval(dma) == 0) {
1918                /* Prepare Wait */
1919                dma->tx_wait.waiting = 1;
1920
1921                /* Release DMA channel lock */
1922                rtems_semaphore_release(dma->sem_dma);
1923
1924                /* Try to take the Wait lock; if this fails the link may have
1925                 * gone down or the user stopped this DMA channel
1926                 */
1927                rc = rtems_semaphore_obtain(dma->tx_wait.sem_wait, RTEMS_WAIT,
1928                                                timeout);
1929                if (rc == RTEMS_TIMEOUT) {
1930                        dma->tx_wait.waiting = 0;
1931                        return 2;
1932                } else if (rc == RTEMS_UNSATISFIED ||
1933                           rc == RTEMS_OBJECT_WAS_DELETED) {
1934                        dma->tx_wait.waiting = 0;
1935                        return 1; /* sem was flushed/deleted, means DMA stop */
1936                } else if (rc != RTEMS_SUCCESSFUL)
1937                        return -1;
1938
1939                /* Check condition once more */
1940                goto check_condition;
1941        } else {
1942                /* No Wait needed */
1943                dma->tx_wait.waiting = 0;
1944        }
1945
1946        ret = 0;
1947out:
1948        /* Unlock DMA channel */
1949        rtems_semaphore_release(dma->sem_dma);
1950
1951        return ret;
1952}
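
/* Example (documentation only): a sketch that blocks until every queued
 * packet has been transmitted, i.e. zero packets remain in the SEND and
 * SCHED queues. With op=0 (AND) and sent_cnt=0 the Sent-queue condition is
 * always true, so only the send-side condition matters.
 */
#if 0
static int example_tx_drain(void *chan)
{
        /* send_cnt=0, op=AND, sent_cnt=0, timeout=0 (wait forever).
         * Returns 0 on success, 1 if DMA was stopped, 2 on timeout.
         */
        return grspw_dma_tx_wait(chan, 0, 0, 0, 0);
}
#endif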
1953
1954int grspw_dma_rx_recv(void *c, int opts, struct grspw_list *pkts, int *count)
1955{
1956        struct grspw_dma_priv *dma = c;
1957        struct grspw_pkt *pkt, *lastpkt;
1958        int cnt, started;
1959
1960        /* Take DMA channel lock */
1961        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1962            != RTEMS_SUCCESSFUL)
1963                return -1;
1964
1965        /* 1. Move Scheduled packets to RECV List (SCHED->RECV) */
1966        started = dma->started;
1967        if (((opts & 1) == 0) && (started > 0))
1968                grspw_rx_process_scheduled(dma);
1969
1970        /* 2. Move all/count RECV packets to the caller's list (RECV->USER) */
1971        if (pkts) {
1972                if ((count == NULL) || (*count == -1) ||
1973                    (*count >= dma->recv_cnt)) {
1974                        /* Move all Received packets */
1975                        *pkts = dma->recv;
1976                        grspw_list_clr(&dma->recv);
1977                        if (count)
1978                                *count = dma->recv_cnt;
1979                        dma->recv_cnt = 0;
1980                } else {
1981                        /* Move a number of RECV Packets */
1982                        pkts->head = pkt = lastpkt = dma->recv.head;
1983                        cnt = 0;
1984                        while (cnt < *count) {
1985                                lastpkt = pkt;
1986                                pkt = pkt->next;
1987                                cnt++;
1988                        }
1989                        if (cnt > 0) {
1990                                pkts->tail = lastpkt;
1991                                grspw_list_remove_head_list(&dma->recv, pkts);
1992                                dma->recv_cnt -= cnt;
1993                        } else {
1994                                grspw_list_clr(pkts);
1995                        }
1996                }
1997        } else if (count) {
1998                *count = 0;
1999        }
2000
2001        /* 3. Schedule as many free packet buffers as possible (READY->SCHED) */
2002        if (((opts & 2) == 0) && (started > 0))
2003                grspw_rx_schedule_ready(dma);
2004
2005        /* Unlock DMA channel */
2006        rtems_semaphore_release(dma->sem_dma);
2007
2008        return (~started) & 1;
2009}
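
/* Example (documentation only): a sketch of a receive poll. For each packet
 * taken from the RECV queue the driver has updated dlen to the received
 * length and set reception status bits in flags (see the RXPKT_FLAG_*
 * definitions in bsp/grspw_pkt.h).
 */
#if 0
static void example_rx_poll(void *chan)
{
        struct grspw_list lst;
        struct grspw_pkt *pkt;
        int cnt = -1;                   /* -1: take all received packets */

        if (grspw_dma_rx_recv(chan, 0, &lst, &cnt) < 0)
                return;

        for (pkt = lst.head; cnt > 0; cnt--, pkt = pkt->next) {
                /* process pkt->data, pkt->dlen bytes, check pkt->flags */
        }

        /* hand the buffers back with grspw_dma_rx_prepare() when done */
}
#endif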
2010
2011int grspw_dma_rx_prepare(void *c, int opts, struct grspw_list *pkts, int count)
2012{
2013        struct grspw_dma_priv *dma = c;
2014        int ret;
2015
2016        /* Take DMA channel lock */
2017        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2018            != RTEMS_SUCCESSFUL)
2019                return -1;
2020
2021        if (dma->started == 0) {
2022                ret = 1;
2023                goto out;
2024        }
2025
2026        /* 1. Move Received packets to RECV List (SCHED->RECV) */
2027        if ((opts & 1) == 0)
2028                grspw_rx_process_scheduled(dma);
2029
2030        /* 2. Add the "free/ready" packet buffers to the READY List (USER->READY) */
2031        if (pkts && (count > 0)) {
2032                grspw_list_append_list(&dma->ready, pkts);
2033                dma->ready_cnt += count;
2034                if (dma->stats.ready_cnt_max < dma->ready_cnt)
2035                        dma->stats.ready_cnt_max = dma->ready_cnt;
2036        }
2037
2038        /* 3. Schedule as many packets as possible (READY->SCHED) */
2039        if ((opts & 2) == 0)
2040                grspw_rx_schedule_ready(dma);
2041
2042        ret = 0;
2043out:
2044        /* Unlock DMA channel */
2045        rtems_semaphore_release(dma->sem_dma);
2046
2047        return ret;
2048}
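
/* Example (documentation only): a sketch of priming the receiver with a
 * static pool of packet buffers. NBUFS and BUF_SIZE are made-up names; the
 * buffer size must be at least the configured rxmaxlen.
 */
#if 0
#define NBUFS           8
#define BUF_SIZE        1024

static struct grspw_pkt rx_pkts[NBUFS];
static unsigned char rx_bufs[NBUFS][BUF_SIZE];

static int example_rx_prime(void *chan)
{
        struct grspw_list lst;
        int i;

        for (i = 0; i < NBUFS; i++) {
                memset(&rx_pkts[i], 0, sizeof(rx_pkts[i]));
                rx_pkts[i].data = rx_bufs[i];
                rx_pkts[i].next = &rx_pkts[i + 1];
        }
        rx_pkts[NBUFS - 1].next = NULL;
        lst.head = &rx_pkts[0];
        lst.tail = &rx_pkts[NBUFS - 1];

        /* opts=0: READY->SCHED scheduling is done by the call itself */
        return grspw_dma_rx_prepare(chan, 0, &lst, NBUFS);
}
#endif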
2049
2050void grspw_dma_rx_count(void *c, int *ready, int *sched, int *recv)
2051{
2052        struct grspw_dma_priv *dma = c;
2053
2054        if (ready)
2055                *ready = dma->ready_cnt;
2056        if (sched)
2057                *sched = dma->rx_sched_cnt;
2058        if (recv)
2059                *recv = dma->recv_cnt;
2060}
2061
2062static inline int grspw_rx_wait_eval(struct grspw_dma_priv *dma)
2063{
2064        int ready_val, recv_val;
2065
2066        if (dma->rx_wait.ready_cnt >= (dma->ready_cnt + dma->rx_sched_cnt))
2067                ready_val = 1;
2068        else
2069                ready_val = 0;
2070
2071        if (dma->rx_wait.recv_cnt <= dma->recv_cnt)
2072                recv_val = 1;
2073        else
2074                recv_val = 0;
2075
2076        /* AND or OR ? */
2077        if (dma->rx_wait.op == 0)
2078                return ready_val & recv_val; /* AND */
2079        else
2080                return ready_val | recv_val; /* OR */
2081}
2082
2083/* Block until the condition is met: recv_cnt or more packets are queued in
2084 * the RECV queue, op (AND or OR), ready_cnt or fewer packet buffers are
2085 * available in the "READY and Scheduled" queues.
2086 * If a link error occurs and Stop-on-Link-error is enabled, this function
2087 * also returns to the caller, however with an error.
2088 */
2089int grspw_dma_rx_wait(void *c, int recv_cnt, int op, int ready_cnt, int timeout)
2090{
2091        struct grspw_dma_priv *dma = c;
2092        int ret, rc;
2093
2094        if (timeout == 0)
2095                timeout = RTEMS_NO_TIMEOUT;
2096
2097check_condition:
2098
2099        /* Take DMA channel lock */
2100        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2101            != RTEMS_SUCCESSFUL)
2102                return -1;
2103
2104        /* Check that no other task is waiting; this driver only supports
2105         * one waiter at a time.
2106         */
2107        if (dma->rx_wait.waiting) {
2108                ret = -1;
2109                goto out;
2110        }
2111
2112        /* Stop if link error or similar (DMA stopped) */
2113        if (dma->started == 0) {
2114                ret = 1;
2115                goto out;
2116        }
2117
2118        /* Set up Condition */
2119        dma->rx_wait.recv_cnt = recv_cnt;
2120        dma->rx_wait.op = op;
2121        dma->rx_wait.ready_cnt = ready_cnt;
2122
2123        if (grspw_rx_wait_eval(dma) == 0) {
2124                /* Prepare Wait */
2125                dma->rx_wait.waiting = 1;
2126
2127                /* Release channel lock */
2128                rtems_semaphore_release(dma->sem_dma);
2129
2130                /* Try to take the Wait lock; if this fails the link may have
2131                 * gone down or the user stopped this DMA channel
2132                 */
2133                rc = rtems_semaphore_obtain(dma->rx_wait.sem_wait, RTEMS_WAIT,
2134                                           timeout);
2135                if (rc == RTEMS_TIMEOUT) {
2136                        dma->rx_wait.waiting = 0;
2137                        return 2;
2138                } else if (rc == RTEMS_UNSATISFIED ||
2139                           rc == RTEMS_OBJECT_WAS_DELETED) {
2140                        dma->rx_wait.waiting = 0;
2141                        return 1; /* sem was flushed/deleted, means DMA stop */
2142                } else if (rc != RTEMS_SUCCESSFUL)
2143                        return -1;
2144
2145                /* Check condition once more */
2146                goto check_condition;
2147        } else {
2148                /* No Wait needed */
2149                dma->rx_wait.waiting = 0;
2150        }
2151        ret = 0;
2152
2153out:
2154        /* Unlock DMA channel */
2155        rtems_semaphore_release(dma->sem_dma);
2156
2157        return ret;
2158}
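
/* Example (documentation only): a sketch that blocks until at least one
 * packet is waiting in the RECV queue. With op=0 (AND) and a huge ready_cnt
 * the ready-side condition is always true, so only the recv condition
 * matters. timeout=0 means wait forever.
 */
#if 0
static int example_rx_wait_one(void *chan, int timeout_ticks)
{
        /* recv_cnt=1, op=AND, ready_cnt=large (always satisfied) */
        return grspw_dma_rx_wait(chan, 1, 0, 0x7fffffff, timeout_ticks);
}
#endif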
2159
2160int grspw_dma_config(void *c, struct grspw_dma_config *cfg)
2161{
2162        struct grspw_dma_priv *dma = c;
2163
2164        if (dma->started || !cfg)
2165                return -1;
2166
2167        if (cfg->flags & ~DMAFLAG_MASK)
2168                return -1;
2169
2170        /* Update Configuration */
2171        memcpy(&dma->cfg, cfg, sizeof(*cfg));
2172
2173        return 0;
2174}
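
/* Example (documentation only): a sketch of configuring a DMA channel before
 * it is started. Generating one RX IRQ per 8 received packets amortizes the
 * interrupt cost over several packets.
 */
#if 0
static int example_config(void *chan)
{
        struct grspw_dma_config cfg;

        memset(&cfg, 0, sizeof(cfg));
        cfg.rxmaxlen = 1024;            /* max RX packet length in bytes */
        cfg.rx_irq_en_cnt = 8;          /* RX IRQ on every 8th packet */
        cfg.tx_irq_en_cnt = 0;          /* no TX IRQs */
        cfg.flags = DMAFLAG_NO_SPILL;   /* wait for buffers, do not spill */

        return grspw_dma_config(chan, &cfg);    /* fails if already started */
}
#endif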
2175
2176void grspw_dma_config_read(void *c, struct grspw_dma_config *cfg)
2177{
2178        struct grspw_dma_priv *dma = c;
2179
2180        /* Copy Current Configuration */
2181        memcpy(cfg, &dma->cfg, sizeof(*cfg));
2182}
2183
2184void grspw_dma_stats_read(void *c, struct grspw_dma_stats *sts)
2185{
2186        struct grspw_dma_priv *dma = c;
2187
2188        memcpy(sts, &dma->stats, sizeof(dma->stats));
2189}
2190
2191void grspw_dma_stats_clr(void *c)
2192{
2193        struct grspw_dma_priv *dma = c;
2194
2195        /* Clear most of the statistics */
2196        memset(&dma->stats, 0, sizeof(dma->stats));
2197
2198        /* Init proper default values so that comparisons will work the
2199         * first time.
2200         */
2201        dma->stats.send_cnt_min = 0x3fffffff;
2202        dma->stats.tx_sched_cnt_min = 0x3fffffff;
2203        dma->stats.ready_cnt_min = 0x3fffffff;
2204        dma->stats.rx_sched_cnt_min = 0x3fffffff;
2205}
2206
2207int grspw_dma_start(void *c)
2208{
2209        struct grspw_dma_priv *dma = c;
2210        struct grspw_dma_regs *dregs = dma->regs;
2211        unsigned int ctrl;
2212
2213        if (dma->started)
2214                return 0;
2215
2216        /* Initialize Software Structures:
2217         *  - Clear all Queues
2218         *  - init BD ring
2219         *  - init IRQ counter
2220         *  - clear statistics counters
2221         *  - init wait structures and semaphores
2222         */
2223        grspw_dma_reset(dma);
2224
2225        /* RX and TX are not enabled until the user fills the SEND and READY
2226         * queues with SpaceWire packet buffers. So we do not have to worry about
2227         * IRQs for this channel just yet. However other DMA channels
2228         * may be active.
2229         *
2230         * Some functionality that is not changed during started mode is set up
2231         * once and for all here:
2232         *
2233         *   - RX MAX Packet length
2234         *   - TX Descriptor base address to first BD in TX ring (not enabled)
2235         *   - RX Descriptor base address to first BD in RX ring (not enabled)
2236         *   - IRQs (TX DMA, RX DMA, DMA ERROR)
2237         *   - Strip PID
2238         *   - Strip Address
2239         *   - No Spill
2240         *   - Receiver Enable
2241         *   - disable on link error (LE)
2242         *
2243         * Note that the address register and the address enable bit in DMACTRL
2244         * register must be left untouched, they are configured on a GRSPW
2245         * core level.
2246         *
2247         * Note that the receiver is enabled here, but since descriptors are
2248         * not enabled the GRSPW core may stop/pause RX (if NS bit set) until
2249         * descriptors are enabled or it may ignore RX packets (NS=0) until
2250         * descriptors are enabled (writing RD bit).
2251         */
2252        REG_WRITE(&dregs->txdesc, dma->tx_bds_hwa);
2253        REG_WRITE(&dregs->rxdesc, dma->rx_bds_hwa);
2254
2255        /* MAX Packet length */
2256        REG_WRITE(&dma->regs->rxmax, dma->cfg.rxmaxlen);
2257
2258        ctrl =  GRSPW_DMACTRL_AI | GRSPW_DMACTRL_PS | GRSPW_DMACTRL_PR |
2259                GRSPW_DMACTRL_TA | GRSPW_DMACTRL_RA | GRSPW_DMACTRL_RE |
2260                (dma->cfg.flags & DMAFLAG_MASK) << GRSPW_DMACTRL_NS_BIT;
2261        if (dma->core->dis_link_on_err)
2262                ctrl |= GRSPW_DMACTRL_LE;
2263        if (dma->cfg.rx_irq_en_cnt != 0)
2264                ctrl |= GRSPW_DMACTRL_RI;
2265        if (dma->cfg.tx_irq_en_cnt != 0)
2266                ctrl |= GRSPW_DMACTRL_TI;
2267        REG_WRITE(&dregs->ctrl, ctrl);
2268
2269        dma->started = 1; /* open up other DMA interfaces */
2270
2271        return 0;
2272}
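
/* Example (documentation only): the typical channel life cycle implied by the
 * functions above: open, configure while stopped, start, operate, stop, close.
 */
#if 0
static void example_lifecycle(void *dev)
{
        void *chan = grspw_dma_open(dev, 0);

        if (chan == NULL)
                return;

        /* configuration must be done while the channel is stopped */
        /* ... grspw_dma_config(chan, &cfg); ... */

        grspw_dma_start(chan);
        /* ... grspw_dma_rx_prepare(), grspw_dma_tx_send(), etc. ... */
        grspw_dma_stop(chan);

        grspw_dma_close(chan);
}
#endif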
2273
2274STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma)
2275{
2276        IRQFLAGS_TYPE irqflags;
2277
2278        if (dma->started == 0)
2279                return;
2280        dma->started = 0;
2281
2282        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
2283        grspw_hw_dma_stop(dma);
2284        SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2285
2286        /* From here no more packets will be sent, however
2287         * there may still exist scheduled packets that have been
2288         * sent, and packets in the SEND Queue waiting for free
2289         * descriptors. All packets are moved to the SENT Queue
2290         * so that the user may get its buffers back, the user
2291         * must look at the TXPKT_FLAG_TX in order to determine
2292         * if the packet was sent or not.
2293         */
2294
2295        /* Retrieve all scheduled packets that have been sent */
2296        grspw_tx_process_scheduled(dma);
2297
2298        /* Move un-sent packets in SEND and SCHED queue to the
2299         * SENT Queue. (never marked sent)
2300         */
2301        if (!grspw_list_is_empty(&dma->tx_sched)) {
2302                grspw_list_append_list(&dma->sent, &dma->tx_sched);
2303                grspw_list_clr(&dma->tx_sched);
2304                dma->sent_cnt += dma->tx_sched_cnt;
2305                dma->tx_sched_cnt = 0;
2306        }
2307        if (!grspw_list_is_empty(&dma->send)) {
2308                grspw_list_append_list(&dma->sent, &dma->send);
2309                grspw_list_clr(&dma->send);
2310                dma->sent_cnt += dma->send_cnt;
2311                dma->send_cnt = 0;
2312        }
2313
2314        /* Similar for RX */
2315        grspw_rx_process_scheduled(dma);
2316        if (!grspw_list_is_empty(&dma->rx_sched)) {
2317                grspw_list_append_list(&dma->recv, &dma->rx_sched);
2318                grspw_list_clr(&dma->rx_sched);
2319                dma->recv_cnt += dma->rx_sched_cnt;
2320                dma->rx_sched_cnt = 0;
2321        }
2322        if (!grspw_list_is_empty(&dma->ready)) {
2323                grspw_list_append_list(&dma->recv, &dma->ready);
2324                grspw_list_clr(&dma->ready);
2325                dma->recv_cnt += dma->ready_cnt;
2326                dma->ready_cnt = 0;
2327        }
2328
2329        /* Throw out blocked threads */
2330        rtems_semaphore_flush(dma->rx_wait.sem_wait);
2331        rtems_semaphore_flush(dma->tx_wait.sem_wait);
2332}
2333
2334void grspw_dma_stop(void *c)
2335{
2336        struct grspw_dma_priv *dma = c;
2337
2338        /* Take DMA Channel lock */
2339        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2340            != RTEMS_SUCCESSFUL)
2341                return;
2342
2343        grspw_dma_stop_locked(dma);
2344
2345        rtems_semaphore_release(dma->sem_dma);
2346}
2347
2348/* Do general work, invoked indirectly from ISR */
2349static void grspw_work_shutdown_func(struct grspw_priv *priv)
2350{
2351        int i;
2352
2353        /* Link is down for some reason, and the user has configured
2354         * that we stop all DMA channels and throw out all blocked
2355         * threads.
2356         */
2357        for (i=0; i<priv->hwsup.ndma_chans; i++)
2358                grspw_dma_stop(&priv->dma[i]);
2359        grspw_hw_stop(priv);
2360}
2361
2362/* Do DMA work on one channel, invoked indirectly from ISR */
2363static void grspw_work_dma_func(struct grspw_dma_priv *dma)
2364{
2365        int tx_cond_true, rx_cond_true;
2366        unsigned int ctrl;
2367        IRQFLAGS_TYPE irqflags;
2368
2369        rx_cond_true = 0;
2370        tx_cond_true = 0;
2371        dma->stats.irq_cnt++;
2372
2373        /* Take DMA channel lock */
2374        if (rtems_semaphore_obtain(dma->sem_dma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2375            != RTEMS_SUCCESSFUL)
2376                return;
2377
2378        /* Look at cause we were woken up and clear source */
2379        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
2380        ctrl = REG_READ(&dma->regs->ctrl);
2381
2382        /* Read/Write DMA error ? */
2383        if (ctrl & GRSPW_DMA_STATUS_ERROR) {
2384                /* DMA error -> Stop DMA channel (both RX and TX) */
2385                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2386                grspw_dma_stop_locked(dma);
2387        } else if (ctrl & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS)) {
2388                /* DMA has finished a TX/RX packet */
2389                ctrl &= ~GRSPW_DMACTRL_AT;
2390                if (dma->cfg.rx_irq_en_cnt != 0)
2391                        ctrl |= GRSPW_DMACTRL_RI;
2392                if (dma->cfg.tx_irq_en_cnt != 0)
2393                        ctrl |= GRSPW_DMACTRL_TI;
2394                REG_WRITE(&dma->regs->ctrl, ctrl);
2395                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2396                if (ctrl & GRSPW_DMACTRL_PR) {
2397                        /* Do RX Work */
2398                        dma->stats.rx_work_cnt++;
2399                        grspw_rx_process_scheduled(dma);
2400                        dma->stats.rx_work_enabled += grspw_rx_schedule_ready(dma);
2401                        /* Check to see if condition for waking blocked USER
2402                         * task is fulfilled.
2403                         */
2404                        if (dma->rx_wait.waiting) {
2405                                rx_cond_true = grspw_rx_wait_eval(dma);
2406                                if (rx_cond_true)
2407                                        dma->rx_wait.waiting = 0;
2408                        }
2409                }
2410                if (ctrl & GRSPW_DMACTRL_PS) {
2411                        /* Do TX Work */
2412                        dma->stats.tx_work_cnt++;
2413                        grspw_tx_process_scheduled(dma);
2414                        dma->stats.tx_work_enabled += grspw_tx_schedule_send(dma);
2415                        if (dma->tx_wait.waiting) {
2416                                tx_cond_true = grspw_tx_wait_eval(dma);
2417                                if (tx_cond_true)
2418                                        dma->tx_wait.waiting = 0;
2419                        }
2420                }
2421        } else
2422                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2423
2424        /* Release lock */
2425        rtems_semaphore_release(dma->sem_dma);
2426
2427        if (rx_cond_true)
2428                rtems_semaphore_release(dma->rx_wait.sem_wait);
2429
2430        if (tx_cond_true)
2431                rtems_semaphore_release(dma->tx_wait.sem_wait);
2432}
2433
2434/* The work task receives work from the work message queue, which is posted
2435 * from the ISR.
2436 */
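/* Each 32-bit message encodes the GRSPW core index in its upper part (bits
 * from WORK_CORE_BIT and up) and the work type in its lower part: either
 * WORK_SHUTDOWN or a WORK_DMA(i) bitmask with one bit per DMA channel. The
 * WORK_* definitions are found earlier in this file.
 */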
2437static void grspw_work_func(rtems_task_argument unused)
2438{
2439        rtems_status_code status;
2440        unsigned int message;
2441        size_t size;
2442        struct grspw_priv *priv;
2443        int i;
2444
2445        while (grspw_task_stop == 0) {
2446                /* Wait for ISR to schedule work */
2447                status = rtems_message_queue_receive(
2448                        grspw_work_queue, &message,
2449                        &size, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
2450                if (status != RTEMS_SUCCESSFUL)
2451                        break;
2452
2453                /* Handle work */
2454                priv = priv_tab[message >> WORK_CORE_BIT];
2455                if (message & WORK_SHUTDOWN)
2456                        grspw_work_shutdown_func(priv);
2457                else if (message & WORK_DMA_MASK) {
2458                        for (i = 0; i < 4; i++) {
2459                                if (message & WORK_DMA(i))
2460                                        grspw_work_dma_func(&priv->dma[i]);
2461                        }
2462                }
2463        }
2464        rtems_task_delete(RTEMS_SELF);
2465}
2466
2467STATIC void grspw_isr(void *data)
2468{
2469        struct grspw_priv *priv = data;
2470        unsigned int dma_stat, stat, stat_clrmsk, ctrl, icctrl, timecode;
2471        unsigned int rxirq, rxack, intto;
2472        int i, handled = 0, message = WORK_NONE, call_user_int_isr;
2473#ifdef RTEMS_HAS_SMP
2474        IRQFLAGS_TYPE irqflags;
2475#endif
2476
2477        /* Get Status from Hardware */
2478        stat = REG_READ(&priv->regs->status);
2479        stat_clrmsk = stat & (GRSPW_STS_TO | GRSPW_STAT_ERROR);
2480
2481        /* Make sure to put the timecode handling first in order to get the
2482         * smallest possible interrupt latency
2483         */
2484        if ((stat & GRSPW_STS_TO) && (priv->tcisr != NULL)) {
2485                ctrl = REG_READ(&priv->regs->ctrl);
2486                if (ctrl & GRSPW_CTRL_TQ) {
2487                        /* Timecode received. Let custom function handle this */
2488                        timecode = REG_READ(&priv->regs->time) &
2489                                        (GRSPW_TIME_CTRL | GRSPW_TIME_TCNT);
2490                        (priv->tcisr)(priv->tcisr_arg, timecode);
2491                }
2492        }
2493
2494        /* Get Interrupt status from hardware */
2495        icctrl = REG_READ(&priv->regs->icctrl);
2496        if ((icctrl & GRSPW_ICCTRL_IRQSRC_MASK) && (priv->icisr != NULL)) {
2497                call_user_int_isr = 0;
2498                rxirq = rxack = intto = 0;
2499
2500                if ((icctrl & GRSPW_ICCTRL_IQ) &&
2501                    (rxirq = REG_READ(&priv->regs->icrx)) != 0)
2502                        call_user_int_isr = 1;
2503
2504                if ((icctrl & GRSPW_ICCTRL_AQ) &&
2505                    (rxack = REG_READ(&priv->regs->icack)) != 0)
2506                        call_user_int_isr = 1;
2507
2508                if ((icctrl & GRSPW_ICCTRL_TQ) &&
2509                    (intto = REG_READ(&priv->regs->ictimeout)) != 0)
2510                        call_user_int_isr = 1;
2511
2512                /* Let custom functions handle this POTENTIAL SPW interrupt. The
2513                 * user function is called even if no such IRQ has happened!
2514                 * User must make sure to clear all interrupts that have been
2515                 * handled from the three registers by writing a one.
2516                 */
2517                if (call_user_int_isr)
2518                        priv->icisr(priv->icisr_arg, rxirq, rxack, intto);
2519        }
2520
2521        /* An error occurred? */
2522        if (stat & GRSPW_STAT_ERROR) {
2523                /* Wake Global WorkQ */
2524                handled = 1;
2525
2526                if (stat & GRSPW_STS_EE)
2527                        priv->stats.err_eeop++;
2528
2529                if (stat & GRSPW_STS_IA)
2530                        priv->stats.err_addr++;
2531
2532                if (stat & GRSPW_STS_PE)
2533                        priv->stats.err_parity++;
2534
2535                if (stat & GRSPW_STS_ER)
2536                        priv->stats.err_escape++;
2537
2538                if (stat & GRSPW_STS_CE)
2539                        priv->stats.err_credit++;
2540
2541                if (stat & GRSPW_STS_WE)
2542                        priv->stats.err_wsync++;
2543
2544                if (priv->dis_link_on_err) {
2545                        /* Disable the link, no more transfers are expected
2546                         * on any DMA channel.
2547                         */
2548                        SPIN_LOCK(&priv->devlock, irqflags);
2549                        ctrl = REG_READ(&priv->regs->ctrl);
2550                        REG_WRITE(&priv->regs->ctrl, GRSPW_CTRL_LD |
2551                                (ctrl & ~(GRSPW_CTRL_IE|GRSPW_CTRL_LS)));
2552                        SPIN_UNLOCK(&priv->devlock, irqflags);
2553                        /* Signal to work-thread to stop DMA and clean up */
2554                        message = WORK_SHUTDOWN;
2555                }
2556        }
2557
2558        /* Clear Status Flags */
2559        if (stat_clrmsk) {
2560                handled = 1;
2561                REG_WRITE(&priv->regs->status, stat_clrmsk);
2562        }
2563
2564        /* A DMA transfer or error occurred? In that case disable further
2565         * IRQs from the DMA channel, then invoke the workQ.
2566         *
2567         * Note that the GI interrupt flag is not available in older designs
2568         * (it was added together with multiple DMA channel support).
2569         */
2570        SPIN_LOCK(&priv->devlock, irqflags);
2571        for (i=0; i<priv->hwsup.ndma_chans; i++) {
2572                dma_stat = REG_READ(&priv->regs->dma[i].ctrl);
2573                /* Check for Errors and if Packets been sent or received if
2574                 * respective IRQ are enabled
2575                 */
2576#ifdef HW_WITH_GI
2577                if ( dma_stat & (GRSPW_DMA_STATUS_ERROR | GRSPW_DMACTRL_GI) ) {
2578#else
2579                if ( (((dma_stat << 3) & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS))
2580                     | GRSPW_DMA_STATUS_ERROR) & dma_stat ) {
2581#endif
2582                        /* Disable Further IRQs (until enabled again)
2583                         * from this DMA channel. Leave the status
2584                         * bits set so that they can be handled by
2585                         * the work function.
2586                         */
2587                        REG_WRITE(&priv->regs->dma[i].ctrl, dma_stat &
2588                                ~(GRSPW_DMACTRL_RI|GRSPW_DMACTRL_TI|
2589                                GRSPW_DMACTRL_PR|GRSPW_DMACTRL_PS|
2590                                GRSPW_DMACTRL_RA|GRSPW_DMACTRL_TA|
2591                                GRSPW_DMACTRL_AT));
2592                        message |= WORK_DMA(i);
2593                        handled = 1;
2594                }
2595        }
2596        SPIN_UNLOCK(&priv->devlock, irqflags);
2597
2598        if (handled != 0)
2599                priv->stats.irq_cnt++;
2600
2601        /* Schedule work by sending message to work thread */
2602        if ((message != WORK_NONE) && grspw_work_queue) {
2603                message |= WORK_CORE(priv->index);
2604                stat = rtems_message_queue_send(grspw_work_queue, &message, 4);
2605                if (stat != RTEMS_SUCCESSFUL)
2606                        printk("grspw_isr(%d): message fail %d (0x%x)\n",
2607                                priv->index, stat, message);
2608        }
2609}
2610
2611STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma)
2612{
2613        unsigned int ctrl;
2614        struct grspw_dma_regs *dregs = dma->regs;
2615
2616        ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN |
2617               GRSPW_DMACTRL_SP | GRSPW_DMACTRL_SA | GRSPW_DMACTRL_NS);
2618        ctrl |= GRSPW_DMACTRL_AT;
2619        REG_WRITE(&dregs->ctrl, ctrl);
2620}
2621
2622STATIC void grspw_hw_dma_softreset(struct grspw_dma_priv *dma)
2623{
2624        unsigned int ctrl;
2625        struct grspw_dma_regs *dregs = dma->regs;
2626
2627        ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN);
2628        REG_WRITE(&dregs->ctrl, ctrl);
2629
2630        REG_WRITE(&dregs->rxmax, DEFAULT_RXMAX);
2631        REG_WRITE(&dregs->txdesc, 0);
2632        REG_WRITE(&dregs->rxdesc, 0);
2633}
2634
2635/* Hardware Action:
2636 *  - stop DMA
2637 *  - do not bring down the link (RMAP may be active)
2638 *  - RMAP settings untouched (RMAP may be active)
2639 *  - port select untouched (RMAP may be active)
2640 *  - timecodes are disabled
2641 *  - IRQ generation disabled
2642 *  - status not cleared (let user analyze it if requested later on)
2643 *  - Node address / First DMA channels Node address
2644 *    is untouched (RMAP may be active)
2645 */
2646STATIC void grspw_hw_stop(struct grspw_priv *priv)
2647{
2648        int i;
2649        unsigned int ctrl;
2650        IRQFLAGS_TYPE irqflags;
2651
2652        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
2653
2654        for (i=0; i<priv->hwsup.ndma_chans; i++)
2655                grspw_hw_dma_stop(&priv->dma[i]);
2656
2657        ctrl = REG_READ(&priv->regs->ctrl);
2658        REG_WRITE(&priv->regs->ctrl, ctrl & (
2659                GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS |
2660                GRSPW_CTRL_RE | GRSPW_CTRL_RD |
2661                GRSPW_CTRL_NP | GRSPW_CTRL_PS));
2662
2663        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
2664}
2665
2666/* Soft reset of GRSPW core registers */
2667STATIC void grspw_hw_softreset(struct grspw_priv *priv)
2668{
2669        int i;
2670        unsigned int tmp;
2671
2672        for (i=0; i<priv->hwsup.ndma_chans; i++)
2673                grspw_hw_dma_softreset(&priv->dma[i]);
2674
2675        REG_WRITE(&priv->regs->status, 0xffffffff);
2676        REG_WRITE(&priv->regs->time, 0);
2677        /* Clear all but valuable reset values of ICCTRL */
2678        tmp = REG_READ(&priv->regs->icctrl);
2679        tmp &= GRSPW_ICCTRL_INUM | GRSPW_ICCTRL_BIRQ | GRSPW_ICCTRL_TXIRQ;
2680        tmp |= GRSPW_ICCTRL_ID;
2681        REG_WRITE(&priv->regs->icctrl, tmp);
2682        REG_WRITE(&priv->regs->icrx, 0xffffffff);
2683        REG_WRITE(&priv->regs->icack, 0xffffffff);
2684        REG_WRITE(&priv->regs->ictimeout, 0xffffffff);
2685}
2686
2687int grspw_dev_count(void)
2688{
2689        return grspw_count;
2690}
2691
2692void grspw_initialize_user(void *(*devfound)(int), void (*devremove)(int,void*))
2693{
2694        int i;
2695        struct grspw_priv *priv;
2696
2697        /* Set new Device Found Handler */
2698        grspw_dev_add = devfound;
2699        grspw_dev_del = devremove;
2700
2701        if (grspw_initialized == 1 && grspw_dev_add) {
2702                /* Call callback for every previously found device */
2703                for (i=0; i<grspw_count; i++) {
2704                        priv = priv_tab[i];
2705                        if (priv)
2706                                priv->data = grspw_dev_add(i);
2707                }
2708        }
2709}
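
/* Example (documentation only): hooking the device-found callback. The
 * pointer returned by the callback is stored per device and handed back
 * through the devremove callback (if one is registered).
 */
#if 0
static void *my_dev_found(int dev_index)
{
        printk("GRSPW%d found\n", dev_index);
        return NULL;                    /* stored as the device user data */
}

static void example_register_callbacks(void)
{
        grspw_initialize_user(my_dev_found, NULL);
}
#endif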
2710
2711/******************* Driver manager interface ***********************/
2712
2713/* Driver prototypes */
2714static int grspw_common_init(void);
2715static int grspw2_init3(struct drvmgr_dev *dev);
2716
2717static struct drvmgr_drv_ops grspw2_ops =
2718{
2719        .init = {NULL,  NULL, grspw2_init3, NULL},
2720        .remove = NULL,
2721        .info = NULL
2722};
2723
2724static struct amba_dev_id grspw2_ids[] =
2725{
2726        {VENDOR_GAISLER, GAISLER_SPW}, /* not yet supported */
2727        {VENDOR_GAISLER, GAISLER_SPW2},
2728        {VENDOR_GAISLER, GAISLER_SPW2_DMA},
2729        {0, 0}          /* Mark end of table */
2730};
2731
2732static struct amba_drv_info grspw2_drv_info =
2733{
2734        {
2735                DRVMGR_OBJ_DRV,                 /* Driver */
2736                NULL,                           /* Next driver */
2737                NULL,                           /* Device list */
2738                DRIVER_AMBAPP_GAISLER_GRSPW2_ID,/* Driver ID */
2739                "GRSPW_PKT_DRV",                /* Driver Name */
2740                DRVMGR_BUS_TYPE_AMBAPP,         /* Bus Type */
2741                &grspw2_ops,
2742                NULL,                           /* Funcs */
2743                0,                              /* No devices yet */
2744                sizeof(struct grspw_priv),      /* Let DrvMgr alloc priv */
2745        },
2746        &grspw2_ids[0]
2747};
2748
2749void grspw2_register_drv (void)
2750{
2751        GRSPW_DBG("Registering GRSPW2 packet driver\n");
2752        drvmgr_drv_register(&grspw2_drv_info.general);
2753}
2754
2755static int grspw2_init3(struct drvmgr_dev *dev)
2756{
2757        struct grspw_priv *priv;
2758        struct amba_dev_info *ambadev;
2759        struct ambapp_core *pnpinfo;
2760        int i, size;
2761        unsigned int ctrl, icctrl, numi;
2762        union drvmgr_key_value *value;
2763
2764        GRSPW_DBG("GRSPW[%d] on bus %s\n", dev->minor_drv,
2765                dev->parent->dev->name);
2766
2767        if (grspw_count >= GRSPW_MAX)
2768                return DRVMGR_ENORES;
2769
2770        priv = dev->priv;
2771        if (priv == NULL)
2772                return DRVMGR_NOMEM;
2773        priv->dev = dev;
2774
2775        /* If first device init common part of driver */
2776        if (grspw_common_init())
2777                return DRVMGR_FAIL;
2778
2779        /*** Now we take care of device initialization ***/
2780
2781        /* Get device information from AMBA PnP information */
2782        ambadev = (struct amba_dev_info *)dev->businfo;
2783        if (ambadev == NULL)
2784                return -1;
2785        pnpinfo = &ambadev->info;
2786        priv->irq = pnpinfo->irq;
2787        priv->regs = (struct grspw_regs *)pnpinfo->apb_slv->start;
2788
2789        /* Read Hardware Support from Control Register */
2790        ctrl = REG_READ(&priv->regs->ctrl);
2791        priv->hwsup.rmap = (ctrl & GRSPW_CTRL_RA) >> GRSPW_CTRL_RA_BIT;
2792        priv->hwsup.rmap_crc = (ctrl & GRSPW_CTRL_RC) >> GRSPW_CTRL_RC_BIT;
2793        priv->hwsup.rx_unalign = (ctrl & GRSPW_CTRL_RX) >> GRSPW_CTRL_RX_BIT;
2794        priv->hwsup.nports = 1 + ((ctrl & GRSPW_CTRL_PO) >> GRSPW_CTRL_PO_BIT);
2795        priv->hwsup.ndma_chans = 1 + ((ctrl & GRSPW_CTRL_NCH) >> GRSPW_CTRL_NCH_BIT);
2796        priv->hwsup.irq = ((ctrl & GRSPW_CTRL_ID) >> GRSPW_CTRL_ID_BIT);
2797        icctrl = REG_READ(&priv->regs->icctrl);
2798        numi = (icctrl & GRSPW_ICCTRL_NUMI) >> GRSPW_ICCTRL_NUMI_BIT;
2799        if (numi > 0)
2800                priv->hwsup.irq_num = 1 << (numi - 1);
2801        else
2802                priv->hwsup.irq_num = 0;
2803
2804        /* Construct hardware version identification */
2805        priv->hwsup.hw_version = pnpinfo->device << 16 | pnpinfo->apb_slv->ver;
2806
2807        if ((pnpinfo->device == GAISLER_SPW2) ||
2808            (pnpinfo->device == GAISLER_SPW2_DMA)) {
2809                priv->hwsup.strip_adr = 1; /* All GRSPW2 can strip Address */
2810                priv->hwsup.strip_pid = 1; /* All GRSPW2 can strip PID */
2811        } else {
2812                /* Autodetect GRSPW1 features? */
2813                priv->hwsup.strip_adr = 0;
2814                priv->hwsup.strip_pid = 0;
2815        }
2816
2817        /* Probe width of SpaceWire Interrupt ISR timers. All have the same
2818         * width... so only the first is probed; if no timer is present the
2819         * result will be zero.
2820         */
2821        REG_WRITE(&priv->regs->icrlpresc, 0x7fffffff);
2822        ctrl = REG_READ(&priv->regs->icrlpresc);
2823        REG_WRITE(&priv->regs->icrlpresc, 0);
2824        priv->hwsup.itmr_width = 0;
2825        while (ctrl & 1) {
2826                priv->hwsup.itmr_width++;
2827                ctrl = ctrl >> 1;
2828        }
2829
2830        /* Let user limit the number of DMA channels on this core to save
2831         * space. Only the first nDMA channels will be available.
2832         */
2833        value = drvmgr_dev_key_get(priv->dev, "nDMA", KEY_TYPE_INT);
2834        if (value && (value->i < priv->hwsup.ndma_chans))
2835                priv->hwsup.ndma_chans = value->i;
2836
2837        /* Allocate and init Memory for all DMA channels */
2838        size = sizeof(struct grspw_dma_priv) * priv->hwsup.ndma_chans;
2839        priv->dma = (struct grspw_dma_priv *) malloc(size);
2840        if (priv->dma == NULL)
2841                return DRVMGR_NOMEM;
2842        memset(priv->dma, 0, size);
2843        for (i=0; i<priv->hwsup.ndma_chans; i++) {
2844                priv->dma[i].core = priv;
2845                priv->dma[i].index = i;
2846                priv->dma[i].regs = &priv->regs->dma[i];
2847        }
2848
2849        /* Startup Action:
2850         *  - stop DMA
2851         *  - do not bring down the link (RMAP may be active)
2852         *  - RMAP settings untouched (RMAP may be active)
2853         *  - port select untouched (RMAP may be active)
2854         *  - timecodes are disabled
2855         *  - IRQ generation disabled
2856         *  - status cleared
2857         *  - Node address / First DMA channels Node address
2858         *    is untouched (RMAP may be active)
2859         */
2860        grspw_hw_stop(priv);
2861        grspw_hw_softreset(priv);
2862
2863        /* Register device in the driver's internal device table */
2864        priv->index = grspw_count;
2865        priv_tab[priv->index] = priv;
2866        grspw_count++;
2867
2868        /* Device name */
2869        sprintf(priv->devname, "grspw%d", priv->index);
2870
2871        /* Tell above layer about new device */
2872        if (grspw_dev_add)
2873                priv->data = grspw_dev_add(priv->index);
2874
2875        return DRVMGR_OK;
2876}
2877
2878/******************* Driver Implementation ***********************/
2879
2880static int grspw_common_init(void)
2881{
2882        if (grspw_initialized == 1)
2883                return 0;
2884        if (grspw_initialized == -1)
2885                return -1;
2886        grspw_initialized = -1;
2887
2888        /* Device Semaphore created with count = 1 */
2889        if (rtems_semaphore_create(rtems_build_name('S', 'G', 'L', 'S'), 1,
2890            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
2891            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
2892            RTEMS_NO_PRIORITY_CEILING, 0, &grspw_sem) != RTEMS_SUCCESSFUL)
2893                return -1;
2894
2895        /* Work queue, Work task. Not created if the user disables it.
2896         * The user can disable it when interrupts are not used, to save resources.
2897         */
2898        if (grspw_work_task_priority != -1) {
2899                if (rtems_message_queue_create(
2900                    rtems_build_name('S', 'G', 'L', 'Q'), 32, 4, RTEMS_FIFO,
2901                    &grspw_work_queue) != RTEMS_SUCCESSFUL)
2902                        return -1;
2903
2904                if (rtems_task_create(rtems_build_name('S', 'G', 'L', 'T'),
2905                    grspw_work_task_priority, RTEMS_MINIMUM_STACK_SIZE,
2906                    RTEMS_PREEMPT | RTEMS_NO_ASR, RTEMS_NO_FLOATING_POINT,
2907                    &grspw_work_task) != RTEMS_SUCCESSFUL)
2908                        return -1;
2909
2910                if (rtems_task_start(grspw_work_task, grspw_work_func, 0) !=
2911                    RTEMS_SUCCESSFUL)
2912                        return -1;
2913        }
2914
2915        grspw_initialized = 1;
2916        return 0;
2917}