source: rtems/c/src/lib/libbsp/sparc/shared/spw/grspw_pkt.c @ f46f5f84

Last change on this file since f46f5f84 was ef94150f, checked in by Daniel Hellstrom <daniel@…>, on 04/12/16 at 11:53:42

leon, grspw_pkt: Added checks for special list cases

  • Fixed grspw_dma_tx_send() so that it does not fail when an empty user packet is provided.
  • Added empty-list checks to some of the list handling inline functions for GRSPW_PKT. The driver's own use of them may already be correct, but users may not have been aware that certain lists were assumed to be non-empty.
  • Property mode set to 100644
File size: 85.5 KB
/*
 * Cobham Gaisler GRSPW/GRSPW2 SpaceWire Kernel Library Interface for RTEMS.
 *
 * This driver can be used to implement a standard I/O system "char"-driver
 * or used directly. NOTE SMP support has not been tested.
 *
 * COPYRIGHT (c) 2011
 * Cobham Gaisler AB
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#include <rtems.h>
#include <bsp.h>
#include <rtems/libio.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <ctype.h>
#include <malloc.h>
#include <rtems/bspIo.h>

#include <drvmgr/drvmgr.h>
#include <ambapp.h>
#include <drvmgr/ambapp_bus.h>
#include <bsp/grspw_pkt.h>

/* This driver has been prepared for SMP operation, however it has never
 * been tested on an SMP system - use at your own risk.
 */
#ifdef RTEMS_HAS_SMP

#include <rtems/score/smplock.h> /* spin-lock */

/* SPIN_LOCK() and SPIN_UNLOCK() are not implemented by RTEMS. Use the _IRQ
 * versions to implement them.
 */
#define SPIN_DECLARE(name) SMP_lock_spinlock_simple_Control name
#define SPIN_INIT(lock) _SMP_lock_spinlock_simple_Initialize(lock)
#define SPIN_LOCK(lock, level) SPIN_LOCK_IRQ(lock, level)
#define SPIN_LOCK_IRQ(lock, level) (level) = _SMP_lock_spinlock_simple_Obtain(lock)
#define SPIN_UNLOCK(lock, level) SPIN_UNLOCK_IRQ(lock, level)
#define SPIN_UNLOCK_IRQ(lock, level) _SMP_lock_spinlock_simple_Release(lock, level)
#define IRQFLAGS_TYPE ISR_Level

#else

#define SPIN_DECLARE(name)
#define SPIN_INIT(lock)
#define SPIN_LOCK(lock, level)
#define SPIN_LOCK_IRQ(lock, level) rtems_interrupt_disable(level)
#define SPIN_UNLOCK(lock, level)
#define SPIN_UNLOCK_IRQ(lock, level) rtems_interrupt_enable(level)
#define IRQFLAGS_TYPE rtems_interrupt_level

#endif

/*#define STATIC*/
#define STATIC static

/*#define GRSPW_DBG(args...) printk(args)*/
#define GRSPW_DBG(args...)

struct grspw_dma_regs {
        volatile unsigned int ctrl;     /* DMA Channel Control */
        volatile unsigned int rxmax;    /* RX Max Packet Length */
        volatile unsigned int txdesc;   /* TX Descriptor Base/Current */
        volatile unsigned int rxdesc;   /* RX Descriptor Base/Current */
        volatile unsigned int addr;     /* Address Register */
        volatile unsigned int resv[3];
};

struct grspw_regs {
        volatile unsigned int ctrl;
        volatile unsigned int status;
        volatile unsigned int nodeaddr;
        volatile unsigned int clkdiv;
        volatile unsigned int destkey;
        volatile unsigned int time;
        volatile unsigned int timer;    /* Used only in GRSPW1 */
        volatile unsigned int resv1;

        /* DMA Registers. ctrl.NCH determines the number of DMA channels;
         * up to 4 channels are supported.
         */
        struct grspw_dma_regs dma[4];

        volatile unsigned int icctrl;
        volatile unsigned int icrx;
        volatile unsigned int icack;
        volatile unsigned int ictimeout;
        volatile unsigned int ictickomask;
        volatile unsigned int icaamask;
        volatile unsigned int icrlpresc;
        volatile unsigned int icrlisr;
        volatile unsigned int icrlintack;
        volatile unsigned int resv2;
        volatile unsigned int icisr;
        volatile unsigned int resv3;
};

/* GRSPW - Control Register - 0x00 */
#define GRSPW_CTRL_RA_BIT       31
#define GRSPW_CTRL_RX_BIT       30
#define GRSPW_CTRL_RC_BIT       29
#define GRSPW_CTRL_NCH_BIT      27
#define GRSPW_CTRL_PO_BIT       26
#define GRSPW_CTRL_ID_BIT       24
#define GRSPW_CTRL_LE_BIT       22
#define GRSPW_CTRL_PS_BIT       21
#define GRSPW_CTRL_NP_BIT       20
#define GRSPW_CTRL_RD_BIT       17
#define GRSPW_CTRL_RE_BIT       16
#define GRSPW_CTRL_TF_BIT       12
#define GRSPW_CTRL_TR_BIT       11
#define GRSPW_CTRL_TT_BIT       10
#define GRSPW_CTRL_LI_BIT       9
#define GRSPW_CTRL_TQ_BIT       8
#define GRSPW_CTRL_RS_BIT       6
#define GRSPW_CTRL_PM_BIT       5
#define GRSPW_CTRL_TI_BIT       4
#define GRSPW_CTRL_IE_BIT       3
#define GRSPW_CTRL_AS_BIT       2
#define GRSPW_CTRL_LS_BIT       1
#define GRSPW_CTRL_LD_BIT       0

#define GRSPW_CTRL_RA   (1<<GRSPW_CTRL_RA_BIT)
#define GRSPW_CTRL_RX   (1<<GRSPW_CTRL_RX_BIT)
#define GRSPW_CTRL_RC   (1<<GRSPW_CTRL_RC_BIT)
#define GRSPW_CTRL_NCH  (0x3<<GRSPW_CTRL_NCH_BIT)
#define GRSPW_CTRL_PO   (1<<GRSPW_CTRL_PO_BIT)
#define GRSPW_CTRL_ID   (1<<GRSPW_CTRL_ID_BIT)
#define GRSPW_CTRL_LE   (1<<GRSPW_CTRL_LE_BIT)
#define GRSPW_CTRL_PS   (1<<GRSPW_CTRL_PS_BIT)
#define GRSPW_CTRL_NP   (1<<GRSPW_CTRL_NP_BIT)
#define GRSPW_CTRL_RD   (1<<GRSPW_CTRL_RD_BIT)
#define GRSPW_CTRL_RE   (1<<GRSPW_CTRL_RE_BIT)
#define GRSPW_CTRL_TF   (1<<GRSPW_CTRL_TF_BIT)
#define GRSPW_CTRL_TR   (1<<GRSPW_CTRL_TR_BIT)
#define GRSPW_CTRL_TT   (1<<GRSPW_CTRL_TT_BIT)
#define GRSPW_CTRL_LI   (1<<GRSPW_CTRL_LI_BIT)
#define GRSPW_CTRL_TQ   (1<<GRSPW_CTRL_TQ_BIT)
#define GRSPW_CTRL_RS   (1<<GRSPW_CTRL_RS_BIT)
#define GRSPW_CTRL_PM   (1<<GRSPW_CTRL_PM_BIT)
#define GRSPW_CTRL_TI   (1<<GRSPW_CTRL_TI_BIT)
#define GRSPW_CTRL_IE   (1<<GRSPW_CTRL_IE_BIT)
#define GRSPW_CTRL_AS   (1<<GRSPW_CTRL_AS_BIT)
#define GRSPW_CTRL_LS   (1<<GRSPW_CTRL_LS_BIT)
#define GRSPW_CTRL_LD   (1<<GRSPW_CTRL_LD_BIT)

#define GRSPW_CTRL_IRQSRC_MASK \
        (GRSPW_CTRL_LI | GRSPW_CTRL_TQ)
#define GRSPW_ICCTRL_IRQSRC_MASK \
        (GRSPW_ICCTRL_TQ | GRSPW_ICCTRL_AQ | GRSPW_ICCTRL_IQ)


/* GRSPW - Status Register - 0x04 */
#define GRSPW_STS_LS_BIT        21
#define GRSPW_STS_AP_BIT        9
#define GRSPW_STS_EE_BIT        8
#define GRSPW_STS_IA_BIT        7
#define GRSPW_STS_WE_BIT        6       /* GRSPW1 */
#define GRSPW_STS_PE_BIT        4
#define GRSPW_STS_DE_BIT        3
#define GRSPW_STS_ER_BIT        2
#define GRSPW_STS_CE_BIT        1
#define GRSPW_STS_TO_BIT        0

#define GRSPW_STS_LS    (0x7<<GRSPW_STS_LS_BIT)
#define GRSPW_STS_AP    (1<<GRSPW_STS_AP_BIT)
#define GRSPW_STS_EE    (1<<GRSPW_STS_EE_BIT)
#define GRSPW_STS_IA    (1<<GRSPW_STS_IA_BIT)
#define GRSPW_STS_WE    (1<<GRSPW_STS_WE_BIT)   /* GRSPW1 */
#define GRSPW_STS_PE    (1<<GRSPW_STS_PE_BIT)
#define GRSPW_STS_DE    (1<<GRSPW_STS_DE_BIT)
#define GRSPW_STS_ER    (1<<GRSPW_STS_ER_BIT)
#define GRSPW_STS_CE    (1<<GRSPW_STS_CE_BIT)
#define GRSPW_STS_TO    (1<<GRSPW_STS_TO_BIT)

/* GRSPW - Default Address Register - 0x08 */
#define GRSPW_DEF_ADDR_BIT      0
#define GRSPW_DEF_MASK_BIT      8
#define GRSPW_DEF_ADDR  (0xff<<GRSPW_DEF_ADDR_BIT)
#define GRSPW_DEF_MASK  (0xff<<GRSPW_DEF_MASK_BIT)

/* GRSPW - Clock Divisor Register - 0x0C */
#define GRSPW_CLKDIV_START_BIT  8
#define GRSPW_CLKDIV_RUN_BIT    0
#define GRSPW_CLKDIV_START      (0xff<<GRSPW_CLKDIV_START_BIT)
#define GRSPW_CLKDIV_RUN        (0xff<<GRSPW_CLKDIV_RUN_BIT)
#define GRSPW_CLKDIV_MASK       (GRSPW_CLKDIV_START|GRSPW_CLKDIV_RUN)

/* GRSPW - Destination key Register - 0x10 */
#define GRSPW_DK_DESTKEY_BIT    0
#define GRSPW_DK_DESTKEY        (0xff<<GRSPW_DK_DESTKEY_BIT)

/* GRSPW - Time Register - 0x14 */
#define GRSPW_TIME_CTRL_BIT     6
#define GRSPW_TIME_CNT_BIT      0
#define GRSPW_TIME_CTRL         (0x3<<GRSPW_TIME_CTRL_BIT)
#define GRSPW_TIME_TCNT         (0x3f<<GRSPW_TIME_CNT_BIT)

/* GRSPW - DMA Control Register - 0x20*N */
#define GRSPW_DMACTRL_LE_BIT    16
#define GRSPW_DMACTRL_SP_BIT    15
#define GRSPW_DMACTRL_SA_BIT    14
#define GRSPW_DMACTRL_EN_BIT    13
#define GRSPW_DMACTRL_NS_BIT    12
#define GRSPW_DMACTRL_RD_BIT    11
#define GRSPW_DMACTRL_RX_BIT    10
#define GRSPW_DMACTRL_AT_BIT    9
#define GRSPW_DMACTRL_RA_BIT    8
#define GRSPW_DMACTRL_TA_BIT    7
#define GRSPW_DMACTRL_PR_BIT    6
#define GRSPW_DMACTRL_PS_BIT    5
#define GRSPW_DMACTRL_AI_BIT    4
#define GRSPW_DMACTRL_RI_BIT    3
#define GRSPW_DMACTRL_TI_BIT    2
#define GRSPW_DMACTRL_RE_BIT    1
#define GRSPW_DMACTRL_TE_BIT    0

#define GRSPW_DMACTRL_LE        (1<<GRSPW_DMACTRL_LE_BIT)
#define GRSPW_DMACTRL_SP        (1<<GRSPW_DMACTRL_SP_BIT)
#define GRSPW_DMACTRL_SA        (1<<GRSPW_DMACTRL_SA_BIT)
#define GRSPW_DMACTRL_EN        (1<<GRSPW_DMACTRL_EN_BIT)
#define GRSPW_DMACTRL_NS        (1<<GRSPW_DMACTRL_NS_BIT)
#define GRSPW_DMACTRL_RD        (1<<GRSPW_DMACTRL_RD_BIT)
#define GRSPW_DMACTRL_RX        (1<<GRSPW_DMACTRL_RX_BIT)
#define GRSPW_DMACTRL_AT        (1<<GRSPW_DMACTRL_AT_BIT)
#define GRSPW_DMACTRL_RA        (1<<GRSPW_DMACTRL_RA_BIT)
#define GRSPW_DMACTRL_TA        (1<<GRSPW_DMACTRL_TA_BIT)
#define GRSPW_DMACTRL_PR        (1<<GRSPW_DMACTRL_PR_BIT)
#define GRSPW_DMACTRL_PS        (1<<GRSPW_DMACTRL_PS_BIT)
#define GRSPW_DMACTRL_AI        (1<<GRSPW_DMACTRL_AI_BIT)
#define GRSPW_DMACTRL_RI        (1<<GRSPW_DMACTRL_RI_BIT)
#define GRSPW_DMACTRL_TI        (1<<GRSPW_DMACTRL_TI_BIT)
#define GRSPW_DMACTRL_RE        (1<<GRSPW_DMACTRL_RE_BIT)
#define GRSPW_DMACTRL_TE        (1<<GRSPW_DMACTRL_TE_BIT)

/* GRSPW - DMA Channel Max Packet Length Register - (0x20*N + 0x04) */
#define GRSPW_DMARXLEN_MAX_BIT  0
#define GRSPW_DMARXLEN_MAX      (0xffffff<<GRSPW_DMARXLEN_MAX_BIT)

/* GRSPW - DMA Channel Address Register - (0x20*N + 0x10) */
#define GRSPW_DMAADR_ADDR_BIT   0
#define GRSPW_DMAADR_MASK_BIT   8
#define GRSPW_DMAADR_ADDR       (0xff<<GRSPW_DMAADR_ADDR_BIT)
#define GRSPW_DMAADR_MASK       (0xff<<GRSPW_DMAADR_MASK_BIT)

/* GRSPW - Interrupt code receive register - 0xa4 */
#define GRSPW_ICCTRL_INUM_BIT   27
#define GRSPW_ICCTRL_IA_BIT     24
#define GRSPW_ICCTRL_LE_BIT     23
#define GRSPW_ICCTRL_PR_BIT     22
#define GRSPW_ICCTRL_DQ_BIT     21 /* never used */
#define GRSPW_ICCTRL_TQ_BIT     20
#define GRSPW_ICCTRL_AQ_BIT     19
#define GRSPW_ICCTRL_IQ_BIT     18
#define GRSPW_ICCTRL_IR_BIT     17
#define GRSPW_ICCTRL_IT_BIT     16
#define GRSPW_ICCTRL_NUMI_BIT   13
#define GRSPW_ICCTRL_BIRQ_BIT   8
#define GRSPW_ICCTRL_ID_BIT     7
#define GRSPW_ICCTRL_II_BIT     6
#define GRSPW_ICCTRL_TXIRQ_BIT  0
#define GRSPW_ICCTRL_INUM       (0x3f << GRSPW_ICCTRL_INUM_BIT)
#define GRSPW_ICCTRL_IA         (1 << GRSPW_ICCTRL_IA_BIT)
#define GRSPW_ICCTRL_LE         (1 << GRSPW_ICCTRL_LE_BIT)
#define GRSPW_ICCTRL_PR         (1 << GRSPW_ICCTRL_PR_BIT)
#define GRSPW_ICCTRL_DQ         (1 << GRSPW_ICCTRL_DQ_BIT)
#define GRSPW_ICCTRL_TQ         (1 << GRSPW_ICCTRL_TQ_BIT)
#define GRSPW_ICCTRL_AQ         (1 << GRSPW_ICCTRL_AQ_BIT)
#define GRSPW_ICCTRL_IQ         (1 << GRSPW_ICCTRL_IQ_BIT)
#define GRSPW_ICCTRL_IR         (1 << GRSPW_ICCTRL_IR_BIT)
#define GRSPW_ICCTRL_IT         (1 << GRSPW_ICCTRL_IT_BIT)
#define GRSPW_ICCTRL_NUMI       (0x7 << GRSPW_ICCTRL_NUMI_BIT)
#define GRSPW_ICCTRL_BIRQ       (0x1f << GRSPW_ICCTRL_BIRQ_BIT)
#define GRSPW_ICCTRL_ID         (1 << GRSPW_ICCTRL_ID_BIT)
#define GRSPW_ICCTRL_II         (1 << GRSPW_ICCTRL_II_BIT)
#define GRSPW_ICCTRL_TXIRQ      (0x3f << GRSPW_ICCTRL_TXIRQ_BIT)

/* RX Buffer Descriptor */
struct grspw_rxbd {
   volatile unsigned int ctrl;
   volatile unsigned int addr;
};

/* TX Buffer Descriptor */
struct grspw_txbd {
   volatile unsigned int ctrl;
   volatile unsigned int haddr;
   volatile unsigned int dlen;
   volatile unsigned int daddr;
};

/* GRSPW - DMA RXBD Ctrl */
#define GRSPW_RXBD_LEN_BIT 0
#define GRSPW_RXBD_LEN  (0x1ffffff<<GRSPW_RXBD_LEN_BIT)
#define GRSPW_RXBD_EN   (1<<25)
#define GRSPW_RXBD_WR   (1<<26)
#define GRSPW_RXBD_IE   (1<<27)
#define GRSPW_RXBD_EP   (1<<28)
#define GRSPW_RXBD_HC   (1<<29)
#define GRSPW_RXBD_DC   (1<<30)
#define GRSPW_RXBD_TR   (1<<31)

#define GRSPW_TXBD_HLEN (0xff<<0)
#define GRSPW_TXBD_NCL  (0xf<<8)
#define GRSPW_TXBD_EN   (1<<12)
#define GRSPW_TXBD_WR   (1<<13)
#define GRSPW_TXBD_IE   (1<<14)
#define GRSPW_TXBD_LE   (1<<15)
#define GRSPW_TXBD_HC   (1<<16)
#define GRSPW_TXBD_DC   (1<<17)

#define GRSPW_DMAADR_MASK_BIT   8
#define GRSPW_DMAADR_ADDR       (0xff<<GRSPW_DMAADR_ADDR_BIT)
#define GRSPW_DMAADR_MASK       (0xff<<GRSPW_DMAADR_MASK_BIT)


/* GRSPW Error Condition */
#define GRSPW_STAT_ERROR        (GRSPW_STS_EE | GRSPW_STS_IA | GRSPW_STS_WE | GRSPW_STS_PE | GRSPW_STS_DE | GRSPW_STS_ER | GRSPW_STS_CE)
#define GRSPW_DMA_STATUS_ERROR  (GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA)
/* GRSPW Link configuration options */
#define GRSPW_LINK_CFG          (GRSPW_CTRL_LI | GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS)
#define GRSPW_LINKSTATE(status) ((status & GRSPW_CTRL_LS) >> GRSPW_CTRL_LS_BIT)

/* Software Defaults */
#define DEFAULT_RXMAX 1024      /* 1 KBytes Max RX Packet Size */

/* GRSPW Constants */
#define GRSPW_TXBD_NR 64        /* Maximum number of TX Descriptors */
#define GRSPW_RXBD_NR 128       /* Maximum number of RX Descriptors */
#define GRSPW_TXBD_SIZE 16      /* Size in bytes of one TX descriptor */
#define GRSPW_RXBD_SIZE 8       /* Size in bytes of one RX descriptor */
#define BDTAB_SIZE 0x400        /* BD Table Size (RX or TX) */
#define BDTAB_ALIGN 0x400       /* BD Table Alignment Requirement */

/* Memory and HW Registers Access routines. All 32-bit access routines */
#define BD_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
/*#define BD_READ(addr) (*(volatile unsigned int *)(addr))*/
#define BD_READ(addr) leon_r32_no_cache((unsigned long)(addr))
#define REG_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
#define REG_READ(addr) (*(volatile unsigned int *)(addr))

struct grspw_ring {
        struct grspw_ring *next;        /* Next Descriptor */
        union {
                struct grspw_txbd *tx;  /* Descriptor Address */
                struct grspw_rxbd *rx;  /* Descriptor Address */
        } bd;
        struct grspw_pkt *pkt;          /* Associated packet description. NULL if none */
};

/* An entry in the TX descriptor Ring */
struct grspw_txring {
        struct grspw_txring *next;      /* Next Descriptor */
        struct grspw_txbd *bd;          /* Descriptor Address */
        struct grspw_pkt *pkt;          /* Associated packet description. NULL if none */
};

/* An entry in the RX descriptor Ring */
struct grspw_rxring {
        struct grspw_rxring *next;      /* Next Descriptor */
        struct grspw_rxbd *bd;          /* Descriptor Address */
        struct grspw_pkt *pkt;          /* Associated packet description. NULL if none */
};


struct grspw_dma_priv {
        struct grspw_priv *core;        /* GRSPW Core */
        struct grspw_dma_regs *regs;    /* DMA Channel Registers */
        int index;                      /* DMA Channel Index @ GRSPW core */
        int open;                       /* DMA Channel opened by user */
        int started;                    /* DMA Channel activity (start|stop) */
        rtems_id sem_rxdma;             /* DMA Channel RX Semaphore */
        rtems_id sem_txdma;             /* DMA Channel TX Semaphore */
        struct grspw_dma_stats stats;   /* DMA Channel Statistics */
        struct grspw_dma_config cfg;    /* DMA Channel Configuration */

        /*** RX ***/

        /* RX Descriptor Ring */
        struct grspw_rxbd *rx_bds;              /* Descriptor Address */
        struct grspw_rxbd *rx_bds_hwa;          /* Descriptor HW Address */
        struct grspw_rxring *rx_ring_base;
        struct grspw_rxring *rx_ring_head;      /* Next descriptor to enable */
        struct grspw_rxring *rx_ring_tail;      /* Oldest enabled Descriptor */
        int rx_irq_en_cnt_curr;
        struct {
                int waiting;
                int ready_cnt;
                int op;
                int recv_cnt;
                rtems_id sem_wait;              /* RX Semaphore used to implement RX blocking */
        } rx_wait;

        /* Queue of Packets READY to be scheduled */
        struct grspw_list ready;
        int ready_cnt;

        /* Scheduled RX Packets Queue */
        struct grspw_list rx_sched;
        int rx_sched_cnt;

        /* Queue of Packets that have been RECEIVED */
        struct grspw_list recv;
        int recv_cnt;


        /*** TX ***/

        /* TX Descriptor Ring */
        struct grspw_txbd *tx_bds;              /* Descriptor Address */
        struct grspw_txbd *tx_bds_hwa;          /* Descriptor HW Address */
        struct grspw_txring *tx_ring_base;
        struct grspw_txring *tx_ring_head;
        struct grspw_txring *tx_ring_tail;
        int tx_irq_en_cnt_curr;
        struct {
                int waiting;
                int send_cnt;
                int op;
                int sent_cnt;
                rtems_id sem_wait;              /* TX Semaphore used to implement TX blocking */
        } tx_wait;

        /* Queue of Packets ready to be scheduled for transmission */
        struct grspw_list send;
        int send_cnt;

        /* Scheduled TX Packets Queue */
        struct grspw_list tx_sched;
        int tx_sched_cnt;

        /* Queue of Packets that have been SENT */
        struct grspw_list sent;
        int sent_cnt;
};

struct grspw_priv {
        char devname[8];                /* Device name "grspw%d" */
        struct drvmgr_dev *dev;         /* Device */
        struct grspw_regs *regs;        /* Virtual Address of APB Registers */
        int irq;                        /* AMBA IRQ number of core */
        int index;                      /* Index in order it was probed */
        int core_index;                 /* Core Bus Index */
        int open;                       /* If Device is already opened (=1) or not (=0) */
        void *data;                     /* User private Data for this device instance, set by grspw_initialize_user */

        /* Features supported by Hardware */
        struct grspw_hw_sup hwsup;

        /* Pointer to an array of Maximally 4 DMA Channels */
        struct grspw_dma_priv *dma;

        /* Spin-lock ISR protection */
        SPIN_DECLARE(devlock);

        /* Descriptor Memory Area for TX & RX and all DMA channels */
        unsigned int bd_mem;
        unsigned int bd_mem_alloced;

        /*** Time Code Handling ***/
        void (*tcisr)(void *data, int timecode);
        void *tcisr_arg;

        /*** Interrupt-code Handling ***/
        spwpkt_ic_isr_t icisr;
        void *icisr_arg;

        /* Bit mask representing events which shall cause link disable. */
        unsigned int dis_link_on_err;

        /* Bit mask for link status bits to clear by ISR */
        unsigned int stscfg;

        /* "Core Global" Statistics gathered, not dependent on DMA channel */
        struct grspw_core_stats stats;
};

int grspw_initialized = 0;
int grspw_count = 0;
struct workqueue_struct *grspw_workq = NULL;
rtems_id grspw_sem;
static struct grspw_priv *priv_tab[GRSPW_MAX];

/* callback to upper layer when devices are discovered/removed */
void *(*grspw_dev_add)(int) = NULL;
void (*grspw_dev_del)(int,void*) = NULL;

/* USER OVERRIDABLE - The work task priority. Set to -1 to disable creating
 * the work-task and work-queue to save space.
 */
int grspw_work_task_priority __attribute__((weak)) = 100;
int grspw_task_stop = 0;
rtems_id grspw_work_task;
rtems_id grspw_work_queue = 0;
#define WORK_NONE         0
#define WORK_SHUTDOWN     0x100
#define WORK_DMA(channel) (0x1 << (channel))
#define WORK_DMA_MASK     0xf /* max 4 channels */
#define WORK_CORE_BIT     16
#define WORK_CORE_MASK    0xffff
#define WORK_CORE(device) ((device) << WORK_CORE_BIT)

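/* Example (editor's sketch, not part of the original file): because
 * grspw_work_task_priority is declared weak above, an application can
 * override it with a strong definition at link time, e.g. to disable the
 * work-task as the comment above describes:
 *
 *   int grspw_work_task_priority = -1;   (in application code)
 */
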
STATIC void grspw_hw_stop(struct grspw_priv *priv);
STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma);
STATIC void grspw_dma_reset(struct grspw_dma_priv *dma);
STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma);
STATIC void grspw_isr(void *data);

void *grspw_open(int dev_no)
{
        struct grspw_priv *priv;
        unsigned int bdtabsize, hwa;
        int i;
        union drvmgr_key_value *value;

        if (grspw_initialized != 1 || (dev_no >= grspw_count))
                return NULL;

        priv = priv_tab[dev_no];

        /* Take GRSPW lock - Wait until we get semaphore */
        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return NULL;

        if (priv->open) {
                priv = NULL;
                goto out;
        }

        /* Initialize Spin-lock for GRSPW Device. This is to protect
         * CTRL and DMACTRL registers from ISR.
         */
        SPIN_INIT(&priv->devlock);

        priv->tcisr = NULL;
        priv->tcisr_arg = NULL;
        priv->icisr = NULL;
        priv->icisr_arg = NULL;
        priv->stscfg = LINKSTS_MASK;

        grspw_stats_clr(priv);

        /* Allocate TX & RX Descriptor memory area for all DMA
         * channels. Max-size descriptor area is allocated (or user assigned):
         *  - 128 RX descriptors per DMA Channel
         *  - 64 TX descriptors per DMA Channel
         * Specified address must be in CPU RAM.
         */
        bdtabsize = 2 * BDTAB_SIZE * priv->hwsup.ndma_chans;
        value = drvmgr_dev_key_get(priv->dev, "bdDmaArea", DRVMGR_KT_INT);
        if (value) {
                priv->bd_mem = value->i;
                priv->bd_mem_alloced = 0;
                if (priv->bd_mem & (BDTAB_ALIGN-1)) {
                        GRSPW_DBG("GRSPW[%d]: user-def DMA-area not aligned",
                                  priv->index);
                        priv = NULL;
                        goto out;
                }
        } else {
                priv->bd_mem_alloced = (unsigned int)malloc(bdtabsize + BDTAB_ALIGN - 1);
                if (priv->bd_mem_alloced == 0) {
                        priv = NULL;
                        goto out;
                }
                /* Align memory */
                priv->bd_mem = (priv->bd_mem_alloced + (BDTAB_ALIGN - 1)) &
                               ~(BDTAB_ALIGN-1);
        }

        /* Translate into DMA address that HW can use to access DMA
         * descriptors
         */
        drvmgr_translate_check(
                priv->dev,
                CPUMEM_TO_DMA,
                (void *)priv->bd_mem,
                (void **)&hwa,
                bdtabsize);

        GRSPW_DBG("GRSPW%d DMA descriptor table setup: (alloced:%p, bd_mem:%p, size: %d)\n",
                priv->index, priv->bd_mem_alloced, priv->bd_mem, bdtabsize + BDTAB_ALIGN - 1);
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                /* Do DMA Channel init; other variables etc. are initialized
                 * when the respective DMA channel is opened.
                 *
                 * index & core are initialized by the probe function.
                 */
                priv->dma[i].open = 0;
                priv->dma[i].rx_bds = (struct grspw_rxbd *)
                        (priv->bd_mem + i*BDTAB_SIZE*2);
                priv->dma[i].rx_bds_hwa = (struct grspw_rxbd *)
                        (hwa + BDTAB_SIZE*(2*i));
                priv->dma[i].tx_bds = (struct grspw_txbd *)
                        (priv->bd_mem + BDTAB_SIZE*(2*i+1));
                priv->dma[i].tx_bds_hwa = (struct grspw_txbd *)
                        (hwa + BDTAB_SIZE*(2*i+1));
                GRSPW_DBG("  DMA[%i]: RX %p - %p (%p - %p)   TX %p - %p (%p - %p)\n",
                        i,
                        priv->dma[i].rx_bds, (void *)priv->dma[i].rx_bds + BDTAB_SIZE - 1,
                        priv->dma[i].rx_bds_hwa, (void *)priv->dma[i].rx_bds_hwa + BDTAB_SIZE - 1,
                        priv->dma[i].tx_bds, (void *)priv->dma[i].tx_bds + BDTAB_SIZE - 1,
                        priv->dma[i].tx_bds_hwa, (void *)priv->dma[i].tx_bds_hwa + BDTAB_SIZE - 1);
        }

        /* Basic initialization of hardware, clear some registers but
         * keep Link/RMAP/Node-Address registers intact.
         */
        grspw_hw_stop(priv);

        /* Register Interrupt handler and enable IRQ at IRQ ctrl */
        drvmgr_interrupt_register(priv->dev, 0, priv->devname, grspw_isr, priv);

        /* Take the device */
        priv->open = 1;
out:
        rtems_semaphore_release(grspw_sem);
        return priv;
}

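/* Example (editor's sketch, not part of the original file): typical
 * open/close sequence from application code. Device number 0 is assumed
 * to exist and error handling is kept minimal.
 *
 *   void *dev = grspw_open(0);
 *   if (dev == NULL)
 *           return;  (not initialized, bad index or already opened)
 *   ... configure link, open DMA channels, transfer packets ...
 *   if (grspw_close(dev) != 0)
 *           ...      (fails until all DMA channels are closed)
 */
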
int grspw_close(void *d)
{
        struct grspw_priv *priv = d;
        int i;

        /* Take GRSPW lock - Wait until we get semaphore */
        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return -1;

        /* Check that user has stopped and closed all DMA channels
         * appropriately. At this point the Hardware shall not be doing DMA
         * or generating Interrupts. We want HW in a "startup-state".
         */
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                if (priv->dma[i].open) {
                        rtems_semaphore_release(grspw_sem);
                        return 1;
                }
        }
        grspw_hw_stop(priv);

        /* Mark not open */
        priv->open = 0;
        rtems_semaphore_release(grspw_sem);
        return 0;
}

void grspw_hw_support(void *d, struct grspw_hw_sup *hw)
{
        struct grspw_priv *priv = d;

        *hw = priv->hwsup;
}

void grspw_addr_ctrl(void *d, struct grspw_addr_config *cfg)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs;
        unsigned int ctrl, nodeaddr;
        IRQFLAGS_TYPE irqflags;
        int i;

        if (!priv || !cfg)
                return;
        regs = priv->regs; /* do not dereference priv before the NULL check */

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);

        if (cfg->promiscuous != -1) {
                /* Set Configuration */
                ctrl = REG_READ(&regs->ctrl);
                if (cfg->promiscuous)
                        ctrl |= GRSPW_CTRL_PM;
                else
                        ctrl &= ~GRSPW_CTRL_PM;
                REG_WRITE(&regs->ctrl, ctrl);
                REG_WRITE(&regs->nodeaddr, (cfg->def_mask<<8) | cfg->def_addr);

                for (i=0; i<priv->hwsup.ndma_chans; i++) {
                        ctrl = REG_READ(&regs->dma[i].ctrl);
                        ctrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
                        if (cfg->dma_nacfg[i].node_en) {
                                ctrl |= GRSPW_DMACTRL_EN;
                                REG_WRITE(&regs->dma[i].addr,
                                          (cfg->dma_nacfg[i].node_addr & 0xff) |
                                          ((cfg->dma_nacfg[i].node_mask & 0xff)<<8));
                        } else {
                                ctrl &= ~GRSPW_DMACTRL_EN;
                        }
                        REG_WRITE(&regs->dma[i].ctrl, ctrl);
                }
        }

        /* Read Current Configuration */
        cfg->promiscuous = REG_READ(&regs->ctrl) & GRSPW_CTRL_PM;
        nodeaddr = REG_READ(&regs->nodeaddr);
        cfg->def_addr = (nodeaddr & GRSPW_DEF_ADDR) >> GRSPW_DEF_ADDR_BIT;
        cfg->def_mask = (nodeaddr & GRSPW_DEF_MASK) >> GRSPW_DEF_MASK_BIT;
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                cfg->dma_nacfg[i].node_en = REG_READ(&regs->dma[i].ctrl) &
                                                GRSPW_DMACTRL_EN;
                ctrl = REG_READ(&regs->dma[i].addr);
                cfg->dma_nacfg[i].node_addr = (ctrl & GRSPW_DMAADR_ADDR) >>
                                                GRSPW_DMAADR_ADDR_BIT;
                cfg->dma_nacfg[i].node_mask = (ctrl & GRSPW_DMAADR_MASK) >>
                                                GRSPW_DMAADR_MASK_BIT;
        }
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        for (; i<4; i++) {
                cfg->dma_nacfg[i].node_en = 0;
                cfg->dma_nacfg[i].node_addr = 0;
                cfg->dma_nacfg[i].node_mask = 0;
        }
}

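/* Example (editor's sketch, not part of the original file): set node
 * address 0x22 with an all-zero mask and read back the resulting HW
 * configuration. The grspw_addr_config layout is assumed to be the one
 * declared in bsp/grspw_pkt.h.
 *
 *   struct grspw_addr_config cfg;
 *   memset(&cfg, 0, sizeof(cfg));
 *   cfg.promiscuous = 0;         (use -1 to make the call read-only)
 *   cfg.def_addr = 0x22;
 *   cfg.def_mask = 0;
 *   grspw_addr_ctrl(dev, &cfg);  (cfg now reflects the current settings)
 */
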
/* Return Current Status Register */
unsigned int grspw_link_status(void *d)
{
        struct grspw_priv *priv = d;

        return REG_READ(&priv->regs->status);
}

/* Clear Status Register bits */
void grspw_link_status_clr(void *d, unsigned int mask)
{
        struct grspw_priv *priv = d;

        REG_WRITE(&priv->regs->status, mask);
}

/* Return Current Link State */
spw_link_state_t grspw_link_state(void *d)
{
        struct grspw_priv *priv = d;
        unsigned int status = REG_READ(&priv->regs->status);

        return (status & GRSPW_STS_LS) >> GRSPW_STS_LS_BIT;
}

/* Enable Global IRQ only if some irq source is set */
static inline int grspw_is_irqsource_set(unsigned int ctrl, unsigned int icctrl)
{
        return (ctrl & GRSPW_CTRL_IRQSRC_MASK) ||
                (icctrl & GRSPW_ICCTRL_IRQSRC_MASK);
}


/* options and clkdiv [in/out]: set to -1 to only read current config */
void grspw_link_ctrl(void *d, int *options, int *stscfg, int *clkdiv)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        /* Write? */
        if (clkdiv) {
                if (*clkdiv != -1)
                        REG_WRITE(&regs->clkdiv, *clkdiv & GRSPW_CLKDIV_MASK);
                *clkdiv = REG_READ(&regs->clkdiv) & GRSPW_CLKDIV_MASK;
        }
        if (options) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                if (*options != -1) {
                        ctrl = (ctrl & ~GRSPW_LINK_CFG) |
                                (*options & GRSPW_LINK_CFG);

                        /* Enable Global IRQ only if some irq source is set */
                        if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
                                ctrl |= GRSPW_CTRL_IE;
                        else
                                ctrl &= ~GRSPW_CTRL_IE;

                        REG_WRITE(&regs->ctrl, ctrl);
                        /* Store the link disable events for use in the
                         * ISR. The LINKOPTS_DIS_ON_* options are actually
                         * the corresponding bits in the status register,
                         * shifted by 16.
                         */
                        priv->dis_link_on_err = *options &
                                (LINKOPTS_MASK_DIS_ON | LINKOPTS_DIS_ONERR);
                }
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
                *options = (ctrl & GRSPW_LINK_CFG) | priv->dis_link_on_err;
        }
        if (stscfg) {
                if (*stscfg != -1) {
                        priv->stscfg = *stscfg & LINKSTS_MASK;
                }
                *stscfg = priv->stscfg;
        }
}

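/* Example (editor's sketch, not part of the original file): query the
 * current link configuration without modifying it, by passing -1 in all
 * in/out arguments as described above.
 *
 *   int options = -1, stscfg = -1, clkdiv = -1;
 *   grspw_link_ctrl(dev, &options, &stscfg, &clkdiv);
 *   (options, stscfg and clkdiv now hold the current configuration)
 */
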
/* Generate Tick-In (increment Time Counter, Send Time Code) */
void grspw_tc_tx(void *d)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        IRQFLAGS_TYPE irqflags;

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_TI);
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
}

void grspw_tc_ctrl(void *d, int *options)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        if (options == NULL)
                return;

        /* Write? */
        if (*options != -1) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~(GRSPW_CTRL_TR|GRSPW_CTRL_TT|GRSPW_CTRL_TQ);
                ctrl |= (*options & 0xd) << GRSPW_CTRL_TQ_BIT;

                /* Enable Global IRQ only if some irq source is set */
                if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
                        ctrl |= GRSPW_CTRL_IE;
                else
                        ctrl &= ~GRSPW_CTRL_IE;

                REG_WRITE(&regs->ctrl, ctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        } else
                ctrl = REG_READ(&regs->ctrl);
        *options = (ctrl >> GRSPW_CTRL_TQ_BIT) & 0xd;
}

/* Assign ISR Function to TimeCode RX IRQ */
void grspw_tc_isr(void *d, void (*tcisr)(void *data, int tc), void *data)
{
        struct grspw_priv *priv = d;

        priv->tcisr_arg = data;
        priv->tcisr = tcisr;
}

/* Read/Write TCTRL and TIMECNT. Write if not -1, always read current value
 * TCTRL   = bits 7 and 6
 * TIMECNT = bits 5 to 0
 */
void grspw_tc_time(void *d, int *time)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        if (time == NULL)
                return;
        if (*time != -1)
                REG_WRITE(&regs->time, *time & (GRSPW_TIME_TCNT | GRSPW_TIME_CTRL));
        *time = REG_READ(&regs->time) & (GRSPW_TIME_TCNT | GRSPW_TIME_CTRL);
}

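/* Example (editor's sketch, not part of the original file): read the
 * current time counter, then transmit a time code. grspw_tc_time() is
 * read-only when *time == -1.
 *
 *   int time = -1;
 *   grspw_tc_time(dev, &time);   (read TCTRL/TIMECNT)
 *   grspw_tc_tx(dev);            (tick-in: increment counter, send code)
 */
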
/* Generate Tick-In for the given Interrupt-code and check for generation
 * error.
 *
 * Returns zero on success and non-zero on failure
 */
int grspw_ic_tickin(void *d, int ic)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        IRQFLAGS_TYPE irqflags;
        unsigned int icctrl, mask;

        /* Prepare before turning off IRQ */
        mask = 0x3f << GRSPW_ICCTRL_TXIRQ_BIT;
        ic = ((ic << GRSPW_ICCTRL_TXIRQ_BIT) & mask) |
             GRSPW_ICCTRL_II | GRSPW_ICCTRL_ID;

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        icctrl = REG_READ(&regs->icctrl);
        icctrl &= ~mask;
        icctrl |= ic;
        REG_WRITE(&regs->icctrl, icctrl); /* Generate SpW Interrupt Tick-In */
        /* the ID bit is valid after two clocks, so we do not need to wait here */
        icctrl = REG_READ(&regs->icctrl); /* Check SpW-Int generation error */
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);

        return icctrl & GRSPW_ICCTRL_ID;
}

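/* Example (editor's sketch, not part of the original file): send
 * interrupt-code 5 and check the ID bit for a generation error.
 *
 *   if (grspw_ic_tickin(dev, 5) != 0)
 *           ...  (tick-in was not generated)
 */
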
#define ICOPTS_CTRL_MASK ICOPTS_EN_FLAGFILTER
#define ICOPTS_ICCTRL_MASK                                              \
        (ICOPTS_INTNUM | ICOPTS_EN_SPWIRQ_ON_EE  | ICOPTS_EN_SPWIRQ_ON_IA | \
         ICOPTS_EN_PRIO | ICOPTS_EN_TIMEOUTIRQ | ICOPTS_EN_ACKIRQ | \
         ICOPTS_EN_TICKOUTIRQ | ICOPTS_EN_RX | ICOPTS_EN_TX | \
         ICOPTS_BASEIRQ)

/* Control Interrupt-code settings of core.
 * Write if not pointing to -1, always read current value.
 *
 * TODO: A lot of code duplication with grspw_tc_ctrl
 */
void grspw_ic_ctrl(void *d, unsigned int *options)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        unsigned int icctrl;
        IRQFLAGS_TYPE irqflags;

        if (options == NULL)
                return;

        if (*options != -1) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);

                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~GRSPW_CTRL_TF; /* Depends on a one-to-one relation
                                         * between irqopts bits and ctrl bits */
                ctrl |= (*options & ICOPTS_CTRL_MASK) <<
                        (GRSPW_CTRL_TF_BIT - 0);

                icctrl = REG_READ(&regs->icctrl);
                icctrl &= ~ICOPTS_ICCTRL_MASK; /* Depends on a one-to-one relation
                                                * between irqopts bits and icctrl bits */
                icctrl |= *options & ICOPTS_ICCTRL_MASK;

                /* Enable Global IRQ only if some irq source is set */
                if (grspw_is_irqsource_set(ctrl, icctrl))
                        ctrl |= GRSPW_CTRL_IE;
                else
                        ctrl &= ~GRSPW_CTRL_IE;

                REG_WRITE(&regs->ctrl, ctrl);
                REG_WRITE(&regs->icctrl, icctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        }
        *options = ((REG_READ(&regs->ctrl) & ICOPTS_CTRL_MASK) |
                    (REG_READ(&regs->icctrl) & ICOPTS_ICCTRL_MASK));
}

void grspw_ic_config(void *d, int rw, struct spwpkt_ic_config *cfg)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        if (!cfg)
                return;

        if (rw & 1) {
                REG_WRITE(&regs->ictickomask, cfg->tomask);
                REG_WRITE(&regs->icaamask, cfg->aamask);
                REG_WRITE(&regs->icrlpresc, cfg->scaler);
                REG_WRITE(&regs->icrlisr, cfg->isr_reload);
                REG_WRITE(&regs->icrlintack, cfg->ack_reload);
        }
        if (rw & 2) {
                cfg->tomask = REG_READ(&regs->ictickomask);
                cfg->aamask = REG_READ(&regs->icaamask);
                cfg->scaler = REG_READ(&regs->icrlpresc);
                cfg->isr_reload = REG_READ(&regs->icrlisr);
                cfg->ack_reload = REG_READ(&regs->icrlintack);
        }
}

/* Read or Write Interrupt-code status registers */
void grspw_ic_sts(void *d, unsigned int *rxirq, unsigned int *rxack, unsigned int *intto)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        /* No locking needed since the status bits are clear-on-write */

        if (rxirq) {
                if (*rxirq != 0)
                        REG_WRITE(&regs->icrx, *rxirq);
                else
                        *rxirq = REG_READ(&regs->icrx);
        }

        if (rxack) {
                if (*rxack != 0)
                        REG_WRITE(&regs->icack, *rxack);
                else
                        *rxack = REG_READ(&regs->icack);
        }

        if (intto) {
                if (*intto != 0)
                        REG_WRITE(&regs->ictimeout, *intto);
                else
                        *intto = REG_READ(&regs->ictimeout);
        }
}

/* Assign handler function to Interrupt-code tick out IRQ */
void grspw_ic_isr(void *d, spwpkt_ic_isr_t handler, void *data)
{
        struct grspw_priv *priv = d;

        priv->icisr_arg = data;
        priv->icisr = handler;
}

/* Set (not -1) and/or read RMAP options. */
int grspw_rmap_ctrl(void *d, int *options, int *dstkey)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        if (dstkey) {
                if (*dstkey != -1)
                        REG_WRITE(&regs->destkey, *dstkey & GRSPW_DK_DESTKEY);
                *dstkey = REG_READ(&regs->destkey) & GRSPW_DK_DESTKEY;
        }
        if (options) {
                if (*options != -1) {
                        if ((*options & RMAPOPTS_EN_RMAP) && !priv->hwsup.rmap)
                                return -1;

                        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                        ctrl = REG_READ(&regs->ctrl);
                        ctrl &= ~(GRSPW_CTRL_RE|GRSPW_CTRL_RD);
                        ctrl |= (*options & 0x3) << GRSPW_CTRL_RE_BIT;
                        REG_WRITE(&regs->ctrl, ctrl);
                        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
                }
                *options = (REG_READ(&regs->ctrl) >> GRSPW_CTRL_RE_BIT) & 0x3;
        }

        return 0;
}

void grspw_rmap_support(void *d, char *rmap, char *rmap_crc)
{
        struct grspw_priv *priv = d;

        if (rmap)
                *rmap = priv->hwsup.rmap;
        if (rmap_crc)
                *rmap_crc = priv->hwsup.rmap_crc;
}

/* Select port:
 *  -1     = the currently selected port is returned
 *  0      = Port 0
 *  1      = Port 1
 *  Others = both Port 0 and Port 1
 */
int grspw_port_ctrl(void *d, int *port)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        IRQFLAGS_TYPE irqflags;

        if (port == NULL)
                return -1;

        if ((*port == 1) || (*port == 0)) {
                /* Select port user selected */
                if ((*port == 1) && (priv->hwsup.nports < 2))
                        return -1; /* Changing to Port 1, but only one port available */
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~(GRSPW_CTRL_NP | GRSPW_CTRL_PS);
                ctrl |= (*port & 1) << GRSPW_CTRL_PS_BIT;
                REG_WRITE(&regs->ctrl, ctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        } else if (*port > 1) {
                /* Select both ports */
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_NP);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        }

        /* Get current settings */
        ctrl = REG_READ(&regs->ctrl);
        if (ctrl & GRSPW_CTRL_NP) {
                /* Any port, selected by hardware */
                if (priv->hwsup.nports > 1)
                        *port = 3;
                else
                        *port = 0; /* Port0 the only port available */
        } else {
                *port = (ctrl & GRSPW_CTRL_PS) >> GRSPW_CTRL_PS_BIT;
        }

        return 0;
}

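/* Example (editor's sketch, not part of the original file): switch to
 * Port 1 when the hardware has more than one port.
 *
 *   int port = 1;
 *   if (grspw_port_count(dev) > 1)
 *           grspw_port_ctrl(dev, &port);  (port echoes the selection)
 */
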
/* Returns the number of ports available in hardware */
int grspw_port_count(void *d)
{
        struct grspw_priv *priv = d;

        return priv->hwsup.nports;
}

/* Current active port: 0 or 1 */
int grspw_port_active(void *d)
{
        struct grspw_priv *priv = d;
        unsigned int status;

        status = REG_READ(&priv->regs->status);

        return (status & GRSPW_STS_AP) >> GRSPW_STS_AP_BIT;
}

void grspw_stats_read(void *d, struct grspw_core_stats *sts)
{
        struct grspw_priv *priv = d;

        if (sts == NULL)
                return;
        memcpy(sts, &priv->stats, sizeof(priv->stats));
}

void grspw_stats_clr(void *d)
{
        struct grspw_priv *priv = d;

        /* Clear most of the statistics */
        memset(&priv->stats, 0, sizeof(priv->stats));
}

/*** DMA Interface ***/

/* Initialize the RX and TX Descriptor Ring, empty of packets */
STATIC void grspw_bdrings_init(struct grspw_dma_priv *dma)
{
        struct grspw_ring *r;
        int i;

        /* Empty BD rings */
        dma->rx_ring_head = dma->rx_ring_base;
        dma->rx_ring_tail = dma->rx_ring_base;
        dma->tx_ring_head = dma->tx_ring_base;
        dma->tx_ring_tail = dma->tx_ring_base;

        /* Init RX Descriptors */
        r = (struct grspw_ring *)dma->rx_ring_base;
        for (i=0; i<GRSPW_RXBD_NR; i++) {

                /* Init Ring Entry */
                r[i].next = &r[i+1];
                r[i].bd.rx = &dma->rx_bds[i];
                r[i].pkt = NULL;

                /* Init HW Descriptor */
                BD_WRITE(&r[i].bd.rx->ctrl, 0);
                BD_WRITE(&r[i].bd.rx->addr, 0);
        }
        r[GRSPW_RXBD_NR-1].next = &r[0];

        /* Init TX Descriptors */
        r = (struct grspw_ring *)dma->tx_ring_base;
        for (i=0; i<GRSPW_TXBD_NR; i++) {

                /* Init Ring Entry */
                r[i].next = &r[i+1];
                r[i].bd.tx = &dma->tx_bds[i];
                r[i].pkt = NULL;

                /* Init HW Descriptor */
                BD_WRITE(&r[i].bd.tx->ctrl, 0);
                BD_WRITE(&r[i].bd.tx->haddr, 0);
                BD_WRITE(&r[i].bd.tx->dlen, 0);
                BD_WRITE(&r[i].bd.tx->daddr, 0);
        }
        r[GRSPW_TXBD_NR-1].next = &r[0];
}

/* Try to populate the descriptor ring with as many READY unused packet
 * buffers as possible. The packets assigned to a descriptor are put at the
 * end of the scheduled list.
 *
 * The number of Packets scheduled is returned.
 *
 *  - READY List -> RX-SCHED List
 *  - Descriptors are initialized and enabled for reception
 */
STATIC int grspw_rx_schedule_ready(struct grspw_dma_priv *dma)
{
        int cnt;
        unsigned int ctrl, dmactrl;
        void *hwaddr;
        struct grspw_rxring *curr_bd;
        struct grspw_pkt *curr_pkt, *last_pkt;
        struct grspw_list lst;
        IRQFLAGS_TYPE irqflags;

        /* Is Ready Q empty? */
        if (grspw_list_is_empty(&dma->ready))
                return 0;

        cnt = 0;
        lst.head = curr_pkt = dma->ready.head;
        curr_bd = dma->rx_ring_head;
        while (!curr_bd->pkt) {

                /* Assign Packet to descriptor */
                curr_bd->pkt = curr_pkt;

                /* Prepare descriptor address. */
                hwaddr = curr_pkt->data;
                if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
                        drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
                                         hwaddr, &hwaddr);
                        if (curr_pkt->data == hwaddr) /* translation needed? */
                                curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
                }
                BD_WRITE(&curr_bd->bd->addr, hwaddr);

                ctrl = GRSPW_RXBD_EN;
                if (curr_bd->next == dma->rx_ring_base) {
                        /* Wrap around (only needed when smaller descriptor
                         * table)
                         */
                        ctrl |= GRSPW_RXBD_WR;
                }

                /* Is this Packet going to be an interrupt Packet? */
                if ((--dma->rx_irq_en_cnt_curr) <= 0) {
                        if (dma->cfg.rx_irq_en_cnt == 0) {
                                /* IRQ is disabled. A big number to avoid
                                 * equal to zero too often
                                 */
                                dma->rx_irq_en_cnt_curr = 0x3fffffff;
                        } else {
                                dma->rx_irq_en_cnt_curr = dma->cfg.rx_irq_en_cnt;
                                ctrl |= GRSPW_RXBD_IE;
                        }
                }

                if (curr_pkt->flags & RXPKT_FLAG_IE)
                        ctrl |= GRSPW_RXBD_IE;

                /* Enable descriptor */
                BD_WRITE(&curr_bd->bd->ctrl, ctrl);

                last_pkt = curr_pkt;
                curr_bd = curr_bd->next;
                cnt++;

                /* Get Next Packet from Ready Queue */
                if (curr_pkt == dma->ready.tail) {
                        /* Handled all in ready queue. */
                        curr_pkt = NULL;
                        break;
                }
                curr_pkt = curr_pkt->next;
        }

        /* Have Packets been scheduled? */
        if (cnt > 0) {
                /* Prepare list for insertion/deletion */
                lst.tail = last_pkt;

                /* Remove scheduled packets from ready queue */
                grspw_list_remove_head_list(&dma->ready, &lst);
                dma->ready_cnt -= cnt;
                if (dma->stats.ready_cnt_min > dma->ready_cnt)
                        dma->stats.ready_cnt_min = dma->ready_cnt;

                /* Insert scheduled packets into scheduled queue */
                grspw_list_append_list(&dma->rx_sched, &lst);
                dma->rx_sched_cnt += cnt;
                if (dma->stats.rx_sched_cnt_max < dma->rx_sched_cnt)
                        dma->stats.rx_sched_cnt_max = dma->rx_sched_cnt;

                /* Update RX ring position */
                dma->rx_ring_head = curr_bd;

                /* Make hardware aware of the newly enabled descriptors.
                 * We must protect from ISR which writes RI|TI
                 */
                SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
                dmactrl = REG_READ(&dma->regs->ctrl);
                dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
                dmactrl |= GRSPW_DMACTRL_RE | GRSPW_DMACTRL_RD;
                REG_WRITE(&dma->regs->ctrl, dmactrl);
                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
        }

        return cnt;
}

/* Scans the RX descriptor table for scheduled Packets that have been
 * received, and moves these Packets from the head of the scheduled queue to
 * the tail of the recv queue.
 *
 * Also, for all packets the status is updated.
 *
 *  - RX-SCHED List -> RECV List
 *
 * Return Value
 * Number of packets moved
 */
STATIC int grspw_rx_process_scheduled(struct grspw_dma_priv *dma)
{
        struct grspw_rxring *curr;
        struct grspw_pkt *last_pkt;
        int recv_pkt_cnt = 0;
        unsigned int ctrl;
        struct grspw_list lst;

        curr = dma->rx_ring_tail;

        /* Step into RX ring to find out if packets have been scheduled for
         * reception.
         */
        if (!curr->pkt)
                return 0; /* No scheduled packets, thus no received, abort */

        /* Packets have been scheduled ==> scheduled Packets may have been
         * received and need to be collected into the RECV List.
         *
         * A temporary list "lst" with all received packets is created.
         */
        lst.head = curr->pkt;

        /* Loop until the first enabled "unreceived" SpW Packet is found.
         * An unused descriptor is indicated by an unassigned pkt field.
         */
        while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_RXBD_EN)) {
                /* Handle one received Packet */

                /* Remember last handled Packet so that insertion into and
                 * removal from the Packet lists goes fast.
                 */
                last_pkt = curr->pkt;

                /* Get Length of Packet in bytes, and reception options */
                last_pkt->dlen = (ctrl & GRSPW_RXBD_LEN) >> GRSPW_RXBD_LEN_BIT;

                /* Set flags to indicate error(s) and CRC information,
                 * and Mark Received.
                 */
                last_pkt->flags = (last_pkt->flags & ~RXPKT_FLAG_OUTPUT_MASK) |
                                  ((ctrl >> 20) & RXPKT_FLAG_OUTPUT_MASK) |
                                  RXPKT_FLAG_RX;

                /* Packet was Truncated? */
                if (ctrl & GRSPW_RXBD_TR)
                        dma->stats.rx_err_trunk++;

                /* Error End-Of-Packet? */
                if (ctrl & GRSPW_RXBD_EP)
                        dma->stats.rx_err_endpkt++;
                curr->pkt = NULL; /* Mark descriptor unused */

                /* Increment */
                curr = curr->next;
                recv_pkt_cnt++;
        }

        /* 1. Remove all handled packets from scheduled queue
         * 2. Put all handled packets into recv queue
         */
        if (recv_pkt_cnt > 0) {

                /* Update Stats, Number of Received Packets */
                dma->stats.rx_pkts += recv_pkt_cnt;

                /* Save RX ring position */
                dma->rx_ring_tail = curr;

                /* Prepare list for insertion/deletion */
                lst.tail = last_pkt;

                /* Remove received Packets from RX-SCHED queue */
                grspw_list_remove_head_list(&dma->rx_sched, &lst);
                dma->rx_sched_cnt -= recv_pkt_cnt;
                if (dma->stats.rx_sched_cnt_min > dma->rx_sched_cnt)
                        dma->stats.rx_sched_cnt_min = dma->rx_sched_cnt;

                /* Insert received Packets into RECV queue */
                grspw_list_append_list(&dma->recv, &lst);
                dma->recv_cnt += recv_pkt_cnt;
                if (dma->stats.recv_cnt_max < dma->recv_cnt)
                        dma->stats.recv_cnt_max = dma->recv_cnt;
        }

        return recv_pkt_cnt;
}

1400/* Try to populate descriptor ring with as many SEND packets as possible. The
1401 * packets assigned with to a descriptor are put in the end of
1402 * the scheduled list.
1403 *
1404 * The number of Packets scheduled is returned.
1405 *
1406 *  - SEND List -> TX-SCHED List
1407 *  - Descriptors are initialized and enabled for transmission
1408 */
1409STATIC int grspw_tx_schedule_send(struct grspw_dma_priv *dma)
1410{
1411        int cnt;
1412        unsigned int ctrl, dmactrl;
1413        void *hwaddr;
1414        struct grspw_txring *curr_bd;
1415        struct grspw_pkt *curr_pkt, *last_pkt;
1416        struct grspw_list lst;
1417        IRQFLAGS_TYPE irqflags;
1418
1419        /* Is Ready Q empty? */
1420        if (grspw_list_is_empty(&dma->send))
1421                return 0;
1422
1423        cnt = 0;
1424        lst.head = curr_pkt = dma->send.head;
1425        curr_bd = dma->tx_ring_head;
1426        while (!curr_bd->pkt) {
1427
1428                /* Assign Packet to descriptor */
1429                curr_bd->pkt = curr_pkt;
1430
1431                /* Set up header transmission */
1432                if (curr_pkt->hdr && curr_pkt->hlen) {
1433                        hwaddr = curr_pkt->hdr;
1434                        if (curr_pkt->flags & PKT_FLAG_TR_HDR) {
1435                                drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
1436                                                 hwaddr, &hwaddr);
1437                                /* translation needed? */
1438                                if (curr_pkt->hdr == hwaddr)
1439                                        curr_pkt->flags &= ~PKT_FLAG_TR_HDR;
1440                        }
1441                        BD_WRITE(&curr_bd->bd->haddr, hwaddr);
1442                        ctrl = GRSPW_TXBD_EN | curr_pkt->hlen;
1443                } else {
1444                        ctrl = GRSPW_TXBD_EN;
1445                }
1446                /* Enable IRQ generation and CRC options as specified
1447                 * by user.
1448                 */
1449                ctrl |= (curr_pkt->flags & TXPKT_FLAG_INPUT_MASK) << 8;
1450
1451                if (curr_bd->next == dma->tx_ring_base) {
1452                        /* Wrap around (only needed when smaller descriptor table) */
1453                        ctrl |= GRSPW_TXBD_WR;
1454                }
1455
1456                /* Is this Packet going to be an interrupt Packet? */
1457                if ((--dma->tx_irq_en_cnt_curr) <= 0) {
1458                        if (dma->cfg.tx_irq_en_cnt == 0) {
1459                                /* IRQ is disabled. Use a big number
1460                                 * so that the counter rarely reaches zero.
1461                                 */
1462                                dma->tx_irq_en_cnt_curr = 0x3fffffff;
1463                        } else {
1464                                dma->tx_irq_en_cnt_curr = dma->cfg.tx_irq_en_cnt;
1465                                ctrl |= GRSPW_TXBD_IE;
1466                        }
1467                }
1468
1469                /* Prepare data descriptor address. Parts of CTRL are
1470                 * written to DLEN for debugging only (CTRL is cleared by HW).
1471                 */
1472                if (curr_pkt->data && curr_pkt->dlen) {
1473                        hwaddr = curr_pkt->data;
1474                        if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
1475                                drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
1476                                                 hwaddr, &hwaddr);
1477                                /* translation needed? */
1478                                if (curr_pkt->data == hwaddr)
1479                                        curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
1480                        }
1481                        BD_WRITE(&curr_bd->bd->daddr, hwaddr);
1482                        BD_WRITE(&curr_bd->bd->dlen, curr_pkt->dlen |
1483                                                     ((ctrl & 0x3f000) << 12));
1484                } else {
1485                        BD_WRITE(&curr_bd->bd->daddr, 0);
1486                        BD_WRITE(&curr_bd->bd->dlen, ((ctrl & 0x3f000) << 12));
1487                }
1488
1489                /* Enable descriptor */
1490                BD_WRITE(&curr_bd->bd->ctrl, ctrl);
1491
1492                last_pkt = curr_pkt;
1493                curr_bd = curr_bd->next;
1494                cnt++;
1495
1496                /* Get Next Packet from Send Queue */
1497                if (curr_pkt == dma->send.tail) {
1498                        /* Handled all in send queue. */
1499                        curr_pkt = NULL;
1500                        break;
1501                }
1502                curr_pkt = curr_pkt->next;
1503        }
1504
1505        /* Have Packets been scheduled? */
1506        if (cnt > 0) {
1507                /* Prepare list for insertion/deletion */
1508                lst.tail = last_pkt;
1509
1510                /* Remove scheduled packets from send queue */
1511                grspw_list_remove_head_list(&dma->send, &lst);
1512                dma->send_cnt -= cnt;
1513                if (dma->stats.send_cnt_min > dma->send_cnt)
1514                        dma->stats.send_cnt_min = dma->send_cnt;
1515
1516                /* Insert scheduled packets into scheduled queue */
1517                grspw_list_append_list(&dma->tx_sched, &lst);
1518                dma->tx_sched_cnt += cnt;
1519                if (dma->stats.tx_sched_cnt_max < dma->tx_sched_cnt)
1520                        dma->stats.tx_sched_cnt_max = dma->tx_sched_cnt;
1521
1522                /* Update TX ring position */
1523                dma->tx_ring_head = curr_bd;
1524
1525                /* Make hardware aware of the newly enabled descriptors */
1526                SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
1527                dmactrl = REG_READ(&dma->regs->ctrl);
1528                dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
1529                dmactrl |= GRSPW_DMACTRL_TE;
1530                REG_WRITE(&dma->regs->ctrl, dmactrl);
1531                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
1532        }
1533        return cnt;
1534}
1535
1536/* Scans the TX descriptor table for transmitted packets, and moves these
1537 * packets from the head of the scheduled queue to the tail of the sent queue.
1538 *
1539 * Also, for all packets the status is updated.
1540 *
1541 *  - SCHED List -> SENT List
1542 *
1543 * Return Value
1544 * Number of packets moved
1545 */
1546STATIC int grspw_tx_process_scheduled(struct grspw_dma_priv *dma)
1547{
1548        struct grspw_txring *curr;
1549        struct grspw_pkt *last_pkt;
1550        int sent_pkt_cnt = 0;
1551        unsigned int ctrl;
1552        struct grspw_list lst;
1553
1554        curr = dma->tx_ring_tail;
1555
1556        /* Step into TX ring to find if packets have been scheduled for
1557         * transmission.
1558         */
1559        if (!curr->pkt)
1560                return 0; /* No scheduled packets, thus no sent, abort */
1561
1562        /* Packets have been scheduled ==> scheduled Packets may have been
1563         * transmitted and need to be collected into SENT List.
1564         *
1565         * A temporary list "lst" with all sent packets is created.
1566         */
1567        lst.head = curr->pkt;
1568
1569        /* Loop until first enabled "un-transmitted" SpW Packet is found.
1570         * An unused descriptor is indicated by an unassigned pkt field.
1571         */
1572        while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_TXBD_EN)) {
1573                /* Handle one sent Packet */
1574
1575                /* Remember last handled Packet so that insertion/removal from
1576                 * packet lists go fast.
1577                 */
1578                last_pkt = curr->pkt;
1579
1580                /* Set flags to indicate error(s) and Mark Sent.
1581                 */
1582                last_pkt->flags = (last_pkt->flags & ~TXPKT_FLAG_OUTPUT_MASK) |
1583                                        (ctrl & TXPKT_FLAG_LINKERR) |
1584                                        TXPKT_FLAG_TX;
1585
1586                /* Sent packet experienced link error? */
1587                if (ctrl & GRSPW_TXBD_LE)
1588                        dma->stats.tx_err_link++;
1589
1590                curr->pkt = NULL; /* Mark descriptor unused */
1591
1592                /* Increment */
1593                curr = curr->next;
1594                sent_pkt_cnt++;
1595        }
1596
1597        /* 1. Remove all handled packets from TX-SCHED queue
1598         * 2. Put all handled packets into SENT queue
1599         */
1600        if (sent_pkt_cnt > 0) {
1601                /* Update Stats, Number of Transmitted Packets */
1602                dma->stats.tx_pkts += sent_pkt_cnt;
1603
1604                /* Save TX ring position */
1605                dma->tx_ring_tail = curr;
1606
1607                /* Prepare list for insertion/deletion */
1608                lst.tail = last_pkt;
1609
1610                /* Remove sent packets from TX-SCHED queue */
1611                grspw_list_remove_head_list(&dma->tx_sched, &lst);
1612                dma->tx_sched_cnt -= sent_pkt_cnt;
1613                if (dma->stats.tx_sched_cnt_min > dma->tx_sched_cnt)
1614                        dma->stats.tx_sched_cnt_min = dma->tx_sched_cnt;
1615
1616                /* Insert sent packets into SENT queue */
1617                grspw_list_append_list(&dma->sent, &lst);
1618                dma->sent_cnt += sent_pkt_cnt;
1619                if (dma->stats.sent_cnt_max < dma->sent_cnt)
1620                        dma->stats.sent_cnt_max = dma->sent_cnt;
1621        }
1622
1623        return sent_pkt_cnt;
1624}
1625
1626void *grspw_dma_open(void *d, int chan_no)
1627{
1628        struct grspw_priv *priv = d;
1629        struct grspw_dma_priv *dma;
1630        int size;
1631
1632        if ((chan_no < 0) || (priv->hwsup.ndma_chans <= chan_no))
1633                return NULL;
1634
1635        dma = &priv->dma[chan_no];
1636
1637        /* Take GRSPW lock */
1638        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1639            != RTEMS_SUCCESSFUL)
1640                return NULL;
1641
1642        if (dma->open) {
1643                dma = NULL;
1644                goto out;
1645        }
1646
1647        dma->started = 0;
1648
1649        /* Set Default Configuration:
1650         *
1651         *  - MAX RX Packet Length = DEFAULT_RXMAX
1652         *  - Disable IRQ generation
1653         *  - No spill (DMAFLAG_NO_SPILL)
1654         */
1655        dma->cfg.rxmaxlen = DEFAULT_RXMAX;
1656        dma->cfg.rx_irq_en_cnt = 0;
1657        dma->cfg.tx_irq_en_cnt = 0;
1658        dma->cfg.flags = DMAFLAG_NO_SPILL;
1659
1660        /* set to NULL so that error exit works correctly */
1661        dma->sem_rxdma = RTEMS_ID_NONE;
1662        dma->sem_txdma = RTEMS_ID_NONE;
1663        dma->rx_wait.sem_wait = RTEMS_ID_NONE;
1664        dma->tx_wait.sem_wait = RTEMS_ID_NONE;
1665        dma->rx_ring_base = NULL;
1666
1667        /* DMA Channel Semaphore created with count = 1 */
1668        if (rtems_semaphore_create(
1669            rtems_build_name('S', 'D', '0' + priv->index, '0' + chan_no*2), 1,
1670            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1671            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1672            RTEMS_NO_PRIORITY_CEILING, 0, &dma->sem_rxdma) != RTEMS_SUCCESSFUL) {
1673                dma->sem_rxdma = RTEMS_ID_NONE;
1674                goto err;
1675        }
1676        if (rtems_semaphore_create(
1677            rtems_build_name('S', 'D', '0' + priv->index, '0' + chan_no*2+1), 1,
1678            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1679            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1680            RTEMS_NO_PRIORITY_CEILING, 0, &dma->sem_txdma) != RTEMS_SUCCESSFUL) {
1681                dma->sem_txdma = RTEMS_ID_NONE;
1682                goto err;
1683        }
1684
1685        /* Allocate memory for the two descriptor rings */
1686        size = sizeof(struct grspw_ring) * (GRSPW_RXBD_NR + GRSPW_TXBD_NR);
1687        dma->rx_ring_base = (struct grspw_rxring *)malloc(size);
1688        dma->tx_ring_base = (struct grspw_txring *)&dma->rx_ring_base[GRSPW_RXBD_NR];
1689        if (dma->rx_ring_base == NULL)
1690                goto err;
1691
1692        /* Create DMA RX and TX Channel wait semaphores with count = 0 */
1693        if (rtems_semaphore_create(
1694            rtems_build_name('S', 'R', '0' + priv->index, '0' + chan_no), 0,
1695            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1696            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1697            RTEMS_NO_PRIORITY_CEILING, 0, &dma->rx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
1698                dma->rx_wait.sem_wait = RTEMS_ID_NONE;
1699                goto err;
1700        }
1701        if (rtems_semaphore_create(
1702            rtems_build_name('S', 'T', '0' + priv->index, '0' + chan_no), 0,
1703            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1704            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1705            RTEMS_NO_PRIORITY_CEILING, 0, &dma->tx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
1706                dma->tx_wait.sem_wait = RTEMS_ID_NONE;
1707                goto err;
1708        }
1709
1710        /* Reset software structures */
1711        grspw_dma_reset(dma);
1712
1713        /* Take the device */
1714        dma->open = 1;
1715out:
1716        /* Return GRSPW Lock */
1717        rtems_semaphore_release(grspw_sem);
1718
1719        return dma;
1720
1721        /* initialization error happened */
1722err:
1723        if (dma->sem_rxdma != RTEMS_ID_NONE)
1724                rtems_semaphore_delete(dma->sem_rxdma);
1725        if (dma->sem_txdma != RTEMS_ID_NONE)
1726                rtems_semaphore_delete(dma->sem_txdma);
1727        if (dma->rx_wait.sem_wait != RTEMS_ID_NONE)
1728                rtems_semaphore_delete(dma->rx_wait.sem_wait);
1729        if (dma->tx_wait.sem_wait != RTEMS_ID_NONE)
1730                rtems_semaphore_delete(dma->tx_wait.sem_wait);
1731        if (dma->rx_ring_base)
1732                free(dma->rx_ring_base);
1733        dma = NULL;
1734        goto out;
1735}
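
/* Usage sketch (illustrative only, not part of the driver): open DMA channel
 * 0 of a GRSPW device and close it again. The "dev" handle and the example
 * function name are assumptions; see grspw_dma_open()/grspw_dma_close()
 * above for the exact return values.
 */
#if 0
static void example_dma_open_close(void *dev)
{
        void *dchan;

        dchan = grspw_dma_open(dev, 0);
        if (dchan == NULL)
                return; /* no such channel, or channel already open */

        /* ... configure, start, use and stop the channel here ... */

        /* close returns non-zero while the channel is started or while
         * threads are still blocked within the driver
         */
        grspw_dma_stop(dchan);
        if (grspw_dma_close(dchan) != 0)
                return; /* a thread is still active within the driver */
}
#endif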
1736
1737/* Initialize Software Structures:
1738 *  - Clear all Queues
1739 *  - init BD ring
1740 *  - init IRQ counter
1741 *  - clear statistics counters
1742 *  - init wait structures and semaphores
1743 */
1744STATIC void grspw_dma_reset(struct grspw_dma_priv *dma)
1745{
1746        /* Empty RX and TX queues */
1747        grspw_list_clr(&dma->ready);
1748        grspw_list_clr(&dma->rx_sched);
1749        grspw_list_clr(&dma->recv);
1750        grspw_list_clr(&dma->send);
1751        grspw_list_clr(&dma->tx_sched);
1752        grspw_list_clr(&dma->sent);
1753        dma->ready_cnt = 0;
1754        dma->rx_sched_cnt = 0;
1755        dma->recv_cnt = 0;
1756        dma->send_cnt = 0;
1757        dma->tx_sched_cnt = 0;
1758        dma->sent_cnt = 0;
1759
1760        dma->rx_irq_en_cnt_curr = 0;
1761        dma->tx_irq_en_cnt_curr = 0;
1762
1763        grspw_bdrings_init(dma);
1764
1765        dma->rx_wait.waiting = 0;
1766        dma->tx_wait.waiting = 0;
1767
1768        grspw_dma_stats_clr(dma);
1769}
1770
1771int grspw_dma_close(void *c)
1772{
1773        struct grspw_dma_priv *dma = c;
1774
1775        if (!dma->open)
1776                return 0;
1777
1778        /* Take device lock - Wait until we get semaphore */
1779        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1780            != RTEMS_SUCCESSFUL)
1781                return -1;
1782        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1783            != RTEMS_SUCCESSFUL) {
1784                rtems_semaphore_release(dma->sem_rxdma);
1785                return -1;
1786        }
1787
1788        /* Cannot close an active DMA channel. The user must stop DMA and
1789         * make sure no threads are active/blocked within the driver.
1790         */
1791        if (dma->started || dma->rx_wait.waiting || dma->tx_wait.waiting) {
1792                rtems_semaphore_release(dma->sem_txdma);
1793                rtems_semaphore_release(dma->sem_rxdma);
1794                return 1;
1795        }
1796
1797        /* Free resources */
1798        rtems_semaphore_delete(dma->rx_wait.sem_wait);
1799        rtems_semaphore_delete(dma->tx_wait.sem_wait);
1800        /* Release and delete lock. Operations requiring lock will fail */
1801        rtems_semaphore_delete(dma->sem_txdma);
1802        rtems_semaphore_delete(dma->sem_rxdma);
1803        dma->sem_txdma = RTEMS_ID_NONE;
1804        dma->sem_rxdma = RTEMS_ID_NONE;
1805
1806        /* Free memory */
1807        if (dma->rx_ring_base)
1808                free(dma->rx_ring_base);
1809        dma->rx_ring_base = NULL;
1810        dma->tx_ring_base = NULL;
1811
1812        dma->open = 0;
1813        return 0;
1814}
1815
1816/* Schedule a list of packets for transmission at some point in the
1817 * future.
1818 *
1819 * 1. Move transmitted packets to SENT List (SCHED->SENT)
1820 * 2. Add the requested packets to the SEND List (USER->SEND)
1821 * 3. Schedule as many packets as possible (SEND->SCHED)
1822 */
1823int grspw_dma_tx_send(void *c, int opts, struct grspw_list *pkts, int count)
1824{
1825        struct grspw_dma_priv *dma = c;
1826        int ret;
1827
1828        /* Take DMA channel lock */
1829        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1830            != RTEMS_SUCCESSFUL)
1831                return -1;
1832
1833        if (dma->started == 0) {
1834                ret = 1; /* signal DMA has been stopped */
1835                goto out;
1836        }
1837        ret = 0;
1838
1839        /* 1. Move transmitted packets to SENT List (SCHED->SENT) */
1840        if ((opts & 1) == 0)
1841                grspw_tx_process_scheduled(dma);
1842
1843        /* 2. Add the requested packets to the SEND List (USER->SEND) */
1844        if (pkts && (count > 0)) {
1845                grspw_list_append_list(&dma->send, pkts);
1846                dma->send_cnt += count;
1847                if (dma->stats.send_cnt_max < dma->send_cnt)
1848                        dma->stats.send_cnt_max = dma->send_cnt;
1849        }
1850
1851        /* 3. Schedule as many packets as possible (SEND->SCHED) */
1852        if ((opts & 2) == 0)
1853                grspw_tx_schedule_send(dma);
1854
1855out:
1856        /* Unlock DMA channel */
1857        rtems_semaphore_release(dma->sem_txdma);
1858
1859        return ret;
1860}
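
/* Usage sketch (illustrative only, not part of the driver): queue a single
 * packet for transmission on an open and started DMA channel "dchan". The
 * packet and data buffer management is application specific and assumed
 * here.
 */
#if 0
static int example_tx_send_one(void *dchan, struct grspw_pkt *pkt,
                               void *data, int dlen)
{
        struct grspw_list lst;

        pkt->flags = 0;         /* no address translation, no extra options */
        pkt->hdr = NULL;        /* no separate header part */
        pkt->hlen = 0;
        pkt->data = data;       /* DMA-reachable data buffer */
        pkt->dlen = dlen;       /* data length in bytes */
        pkt->next = NULL;

        lst.head = lst.tail = pkt;

        /* opts=0: also collect sent packets and enable descriptors now.
         * Returns 0 on success, 1 if DMA is stopped, -1 on locking error.
         */
        return grspw_dma_tx_send(dchan, 0, &lst, 1);
}
#endif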
1861
1862int grspw_dma_tx_reclaim(void *c, int opts, struct grspw_list *pkts, int *count)
1863{
1864        struct grspw_dma_priv *dma = c;
1865        struct grspw_pkt *pkt, *lastpkt;
1866        int cnt, started;
1867
1868        /* Take DMA channel lock */
1869        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1870            != RTEMS_SUCCESSFUL)
1871                return -1;
1872
1873        /* 1. Move transmitted packets to SENT List (SCHED->SENT) */
1874        started = dma->started;
1875        if ((started > 0) && ((opts & 1) == 0))
1876                grspw_tx_process_scheduled(dma);
1877
1878        /* 2. Move all/count SENT packets to the caller's list (SENT->USER) */
1879        if (pkts) {
1880                if ((count == NULL) || (*count == -1) ||
1881                    (*count >= dma->sent_cnt)) {
1882                        /* Move all SENT Packets */
1883                        *pkts = dma->sent;
1884                        grspw_list_clr(&dma->sent);
1885                        if (count)
1886                                *count = dma->sent_cnt;
1887                        dma->sent_cnt = 0;
1888                } else {
1889                        /* Move a number of SENT Packets */
1890                        pkts->head = pkt = lastpkt = dma->sent.head;
1891                        cnt = 0;
1892                        while (cnt < *count) {
1893                                lastpkt = pkt;
1894                                pkt = pkt->next;
1895                                cnt++;
1896                        }
1897                        if (cnt > 0) {
1898                                pkts->tail = lastpkt;
1899                                grspw_list_remove_head_list(&dma->sent, pkts);
1900                                dma->sent_cnt -= cnt;
1901                        } else {
1902                                grspw_list_clr(pkts);
1903                        }
1904                }
1905        } else if (count) {
1906                *count = 0;
1907        }
1908
1909        /* 3. Schedule as many packets as possible (SEND->SCHED) */
1910        if ((started > 0) && ((opts & 2) == 0))
1911                grspw_tx_schedule_send(dma);
1912
1913        /* Unlock DMA channel */
1914        rtems_semaphore_release(dma->sem_txdma);
1915
1916        return (~started) & 1; /* signal DMA has been stopped */
1917}
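
/* Usage sketch (illustrative only, not part of the driver): reclaim the
 * buffers of all packets on the SENT queue. TXPKT_FLAG_TX tells whether a
 * packet was actually transmitted, which matters after grspw_dma_stop() has
 * flushed untransmitted packets onto the SENT queue as well.
 */
#if 0
static void example_tx_reclaim_all(void *dchan)
{
        struct grspw_list lst;
        struct grspw_pkt *pkt;
        int count = -1; /* -1: take all packets on the SENT queue */

        grspw_list_clr(&lst);
        if (grspw_dma_tx_reclaim(dchan, 0, &lst, &count) < 0)
                return; /* locking error */

        for (pkt = lst.head; count > 0; pkt = pkt->next, count--) {
                if (pkt->flags & TXPKT_FLAG_TX) {
                        /* transmitted: recycle pkt and its buffers */
                } else {
                        /* never transmitted (DMA was stopped) */
                }
        }
}
#endif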
1918
1919void grspw_dma_tx_count(void *c, int *send, int *sched, int *sent, int *hw)
1920{
1921        struct grspw_dma_priv *dma = c;
1922        int sched_cnt, diff;
1923        unsigned int hwbd;
1924        struct grspw_txbd *tailbd;
1925
1926        /* Take device lock - Wait until we get semaphore.
1927         * The lock is taken so that the counters are in sync with each other
1928         * and that DMA descriptor table and tx_ring_tail is not being updated
1929         * during HW counter processing in this function.
1930         */
1931        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1932            != RTEMS_SUCCESSFUL)
1933                return;
1934
1935        if (send)
1936                *send = dma->send_cnt;
1937        sched_cnt = dma->tx_sched_cnt;
1938        if (sched)
1939                *sched = sched_cnt;
1940        if (sent)
1941                *sent = dma->sent_cnt;
1942        if (hw) {
1943                /* Calculate number of descriptors (processed by HW) between
1944                 * HW pointer and oldest SW pointer.
1945                 */
1946                hwbd = REG_READ(&dma->regs->txdesc);
1947                tailbd = dma->tx_ring_tail->bd;
1948                diff = ((hwbd - (unsigned int)tailbd) / GRSPW_TXBD_SIZE) &
1949                        (GRSPW_TXBD_NR - 1);
1950                /* Handle special case when HW and SW pointers are equal
1951                 * because all TX descriptors have been processed by HW.
1952                 */
1953                if ((diff == 0) && (sched_cnt == GRSPW_TXBD_NR) &&
1954                    ((BD_READ(&tailbd->ctrl) & GRSPW_TXBD_EN) == 0)) {
1955                        diff = GRSPW_TXBD_NR;
1956                }
1957                *hw = diff;
1958        }
1959
1960        /* Unlock DMA channel */
1961        rtems_semaphore_release(dma->sem_txdma);
1962}
1963
1964static inline int grspw_tx_wait_eval(struct grspw_dma_priv *dma)
1965{
1966        int send_val, sent_val;
1967
1968        if (dma->tx_wait.send_cnt >= (dma->send_cnt + dma->tx_sched_cnt))
1969                send_val = 1;
1970        else
1971                send_val = 0;
1972
1973        if (dma->tx_wait.sent_cnt <= dma->sent_cnt)
1974                sent_val = 1;
1975        else
1976                sent_val = 0;
1977
1978        /* AND or OR ? */
1979        if (dma->tx_wait.op == 0)
1980                return send_val & sent_val; /* AND */
1981        else
1982                return send_val | sent_val; /* OR */
1983}
1984
1985/* Block until the condition "send_cnt or fewer packets are queued in the
1986 * Send and Scheduled queues" op (AND or OR) "sent_cnt or more packets have
1987 * been sent" (Sent queue) is met.
1988 * If a link error occurs and stop-on-link-error is configured, this
1989 * function also returns to the caller.
1990 */
1991int grspw_dma_tx_wait(void *c, int send_cnt, int op, int sent_cnt, int timeout)
1992{
1993        struct grspw_dma_priv *dma = c;
1994        int ret, rc, initialized = 0;
1995
1996        if (timeout == 0)
1997                timeout = RTEMS_NO_TIMEOUT;
1998
1999check_condition:
2000
2001        /* Take DMA channel lock */
2002        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2003            != RTEMS_SUCCESSFUL)
2004                return -1;
2005
2006        /* Check that no other thread is waiting; this driver only supports
2007         * one waiter at a time.
2008         */
2009        if (initialized == 0 && dma->tx_wait.waiting) {
2010                ret = 3;
2011                goto out_release;
2012        }
2013
2014        /* Stop if link error or similar (DMA stopped), abort */
2015        if (dma->started == 0) {
2016                ret = 1;
2017                goto out_release;
2018        }
2019
2020        /* Set up Condition */
2021        dma->tx_wait.send_cnt = send_cnt;
2022        dma->tx_wait.op = op;
2023        dma->tx_wait.sent_cnt = sent_cnt;
2024
2025        if (grspw_tx_wait_eval(dma) == 0) {
2026                /* Prepare Wait */
2027                initialized = 1;
2028                dma->tx_wait.waiting = 1;
2029
2030                /* Release DMA channel lock */
2031                rtems_semaphore_release(dma->sem_txdma);
2032
2033                /* Try to take the Wait lock. If this fails, the link may
2034                 * have gone down or the user stopped this DMA channel.
2035                 */
2036                rc = rtems_semaphore_obtain(dma->tx_wait.sem_wait, RTEMS_WAIT,
2037                                                timeout);
2038                if (rc == RTEMS_TIMEOUT) {
2039                        ret = 2;
2040                        goto out;
2041                } else if (rc == RTEMS_UNSATISFIED ||
2042                           rc == RTEMS_OBJECT_WAS_DELETED) {
2043                        ret = 1; /* sem was flushed/deleted, means DMA stop */
2044                        goto out;
2045                } else if (rc != RTEMS_SUCCESSFUL) {
2046                        /* Unknown Error */
2047                        ret = -1;
2048                        goto out;
2049                } else if (dma->started == 0) {
2050                        ret = 1;
2051                        goto out;
2052                }
2053
2054                /* Check condition once more */
2055                goto check_condition;
2056        }
2057
2058        ret = 0;
2059
2060out_release:
2061        /* Unlock DMA channel */
2062        rtems_semaphore_release(dma->sem_txdma);
2063
2064out:
2065        if (initialized)
2066                dma->tx_wait.waiting = 0;
2067        return ret;
2068}
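
/* Usage sketch (illustrative only, not part of the driver): block until the
 * TX queues have drained. With send_cnt=0, op=0 (AND) and sent_cnt=0 the
 * condition reads "0 or fewer packets left in SEND+SCHED AND 0 or more
 * packets in SENT", i.e. everything queued has been processed.
 */
#if 0
static int example_tx_drain(void *dchan)
{
        /* timeout=0 means wait forever; returns 0 when the condition is
         * met, 1 if DMA was stopped, 2 on timeout, 3 if another waiter
         * exists and -1 on error.
         */
        return grspw_dma_tx_wait(dchan, 0, 0, 0, 0);
}
#endif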
2069
2070int grspw_dma_rx_recv(void *c, int opts, struct grspw_list *pkts, int *count)
2071{
2072        struct grspw_dma_priv *dma = c;
2073        struct grspw_pkt *pkt, *lastpkt;
2074        int cnt, started;
2075
2076        /* Take DMA channel lock */
2077        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2078            != RTEMS_SUCCESSFUL)
2079                return -1;
2080
2081        /* 1. Move Scheduled packets to RECV List (SCHED->RECV) */
2082        started = dma->started;
2083        if (((opts & 1) == 0) && (started > 0))
2084                grspw_rx_process_scheduled(dma);
2085
2086        /* 2. Move all/count RECV packets to the caller's list (RECV->USER) */
2087        if (pkts) {
2088                if ((count == NULL) || (*count == -1) ||
2089                    (*count >= dma->recv_cnt)) {
2090                        /* Move all Received packets */
2091                        *pkts = dma->recv;
2092                        grspw_list_clr(&dma->recv);
2093                        if (count)
2094                                *count = dma->recv_cnt;
2095                        dma->recv_cnt = 0;
2096                } else {
2097                        /* Move a number of RECV Packets */
2098                        pkts->head = pkt = lastpkt = dma->recv.head;
2099                        cnt = 0;
2100                        while (cnt < *count) {
2101                                lastpkt = pkt;
2102                                pkt = pkt->next;
2103                                cnt++;
2104                        }
2105                        if (cnt > 0) {
2106                                pkts->tail = lastpkt;
2107                                grspw_list_remove_head_list(&dma->recv, pkts);
2108                                dma->recv_cnt -= cnt;
2109                        } else {
2110                                grspw_list_clr(pkts);
2111                        }
2112                }
2113        } else if (count) {
2114                *count = 0;
2115        }
2116
2117        /* 3. Schedule as many free packet buffers as possible (READY->SCHED) */
2118        if (((opts & 2) == 0) && (started > 0))
2119                grspw_rx_schedule_ready(dma);
2120
2121        /* Unlock DMA channel */
2122        rtems_semaphore_release(dma->sem_rxdma);
2123
2124        return (~started) & 1;
2125}
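
/* Usage sketch (illustrative only, not part of the driver): fetch all
 * received packets and inspect them. RXPKT_FLAG_RX marks packets that hold
 * received data; dlen is set to the received length in bytes.
 */
#if 0
static void example_rx_recv_all(void *dchan)
{
        struct grspw_list lst;
        struct grspw_pkt *pkt;
        int count = -1; /* -1: take all packets on the RECV queue */

        grspw_list_clr(&lst);
        if (grspw_dma_rx_recv(dchan, 0, &lst, &count) < 0)
                return; /* locking error */

        for (pkt = lst.head; count > 0; pkt = pkt->next, count--) {
                if (pkt->flags & RXPKT_FLAG_RX) {
                        /* process pkt->data, pkt->dlen bytes */
                }
        }
}
#endif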
2126
2127int grspw_dma_rx_prepare(void *c, int opts, struct grspw_list *pkts, int count)
2128{
2129        struct grspw_dma_priv *dma = c;
2130        int ret;
2131
2132        /* Take DMA channel lock */
2133        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2134            != RTEMS_SUCCESSFUL)
2135                return -1;
2136
2137        if (dma->started == 0) {
2138                ret = 1;
2139                goto out;
2140        }
2141
2142        /* 1. Move Received packets to RECV List (SCHED->RECV) */
2143        if ((opts & 1) == 0)
2144                grspw_rx_process_scheduled(dma);
2145
2146        /* 2. Add the "free/ready" packet buffers to the READY List (USER->READY) */
2147        if (pkts && (count > 0)) {
2148                grspw_list_append_list(&dma->ready, pkts);
2149                dma->ready_cnt += count;
2150                if (dma->stats.ready_cnt_max < dma->ready_cnt)
2151                        dma->stats.ready_cnt_max = dma->ready_cnt;
2152        }
2153
2154        /* 3. Schedule as many packets as possible (READY->SCHED) */
2155        if ((opts & 2) == 0)
2156                grspw_rx_schedule_ready(dma);
2157
2158        ret = 0;
2159out:
2160        /* Unlock DMA channel */
2161        rtems_semaphore_release(dma->sem_rxdma);
2162
2163        return ret;
2164}
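
/* Usage sketch (illustrative only, not part of the driver): hand a chain of
 * empty packet buffers to the driver so that the receiver has descriptors to
 * fill. The pkts[]/bufs[] arrays are assumptions; each buffer is assumed to
 * hold at least cfg.rxmaxlen bytes.
 */
#if 0
static int example_rx_prepare(void *dchan, struct grspw_pkt pkts[],
                              void *bufs[], int cnt)
{
        struct grspw_list lst;
        int i;

        for (i = 0; i < cnt; i++) {
                pkts[i].flags = 0;      /* no address translation needed */
                pkts[i].data = bufs[i]; /* filled in by the driver/HW */
                pkts[i].next = (i + 1 < cnt) ? &pkts[i + 1] : NULL;
        }
        lst.head = &pkts[0];
        lst.tail = &pkts[cnt - 1];

        /* opts=0: also collect received packets and enable descriptors */
        return grspw_dma_rx_prepare(dchan, 0, &lst, cnt);
}
#endif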
2165
2166void grspw_dma_rx_count(void *c, int *ready, int *sched, int *recv, int *hw)
2167{
2168        struct grspw_dma_priv *dma = c;
2169        int sched_cnt, diff;
2170        unsigned int hwbd;
2171        struct grspw_rxbd *tailbd;
2172
2173        /* Take device lock - Wait until we get semaphore.
2174         * The lock is taken so that the counters are in sync with each other
2175         * and that DMA descriptor table and rx_ring_tail is not being updated
2176         * during HW counter processing in this function.
2177         */
2178        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2179            != RTEMS_SUCCESSFUL)
2180                return;
2181
2182        if (ready)
2183                *ready = dma->ready_cnt;
2184        sched_cnt = dma->rx_sched_cnt;
2185        if (sched)
2186                *sched = sched_cnt;
2187        if (recv)
2188                *recv = dma->recv_cnt;
2189        if (hw) {
2190                /* Calculate number of descriptors (processed by HW) between
2191                 * HW pointer and oldest SW pointer.
2192                 */
2193                hwbd = REG_READ(&dma->regs->rxdesc);
2194                tailbd = dma->rx_ring_tail->bd;
2195                diff = ((hwbd - (unsigned int)tailbd) / GRSPW_RXBD_SIZE) &
2196                        (GRSPW_RXBD_NR - 1);
2197                /* Handle special case when HW and SW pointers are equal
2198                 * because all RX descriptors have been processed by HW.
2199                 */
2200                if ((diff == 0) && (sched_cnt == GRSPW_RXBD_NR) &&
2201                    ((BD_READ(&tailbd->ctrl) & GRSPW_RXBD_EN) == 0)) {
2202                        diff = GRSPW_RXBD_NR;
2203                }
2204                *hw = diff;
2205        }
2206
2207        /* Unlock DMA channel */
2208        rtems_semaphore_release(dma->sem_rxdma);
2209}
2210
2211static inline int grspw_rx_wait_eval(struct grspw_dma_priv *dma)
2212{
2213        int ready_val, recv_val;
2214
2215        if (dma->rx_wait.ready_cnt >= (dma->ready_cnt + dma->rx_sched_cnt))
2216                ready_val = 1;
2217        else
2218                ready_val = 0;
2219
2220        if (dma->rx_wait.recv_cnt <= dma->recv_cnt)
2221                recv_val = 1;
2222        else
2223                recv_val = 0;
2224
2225        /* AND or OR ? */
2226        if (dma->rx_wait.op == 0)
2227                return ready_val & recv_val; /* AND */
2228        else
2229                return ready_val | recv_val; /* OR */
2230}
2231
2232/* Block until the condition "recv_cnt or more packets are queued in the RECV
2233 * queue" op (AND or OR) "ready_cnt or fewer packet buffers are available in
2234 * the Ready and Scheduled queues" is met.
2235 * If a link error occurs and stop-on-link-error is configured, this function
2236 * also returns to the caller, however with an error.
2237 */
2238int grspw_dma_rx_wait(void *c, int recv_cnt, int op, int ready_cnt, int timeout)
2239{
2240        struct grspw_dma_priv *dma = c;
2241        int ret, rc, initialized = 0;
2242
2243        if (timeout == 0)
2244                timeout = RTEMS_NO_TIMEOUT;
2245
2246check_condition:
2247
2248        /* Take DMA channel lock */
2249        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2250            != RTEMS_SUCCESSFUL)
2251                return -1;
2252
2253        /* Check that no other thread is waiting; this driver only supports
2254         * one waiter at a time.
2255         */
2256        if (initialized == 0 && dma->rx_wait.waiting) {
2257                ret = 3;
2258                goto out_release;
2259        }
2260
2261        /* Stop if link error or similar (DMA stopped), abort */
2262        if (dma->started == 0) {
2263                ret = 1;
2264                goto out_release;
2265        }
2266
2267        /* Set up Condition */
2268        dma->rx_wait.recv_cnt = recv_cnt;
2269        dma->rx_wait.op = op;
2270        dma->rx_wait.ready_cnt = ready_cnt;
2271
2272        if (grspw_rx_wait_eval(dma) == 0) {
2273                /* Prepare Wait */
2274                initialized = 1;
2275                dma->rx_wait.waiting = 1;
2276
2277                /* Release channel lock */
2278                rtems_semaphore_release(dma->sem_rxdma);
2279
2280                /* Try to take the Wait lock. If this fails, the link may
2281                 * have gone down or the user stopped this DMA channel.
2282                 */
2283                rc = rtems_semaphore_obtain(dma->rx_wait.sem_wait, RTEMS_WAIT,
2284                                           timeout);
2285                if (rc == RTEMS_TIMEOUT) {
2286                        ret = 2;
2287                        goto out;
2288                } else if (rc == RTEMS_UNSATISFIED ||
2289                           rc == RTEMS_OBJECT_WAS_DELETED) {
2290                        ret = 1; /* sem was flushed/deleted, means DMA stop */
2291                        goto out;
2292                } else if (rc != RTEMS_SUCCESSFUL) {
2293                        /* Unknown Error */
2294                        ret = -1;
2295                        goto out;
2296                } else if (dma->started == 0) {
2297                        ret = 1;
2298                        goto out;
2299                }
2300
2301                /* Check condition once more */
2302                goto check_condition;
2303        }
2304
2305        ret = 0;
2306
2307out_release:
2308        /* Unlock DMA channel */
2309        rtems_semaphore_release(dma->sem_rxdma);
2310
2311out:
2312        if (initialized)
2313                dma->rx_wait.waiting = 0;
2314        return ret;
2315}
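
/* Usage sketch (illustrative only, not part of the driver): block until at
 * least 10 packets are on the RECV queue OR at most 4 buffers remain in
 * READY+SCHED, with a 100-tick timeout. The numbers are arbitrary examples.
 */
#if 0
static int example_rx_wait(void *dchan)
{
        /* op=1 selects OR; returns 2 on timeout */
        return grspw_dma_rx_wait(dchan, 10, 1, 4, 100);
}
#endif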
2316
2317int grspw_dma_config(void *c, struct grspw_dma_config *cfg)
2318{
2319        struct grspw_dma_priv *dma = c;
2320
2321        if (dma->started || !cfg)
2322                return -1;
2323
2324        if (cfg->flags & ~(DMAFLAG_MASK | DMAFLAG2_MASK))
2325                return -1;
2326
2327        /* Update Configuration */
2328        memcpy(&dma->cfg, cfg, sizeof(*cfg));
2329
2330        return 0;
2331}
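
/* Usage sketch (illustrative only, not part of the driver): adjust the DMA
 * channel configuration before grspw_dma_start(). The values are examples,
 * not recommendations.
 */
#if 0
static int example_dma_config(void *dchan)
{
        struct grspw_dma_config cfg;

        grspw_dma_config_read(dchan, &cfg); /* start from current config */
        cfg.rxmaxlen = 1024;    /* maximum RX packet length in bytes */
        cfg.rx_irq_en_cnt = 8;  /* IRQ on every 8th RX descriptor */
        cfg.tx_irq_en_cnt = 8;  /* IRQ on every 8th TX descriptor */
        cfg.flags = DMAFLAG_NO_SPILL;

        /* fails (-1) if the channel is started or flags are invalid */
        return grspw_dma_config(dchan, &cfg);
}
#endif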
2332
2333void grspw_dma_config_read(void *c, struct grspw_dma_config *cfg)
2334{
2335        struct grspw_dma_priv *dma = c;
2336
2337        /* Copy Current Configuration */
2338        memcpy(cfg, &dma->cfg, sizeof(*cfg));
2339}
2340
2341void grspw_dma_stats_read(void *c, struct grspw_dma_stats *sts)
2342{
2343        struct grspw_dma_priv *dma = c;
2344
2345        memcpy(sts, &dma->stats, sizeof(dma->stats));
2346}
2347
2348void grspw_dma_stats_clr(void *c)
2349{
2350        struct grspw_dma_priv *dma = c;
2351
2352        /* Clear most of the statistics */
2353        memset(&dma->stats, 0, sizeof(dma->stats));
2354
2355        /* Init proper default values so that comparisons will work the
2356         * first time.
2357         */
2358        dma->stats.send_cnt_min = 0x3fffffff;
2359        dma->stats.tx_sched_cnt_min = 0x3fffffff;
2360        dma->stats.ready_cnt_min = 0x3fffffff;
2361        dma->stats.rx_sched_cnt_min = 0x3fffffff;
2362}
2363
2364int grspw_dma_start(void *c)
2365{
2366        struct grspw_dma_priv *dma = c;
2367        struct grspw_dma_regs *dregs = dma->regs;
2368        unsigned int ctrl;
2369        IRQFLAGS_TYPE irqflags;
2370
2371        if (dma->started)
2372                return 0;
2373
2374        /* Initialize Software Structures:
2375         *  - Clear all Queues
2376         *  - init BD ring
2377         *  - init IRQ counter
2378         *  - clear statistics counters
2379         *  - init wait structures and semaphores
2380         */
2381        grspw_dma_reset(dma);
2382
2383        /* RX descriptors (RD) and TX are not enabled until the user fills
2384         * the READY and SEND queues with SpaceWire packet buffers. So we
2385         * do not have to worry about IRQs for this channel just yet.
2386         * However, other DMA channels may be active.
2387         *
2388         * Some functionality that is not changed during started mode is set up
2389         * once and for all here:
2390         *
2391         *   - RX MAX Packet length
2392         *   - TX Descriptor base address to first BD in TX ring (not enabled)
2393         *   - RX Descriptor base address to first BD in RX ring (not enabled)
2394         *   - IRQs (TX DMA, RX DMA, DMA ERROR)
2395         *   - Strip PID
2396         *   - Strip Address
2397         *   - No Spill
2398         *   - Receiver Enable
2399         *   - disable on link error (LE)
2400         *
2401         * Note that the address register and the address enable bit in DMACTRL
2402         * register must be left untouched, they are configured on a GRSPW
2403         * core level.
2404         *
2405         * Note that the receiver is enabled here, but since descriptors are
2406         * not enabled the GRSPW core may stop/pause RX (if NS bit set) until
2407         * descriptors are enabled or it may ignore RX packets (NS=0) until
2408         * descriptors are enabled (writing RD bit).
2409         */
2410        REG_WRITE(&dregs->txdesc, dma->tx_bds_hwa);
2411        REG_WRITE(&dregs->rxdesc, dma->rx_bds_hwa);
2412
2413        /* MAX Packet length */
2414        REG_WRITE(&dma->regs->rxmax, dma->cfg.rxmaxlen);
2415
2416        ctrl =  GRSPW_DMACTRL_AI | GRSPW_DMACTRL_PS | GRSPW_DMACTRL_PR |
2417                GRSPW_DMACTRL_TA | GRSPW_DMACTRL_RA | GRSPW_DMACTRL_RE |
2418                (dma->cfg.flags & DMAFLAG_MASK) << GRSPW_DMACTRL_NS_BIT;
2419        if (dma->core->dis_link_on_err & LINKOPTS_DIS_ONERR)
2420                ctrl |= GRSPW_DMACTRL_LE;
2421        if (dma->cfg.rx_irq_en_cnt != 0 || dma->cfg.flags & DMAFLAG2_RXIE)
2422                ctrl |= GRSPW_DMACTRL_RI;
2423        if (dma->cfg.tx_irq_en_cnt != 0 || dma->cfg.flags & DMAFLAG2_TXIE)
2424                ctrl |= GRSPW_DMACTRL_TI;
2425        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
2426        ctrl |= REG_READ(&dma->regs->ctrl) & GRSPW_DMACTRL_EN;
2427        REG_WRITE(&dregs->ctrl, ctrl);
2428        SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2429
2430        dma->started = 1; /* open up other DMA interfaces */
2431
2432        return 0;
2433}
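
/* Usage sketch (illustrative only, not part of the driver): the bring-up
 * order implied by this API is open -> config -> start, after which
 * rx_prepare()/tx_send() operate on a live channel.
 */
#if 0
static void *example_dma_bringup(void *dev)
{
        void *dchan;

        dchan = grspw_dma_open(dev, 0);
        if (dchan == NULL)
                return NULL;

        /* configuration is only accepted while the channel is stopped */

        grspw_dma_start(dchan); /* always returns 0 */

        return dchan;
}
#endif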
2434
2435STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma)
2436{
2437        IRQFLAGS_TYPE irqflags;
2438
2439        if (dma->started == 0)
2440                return;
2441        dma->started = 0;
2442
2443        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
2444        grspw_hw_dma_stop(dma);
2445        SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2446
2447        /* From here on, no more packets will be sent. However, there may
2448         * still exist scheduled packets that have been
2449         * sent, and packets in the SEND Queue waiting for free
2450         * descriptors. All packets are moved to the SENT Queue
2451         * so that the user may get the buffers back; the user
2452         * must check TXPKT_FLAG_TX in order to determine
2453         * if a packet was sent or not.
2454         */
2455
2456        /* Retrieve all sent packets from the scheduled queue */
2457        grspw_tx_process_scheduled(dma);
2458
2459        /* Move un-sent packets in the SEND and SCHED queues to the
2460         * SENT Queue (they are never marked sent).
2461         */
2462        if (!grspw_list_is_empty(&dma->tx_sched)) {
2463                grspw_list_append_list(&dma->sent, &dma->tx_sched);
2464                grspw_list_clr(&dma->tx_sched);
2465                dma->sent_cnt += dma->tx_sched_cnt;
2466                dma->tx_sched_cnt = 0;
2467        }
2468        if (!grspw_list_is_empty(&dma->send)) {
2469                grspw_list_append_list(&dma->sent, &dma->send);
2470                grspw_list_clr(&dma->send);
2471                dma->sent_cnt += dma->send_cnt;
2472                dma->send_cnt = 0;
2473        }
2474
2475        /* Similar for RX */
2476        grspw_rx_process_scheduled(dma);
2477        if (!grspw_list_is_empty(&dma->rx_sched)) {
2478                grspw_list_append_list(&dma->recv, &dma->rx_sched);
2479                grspw_list_clr(&dma->rx_sched);
2480                dma->recv_cnt += dma->rx_sched_cnt;
2481                dma->rx_sched_cnt = 0;
2482        }
2483        if (!grspw_list_is_empty(&dma->ready)) {
2484                grspw_list_append_list(&dma->recv, &dma->ready);
2485                grspw_list_clr(&dma->ready);
2486                dma->recv_cnt += dma->ready_cnt;
2487                dma->ready_cnt = 0;
2488        }
2489
2490        /* Throw out blocked threads */
2491        rtems_semaphore_flush(dma->rx_wait.sem_wait);
2492        rtems_semaphore_flush(dma->tx_wait.sem_wait);
2493}
2494
2495void grspw_dma_stop(void *c)
2496{
2497        struct grspw_dma_priv *dma = c;
2498
2499        /* If DMA channel is closed we should not access the semaphore */
2500        if (!dma->open)
2501                return;
2502
2503        /* Take DMA Channel lock */
2504        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2505            != RTEMS_SUCCESSFUL)
2506                return;
2507        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2508            != RTEMS_SUCCESSFUL) {
2509                rtems_semaphore_release(dma->sem_rxdma);
2510                return;
2511        }
2512
2513        grspw_dma_stop_locked(dma);
2514
2515        rtems_semaphore_release(dma->sem_txdma);
2516        rtems_semaphore_release(dma->sem_rxdma);
2517}
2518
2519/* Do general work, invoked indirectly from ISR */
2520static void grspw_work_shutdown_func(struct grspw_priv *priv)
2521{
2522        int i;
2523
2524        /* Link is down for some reason, and the user has configured
2525         * that we stop all (open) DMA channels and throw out all their
2526         * blocked threads.
2527         */
2528        for (i=0; i<priv->hwsup.ndma_chans; i++)
2529                grspw_dma_stop(&priv->dma[i]);
2530        grspw_hw_stop(priv);
2531}
2532
2533/* Do DMA work on one channel, invoked indirectly from ISR */
2534static void grspw_work_dma_func(struct grspw_dma_priv *dma)
2535{
2536        int tx_cond_true, rx_cond_true;
2537        unsigned int ctrl;
2538        IRQFLAGS_TYPE irqflags;
2539
2540        /* If DMA channel is closed we should not access the semaphore */
2541        if (dma->open == 0)
2542                return;
2543
2544        rx_cond_true = 0;
2545        tx_cond_true = 0;
2546        dma->stats.irq_cnt++;
2547
2548        /* Look at the cause we were woken up and clear the source */
2549        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
2550        if (dma->started == 0) {
2551                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2552                return;
2553        }
2554        ctrl = REG_READ(&dma->regs->ctrl);
2555
2556        /* Read/Write DMA error ? */
2557        if (ctrl & GRSPW_DMA_STATUS_ERROR) {
2558                /* DMA error -> Stop DMA channel (both RX and TX) */
2559                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2560                grspw_dma_stop(dma);
2561        } else if (ctrl & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS)) {
2562                /* DMA has finished a TX/RX packet */
2563                ctrl &= ~GRSPW_DMACTRL_AT;
2564                if (dma->cfg.rx_irq_en_cnt != 0 ||
2565                    (dma->cfg.flags & DMAFLAG2_RXIE))
2566                        ctrl |= GRSPW_DMACTRL_RI;
2567                if (dma->cfg.tx_irq_en_cnt != 0 ||
2568                    (dma->cfg.flags & DMAFLAG2_TXIE))
2569                        ctrl |= GRSPW_DMACTRL_TI;
2570                REG_WRITE(&dma->regs->ctrl, ctrl);
2571                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2572                if (ctrl & GRSPW_DMACTRL_PR) {
2573                        /* Do RX Work */
2574
2575                        /* Take DMA channel RX lock */
2576                        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2577                            != RTEMS_SUCCESSFUL)
2578                                return;
2579
2580                        dma->stats.rx_work_cnt++;
2581                        grspw_rx_process_scheduled(dma);
2582                        if (dma->started) {
2583                                dma->stats.rx_work_enabled +=
2584                                        grspw_rx_schedule_ready(dma);
2585                                /* Check to see if condition for waking blocked
2586                                 * USER task is fulfilled.
2587                                 */
2588                                if (dma->rx_wait.waiting)
2589                                        rx_cond_true = grspw_rx_wait_eval(dma);
2590                        }
2591                        rtems_semaphore_release(dma->sem_rxdma);
2592                }
2593                if (ctrl & GRSPW_DMACTRL_PS) {
2594                        /* Do TX Work */
2595
2596                        /* Take DMA channel TX lock */
2597                        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2598                            != RTEMS_SUCCESSFUL)
2599                                return;
2600
2601                        dma->stats.tx_work_cnt++;
2602                        grspw_tx_process_scheduled(dma);
2603                        if (dma->started) {
2604                                dma->stats.tx_work_enabled +=
2605                                        grspw_tx_schedule_send(dma);
2606                                /* Check to see if condition for waking blocked
2607                                 * USER task is fulfilled.
2608                                 */
2609                                if (dma->tx_wait.waiting)
2610                                        tx_cond_true = grspw_tx_wait_eval(dma);
2611                        }
2612                        rtems_semaphore_release(dma->sem_txdma);
2613                }
2614        } else
2615                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2616
2617        if (rx_cond_true)
2618                rtems_semaphore_release(dma->rx_wait.sem_wait);
2619
2620        if (tx_cond_true)
2621                rtems_semaphore_release(dma->tx_wait.sem_wait);
2622}
2623
2624/* The work task receives work from the work message queue posted by
2625 * the ISR.
2626 */
2627static void grspw_work_func(rtems_task_argument unused)
2628{
2629        rtems_status_code status;
2630        unsigned int message;
2631        size_t size;
2632        struct grspw_priv *priv;
2633        int i;
2634
2635        while (grspw_task_stop == 0) {
2636                /* Wait for ISR to schedule work */
2637                status = rtems_message_queue_receive(
2638                        grspw_work_queue, &message,
2639                        &size, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
2640                if (status != RTEMS_SUCCESSFUL)
2641                        break;
2642
2643                /* Handle work */
2644                priv = priv_tab[message >> WORK_CORE_BIT];
2645                if (message & WORK_SHUTDOWN)
2646                        grspw_work_shutdown_func(priv);
2647                else if (message & WORK_DMA_MASK) {
2648                        for (i = 0; i < 4; i++) {
2649                                if (message & WORK_DMA(i))
2650                                        grspw_work_dma_func(&priv->dma[i]);
2651                        }
2652                }
2653        }
2654        rtems_task_delete(RTEMS_SELF);
2655}
2656
2657STATIC void grspw_isr(void *data)
2658{
2659        struct grspw_priv *priv = data;
2660        unsigned int dma_stat, stat, stat_clrmsk, ctrl, icctrl, timecode;
2661        unsigned int rxirq, rxack, intto;
2662        int i, handled = 0, message = WORK_NONE, call_user_int_isr;
2663#ifdef RTEMS_HAS_SMP
2664        IRQFLAGS_TYPE irqflags;
2665#endif
2666
2667        /* Get Status from Hardware */
2668        stat = REG_READ(&priv->regs->status);
2669        stat_clrmsk = stat & (GRSPW_STS_TO | GRSPW_STAT_ERROR) &
2670                        (GRSPW_STS_TO | priv->stscfg);
2671
2672        /* Make sure to put the timecode handling first in order to get the
2673         * smallest possible interrupt latency
2674         */
2675        if ((stat & GRSPW_STS_TO) && (priv->tcisr != NULL)) {
2676                ctrl = REG_READ(&priv->regs->ctrl);
2677                if (ctrl & GRSPW_CTRL_TQ) {
2678                        /* Timecode received. Let custom function handle this */
2679                        timecode = REG_READ(&priv->regs->time) &
2680                                        (GRSPW_TIME_CTRL | GRSPW_TIME_TCNT);
2681                        (priv->tcisr)(priv->tcisr_arg, timecode);
2682                }
2683        }
2684
2685        /* Get Interrupt status from hardware */
2686        icctrl = REG_READ(&priv->regs->icctrl);
2687        if ((icctrl & GRSPW_ICCTRL_IRQSRC_MASK) && (priv->icisr != NULL)) {
2688                call_user_int_isr = 0;
2689                rxirq = rxack = intto = 0;
2690
2691                if ((icctrl & GRSPW_ICCTRL_IQ) &&
2692                    (rxirq = REG_READ(&priv->regs->icrx)) != 0)
2693                        call_user_int_isr = 1;
2694
2695                if ((icctrl & GRSPW_ICCTRL_AQ) &&
2696                    (rxack = REG_READ(&priv->regs->icack)) != 0)
2697                        call_user_int_isr = 1;
2698
2699                if ((icctrl & GRSPW_ICCTRL_TQ) &&
2700                    (intto = REG_READ(&priv->regs->ictimeout)) != 0)
2701                        call_user_int_isr = 1;
2702
2703                /* Let custom functions handle this POTENTIAL SPW interrupt. The
2704                 * user function is called even if no such IRQ has happened!
2705                 * User must make sure to clear all interrupts that have been
2706                 * handled from the three registers by writing a one.
2707                 */
2708                if (call_user_int_isr)
2709                        priv->icisr(priv->icisr_arg, rxirq, rxack, intto);
2710        }
2711
2712        /* An Error occurred? */
2713        if (stat & GRSPW_STAT_ERROR) {
2714                /* Wake Global WorkQ */
2715                handled = 1;
2716
2717                if (stat & GRSPW_STS_EE)
2718                        priv->stats.err_eeop++;
2719
2720                if (stat & GRSPW_STS_IA)
2721                        priv->stats.err_addr++;
2722
2723                if (stat & GRSPW_STS_PE)
2724                        priv->stats.err_parity++;
2725
2726                if (stat & GRSPW_STS_DE)
2727                        priv->stats.err_disconnect++;
2728
2729                if (stat & GRSPW_STS_ER)
2730                        priv->stats.err_escape++;
2731
2732                if (stat & GRSPW_STS_CE)
2733                        priv->stats.err_credit++;
2734
2735                if (stat & GRSPW_STS_WE)
2736                        priv->stats.err_wsync++;
2737
2738                if ((priv->dis_link_on_err >> 16) & stat) {
2739                        /* Disable the link, no more transfers are expected
2740                         * on any DMA channel.
2741                         */
2742                        SPIN_LOCK(&priv->devlock, irqflags);
2743                        ctrl = REG_READ(&priv->regs->ctrl);
2744                        REG_WRITE(&priv->regs->ctrl, GRSPW_CTRL_LD |
2745                                (ctrl & ~(GRSPW_CTRL_IE|GRSPW_CTRL_LS)));
2746                        SPIN_UNLOCK(&priv->devlock, irqflags);
2747                        /* Signal to work-thread to stop DMA and clean up */
2748                        message = WORK_SHUTDOWN;
2749                }
2750        }
2751
2752        /* Clear Status Flags */
2753        if (stat_clrmsk) {
2754                handled = 1;
2755                REG_WRITE(&priv->regs->status, stat_clrmsk);
2756        }
2757
2758        /* A DMA transfer or Error occurred? In that case disable more IRQs
2759         * from the DMA channel, then invoke the workQ.
2760         *
2761         * Note that the GI interrupt flag may not be available on older
2762         * designs (it was added together with multiple DMA channel support).
2763         */
2764        SPIN_LOCK(&priv->devlock, irqflags);
2765        for (i=0; i<priv->hwsup.ndma_chans; i++) {
2766                dma_stat = REG_READ(&priv->regs->dma[i].ctrl);
2767                /* Check for errors and whether packets have been sent or
2768                 * received, if the respective IRQs are enabled
2769                 */
2770#ifdef HW_WITH_GI
2771                if ( dma_stat & (GRSPW_DMA_STATUS_ERROR | GRSPW_DMACTRL_GI) ) {
2772#else
2773                if ( (((dma_stat << 3) & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS))
2774                     | GRSPW_DMA_STATUS_ERROR) & dma_stat ) {
2775#endif
2776                        /* Disable Further IRQs (until enabled again)
2777                         * from this DMA channel. Let the status
2778                         * bits remain so that they can be handled by
2779                         * the work function.
2780                         */
2781                        REG_WRITE(&priv->regs->dma[i].ctrl, dma_stat &
2782                                ~(GRSPW_DMACTRL_RI|GRSPW_DMACTRL_TI|
2783                                GRSPW_DMACTRL_PR|GRSPW_DMACTRL_PS|
2784                                GRSPW_DMACTRL_RA|GRSPW_DMACTRL_TA|
2785                                GRSPW_DMACTRL_AT));
2786                        message |= WORK_DMA(i);
2787                        handled = 1;
2788                }
2789        }
2790        SPIN_UNLOCK(&priv->devlock, irqflags);
2791
2792        if (handled != 0)
2793                priv->stats.irq_cnt++;
2794
2795        /* Schedule work by sending message to work thread */
2796        if ((message != WORK_NONE) && grspw_work_queue) {
2797                message |= WORK_CORE(priv->index);
2798                stat = rtems_message_queue_send(grspw_work_queue, &message, 4);
2799                if (stat != RTEMS_SUCCESSFUL)
2800                        printk("grspw_isr(%d): message fail %d (0x%x)\n",
2801                                priv->index, stat, message);
2802        }
2803}
2804
2805STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma)
2806{
2807        unsigned int ctrl;
2808        struct grspw_dma_regs *dregs = dma->regs;
2809
2810        ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN |
2811               GRSPW_DMACTRL_SP | GRSPW_DMACTRL_SA | GRSPW_DMACTRL_NS);
2812        ctrl |= GRSPW_DMACTRL_AT;
2813        REG_WRITE(&dregs->ctrl, ctrl);
2814}
2815
2816STATIC void grspw_hw_dma_softreset(struct grspw_dma_priv *dma)
2817{
2818        unsigned int ctrl;
2819        struct grspw_dma_regs *dregs = dma->regs;
2820
2821        ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN);
2822        REG_WRITE(&dregs->ctrl, ctrl);
2823
2824        REG_WRITE(&dregs->rxmax, DEFAULT_RXMAX);
2825        REG_WRITE(&dregs->txdesc, 0);
2826        REG_WRITE(&dregs->rxdesc, 0);
2827}
2828
2829/* Hardware Action:
2830 *  - stop DMA
2831 *  - do not bring down the link (RMAP may be active)
2832 *  - RMAP settings untouched (RMAP may be active)
2833 *  - port select untouched (RMAP may be active)
2834 *  - timecodes are disabled
2835 *  - IRQ generation disabled
2836 *  - status not cleared (let user analyze it if requested later on)
2837 *  - Node address / First DMA channel's Node address
2838 *    is untouched (RMAP may be active)
2839 */
2840STATIC void grspw_hw_stop(struct grspw_priv *priv)
2841{
2842        int i;
2843        unsigned int ctrl;
2844        IRQFLAGS_TYPE irqflags;
2845
2846        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
2847
2848        for (i=0; i<priv->hwsup.ndma_chans; i++)
2849                grspw_hw_dma_stop(&priv->dma[i]);
2850
2851        ctrl = REG_READ(&priv->regs->ctrl);
2852        REG_WRITE(&priv->regs->ctrl, ctrl & (
2853                GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS |
2854                GRSPW_CTRL_RE | GRSPW_CTRL_RD |
2855                GRSPW_CTRL_NP | GRSPW_CTRL_PS));
2856
2857        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
2858}
2859
2860/* Soft reset of GRSPW core registers */
2861STATIC void grspw_hw_softreset(struct grspw_priv *priv)
2862{
2863        int i;
2864        unsigned int tmp;
2865
2866        for (i=0; i<priv->hwsup.ndma_chans; i++)
2867                grspw_hw_dma_softreset(&priv->dma[i]);
2868
2869        REG_WRITE(&priv->regs->status, 0xffffffff);
2870        REG_WRITE(&priv->regs->time, 0);
2871        /* Clear all ICCTRL bits except the valuable reset values */
2872        tmp = REG_READ(&priv->regs->icctrl);
2873        tmp &= GRSPW_ICCTRL_INUM | GRSPW_ICCTRL_BIRQ | GRSPW_ICCTRL_TXIRQ;
2874        tmp |= GRSPW_ICCTRL_ID;
2875        REG_WRITE(&priv->regs->icctrl, tmp);
2876        REG_WRITE(&priv->regs->icrx, 0xffffffff);
2877        REG_WRITE(&priv->regs->icack, 0xffffffff);
2878        REG_WRITE(&priv->regs->ictimeout, 0xffffffff);
2879}
2880
2881int grspw_dev_count(void)
2882{
2883        return grspw_count;
2884}
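
/* Illustrative sketch (not part of the driver): iterate over all GRSPW
 * cores found and open each one through the user API declared in
 * bsp/grspw_pkt.h:
 *
 *   int i, count = grspw_dev_count();
 *
 *   for (i = 0; i < count; i++) {
 *           void *dev = grspw_open(i);
 *           if (dev == NULL)
 *                   continue;
 *           ...
 *   }
 */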
2885
2886void grspw_initialize_user(void *(*devfound)(int), void (*devremove)(int,void*))
2887{
2888        int i;
2889        struct grspw_priv *priv;
2890
2891        /* Set new Device Found Handler */
2892        grspw_dev_add = devfound;
2893        grspw_dev_del = devremove;
2894
2895        if (grspw_initialized == 1 && grspw_dev_add) {
2896                /* Call callback for every previously found device */
2897                for (i=0; i<grspw_count; i++) {
2898                        priv = priv_tab[i];
2899                        if (priv)
2900                                priv->data = grspw_dev_add(i);
2901                }
2902        }
2903}
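
/* Illustrative sketch (hypothetical user code): register device-found /
 * device-removed callbacks. When the driver is already initialized,
 * my_dev_found() is called right away for every core found so far; its
 * return value is stored as the per-device user data and later handed
 * back through my_dev_removed().
 *
 *   void *my_dev_found(int index)
 *   {
 *           printk("GRSPW%d found\n", index);
 *           return NULL;
 *   }
 *
 *   void my_dev_removed(int index, void *data)
 *   {
 *           printk("GRSPW%d removed\n", index);
 *   }
 *
 *   grspw_initialize_user(my_dev_found, my_dev_removed);
 */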
2904
2905/******************* Driver manager interface ***********************/
2906
2907/* Driver prototypes */
2908static int grspw_common_init(void);
2909static int grspw2_init3(struct drvmgr_dev *dev);
2910
2911static struct drvmgr_drv_ops grspw2_ops =
2912{
2913        .init = {NULL,  NULL, grspw2_init3, NULL},
2914        .remove = NULL,
2915        .info = NULL
2916};
2917
2918static struct amba_dev_id grspw2_ids[] =
2919{
2920        {VENDOR_GAISLER, GAISLER_SPW}, /* not yet supported */
2921        {VENDOR_GAISLER, GAISLER_SPW2},
2922        {VENDOR_GAISLER, GAISLER_SPW2_DMA},
2923        {0, 0}          /* Mark end of table */
2924};
2925
2926static struct amba_drv_info grspw2_drv_info =
2927{
2928        {
2929                DRVMGR_OBJ_DRV,                 /* Driver */
2930                NULL,                           /* Next driver */
2931                NULL,                           /* Device list */
2932                DRIVER_AMBAPP_GAISLER_GRSPW2_ID,/* Driver ID */
2933                "GRSPW_PKT_DRV",                /* Driver Name */
2934                DRVMGR_BUS_TYPE_AMBAPP,         /* Bus Type */
2935                &grspw2_ops,
2936                NULL,                           /* Funcs */
2937                0,                              /* No devices yet */
2938                sizeof(struct grspw_priv),      /* Let DrvMgr alloc priv */
2939        },
2940        &grspw2_ids[0]
2941};
2942
2943void grspw2_register_drv (void)
2944{
2945        GRSPW_DBG("Registering GRSPW2 packet driver\n");
2946        drvmgr_drv_register(&grspw2_drv_info.general);
2947}
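
/* Illustrative sketch: unless the BSP's driver manager configuration
 * already pulls this driver in, an application may register it manually,
 * assuming this is done before the driver manager probes the AMBA bus:
 *
 *   grspw2_register_drv();
 */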
2948
2949static int grspw2_init3(struct drvmgr_dev *dev)
2950{
2951        struct grspw_priv *priv;
2952        struct amba_dev_info *ambadev;
2953        struct ambapp_core *pnpinfo;
2954        int i, size;
2955        unsigned int ctrl, icctrl, numi;
2956        union drvmgr_key_value *value;
2957
2958        GRSPW_DBG("GRSPW[%d] on bus %s\n", dev->minor_drv,
2959                dev->parent->dev->name);
2960
2961        if (grspw_count >= GRSPW_MAX) /* priv_tab holds GRSPW_MAX entries */
2962                return DRVMGR_ENORES;
2963
2964        priv = dev->priv;
2965        if (priv == NULL)
2966                return DRVMGR_NOMEM;
2967        priv->dev = dev;
2968
2969        /* If first device init common part of driver */
2970        if (grspw_common_init())
2971                return DRVMGR_FAIL;
2972
2973        /*** Now we take care of device initialization ***/
2974
2975        /* Get device information from AMBA PnP information */
2976        ambadev = (struct amba_dev_info *)dev->businfo;
2977        if (ambadev == NULL)
2978                return -1;
2979        pnpinfo = &ambadev->info;
2980        priv->irq = pnpinfo->irq;
2981        priv->regs = (struct grspw_regs *)pnpinfo->apb_slv->start;
2982
2983        /* Read Hardware Support from Control Register */
2984        ctrl = REG_READ(&priv->regs->ctrl);
2985        priv->hwsup.rmap = (ctrl & GRSPW_CTRL_RA) >> GRSPW_CTRL_RA_BIT;
2986        priv->hwsup.rmap_crc = (ctrl & GRSPW_CTRL_RC) >> GRSPW_CTRL_RC_BIT;
2987        priv->hwsup.rx_unalign = (ctrl & GRSPW_CTRL_RX) >> GRSPW_CTRL_RX_BIT;
2988        priv->hwsup.nports = 1 + ((ctrl & GRSPW_CTRL_PO) >> GRSPW_CTRL_PO_BIT);
2989        priv->hwsup.ndma_chans = 1 + ((ctrl & GRSPW_CTRL_NCH) >> GRSPW_CTRL_NCH_BIT);
2990        priv->hwsup.irq = ((ctrl & GRSPW_CTRL_ID) >> GRSPW_CTRL_ID_BIT);
2991        icctrl = REG_READ(&priv->regs->icctrl);
2992        numi = (icctrl & GRSPW_ICCTRL_NUMI) >> GRSPW_ICCTRL_NUMI_BIT;
2993        if (numi > 0)
2994                priv->hwsup.irq_num = 1 << (numi - 1);
2995        else
2996                priv->hwsup.irq_num = 0;
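        /* e.g. NUMI = 3 above yields hwsup.irq_num = 1 << 2 = 4 */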
2997
2998        /* Construct hardware version identification */
2999        priv->hwsup.hw_version = pnpinfo->device << 16 | pnpinfo->apb_slv->ver;
3000
3001        if ((pnpinfo->device == GAISLER_SPW2) ||
3002            (pnpinfo->device == GAISLER_SPW2_DMA)) {
3003                priv->hwsup.strip_adr = 1; /* All GRSPW2 can strip Address */
3004                priv->hwsup.strip_pid = 1; /* All GRSPW2 can strip PID */
3005        } else {
3006                /* Autodetect GRSPW1 features? */
3007                priv->hwsup.strip_adr = 0;
3008                priv->hwsup.strip_pid = 0;
3009        }
3010
3011        /* Probe the width of the SpaceWire interrupt ISR timers. All
3012         * have the same width, so only the first is probed; if no timer
3013         * is present the result will be zero.
3014         */
3015        REG_WRITE(&priv->regs->icrlpresc, 0x7fffffff);
3016        ctrl = REG_READ(&priv->regs->icrlpresc);
3017        REG_WRITE(&priv->regs->icrlpresc, 0);
3018        priv->hwsup.itmr_width = 0;
3019        while (ctrl & 1) {
3020                priv->hwsup.itmr_width++;
3021                ctrl = ctrl >> 1;
3022        }
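        /* Example: a readback of 0x00ffffff above would mean that 24 timer
         * bits are implemented.
         */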
3023
3024        /* Let the user limit the number of DMA channels on this core to
3025         * save memory; only the first nDMA channels will be available.
3026         * A configuration sketch follows below. */
3027        value = drvmgr_dev_key_get(priv->dev, "nDMA", DRVMGR_KT_INT);
3028        if (value && (value->i < priv->hwsup.ndma_chans))
3029                priv->hwsup.ndma_chans = value->i;
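
        /* Illustrative sketch (hypothetical names): limiting this core to
         * two DMA channels through a driver manager key. On LEON3 BSPs such
         * resources are typically provided by overriding the weak
         * grlib_drv_resources structure:
         *
         *   struct drvmgr_key grspw0_res[] = {
         *           {"nDMA", DRVMGR_KT_INT, {.i = 2}},
         *           DRVMGR_KEY_EMPTY
         *   };
         *
         *   struct drvmgr_bus_res grlib_drv_resources = {
         *           .next = NULL,
         *           .resource = {
         *                   {DRIVER_AMBAPP_GAISLER_GRSPW2_ID, 0, &grspw0_res[0]},
         *                   DRVMGR_RES_EMPTY
         *           }
         *   };
         */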
3030
3031        /* Allocate and init Memory for all DMA channels */
3032        size = sizeof(struct grspw_dma_priv) * priv->hwsup.ndma_chans;
3033        priv->dma = (struct grspw_dma_priv *) malloc(size);
3034        if (priv->dma == NULL)
3035                return DRVMGR_NOMEM;
3036        memset(priv->dma, 0, size);
3037        for (i=0; i<priv->hwsup.ndma_chans; i++) {
3038                priv->dma[i].core = priv;
3039                priv->dma[i].index = i;
3040                priv->dma[i].regs = &priv->regs->dma[i];
3041        }
3042
3043        /* Startup Action:
3044         *  - stop DMA
3045         *  - do not bring down the link (RMAP may be active)
3046         *  - RMAP settings untouched (RMAP may be active)
3047         *  - port select untouched (RMAP may be active)
3048         *  - timecodes are disabled
3049         *  - IRQ generation disabled
3050         *  - status cleared
3051         *  - node address / the first DMA channel's node address
3052         *    is untouched (RMAP may be active)
3053         */
3054        grspw_hw_stop(priv);
3055        grspw_hw_softreset(priv);
3056
3057        /* Register the device in the driver's device table */
3058        priv->index = grspw_count;
3059        priv_tab[priv->index] = priv;
3060        grspw_count++;
3061
3062        /* Device name */
3063        sprintf(priv->devname, "grspw%d", priv->index);
3064
3065        /* Tell above layer about new device */
3066        if (grspw_dev_add)
3067                priv->data = grspw_dev_add(priv->index);
3068
3069        return DRVMGR_OK;
3070}
3071
3072/******************* Driver Implementation ***********************/
3073
3074static int grspw_common_init(void)
3075{
3076        if (grspw_initialized == 1)
3077                return 0;
3078        if (grspw_initialized == -1)
3079                return -1;
3080        grspw_initialized = -1;
3081
3082        /* Device Semaphore created with count = 1 */
3083        if (rtems_semaphore_create(rtems_build_name('S', 'G', 'L', 'S'), 1,
3084            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
3085            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
3086            RTEMS_NO_PRIORITY_CEILING, 0, &grspw_sem) != RTEMS_SUCCESSFUL)
3087                return -1;
3088
3089        /* Work queue and work thread. Not created if the user disables
3090         * them; disabling saves resources when interrupts are not used.
3091         */
3092        if (grspw_work_task_priority != -1) {
3093                if (rtems_message_queue_create(
3094                    rtems_build_name('S', 'G', 'L', 'Q'), 32, 4, RTEMS_FIFO,
3095                    &grspw_work_queue) != RTEMS_SUCCESSFUL)
3096                        return -1;
3097
3098                if (rtems_task_create(rtems_build_name('S', 'G', 'L', 'T'),
3099                    grspw_work_task_priority, RTEMS_MINIMUM_STACK_SIZE,
3100                    RTEMS_PREEMPT | RTEMS_NO_ASR, RTEMS_NO_FLOATING_POINT,
3101                    &grspw_work_task) != RTEMS_SUCCESSFUL)
3102                        return -1;
3103
3104                if (rtems_task_start(grspw_work_task, grspw_work_func, 0) !=
3105                    RTEMS_SUCCESSFUL)
3106                        return -1;
3107        }
3108
3109        grspw_initialized = 1;
3110        return 0;
3111}
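
/* Illustrative sketch: the work queue and work thread can be disabled to
 * save resources when DMA interrupts are not used, by setting the priority
 * variable to -1 before the first GRSPW core initializes (assuming the
 * variable is reachable from the application):
 *
 *   extern int grspw_work_task_priority;
 *   ...
 *   grspw_work_task_priority = -1;
 */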