source: rtems/bsps/shared/grlib/spw/grspw_pkt.c @ 7eb606d3

/*
 * Cobham Gaisler GRSPW/GRSPW2 SpaceWire Kernel Library Interface for RTEMS.
 *
 * This driver can be used to implement a standard I/O system "char"-driver
 * or used directly.
 *
 * COPYRIGHT (c) 2011
 * Cobham Gaisler AB
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */
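
/*
 * Minimal direct-usage sketch (illustrative only, not part of the driver):
 * take a device, inspect the link and release the device again. Error
 * handling is omitted; device index 0 and the SPW_LS_RUN link-state value
 * are assumptions taken from grspw_pkt.h.
 *
 *   void *dev = grspw_open(0);              // take device 0
 *   if (dev) {
 *           if (grspw_link_state(dev) == SPW_LS_RUN) {
 *                   // link is up and running
 *           }
 *           grspw_close(dev);               // release the device again
 *   }
 */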

#include <rtems.h>
#include <bsp.h>
#include <rtems/libio.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <ctype.h>
#include <rtems/bspIo.h>

#include <drvmgr/drvmgr.h>
#include <grlib/ambapp.h>
#include <grlib/ambapp_bus.h>
#include <grlib/grspw_pkt.h>

#include <grlib/grlib_impl.h>

/*#define STATIC*/
#define STATIC static

/*#define GRSPW_DBG(args...) printk(args)*/
#define GRSPW_DBG(args...)

struct grspw_dma_regs {
        volatile unsigned int ctrl;     /* DMA Channel Control */
        volatile unsigned int rxmax;    /* RX Max Packet Length */
        volatile unsigned int txdesc;   /* TX Descriptor Base/Current */
        volatile unsigned int rxdesc;   /* RX Descriptor Base/Current */
        volatile unsigned int addr;     /* Address Register */
        volatile unsigned int resv[3];
};

struct grspw_regs {
        volatile unsigned int ctrl;
        volatile unsigned int status;
        volatile unsigned int nodeaddr;
        volatile unsigned int clkdiv;
        volatile unsigned int destkey;
        volatile unsigned int time;
        volatile unsigned int timer;    /* Used only in GRSPW1 */
        volatile unsigned int resv1;

        /* DMA Registers; ctrl.NCH determines the number of DMA channels,
         * up to 4 channels are supported
         */
        struct grspw_dma_regs dma[4];

        volatile unsigned int icctrl;
        volatile unsigned int icrx;
        volatile unsigned int icack;
        volatile unsigned int ictimeout;
        volatile unsigned int ictickomask;
        volatile unsigned int icaamask;
        volatile unsigned int icrlpresc;
        volatile unsigned int icrlisr;
        volatile unsigned int icrlintack;
        volatile unsigned int resv2;
        volatile unsigned int icisr;
        volatile unsigned int resv3;
};

/* GRSPW - Control Register - 0x00 */
#define GRSPW_CTRL_RA_BIT       31
#define GRSPW_CTRL_RX_BIT       30
#define GRSPW_CTRL_RC_BIT       29
#define GRSPW_CTRL_NCH_BIT      27
#define GRSPW_CTRL_PO_BIT       26
#define GRSPW_CTRL_CC_BIT       25
#define GRSPW_CTRL_ID_BIT       24
#define GRSPW_CTRL_LE_BIT       22
#define GRSPW_CTRL_PS_BIT       21
#define GRSPW_CTRL_NP_BIT       20
#define GRSPW_CTRL_RD_BIT       17
#define GRSPW_CTRL_RE_BIT       16
#define GRSPW_CTRL_TF_BIT       12
#define GRSPW_CTRL_TR_BIT       11
#define GRSPW_CTRL_TT_BIT       10
#define GRSPW_CTRL_LI_BIT       9
#define GRSPW_CTRL_TQ_BIT       8
#define GRSPW_CTRL_RS_BIT       6
#define GRSPW_CTRL_PM_BIT       5
#define GRSPW_CTRL_TI_BIT       4
#define GRSPW_CTRL_IE_BIT       3
#define GRSPW_CTRL_AS_BIT       2
#define GRSPW_CTRL_LS_BIT       1
#define GRSPW_CTRL_LD_BIT       0

#define GRSPW_CTRL_RA   (1<<GRSPW_CTRL_RA_BIT)
#define GRSPW_CTRL_RX   (1<<GRSPW_CTRL_RX_BIT)
#define GRSPW_CTRL_RC   (1<<GRSPW_CTRL_RC_BIT)
#define GRSPW_CTRL_NCH  (0x3<<GRSPW_CTRL_NCH_BIT)
#define GRSPW_CTRL_PO   (1<<GRSPW_CTRL_PO_BIT)
#define GRSPW_CTRL_CC   (1<<GRSPW_CTRL_CC_BIT)
#define GRSPW_CTRL_ID   (1<<GRSPW_CTRL_ID_BIT)
#define GRSPW_CTRL_LE   (1<<GRSPW_CTRL_LE_BIT)
#define GRSPW_CTRL_PS   (1<<GRSPW_CTRL_PS_BIT)
#define GRSPW_CTRL_NP   (1<<GRSPW_CTRL_NP_BIT)
#define GRSPW_CTRL_RD   (1<<GRSPW_CTRL_RD_BIT)
#define GRSPW_CTRL_RE   (1<<GRSPW_CTRL_RE_BIT)
#define GRSPW_CTRL_TF   (1<<GRSPW_CTRL_TF_BIT)
#define GRSPW_CTRL_TR   (1<<GRSPW_CTRL_TR_BIT)
#define GRSPW_CTRL_TT   (1<<GRSPW_CTRL_TT_BIT)
#define GRSPW_CTRL_LI   (1<<GRSPW_CTRL_LI_BIT)
#define GRSPW_CTRL_TQ   (1<<GRSPW_CTRL_TQ_BIT)
#define GRSPW_CTRL_RS   (1<<GRSPW_CTRL_RS_BIT)
#define GRSPW_CTRL_PM   (1<<GRSPW_CTRL_PM_BIT)
#define GRSPW_CTRL_TI   (1<<GRSPW_CTRL_TI_BIT)
#define GRSPW_CTRL_IE   (1<<GRSPW_CTRL_IE_BIT)
#define GRSPW_CTRL_AS   (1<<GRSPW_CTRL_AS_BIT)
#define GRSPW_CTRL_LS   (1<<GRSPW_CTRL_LS_BIT)
#define GRSPW_CTRL_LD   (1<<GRSPW_CTRL_LD_BIT)

#define GRSPW_CTRL_IRQSRC_MASK \
        (GRSPW_CTRL_LI | GRSPW_CTRL_TQ)
#define GRSPW_ICCTRL_IRQSRC_MASK \
        (GRSPW_ICCTRL_TQ | GRSPW_ICCTRL_AQ | GRSPW_ICCTRL_IQ)


/* GRSPW - Status Register - 0x04 */
#define GRSPW_STS_LS_BIT        21
#define GRSPW_STS_AP_BIT        9
#define GRSPW_STS_EE_BIT        8
#define GRSPW_STS_IA_BIT        7
#define GRSPW_STS_WE_BIT        6       /* GRSPW1 */
#define GRSPW_STS_PE_BIT        4
#define GRSPW_STS_DE_BIT        3
#define GRSPW_STS_ER_BIT        2
#define GRSPW_STS_CE_BIT        1
#define GRSPW_STS_TO_BIT        0

#define GRSPW_STS_LS    (0x7<<GRSPW_STS_LS_BIT)
#define GRSPW_STS_AP    (1<<GRSPW_STS_AP_BIT)
#define GRSPW_STS_EE    (1<<GRSPW_STS_EE_BIT)
#define GRSPW_STS_IA    (1<<GRSPW_STS_IA_BIT)
#define GRSPW_STS_WE    (1<<GRSPW_STS_WE_BIT)   /* GRSPW1 */
#define GRSPW_STS_PE    (1<<GRSPW_STS_PE_BIT)
#define GRSPW_STS_DE    (1<<GRSPW_STS_DE_BIT)
#define GRSPW_STS_ER    (1<<GRSPW_STS_ER_BIT)
#define GRSPW_STS_CE    (1<<GRSPW_STS_CE_BIT)
#define GRSPW_STS_TO    (1<<GRSPW_STS_TO_BIT)

/* GRSPW - Default Address Register - 0x08 */
#define GRSPW_DEF_ADDR_BIT      0
#define GRSPW_DEF_MASK_BIT      8
#define GRSPW_DEF_ADDR  (0xff<<GRSPW_DEF_ADDR_BIT)
#define GRSPW_DEF_MASK  (0xff<<GRSPW_DEF_MASK_BIT)

/* GRSPW - Clock Divisor Register - 0x0C */
#define GRSPW_CLKDIV_START_BIT  8
#define GRSPW_CLKDIV_RUN_BIT    0
#define GRSPW_CLKDIV_START      (0xff<<GRSPW_CLKDIV_START_BIT)
#define GRSPW_CLKDIV_RUN        (0xff<<GRSPW_CLKDIV_RUN_BIT)
#define GRSPW_CLKDIV_MASK       (GRSPW_CLKDIV_START|GRSPW_CLKDIV_RUN)
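
/* Illustrative sketch: the clock divisor register holds two 8-bit divisor
 * fields. Assuming a divisor value N yields a link rate of txclk/(N+1), a
 * value suitable for grspw_link_ctrl()'s clkdiv argument can be packed as
 * follows (start_div/run_div are hypothetical application variables):
 *
 *   int clkdiv = ((start_div << GRSPW_CLKDIV_START_BIT) & GRSPW_CLKDIV_START) |
 *                ((run_div << GRSPW_CLKDIV_RUN_BIT) & GRSPW_CLKDIV_RUN);
 */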

/* GRSPW - Destination key Register - 0x10 */
#define GRSPW_DK_DESTKEY_BIT    0
#define GRSPW_DK_DESTKEY        (0xff<<GRSPW_DK_DESTKEY_BIT)

/* GRSPW - Time Register - 0x14 */
#define GRSPW_TIME_CTRL_BIT     6
#define GRSPW_TIME_CNT_BIT      0
#define GRSPW_TIME_CTRL         (0x3<<GRSPW_TIME_CTRL_BIT)
#define GRSPW_TIME_TCNT         (0x3f<<GRSPW_TIME_CNT_BIT)

/* GRSPW - DMA Control Register - 0x20*N */
#define GRSPW_DMACTRL_LE_BIT    16
#define GRSPW_DMACTRL_SP_BIT    15
#define GRSPW_DMACTRL_SA_BIT    14
#define GRSPW_DMACTRL_EN_BIT    13
#define GRSPW_DMACTRL_NS_BIT    12
#define GRSPW_DMACTRL_RD_BIT    11
#define GRSPW_DMACTRL_RX_BIT    10
#define GRSPW_DMACTRL_AT_BIT    9
#define GRSPW_DMACTRL_RA_BIT    8
#define GRSPW_DMACTRL_TA_BIT    7
#define GRSPW_DMACTRL_PR_BIT    6
#define GRSPW_DMACTRL_PS_BIT    5
#define GRSPW_DMACTRL_AI_BIT    4
#define GRSPW_DMACTRL_RI_BIT    3
#define GRSPW_DMACTRL_TI_BIT    2
#define GRSPW_DMACTRL_RE_BIT    1
#define GRSPW_DMACTRL_TE_BIT    0

#define GRSPW_DMACTRL_LE        (1<<GRSPW_DMACTRL_LE_BIT)
#define GRSPW_DMACTRL_SP        (1<<GRSPW_DMACTRL_SP_BIT)
#define GRSPW_DMACTRL_SA        (1<<GRSPW_DMACTRL_SA_BIT)
#define GRSPW_DMACTRL_EN        (1<<GRSPW_DMACTRL_EN_BIT)
#define GRSPW_DMACTRL_NS        (1<<GRSPW_DMACTRL_NS_BIT)
#define GRSPW_DMACTRL_RD        (1<<GRSPW_DMACTRL_RD_BIT)
#define GRSPW_DMACTRL_RX        (1<<GRSPW_DMACTRL_RX_BIT)
#define GRSPW_DMACTRL_AT        (1<<GRSPW_DMACTRL_AT_BIT)
#define GRSPW_DMACTRL_RA        (1<<GRSPW_DMACTRL_RA_BIT)
#define GRSPW_DMACTRL_TA        (1<<GRSPW_DMACTRL_TA_BIT)
#define GRSPW_DMACTRL_PR        (1<<GRSPW_DMACTRL_PR_BIT)
#define GRSPW_DMACTRL_PS        (1<<GRSPW_DMACTRL_PS_BIT)
#define GRSPW_DMACTRL_AI        (1<<GRSPW_DMACTRL_AI_BIT)
#define GRSPW_DMACTRL_RI        (1<<GRSPW_DMACTRL_RI_BIT)
#define GRSPW_DMACTRL_TI        (1<<GRSPW_DMACTRL_TI_BIT)
#define GRSPW_DMACTRL_RE        (1<<GRSPW_DMACTRL_RE_BIT)
#define GRSPW_DMACTRL_TE        (1<<GRSPW_DMACTRL_TE_BIT)

/* GRSPW - DMA Channel Max Packet Length Register - (0x20*N + 0x04) */
#define GRSPW_DMARXLEN_MAX_BIT  0
#define GRSPW_DMARXLEN_MAX      (0xffffff<<GRSPW_DMARXLEN_MAX_BIT)

/* GRSPW - DMA Channel Address Register - (0x20*N + 0x10) */
#define GRSPW_DMAADR_ADDR_BIT   0
#define GRSPW_DMAADR_MASK_BIT   8
#define GRSPW_DMAADR_ADDR       (0xff<<GRSPW_DMAADR_ADDR_BIT)
#define GRSPW_DMAADR_MASK       (0xff<<GRSPW_DMAADR_MASK_BIT)

/* GRSPW - Interrupt code receive register - 0xa4 */
#define GRSPW_ICCTRL_INUM_BIT   27
#define GRSPW_ICCTRL_IA_BIT     24
#define GRSPW_ICCTRL_LE_BIT     23
#define GRSPW_ICCTRL_PR_BIT     22
#define GRSPW_ICCTRL_DQ_BIT     21 /* never used */
#define GRSPW_ICCTRL_TQ_BIT     20
#define GRSPW_ICCTRL_AQ_BIT     19
#define GRSPW_ICCTRL_IQ_BIT     18
#define GRSPW_ICCTRL_IR_BIT     17
#define GRSPW_ICCTRL_IT_BIT     16
#define GRSPW_ICCTRL_NUMI_BIT   13
#define GRSPW_ICCTRL_BIRQ_BIT   8
#define GRSPW_ICCTRL_ID_BIT     7
#define GRSPW_ICCTRL_II_BIT     6
#define GRSPW_ICCTRL_TXIRQ_BIT  0
#define GRSPW_ICCTRL_INUM       (0x1f << GRSPW_ICCTRL_INUM_BIT)
#define GRSPW_ICCTRL_IA         (1 << GRSPW_ICCTRL_IA_BIT)
#define GRSPW_ICCTRL_LE         (1 << GRSPW_ICCTRL_LE_BIT)
#define GRSPW_ICCTRL_PR         (1 << GRSPW_ICCTRL_PR_BIT)
#define GRSPW_ICCTRL_DQ         (1 << GRSPW_ICCTRL_DQ_BIT)
#define GRSPW_ICCTRL_TQ         (1 << GRSPW_ICCTRL_TQ_BIT)
#define GRSPW_ICCTRL_AQ         (1 << GRSPW_ICCTRL_AQ_BIT)
#define GRSPW_ICCTRL_IQ         (1 << GRSPW_ICCTRL_IQ_BIT)
#define GRSPW_ICCTRL_IR         (1 << GRSPW_ICCTRL_IR_BIT)
#define GRSPW_ICCTRL_IT         (1 << GRSPW_ICCTRL_IT_BIT)
#define GRSPW_ICCTRL_NUMI       (0x7 << GRSPW_ICCTRL_NUMI_BIT)
#define GRSPW_ICCTRL_BIRQ       (0x1f << GRSPW_ICCTRL_BIRQ_BIT)
#define GRSPW_ICCTRL_ID         (1 << GRSPW_ICCTRL_ID_BIT)
#define GRSPW_ICCTRL_II         (1 << GRSPW_ICCTRL_II_BIT)
#define GRSPW_ICCTRL_TXIRQ      (0x3f << GRSPW_ICCTRL_TXIRQ_BIT)

/* RX Buffer Descriptor */
struct grspw_rxbd {
   volatile unsigned int ctrl;
   volatile unsigned int addr;
};

/* TX Buffer Descriptor */
struct grspw_txbd {
   volatile unsigned int ctrl;
   volatile unsigned int haddr;
   volatile unsigned int dlen;
   volatile unsigned int daddr;
};

/* GRSPW - DMA RXBD Ctrl */
#define GRSPW_RXBD_LEN_BIT 0
#define GRSPW_RXBD_LEN  (0x1ffffff<<GRSPW_RXBD_LEN_BIT)
#define GRSPW_RXBD_EN   (1<<25)
#define GRSPW_RXBD_WR   (1<<26)
#define GRSPW_RXBD_IE   (1<<27)
#define GRSPW_RXBD_EP   (1<<28)
#define GRSPW_RXBD_HC   (1<<29)
#define GRSPW_RXBD_DC   (1<<30)
#define GRSPW_RXBD_TR   (1<<31)

#define GRSPW_TXBD_HLEN (0xff<<0)
#define GRSPW_TXBD_NCL  (0xf<<8)
#define GRSPW_TXBD_EN   (1<<12)
#define GRSPW_TXBD_WR   (1<<13)
#define GRSPW_TXBD_IE   (1<<14)
#define GRSPW_TXBD_LE   (1<<15)
#define GRSPW_TXBD_HC   (1<<16)
#define GRSPW_TXBD_DC   (1<<17)

#define GRSPW_DMAADR_MASK_BIT   8
#define GRSPW_DMAADR_ADDR       (0xff<<GRSPW_DMAADR_ADDR_BIT)
#define GRSPW_DMAADR_MASK       (0xff<<GRSPW_DMAADR_MASK_BIT)


/* GRSPW Error Condition */
#define GRSPW_STAT_ERROR        (GRSPW_STS_EE | GRSPW_STS_IA | GRSPW_STS_WE | GRSPW_STS_PE | GRSPW_STS_DE | GRSPW_STS_ER | GRSPW_STS_CE)
#define GRSPW_DMA_STATUS_ERROR  (GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA)
/* GRSPW Link configuration options */
#define GRSPW_LINK_CFG          (GRSPW_CTRL_LI | GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS)
#define GRSPW_LINKSTATE(status) ((status & GRSPW_CTRL_LS) >> GRSPW_CTRL_LS_BIT)

/* Software Defaults */
#define DEFAULT_RXMAX 1024      /* 1 KByte Max RX Packet Size */

/* GRSPW Constants */
#define GRSPW_TXBD_NR 64        /* Maximum number of TX Descriptors */
#define GRSPW_RXBD_NR 128       /* Maximum number of RX Descriptors */
#define GRSPW_TXBD_SIZE 16      /* Size in bytes of one TX descriptor */
#define GRSPW_RXBD_SIZE 8       /* Size in bytes of one RX descriptor */
#define BDTAB_SIZE 0x400        /* BD Table Size (RX or TX) */
#define BDTAB_ALIGN 0x400       /* BD Table Alignment Requirement */

/* Memory and HW Registers Access routines. All 32-bit access routines */
#define BD_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
/*#define BD_READ(addr) (*(volatile unsigned int *)(addr))*/
#define BD_READ(addr) leon_r32_no_cache((unsigned long)(addr))
#define REG_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
#define REG_READ(addr) (*(volatile unsigned int *)(addr))

struct grspw_ring {
        struct grspw_ring *next;        /* Next Descriptor */
        union {
                struct grspw_txbd *tx;  /* Descriptor Address */
                struct grspw_rxbd *rx;  /* Descriptor Address */
        } bd;
        struct grspw_pkt *pkt;          /* Associated Packet description. NULL if none */
};

/* An entry in the TX descriptor Ring */
struct grspw_txring {
        struct grspw_txring *next;      /* Next Descriptor */
        struct grspw_txbd *bd;          /* Descriptor Address */
        struct grspw_pkt *pkt;          /* Associated Packet description. NULL if none */
};

/* An entry in the RX descriptor Ring */
struct grspw_rxring {
        struct grspw_rxring *next;      /* Next Descriptor */
        struct grspw_rxbd *bd;          /* Descriptor Address */
        struct grspw_pkt *pkt;          /* Associated Packet description. NULL if none */
};


struct grspw_dma_priv {
        struct grspw_priv *core;        /* GRSPW Core */
        struct grspw_dma_regs *regs;    /* DMA Channel Registers */
        int index;                      /* DMA Channel Index @ GRSPW core */
        int open;                       /* DMA Channel opened by user */
        int started;                    /* DMA Channel activity (start|stop) */
        rtems_id sem_rxdma;             /* DMA Channel RX Semaphore */
        rtems_id sem_txdma;             /* DMA Channel TX Semaphore */
        struct grspw_dma_stats stats;   /* DMA Channel Statistics */
        struct grspw_dma_config cfg;    /* DMA Channel Configuration */

        /*** RX ***/

        /* RX Descriptor Ring */
        struct grspw_rxbd *rx_bds;              /* Descriptor Address */
        struct grspw_rxbd *rx_bds_hwa;          /* Descriptor HW Address */
        struct grspw_rxring *rx_ring_base;
        struct grspw_rxring *rx_ring_head;      /* Next descriptor to enable */
        struct grspw_rxring *rx_ring_tail;      /* Oldest enabled Descriptor */
        int rx_irq_en_cnt_curr;
        struct {
                int waiting;
                int ready_cnt;
                int op;
                int recv_cnt;
                rtems_id sem_wait;              /* RX Semaphore used to implement RX blocking */
        } rx_wait;

        /* Queue of Packets READY to be scheduled */
        struct grspw_list ready;
        int ready_cnt;

        /* Scheduled RX Packets Queue */
        struct grspw_list rx_sched;
        int rx_sched_cnt;

        /* Queue of Packets that have been RECEIVED */
        struct grspw_list recv;
        int recv_cnt;


        /*** TX ***/

        /* TX Descriptor Ring */
        struct grspw_txbd *tx_bds;              /* Descriptor Address */
        struct grspw_txbd *tx_bds_hwa;          /* Descriptor HW Address */
        struct grspw_txring *tx_ring_base;
        struct grspw_txring *tx_ring_head;
        struct grspw_txring *tx_ring_tail;
        int tx_irq_en_cnt_curr;
        struct {
                int waiting;
                int send_cnt;
                int op;
                int sent_cnt;
                rtems_id sem_wait;              /* TX Semaphore used to implement TX blocking */
        } tx_wait;

        /* Queue of Packets ready to be scheduled for transmission */
        struct grspw_list send;
        int send_cnt;

        /* Scheduled TX Packets Queue */
        struct grspw_list tx_sched;
        int tx_sched_cnt;

        /* Queue of Packets that have been SENT */
        struct grspw_list sent;
        int sent_cnt;
};

struct grspw_priv {
        char devname[8];                /* Device name "grspw%d" */
        struct drvmgr_dev *dev;         /* Device */
        struct grspw_regs *regs;        /* Virtual Address of APB Registers */
        int irq;                        /* AMBA IRQ number of core */
        int index;                      /* Index in order it was probed */
        int core_index;                 /* Core Bus Index */
        int open;                       /* If Device is already opened (=1) or not (=0) */
        void *data;                     /* User private Data for this device instance, set by grspw_initialize_user */

        /* Features supported by Hardware */
        struct grspw_hw_sup hwsup;

        /* Pointer to an array of at most 4 DMA Channels */
        struct grspw_dma_priv *dma;

        /* Spin-lock ISR protection */
        SPIN_DECLARE(devlock);

        /* Descriptor Memory Area for TX & RX and all DMA channels */
        unsigned int bd_mem;
        unsigned int bd_mem_alloced;

        /*** Time Code Handling ***/
        void (*tcisr)(void *data, int timecode);
        void *tcisr_arg;

        /*** Interrupt-code Handling ***/
        spwpkt_ic_isr_t icisr;
        void *icisr_arg;

        /* Bit mask representing events which shall cause link disable. */
        unsigned int dis_link_on_err;

        /* Bit mask for link status bits to clear by ISR */
        unsigned int stscfg;

        /*** Message Queue Handling ***/
        struct grspw_work_config wc;

        /* "Core Global" Statistics gathered, not dependent on DMA channel */
        struct grspw_core_stats stats;
};

int grspw_initialized = 0;
int grspw_count = 0;
rtems_id grspw_sem;
static struct grspw_priv *priv_tab[GRSPW_MAX];

/* callback to upper layer when devices are discovered/removed */
void *(*grspw_dev_add)(int) = NULL;
void (*grspw_dev_del)(int,void*) = NULL;

/* Defaults to do nothing - user can override this function.
 * Called from work-task.
 */
void __attribute__((weak)) grspw_work_event(
        enum grspw_worktask_ev ev,
        unsigned int msg)
{

}

/* USER OVERRIDABLE - The work task priority. Set to -1 to disable creating
 * the work-task and work-queue to save space.
 */
int grspw_work_task_priority __attribute__((weak)) = 100;
rtems_id grspw_work_task;
static struct grspw_work_config grspw_wc_def;

STATIC void grspw_hw_stop(struct grspw_priv *priv);
STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma);
STATIC void grspw_dma_reset(struct grspw_dma_priv *dma);
STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma);
STATIC void grspw_isr(void *data);

void *grspw_open(int dev_no)
{
        struct grspw_priv *priv;
        unsigned int bdtabsize, hwa;
        int i;
        union drvmgr_key_value *value;

        if (grspw_initialized != 1 || (dev_no >= grspw_count))
                return NULL;

        priv = priv_tab[dev_no];

        /* Take GRSPW lock - Wait until we get semaphore */
        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return NULL;

        if (priv->open) {
                priv = NULL;
                goto out;
        }

        /* Initialize Spin-lock for GRSPW Device. This is to protect
         * CTRL and DMACTRL registers from ISR.
         */
        SPIN_INIT(&priv->devlock, priv->devname);

        priv->tcisr = NULL;
        priv->tcisr_arg = NULL;
        priv->icisr = NULL;
        priv->icisr_arg = NULL;
        priv->stscfg = LINKSTS_MASK;

        /* Default to the common work queue and message queue; if they were
         * not created during initialization this is disabled.
         */
        grspw_work_cfg(priv, &grspw_wc_def);

        grspw_stats_clr(priv);

        /* Allocate TX & RX Descriptor memory area for all DMA
         * channels. Max-size descriptor area is allocated (or user assigned):
         *  - 128 RX descriptors per DMA Channel
         *  - 64 TX descriptors per DMA Channel
         * Specified address must be in CPU RAM.
         */
        bdtabsize = 2 * BDTAB_SIZE * priv->hwsup.ndma_chans;
        value = drvmgr_dev_key_get(priv->dev, "bdDmaArea", DRVMGR_KT_INT);
        if (value) {
                priv->bd_mem = value->i;
                priv->bd_mem_alloced = 0;
                if (priv->bd_mem & (BDTAB_ALIGN-1)) {
                        GRSPW_DBG("GRSPW[%d]: user-def DMA-area not aligned",
                                  priv->index);
                        priv = NULL;
                        goto out;
                }
        } else {
                priv->bd_mem_alloced = (unsigned int)grlib_malloc(bdtabsize + BDTAB_ALIGN - 1);
                if (priv->bd_mem_alloced == 0) {
                        priv = NULL;
                        goto out;
                }
                /* Align memory */
                priv->bd_mem = (priv->bd_mem_alloced + (BDTAB_ALIGN - 1)) &
                               ~(BDTAB_ALIGN-1);
        }

        /* Translate into DMA address that HW can use to access DMA
         * descriptors
         */
        drvmgr_translate_check(
                priv->dev,
                CPUMEM_TO_DMA,
                (void *)priv->bd_mem,
                (void **)&hwa,
                bdtabsize);

        GRSPW_DBG("GRSPW%d DMA descriptor table setup: (alloced:%p, bd_mem:%p, size: %d)\n",
                priv->index, priv->bd_mem_alloced, priv->bd_mem, bdtabsize + BDTAB_ALIGN - 1);
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                /* Do DMA Channel init here; other variables etc. are
                 * initialized when the respective DMA channel is opened.
                 *
                 * index & core are initialized by probe function.
                 */
                priv->dma[i].open = 0;
                priv->dma[i].rx_bds = (struct grspw_rxbd *)
                        (priv->bd_mem + i*BDTAB_SIZE*2);
                priv->dma[i].rx_bds_hwa = (struct grspw_rxbd *)
                        (hwa + BDTAB_SIZE*(2*i));
                priv->dma[i].tx_bds = (struct grspw_txbd *)
                        (priv->bd_mem + BDTAB_SIZE*(2*i+1));
                priv->dma[i].tx_bds_hwa = (struct grspw_txbd *)
                        (hwa + BDTAB_SIZE*(2*i+1));
                GRSPW_DBG("  DMA[%i]: RX %p - %p (%p - %p)   TX %p - %p (%p - %p)\n",
                        i,
                        priv->dma[i].rx_bds, (void *)priv->dma[i].rx_bds + BDTAB_SIZE - 1,
                        priv->dma[i].rx_bds_hwa, (void *)priv->dma[i].rx_bds_hwa + BDTAB_SIZE - 1,
                        priv->dma[i].tx_bds, (void *)priv->dma[i].tx_bds + BDTAB_SIZE - 1,
                        priv->dma[i].tx_bds_hwa, (void *)priv->dma[i].tx_bds_hwa + BDTAB_SIZE - 1);
        }

        /* Basic initialization of hardware, clear some registers but
         * keep Link/RMAP/Node-Address registers intact.
         */
        grspw_hw_stop(priv);

        /* Register Interrupt handler and enable IRQ at IRQ ctrl */
        drvmgr_interrupt_register(priv->dev, 0, priv->devname, grspw_isr, priv);

        /* Take the device */
        priv->open = 1;
out:
        rtems_semaphore_release(grspw_sem);
        return priv;
}

int grspw_close(void *d)
{
        struct grspw_priv *priv = d;
        int i;

        /* Take GRSPW lock - Wait until we get semaphore */
        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
            != RTEMS_SUCCESSFUL)
                return -1;

        /* Check that user has stopped and closed all DMA channels
         * appropriately. At this point the Hardware shall not be doing DMA
         * or generating Interrupts. We want HW in a "startup-state".
         */
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                if (priv->dma[i].open) {
                        rtems_semaphore_release(grspw_sem);
                        return 1;
                }
        }
        grspw_hw_stop(priv);

        /* Uninstall Interrupt handler */
        drvmgr_interrupt_unregister(priv->dev, 0, grspw_isr, priv);

        /* Free descriptor table memory if allocated using malloc() */
        if (priv->bd_mem_alloced) {
                free((void *)priv->bd_mem_alloced);
                priv->bd_mem_alloced = 0;
        }

        /* Mark not open */
        priv->open = 0;
        rtems_semaphore_release(grspw_sem);
        return 0;
}

void grspw_hw_support(void *d, struct grspw_hw_sup *hw)
{
        struct grspw_priv *priv = d;

        *hw = priv->hwsup;
}

void grspw_addr_ctrl(void *d, struct grspw_addr_config *cfg)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs;
        unsigned int ctrl, nodeaddr;
        SPIN_IRQFLAGS(irqflags);
        int i;

        if (!priv || !cfg)
                return;

        regs = priv->regs; /* only dereference priv after the NULL check */

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);

        if (cfg->promiscuous != -1) {
                /* Set Configuration */
                ctrl = REG_READ(&regs->ctrl);
                if (cfg->promiscuous)
                        ctrl |= GRSPW_CTRL_PM;
                else
                        ctrl &= ~GRSPW_CTRL_PM;
                REG_WRITE(&regs->ctrl, ctrl);
                REG_WRITE(&regs->nodeaddr, (cfg->def_mask<<8) | cfg->def_addr);

                for (i=0; i<priv->hwsup.ndma_chans; i++) {
                        ctrl = REG_READ(&regs->dma[i].ctrl);
                        ctrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
                        if (cfg->dma_nacfg[i].node_en) {
                                ctrl |= GRSPW_DMACTRL_EN;
                                REG_WRITE(&regs->dma[i].addr,
                                          (cfg->dma_nacfg[i].node_addr & 0xff) |
                                          ((cfg->dma_nacfg[i].node_mask & 0xff)<<8));
                        } else {
                                ctrl &= ~GRSPW_DMACTRL_EN;
                        }
                        REG_WRITE(&regs->dma[i].ctrl, ctrl);
                }
        }

        /* Read Current Configuration */
        cfg->promiscuous = REG_READ(&regs->ctrl) & GRSPW_CTRL_PM;
        nodeaddr = REG_READ(&regs->nodeaddr);
        cfg->def_addr = (nodeaddr & GRSPW_DEF_ADDR) >> GRSPW_DEF_ADDR_BIT;
        cfg->def_mask = (nodeaddr & GRSPW_DEF_MASK) >> GRSPW_DEF_MASK_BIT;
        for (i=0; i<priv->hwsup.ndma_chans; i++) {
                cfg->dma_nacfg[i].node_en = REG_READ(&regs->dma[i].ctrl) &
                                                GRSPW_DMACTRL_EN;
                ctrl = REG_READ(&regs->dma[i].addr);
                cfg->dma_nacfg[i].node_addr = (ctrl & GRSPW_DMAADR_ADDR) >>
                                                GRSPW_DMAADR_ADDR_BIT;
                cfg->dma_nacfg[i].node_mask = (ctrl & GRSPW_DMAADR_MASK) >>
                                                GRSPW_DMAADR_MASK_BIT;
        }
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        for (; i<4; i++) {
                cfg->dma_nacfg[i].node_en = 0;
                cfg->dma_nacfg[i].node_addr = 0;
                cfg->dma_nacfg[i].node_mask = 0;
        }
}
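
/* Illustrative read-only call (assumes "dev" was returned by grspw_open()):
 * setting promiscuous to -1 skips the write path, so cfg is only filled in
 * with the current address configuration:
 *
 *   struct grspw_addr_config cfg;
 *   cfg.promiscuous = -1;
 *   grspw_addr_ctrl(dev, &cfg);   // cfg now reflects the hardware state
 */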

/* Return Current DMA CTRL/Status Register */
unsigned int grspw_dma_ctrlsts(void *c)
{
        struct grspw_dma_priv *dma = c;

        return REG_READ(&dma->regs->ctrl);
}

/* Return Current Status Register */
unsigned int grspw_link_status(void *d)
{
        struct grspw_priv *priv = d;

        return REG_READ(&priv->regs->status);
}

/* Clear Status Register bits */
void grspw_link_status_clr(void *d, unsigned int mask)
{
        struct grspw_priv *priv = d;

        REG_WRITE(&priv->regs->status, mask);
}

/* Return Current Link State */
spw_link_state_t grspw_link_state(void *d)
{
        struct grspw_priv *priv = d;
        unsigned int status = REG_READ(&priv->regs->status);

        return (status & GRSPW_STS_LS) >> GRSPW_STS_LS_BIT;
}

/* Enable Global IRQ only if some irq source is set */
static inline int grspw_is_irqsource_set(unsigned int ctrl, unsigned int icctrl)
{
        return (ctrl & GRSPW_CTRL_IRQSRC_MASK) ||
                (icctrl & GRSPW_ICCTRL_IRQSRC_MASK);
}


/* options and clkdiv [in/out]: set to -1 to only read current config */
void grspw_link_ctrl(void *d, int *options, int *stscfg, int *clkdiv)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        SPIN_IRQFLAGS(irqflags);

        /* Write? */
        if (clkdiv) {
                if (*clkdiv != -1)
                        REG_WRITE(&regs->clkdiv, *clkdiv & GRSPW_CLKDIV_MASK);
                *clkdiv = REG_READ(&regs->clkdiv) & GRSPW_CLKDIV_MASK;
        }
        if (options) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                if (*options != -1) {
                        ctrl = (ctrl & ~GRSPW_LINK_CFG) |
                                (*options & GRSPW_LINK_CFG);

                        /* Enable Global IRQ only if some irq source is set */
                        if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
                                ctrl |= GRSPW_CTRL_IE;
                        else
                                ctrl &= ~GRSPW_CTRL_IE;

                        REG_WRITE(&regs->ctrl, ctrl);
                        /* Store the link disable events for use in
                         * ISR. The LINKOPTS_DIS_ON_* options are actually the
                         * corresponding bits in the status register, shifted
                         * by 16.
                         */
                        priv->dis_link_on_err = *options &
                                (LINKOPTS_MASK_DIS_ON | LINKOPTS_DIS_ONERR);
                }
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
                *options = (ctrl & GRSPW_LINK_CFG) | priv->dis_link_on_err;
        }
        if (stscfg) {
                if (*stscfg != -1) {
                        priv->stscfg = *stscfg & LINKSTS_MASK;
                }
                *stscfg = priv->stscfg;
        }
}
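
/* Illustrative sketch (assumes "dev" from grspw_open(); LINKOPTS_START is
 * assumed to be defined in grspw_pkt.h alongside LINKOPTS_DIS_ONERR): start
 * the link, let the ISR disable it on error, and leave clkdiv/stscfg as-is:
 *
 *   int options = LINKOPTS_START | LINKOPTS_DIS_ONERR;
 *   int stscfg = -1, clkdiv = -1;
 *   grspw_link_ctrl(dev, &options, &stscfg, &clkdiv);
 */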

/* Generate Tick-In (increment Time Counter, Send Time Code) */
void grspw_tc_tx(void *d)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        SPIN_IRQFLAGS(irqflags);

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_TI);
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
}

void grspw_tc_ctrl(void *d, int *options)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        SPIN_IRQFLAGS(irqflags);

        if (options == NULL)
                return;

        /* Write? */
        if (*options != -1) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~(GRSPW_CTRL_TR|GRSPW_CTRL_TT|GRSPW_CTRL_TQ);
                ctrl |= (*options & 0xd) << GRSPW_CTRL_TQ_BIT;

                /* Enable Global IRQ only if some irq source is set */
                if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
                        ctrl |= GRSPW_CTRL_IE;
                else
                        ctrl &= ~GRSPW_CTRL_IE;

                REG_WRITE(&regs->ctrl, ctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        } else
                ctrl = REG_READ(&regs->ctrl);
        *options = (ctrl >> GRSPW_CTRL_TQ_BIT) & 0xd;
}

/* Assign ISR Function to TimeCode RX IRQ */
void grspw_tc_isr(void *d, void (*tcisr)(void *data, int tc), void *data)
{
        struct grspw_priv *priv = d;

        priv->tcisr_arg = data;
        priv->tcisr = tcisr;
}

/* Read/Write TCTRL and TIMECNT. Write if not -1, always read current value
 * TCTRL   = bits 7 and 6
 * TIMECNT = bits 5 to 0
 */
void grspw_tc_time(void *d, int *time)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        if (time == NULL)
                return;
        if (*time != -1)
                REG_WRITE(&regs->time, *time & (GRSPW_TIME_TCNT | GRSPW_TIME_CTRL));
        *time = REG_READ(&regs->time) & (GRSPW_TIME_TCNT | GRSPW_TIME_CTRL);
}
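
/* Illustrative sketch (assumes "dev" from grspw_open()): read the current
 * time register without modifying it, then reset TCTRL/TIMECNT to zero:
 *
 *   int time = -1;
 *   grspw_tc_time(dev, &time);    // read-only: time holds TCTRL|TIMECNT
 *   time = 0;
 *   grspw_tc_time(dev, &time);    // write zero, then read back
 */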

/* Generate Tick-In for the given Interrupt-code and check for generation
 * error.
 *
 * Returns zero on success and non-zero on failure
 */
int grspw_ic_tickin(void *d, int ic)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        SPIN_IRQFLAGS(irqflags);
        unsigned int icctrl, mask;

        /* Prepare before turning off IRQ */
        mask = 0x3f << GRSPW_ICCTRL_TXIRQ_BIT;
        ic = ((ic << GRSPW_ICCTRL_TXIRQ_BIT) & mask) |
             GRSPW_ICCTRL_II | GRSPW_ICCTRL_ID;

        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        icctrl = REG_READ(&regs->icctrl);
        icctrl &= ~mask;
        icctrl |= ic;
        REG_WRITE(&regs->icctrl, icctrl); /* Generate SpW Interrupt Tick-In */
        /* the ID bit is valid after two clocks, so we do not need to wait here */
        icctrl = REG_READ(&regs->icctrl); /* Check SpW-Int generation error */
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);

        return icctrl & GRSPW_ICCTRL_ID;
}

#define ICOPTS_CTRL_MASK ICOPTS_EN_FLAGFILTER
#define ICOPTS_ICCTRL_MASK                                              \
        (ICOPTS_INTNUM | ICOPTS_EN_SPWIRQ_ON_EE  | ICOPTS_EN_SPWIRQ_ON_IA | \
         ICOPTS_EN_PRIO | ICOPTS_EN_TIMEOUTIRQ | ICOPTS_EN_ACKIRQ | \
         ICOPTS_EN_TICKOUTIRQ | ICOPTS_EN_RX | ICOPTS_EN_TX | \
         ICOPTS_BASEIRQ)

/* Control Interrupt-code settings of core
 * Write if not pointing to -1, always read current value
 *
 * TODO: A lot of code duplication with grspw_tc_ctrl
 */
void grspw_ic_ctrl(void *d, unsigned int *options)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        unsigned int icctrl;
        SPIN_IRQFLAGS(irqflags);

        if (options == NULL)
                return;

        if (*options != -1) {
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);

                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~GRSPW_CTRL_TF; /* Depends on one-to-one relation between
                                         * irqopts bits and ctrl bits */
                ctrl |= (*options & ICOPTS_CTRL_MASK) <<
                        (GRSPW_CTRL_TF_BIT - 0);

                icctrl = REG_READ(&regs->icctrl);
                icctrl &= ~ICOPTS_ICCTRL_MASK; /* Depends on one-to-one relation between
                                                * irqopts bits and icctrl bits */
                icctrl |= *options & ICOPTS_ICCTRL_MASK;

                /* Enable Global IRQ only if some irq source is set */
                if (grspw_is_irqsource_set(ctrl, icctrl))
                        ctrl |= GRSPW_CTRL_IE;
                else
                        ctrl &= ~GRSPW_CTRL_IE;

                REG_WRITE(&regs->ctrl, ctrl);
                REG_WRITE(&regs->icctrl, icctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        }
        *options = ((REG_READ(&regs->ctrl) & ICOPTS_CTRL_MASK) |
                    (REG_READ(&regs->icctrl) & ICOPTS_ICCTRL_MASK));
}

void grspw_ic_config(void *d, int rw, struct spwpkt_ic_config *cfg)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        if (!cfg)
                return;

        if (rw & 1) {
                REG_WRITE(&regs->ictickomask, cfg->tomask);
                REG_WRITE(&regs->icaamask, cfg->aamask);
                REG_WRITE(&regs->icrlpresc, cfg->scaler);
                REG_WRITE(&regs->icrlisr, cfg->isr_reload);
                REG_WRITE(&regs->icrlintack, cfg->ack_reload);
        }
        if (rw & 2) {
                cfg->tomask = REG_READ(&regs->ictickomask);
                cfg->aamask = REG_READ(&regs->icaamask);
                cfg->scaler = REG_READ(&regs->icrlpresc);
                cfg->isr_reload = REG_READ(&regs->icrlisr);
                cfg->ack_reload = REG_READ(&regs->icrlintack);
        }
}

/* Read or Write Interrupt-code status registers */
void grspw_ic_sts(void *d, unsigned int *rxirq, unsigned int *rxack, unsigned int *intto)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;

        /* No locking needed since the status bits are clear-on-write */

        if (rxirq) {
                if (*rxirq != 0)
                        REG_WRITE(&regs->icrx, *rxirq);
                else
                        *rxirq = REG_READ(&regs->icrx);
        }

        if (rxack) {
                if (*rxack != 0)
                        REG_WRITE(&regs->icack, *rxack);
                else
                        *rxack = REG_READ(&regs->icack);
        }

        if (intto) {
                if (*intto != 0)
                        REG_WRITE(&regs->ictimeout, *intto);
                else
                        *intto = REG_READ(&regs->ictimeout);
        }
}

/* Assign handler function to Interrupt-code tick out IRQ */
void grspw_ic_isr(void *d, spwpkt_ic_isr_t handler, void *data)
{
        struct grspw_priv *priv = d;

        priv->icisr_arg = data;
        priv->icisr = handler;
}

/* Set (not -1) and/or read RMAP options. */
int grspw_rmap_ctrl(void *d, int *options, int *dstkey)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        SPIN_IRQFLAGS(irqflags);

        if (dstkey) {
                if (*dstkey != -1)
                        REG_WRITE(&regs->destkey, *dstkey & GRSPW_DK_DESTKEY);
                *dstkey = REG_READ(&regs->destkey) & GRSPW_DK_DESTKEY;
        }
        if (options) {
                if (*options != -1) {
                        if ((*options & RMAPOPTS_EN_RMAP) && !priv->hwsup.rmap)
                                return -1;

                        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                        ctrl = REG_READ(&regs->ctrl);
                        ctrl &= ~(GRSPW_CTRL_RE|GRSPW_CTRL_RD);
                        ctrl |= (*options & 0x3) << GRSPW_CTRL_RE_BIT;
                        REG_WRITE(&regs->ctrl, ctrl);
                        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
                }
                *options = (REG_READ(&regs->ctrl) >> GRSPW_CTRL_RE_BIT) & 0x3;
        }

        return 0;
}
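
/* Illustrative sketch (assumes "dev" from grspw_open()): enable the RMAP
 * target while leaving the destination key untouched. The call fails with
 * -1 when the core lacks RMAP support:
 *
 *   int options = RMAPOPTS_EN_RMAP;
 *   if (grspw_rmap_ctrl(dev, &options, NULL) != 0) {
 *           // RMAP not supported by this core
 *   }
 */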

void grspw_rmap_support(void *d, char *rmap, char *rmap_crc)
{
        struct grspw_priv *priv = d;

        if (rmap)
                *rmap = priv->hwsup.rmap;
        if (rmap_crc)
                *rmap_crc = priv->hwsup.rmap_crc;
}

/* Select port:
 *  -1     = The currently selected port is returned
 *   0     = Port 0
 *   1     = Port 1
 *  Others = Both Port 0 and Port 1
 */
int grspw_port_ctrl(void *d, int *port)
{
        struct grspw_priv *priv = d;
        struct grspw_regs *regs = priv->regs;
        unsigned int ctrl;
        SPIN_IRQFLAGS(irqflags);

        if (port == NULL)
                return -1;

        if ((*port == 1) || (*port == 0)) {
                /* Select the port the user requested */
                if ((*port == 1) && (priv->hwsup.nports < 2))
                        return -1; /* Changing to Port 1, but only one port available */
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                ctrl = REG_READ(&regs->ctrl);
                ctrl &= ~(GRSPW_CTRL_NP | GRSPW_CTRL_PS);
                ctrl |= (*port & 1) << GRSPW_CTRL_PS_BIT;
                REG_WRITE(&regs->ctrl, ctrl);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        } else if (*port > 1) {
                /* Select both ports */
                SPIN_LOCK_IRQ(&priv->devlock, irqflags);
                REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_NP);
                SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        }

        /* Get current settings */
        ctrl = REG_READ(&regs->ctrl);
        if (ctrl & GRSPW_CTRL_NP) {
                /* Any port, selected by hardware */
                if (priv->hwsup.nports > 1)
                        *port = 3;
                else
                        *port = 0; /* Port0 the only port available */
        } else {
                *port = (ctrl & GRSPW_CTRL_PS) >> GRSPW_CTRL_PS_BIT;
        }

        return 0;
}
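
/* Illustrative sketch (assumes "dev" from grspw_open()): try to switch to
 * Port 1 and fall back to Port 0 on single-port hardware:
 *
 *   int port = 1;
 *   if (grspw_port_ctrl(dev, &port) != 0) {
 *           port = 0;                     // only one port available
 *           grspw_port_ctrl(dev, &port);
 *   }
 */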

/* Returns the number of ports available in hardware */
int grspw_port_count(void *d)
{
        struct grspw_priv *priv = d;

        return priv->hwsup.nports;
}

/* Current active port: 0 or 1 */
int grspw_port_active(void *d)
{
        struct grspw_priv *priv = d;
        unsigned int status;

        status = REG_READ(&priv->regs->status);

        return (status & GRSPW_STS_AP) >> GRSPW_STS_AP_BIT;
}

void grspw_stats_read(void *d, struct grspw_core_stats *sts)
{
        struct grspw_priv *priv = d;

        if (sts == NULL)
                return;
        memcpy(sts, &priv->stats, sizeof(priv->stats));
}

void grspw_stats_clr(void *d)
{
        struct grspw_priv *priv = d;

        /* Clear most of the statistics */
        memset(&priv->stats, 0, sizeof(priv->stats));
}

/*** DMA Interface ***/

/* Initialize the RX and TX Descriptor Ring, empty of packets */
STATIC void grspw_bdrings_init(struct grspw_dma_priv *dma)
{
        struct grspw_ring *r;
        int i;

        /* Empty BD rings */
        dma->rx_ring_head = dma->rx_ring_base;
        dma->rx_ring_tail = dma->rx_ring_base;
        dma->tx_ring_head = dma->tx_ring_base;
        dma->tx_ring_tail = dma->tx_ring_base;

        /* Init RX Descriptors */
        r = (struct grspw_ring *)dma->rx_ring_base;
        for (i=0; i<GRSPW_RXBD_NR; i++) {

                /* Init Ring Entry */
                r[i].next = &r[i+1];
                r[i].bd.rx = &dma->rx_bds[i];
                r[i].pkt = NULL;

                /* Init HW Descriptor */
                BD_WRITE(&r[i].bd.rx->ctrl, 0);
                BD_WRITE(&r[i].bd.rx->addr, 0);
        }
        r[GRSPW_RXBD_NR-1].next = &r[0];

        /* Init TX Descriptors */
        r = (struct grspw_ring *)dma->tx_ring_base;
        for (i=0; i<GRSPW_TXBD_NR; i++) {

                /* Init Ring Entry */
                r[i].next = &r[i+1];
                r[i].bd.tx = &dma->tx_bds[i];
                r[i].pkt = NULL;

                /* Init HW Descriptor */
                BD_WRITE(&r[i].bd.tx->ctrl, 0);
                BD_WRITE(&r[i].bd.tx->haddr, 0);
                BD_WRITE(&r[i].bd.tx->dlen, 0);
                BD_WRITE(&r[i].bd.tx->daddr, 0);
        }
        r[GRSPW_TXBD_NR-1].next = &r[0];
}

/* Try to populate the descriptor ring with as many READY unused packet
 * buffers as possible. The packets assigned to a descriptor are put at the
 * end of the scheduled list.
 *
 * The number of Packets scheduled is returned.
 *
 *  - READY List -> RX-SCHED List
 *  - Descriptors are initialized and enabled for reception
 */
STATIC int grspw_rx_schedule_ready(struct grspw_dma_priv *dma)
{
        int cnt;
        unsigned int ctrl, dmactrl;
        void *hwaddr;
        struct grspw_rxring *curr_bd;
        struct grspw_pkt *curr_pkt, *last_pkt;
        struct grspw_list lst;
        SPIN_IRQFLAGS(irqflags);

        /* Is Ready Q empty? */
        if (grspw_list_is_empty(&dma->ready))
                return 0;

        cnt = 0;
        lst.head = curr_pkt = dma->ready.head;
        curr_bd = dma->rx_ring_head;
        while (!curr_bd->pkt) {

                /* Assign Packet to descriptor */
                curr_bd->pkt = curr_pkt;

                /* Prepare descriptor address. */
                hwaddr = curr_pkt->data;
                if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
                        drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
                                         hwaddr, &hwaddr);
                        if (curr_pkt->data == hwaddr) /* translation needed? */
                                curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
                }
                BD_WRITE(&curr_bd->bd->addr, hwaddr);

                ctrl = GRSPW_RXBD_EN;
                if (curr_bd->next == dma->rx_ring_base) {
                        /* Wrap around (only needed when smaller descriptor
                         * table)
                         */
                        ctrl |= GRSPW_RXBD_WR;
                }

                /* Is this Packet going to be an interrupt Packet? */
                if ((--dma->rx_irq_en_cnt_curr) <= 0) {
                        if (dma->cfg.rx_irq_en_cnt == 0) {
                                /* IRQ is disabled. A big number to avoid
                                 * reaching zero too often
                                 */
                                dma->rx_irq_en_cnt_curr = 0x3fffffff;
                        } else {
                                dma->rx_irq_en_cnt_curr = dma->cfg.rx_irq_en_cnt;
                                ctrl |= GRSPW_RXBD_IE;
                        }
                }

                if (curr_pkt->flags & RXPKT_FLAG_IE)
                        ctrl |= GRSPW_RXBD_IE;

                /* Enable descriptor */
                BD_WRITE(&curr_bd->bd->ctrl, ctrl);

                last_pkt = curr_pkt;
                curr_bd = curr_bd->next;
                cnt++;

                /* Get Next Packet from Ready Queue */
                if (curr_pkt == dma->ready.tail) {
                        /* Handled all in ready queue. */
                        curr_pkt = NULL;
                        break;
                }
                curr_pkt = curr_pkt->next;
        }

        /* Have Packets been scheduled? */
        if (cnt > 0) {
                /* Prepare list for insertion/deletion */
                lst.tail = last_pkt;

                /* Remove scheduled packets from ready queue */
                grspw_list_remove_head_list(&dma->ready, &lst);
                dma->ready_cnt -= cnt;
                if (dma->stats.ready_cnt_min > dma->ready_cnt)
                        dma->stats.ready_cnt_min = dma->ready_cnt;

                /* Insert scheduled packets into scheduled queue */
                grspw_list_append_list(&dma->rx_sched, &lst);
                dma->rx_sched_cnt += cnt;
                if (dma->stats.rx_sched_cnt_max < dma->rx_sched_cnt)
                        dma->stats.rx_sched_cnt_max = dma->rx_sched_cnt;

                /* Update RX ring position */
                dma->rx_ring_head = curr_bd;

                /* Make hardware aware of the newly enabled descriptors.
                 * We must protect from ISR which writes RI|TI
                 */
                SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
                dmactrl = REG_READ(&dma->regs->ctrl);
                dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
                dmactrl |= GRSPW_DMACTRL_RE | GRSPW_DMACTRL_RD;
                REG_WRITE(&dma->regs->ctrl, dmactrl);
                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
        }

        return cnt;
}

/* Scans the RX descriptor table for scheduled Packets that have been
 * received, and moves these Packets from the head of the scheduled queue
 * to the tail of the recv queue.
 *
 * Also, for all packets the status is updated.
 *
 *  - RX-SCHED List -> RECV List
 *
 * Return Value
 * Number of packets moved
 */
STATIC int grspw_rx_process_scheduled(struct grspw_dma_priv *dma)
{
        struct grspw_rxring *curr;
        struct grspw_pkt *last_pkt;
        int recv_pkt_cnt = 0;
        unsigned int ctrl;
        struct grspw_list lst;

        curr = dma->rx_ring_tail;

        /* Step into RX ring to find if packets have been scheduled for
         * reception.
         */
        if (!curr->pkt)
                return 0; /* No scheduled packets, thus no received, abort */

        /* There have been Packets scheduled ==> scheduled Packets may have
         * been received and need to be collected into the RECV List.
         *
         * A temporary list "lst" with all received packets is created.
         */
        lst.head = curr->pkt;

        /* Loop until the first enabled "unreceived" SpW Packet is found.
         * An unused descriptor is indicated by an unassigned pkt field.
         */
        while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_RXBD_EN)) {
                /* Handle one received Packet */

                /* Remember last handled Packet so that insertion/removal from
                 * Packet lists go fast.
                 */
                last_pkt = curr->pkt;

                /* Get Length of Packet in bytes, and reception options */
                last_pkt->dlen = (ctrl & GRSPW_RXBD_LEN) >> GRSPW_RXBD_LEN_BIT;

                /* Set flags to indicate error(s) and CRC information,
                 * and Mark Received.
                 */
                last_pkt->flags = (last_pkt->flags & ~RXPKT_FLAG_OUTPUT_MASK) |
                                  ((ctrl >> 20) & RXPKT_FLAG_OUTPUT_MASK) |
                                  RXPKT_FLAG_RX;

                /* Packet was Truncated? */
                if (ctrl & GRSPW_RXBD_TR)
                        dma->stats.rx_err_trunk++;

                /* Error End-Of-Packet? */
                if (ctrl & GRSPW_RXBD_EP)
                        dma->stats.rx_err_endpkt++;
                curr->pkt = NULL; /* Mark descriptor unused */

                /* Increment */
                curr = curr->next;
                recv_pkt_cnt++;
        }

        /* 1. Remove all handled packets from scheduled queue
         * 2. Put all handled packets into recv queue
         */
        if (recv_pkt_cnt > 0) {

                /* Update Stats, Number of Received Packets */
                dma->stats.rx_pkts += recv_pkt_cnt;

                /* Save RX ring position */
                dma->rx_ring_tail = curr;

                /* Prepare list for insertion/deletion */
                lst.tail = last_pkt;

                /* Remove received Packets from RX-SCHED queue */
                grspw_list_remove_head_list(&dma->rx_sched, &lst);
                dma->rx_sched_cnt -= recv_pkt_cnt;
                if (dma->stats.rx_sched_cnt_min > dma->rx_sched_cnt)
                        dma->stats.rx_sched_cnt_min = dma->rx_sched_cnt;

                /* Insert received Packets into RECV queue */
                grspw_list_append_list(&dma->recv, &lst);
                dma->recv_cnt += recv_pkt_cnt;
                if (dma->stats.recv_cnt_max < dma->recv_cnt)
                        dma->stats.recv_cnt_max = dma->recv_cnt;
        }

        return recv_pkt_cnt;
}
1398
1399/* Try to populate descriptor ring with as many SEND packets as possible. The
1400 * packets assigned with to a descriptor are put in the end of
1401 * the scheduled list.
1402 *
1403 * The number of Packets scheduled is returned.
1404 *
1405 *  - SEND List -> TX-SCHED List
1406 *  - Descriptors are initialized and enabled for transmission
1407 */
1408STATIC int grspw_tx_schedule_send(struct grspw_dma_priv *dma)
1409{
1410        int cnt;
1411        unsigned int ctrl, dmactrl;
1412        void *hwaddr;
1413        struct grspw_txring *curr_bd;
1414        struct grspw_pkt *curr_pkt, *last_pkt;
1415        struct grspw_list lst;
[fec8288]1416        SPIN_IRQFLAGS(irqflags);
[0f49c0e]1417
1418        /* Is Ready Q empty? */
1419        if (grspw_list_is_empty(&dma->send))
1420                return 0;
1421
1422        cnt = 0;
1423        lst.head = curr_pkt = dma->send.head;
1424        curr_bd = dma->tx_ring_head;
1425        while (!curr_bd->pkt) {
1426
1427                /* Assign Packet to descriptor */
1428                curr_bd->pkt = curr_pkt;
1429
1430                /* Set up header transmission */
1431                if (curr_pkt->hdr && curr_pkt->hlen) {
1432                        hwaddr = curr_pkt->hdr;
1433                        if (curr_pkt->flags & PKT_FLAG_TR_HDR) {
1434                                drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
1435                                                 hwaddr, &hwaddr);
1436                                /* translation needed? */
1437                                if (curr_pkt->hdr == hwaddr)
1438                                        curr_pkt->flags &= ~PKT_FLAG_TR_HDR;
1439                        }
1440                        BD_WRITE(&curr_bd->bd->haddr, hwaddr);
[0aae151]1441                        ctrl = GRSPW_TXBD_EN |
1442                               (curr_pkt->hlen & GRSPW_TXBD_HLEN);
[0f49c0e]1443                } else {
1444                        ctrl = GRSPW_TXBD_EN;
1445                }
1446                /* Enable IRQ generation and CRC options as specified
1447                 * by user.
1448                 */
1449                ctrl |= (curr_pkt->flags & TXPKT_FLAG_INPUT_MASK) << 8;
1450
1451                if (curr_bd->next == dma->tx_ring_base) {
1452                        /* Wrap around (only needed when the descriptor table is smaller than max) */
1453                        ctrl |= GRSPW_TXBD_WR;
1454                }
1455
1456                /* Is this Packet going to be an interrupt Packet? */
1457                if ((--dma->tx_irq_en_cnt_curr) <= 0) {
1458                        if (dma->cfg.tx_irq_en_cnt == 0) {
1459                                /* IRQ is disabled.
1460                                 * Use a big number so it does not reach zero too often
1461                                 */
1462                                dma->tx_irq_en_cnt_curr = 0x3fffffff;
1463                        } else {
1464                                dma->tx_irq_en_cnt_curr = dma->cfg.tx_irq_en_cnt;
1465                                ctrl |= GRSPW_TXBD_IE;
1466                        }
1467                }
1468
1469                /* Prepare descriptor data address. Parts of CTRL are written
1470                 * to DLEN for debugging only (CTRL is cleared by HW).
1471                 */
1472                if (curr_pkt->data && curr_pkt->dlen) {
1473                        hwaddr = curr_pkt->data;
1474                        if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
1475                                drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
1476                                                 hwaddr, &hwaddr);
1477                                /* translation needed? */
1478                                if (curr_pkt->data == hwaddr)
1479                                        curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
1480                        }
1481                        BD_WRITE(&curr_bd->bd->daddr, hwaddr);
1482                        BD_WRITE(&curr_bd->bd->dlen, curr_pkt->dlen |
1483                                                     ((ctrl & 0x3f000) << 12));
1484                } else {
1485                        BD_WRITE(&curr_bd->bd->daddr, 0);
1486                        BD_WRITE(&curr_bd->bd->dlen, ((ctrl & 0x3f000) << 12));
1487                }
1488
1489                /* Enable descriptor */
1490                BD_WRITE(&curr_bd->bd->ctrl, ctrl);
1491
1492                last_pkt = curr_pkt;
1493                curr_bd = curr_bd->next;
1494                cnt++;
1495
1496                /* Get Next Packet from Send Queue */
1497                if (curr_pkt == dma->send.tail) {
1498                        /* Handled all in send queue. */
1499                        curr_pkt = NULL;
1500                        break;
1501                }
1502                curr_pkt = curr_pkt->next;
1503        }
1504
1505        /* Have Packets been scheduled? */
1506        if (cnt > 0) {
1507                /* Prepare list for insertion/deletion */
1508                lst.tail = last_pkt;
1509
1510                /* Remove scheduled packets from ready queue */
1511                grspw_list_remove_head_list(&dma->send, &lst);
1512                dma->send_cnt -= cnt;
1513                if (dma->stats.send_cnt_min > dma->send_cnt)
1514                        dma->stats.send_cnt_min = dma->send_cnt;
1515
1516                /* Insert scheduled packets into scheduled queue */
1517                grspw_list_append_list(&dma->tx_sched, &lst);
1518                dma->tx_sched_cnt += cnt;
1519                if (dma->stats.tx_sched_cnt_max < dma->tx_sched_cnt)
1520                        dma->stats.tx_sched_cnt_max = dma->tx_sched_cnt;
1521
1522                /* Update TX ring position */
1523                dma->tx_ring_head = curr_bd;
1524
1525                /* Make hardware aware of the newly enabled descriptors */
1526                SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
1527                dmactrl = REG_READ(&dma->regs->ctrl);
1528                dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
1529                dmactrl |= GRSPW_DMACTRL_TE;
1530                REG_WRITE(&dma->regs->ctrl, dmactrl);
1531                SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
1532        }
1533        return cnt;
1534}
1535
1536/* Scans the TX descriptor table for transmitted packets, and moves these
1537 * packets from the head of the scheduled queue to the tail of the sent queue.
1538 *
1539 * Also, for all packets the status is updated.
1540 *
1541 *  - SCHED List -> SENT List
1542 *
1543 * Return Value
1544 * Number of packet moved
1545 */
1546STATIC int grspw_tx_process_scheduled(struct grspw_dma_priv *dma)
1547{
1548        struct grspw_txring *curr;
1549        struct grspw_pkt *last_pkt;
1550        int sent_pkt_cnt = 0;
1551        unsigned int ctrl;
1552        struct grspw_list lst;
1553
1554        curr = dma->tx_ring_tail;
1555
1556        /* Step into TX ring to find if packets have been scheduled for
1557         * transmission.
1558         */
1559        if (!curr->pkt)
1560                return 0; /* No scheduled packets, thus no sent, abort */
1561
1562        /* Packets have been scheduled ==> scheduled Packets may have been
1563         * transmitted and need to be collected into the SENT List.
1564         *
1565         * A temporary list "lst" with all sent packets is created.
1566         */
1567        lst.head = curr->pkt;
1568
1569        /* Loop until first enabled "un-transmitted" SpW Packet is found.
1570         * An unused descriptor is indicated by an unassigned pkt field.
1571         */
1572        while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_TXBD_EN)) {
1573                /* Handle one sent Packet */
1574
1575                /* Remember last handled Packet so that insertion/removal from
1576                 * packet lists go fast.
1577                 */
1578                last_pkt = curr->pkt;
1579
1580                /* Set flags to indicate error(s) and Mark Sent.
1581                 */
1582                last_pkt->flags = (last_pkt->flags & ~TXPKT_FLAG_OUTPUT_MASK) |
1583                                        (ctrl & TXPKT_FLAG_LINKERR) |
1584                                        TXPKT_FLAG_TX;
1585
1586                /* Sent packet experienced link error? */
1587                if (ctrl & GRSPW_TXBD_LE)
1588                        dma->stats.tx_err_link++;
1589
1590                curr->pkt = NULL; /* Mark descriptor unused */
1591
1592                /* Increment */
1593                curr = curr->next;
1594                sent_pkt_cnt++;
1595        }
1596
1597        /* 1. Remove all handled packets from TX-SCHED queue
1598         * 2. Put all handled packets into SENT queue
1599         */
1600        if (sent_pkt_cnt > 0) {
1601                /* Update Stats, Number of Transmitted Packets */
1602                dma->stats.tx_pkts += sent_pkt_cnt;
1603
1604                /* Save TX ring position */
1605                dma->tx_ring_tail = curr;
1606
1607                /* Prepare list for insertion/deletion */
1608                lst.tail = last_pkt;
1609
1610                /* Remove sent packets from TX-SCHED queue */
1611                grspw_list_remove_head_list(&dma->tx_sched, &lst);
1612                dma->tx_sched_cnt -= sent_pkt_cnt;
1613                if (dma->stats.tx_sched_cnt_min > dma->tx_sched_cnt)
1614                        dma->stats.tx_sched_cnt_min = dma->tx_sched_cnt;
1615
1616                /* Insert sent packets into SENT queue */
1617                grspw_list_append_list(&dma->sent, &lst);
1618                dma->sent_cnt += sent_pkt_cnt;
1619                if (dma->stats.sent_cnt_max < dma->sent_cnt)
1620                        dma->stats.sent_cnt_max = dma->sent_cnt;
1621        }
1622
1623        return sent_pkt_cnt;
1624}
1625
1626void *grspw_dma_open(void *d, int chan_no)
1627{
1628        struct grspw_priv *priv = d;
1629        struct grspw_dma_priv *dma;
1630        int size;
1631
[3395ca99]1632        if ((chan_no < 0) || (priv->hwsup.ndma_chans <= chan_no))
[0f49c0e]1633                return NULL;
1634
1635        dma = &priv->dma[chan_no];
1636
1637        /* Take GRSPW lock */
1638        if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1639            != RTEMS_SUCCESSFUL)
1640                return NULL;
1641
1642        if (dma->open) {
1643                dma = NULL;
1644                goto out;
1645        }
1646
1647        dma->started = 0;
1648
1649        /* Set Default Configuration:
1650         *
1651         *  - MAX RX Packet Length = DEFAULT_RXMAX
1652         *  - Disable RX/TX IRQ generation
1653         *  - No spill (DMAFLAG_NO_SPILL)
1654         */
1655        dma->cfg.rxmaxlen = DEFAULT_RXMAX;
1656        dma->cfg.rx_irq_en_cnt = 0;
1657        dma->cfg.tx_irq_en_cnt = 0;
1658        dma->cfg.flags = DMAFLAG_NO_SPILL;
1659
[57e1f4c3]1660        /* set to NULL so that error exit works correctly */
[0d31dcc]1661        dma->sem_rxdma = RTEMS_ID_NONE;
1662        dma->sem_txdma = RTEMS_ID_NONE;
[57e1f4c3]1663        dma->rx_wait.sem_wait = RTEMS_ID_NONE;
1664        dma->tx_wait.sem_wait = RTEMS_ID_NONE;
1665        dma->rx_ring_base = NULL;
1666
[0f49c0e]1667        /* DMA Channel Semaphore created with count = 1 */
1668        if (rtems_semaphore_create(
[0d31dcc]1669            rtems_build_name('S', 'D', '0' + priv->index, '0' + chan_no*2), 1,
[0f49c0e]1670            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1671            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
[0d31dcc]1672            RTEMS_NO_PRIORITY_CEILING, 0, &dma->sem_rxdma) != RTEMS_SUCCESSFUL) {
1673                dma->sem_rxdma = RTEMS_ID_NONE;
1674                goto err;
1675        }
1676        if (rtems_semaphore_create(
1677            rtems_build_name('S', 'D', '0' + priv->index, '0' + chan_no*2+1), 1,
1678            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1679            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1680            RTEMS_NO_PRIORITY_CEILING, 0, &dma->sem_txdma) != RTEMS_SUCCESSFUL) {
1681                dma->sem_txdma = RTEMS_ID_NONE;
[57e1f4c3]1682                goto err;
[0f49c0e]1683        }
1684
1685        /* Allocate memory for the two descriptor rings */
1686        size = sizeof(struct grspw_ring) * (GRSPW_RXBD_NR + GRSPW_TXBD_NR);
[11f3b9a]1687        dma->rx_ring_base = grlib_malloc(size);
[0f49c0e]1688        dma->tx_ring_base = (struct grspw_txring *)&dma->rx_ring_base[GRSPW_RXBD_NR];
[57e1f4c3]1689        if (dma->rx_ring_base == NULL)
1690                goto err;
[0f49c0e]1691
1692        /* Create DMA RX and TX Channel semaphores with count = 0 */
1693        if (rtems_semaphore_create(
1694            rtems_build_name('S', 'R', '0' + priv->index, '0' + chan_no), 0,
1695            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1696            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1697            RTEMS_NO_PRIORITY_CEILING, 0, &dma->rx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
[57e1f4c3]1698                dma->rx_wait.sem_wait = RTEMS_ID_NONE;
1699                goto err;
[0f49c0e]1700        }
1701        if (rtems_semaphore_create(
1702            rtems_build_name('S', 'T', '0' + priv->index, '0' + chan_no), 0,
1703            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1704            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1705            RTEMS_NO_PRIORITY_CEILING, 0, &dma->tx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
[57e1f4c3]1706                dma->tx_wait.sem_wait = RTEMS_ID_NONE;
1707                goto err;
[0f49c0e]1708        }
1709
1710        /* Reset software structures */
1711        grspw_dma_reset(dma);
1712
1713        /* Take the device */
1714        dma->open = 1;
1715out:
1716        /* Return GRSPW Lock */
1717        rtems_semaphore_release(grspw_sem);
1718
1719        return dma;
[57e1f4c3]1720
1721        /* an initialization error happened */
1722err:
[0d31dcc]1723        if (dma->sem_rxdma != RTEMS_ID_NONE)
1724                rtems_semaphore_delete(dma->sem_rxdma);
1725        if (dma->sem_txdma != RTEMS_ID_NONE)
1726                rtems_semaphore_delete(dma->sem_txdma);
[57e1f4c3]1727        if (dma->rx_wait.sem_wait != RTEMS_ID_NONE)
1728                rtems_semaphore_delete(dma->rx_wait.sem_wait);
1729        if (dma->tx_wait.sem_wait != RTEMS_ID_NONE)
1730                rtems_semaphore_delete(dma->tx_wait.sem_wait);
1731        if (dma->rx_ring_base)
1732                free(dma->rx_ring_base);
1733        dma = NULL;
1734        goto out;
[0f49c0e]1735}
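/* Usage sketch (illustrative only, not part of the driver): open DMA channel
 * 0 of a GRSPW device previously opened with grspw_open(), start it and shut
 * it down again later. Error handling and the handles "dev"/"dma" are
 * assumptions of this sketch.
 *
 *   void *dev = grspw_open(0);
 *   void *dma = dev ? grspw_dma_open(dev, 0) : NULL;
 *   if (dma == NULL)
 *           return; // bad channel number, already open or no resources
 *   if (grspw_dma_start(dma) == 0) {
 *           // ... use grspw_dma_tx_send()/grspw_dma_rx_recv() here ...
 *           grspw_dma_stop(dma);
 *   }
 *   grspw_dma_close(dma);
 */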
1736
1737/* Initialize Software Structures:
1738 *  - Clear all Queues
1739 *  - init BD ring
1740 *  - init IRQ counter
1741 *  - clear statistics counters
1742 *  - init wait structures and semaphores
1743 */
1744STATIC void grspw_dma_reset(struct grspw_dma_priv *dma)
1745{
1746        /* Empty RX and TX queues */
1747        grspw_list_clr(&dma->ready);
1748        grspw_list_clr(&dma->rx_sched);
1749        grspw_list_clr(&dma->recv);
1750        grspw_list_clr(&dma->send);
1751        grspw_list_clr(&dma->tx_sched);
1752        grspw_list_clr(&dma->sent);
1753        dma->ready_cnt = 0;
1754        dma->rx_sched_cnt = 0;
1755        dma->recv_cnt = 0;
1756        dma->send_cnt = 0;
1757        dma->tx_sched_cnt = 0;
1758        dma->sent_cnt = 0;
1759
1760        dma->rx_irq_en_cnt_curr = 0;
1761        dma->tx_irq_en_cnt_curr = 0;
1762
1763        grspw_bdrings_init(dma);
1764
1765        dma->rx_wait.waiting = 0;
1766        dma->tx_wait.waiting = 0;
1767
1768        grspw_dma_stats_clr(dma);
1769}
1770
[eb5a42f6]1771int grspw_dma_close(void *c)
[0f49c0e]1772{
1773        struct grspw_dma_priv *dma = c;
1774
1775        if (!dma->open)
[eb5a42f6]1776                return 0;
[0f49c0e]1777
1778        /* Take device lock - Wait until we get semaphore */
[0d31dcc]1779        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]1780            != RTEMS_SUCCESSFUL)
[eb5a42f6]1781                return -1;
[0d31dcc]1782        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1783            != RTEMS_SUCCESSFUL) {
1784                rtems_semaphore_release(dma->sem_rxdma);
1785                return -1;
1786        }
[0f49c0e]1787
[eb5a42f6]1788        /* Cannot close an active DMA channel. User must stop DMA and make sure
1789         * no threads are active/blocked within driver.
1790         */
1791        if (dma->started || dma->rx_wait.waiting || dma->tx_wait.waiting) {
[0d31dcc]1792                rtems_semaphore_release(dma->sem_txdma);
1793                rtems_semaphore_release(dma->sem_rxdma);
[eb5a42f6]1794                return 1;
1795        }
[0f49c0e]1796
1797        /* Free resources */
1798        rtems_semaphore_delete(dma->rx_wait.sem_wait);
1799        rtems_semaphore_delete(dma->tx_wait.sem_wait);
[57e1f4c3]1800        /* Release and delete lock. Operations requiring lock will fail */
[0d31dcc]1801        rtems_semaphore_delete(dma->sem_txdma);
1802        rtems_semaphore_delete(dma->sem_rxdma);
1803        dma->sem_txdma = RTEMS_ID_NONE;
1804        dma->sem_rxdma = RTEMS_ID_NONE;
[0f49c0e]1805
1806        /* Free memory */
1807        if (dma->rx_ring_base)
1808                free(dma->rx_ring_base);
1809        dma->rx_ring_base = NULL;
1810        dma->tx_ring_base = NULL;
1811
1812        dma->open = 0;
[eb5a42f6]1813        return 0;
[0f49c0e]1814}
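/* Note that a return value of 1 means "busy" rather than failure. A shutdown
 * sketch (illustrative only) that stops the channel first and then retries
 * the close while user threads are still blocked inside the driver:
 *
 *   grspw_dma_stop(dma);
 *   while (grspw_dma_close(dma) == 1)
 *           rtems_task_wake_after(1); // threads still waiting in driver
 */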
1815
[72ec13ef]1816unsigned int grspw_dma_enable_int(void *c, int rxtx, int force)
1817{
1818        struct grspw_dma_priv *dma = c;
1819        int rc = 0;
1820        unsigned int ctrl, ctrl_old;
[fec8288]1821        SPIN_IRQFLAGS(irqflags);
[72ec13ef]1822
1823        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
1824        if (dma->started == 0) {
1825                rc = 1; /* DMA stopped */
1826                goto out;
1827        }
1828        ctrl = REG_READ(&dma->regs->ctrl);
1829        ctrl_old = ctrl;
1830
1831        /* Read/Write DMA error ? */
1832        if (ctrl & GRSPW_DMA_STATUS_ERROR) {
1833                rc = 2; /* DMA error */
1834                goto out;
1835        }
1836
1837        /* DMA has finished a TX/RX packet and user wants work-task to
1838         * take care of DMA table processing.
1839         */
1840        ctrl &= ~GRSPW_DMACTRL_AT;
1841
1842        if ((rxtx & 1) == 0)
1843                ctrl &= ~GRSPW_DMACTRL_PR;
1844        else if (force || ((dma->cfg.rx_irq_en_cnt != 0) ||
1845                 (dma->cfg.flags & DMAFLAG2_RXIE)))
1846                ctrl |= GRSPW_DMACTRL_RI;
1847
1848        if ((rxtx & 2) == 0)
1849                ctrl &= ~GRSPW_DMACTRL_PS;
1850        else if (force || ((dma->cfg.tx_irq_en_cnt != 0) ||
1851                 (dma->cfg.flags & DMAFLAG2_TXIE)))
1852                ctrl |= GRSPW_DMACTRL_TI;
1853
1854        REG_WRITE(&dma->regs->ctrl, ctrl);
1855        /* Re-enabled interrupts previously enabled */
1856        rc = ctrl_old & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS);
1857out:
1858        SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
1859        return rc;
1860}
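/* grspw_dma_enable_int() is normally called from the work-task further down
 * to re-enable DMA interrupts once the descriptor tables have been processed.
 * A custom work implementation could use it the same way (sketch; "dma" is
 * an open and started channel handle):
 *
 *   switch (grspw_dma_enable_int(dma, 3, 0)) { // rxtx=3: both RX and TX
 *   case 1:  return; // DMA stopped, nothing to re-enable
 *   case 2:  return; // DMA error, channel should be stopped by caller
 *   default: break;  // done; returns previously pending PR/PS status bits
 *   }
 */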
1861
[0f49c0e]1862/* Schedule List of packets for transmission at some point in
1863 * future.
1864 *
1865 * 1. Move transmitted packets to SENT List (SCHED->SENT)
1866 * 2. Add the requested packets to the SEND List (USER->SEND)
1867 * 3. Schedule as many packets as possible (SEND->SCHED)
1868 */
1869int grspw_dma_tx_send(void *c, int opts, struct grspw_list *pkts, int count)
1870{
1871        struct grspw_dma_priv *dma = c;
1872        int ret;
1873
1874        /* Take DMA channel lock */
[0d31dcc]1875        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]1876            != RTEMS_SUCCESSFUL)
1877                return -1;
1878
1879        if (dma->started == 0) {
1880                ret = 1; /* signal DMA has been stopped */
1881                goto out;
1882        }
1883        ret = 0;
1884
1885        /* 1. Move transmitted packets to SENT List (SCHED->SENT) */
1886        if ((opts & 1) == 0)
1887                grspw_tx_process_scheduled(dma);
1888
1889        /* 2. Add the requested packets to the SEND List (USER->SEND) */
[ef94150f]1890        if (pkts && (count > 0)) {
[0f49c0e]1891                grspw_list_append_list(&dma->send, pkts);
1892                dma->send_cnt += count;
1893                if (dma->stats.send_cnt_max < dma->send_cnt)
1894                        dma->stats.send_cnt_max = dma->send_cnt;
1895        }
1896
1897        /* 3. Schedule as many packets as possible (SEND->SCHED) */
1898        if ((opts & 2) == 0)
1899                grspw_tx_schedule_send(dma);
1900
1901out:
1902        /* Unlock DMA channel */
[0d31dcc]1903        rtems_semaphore_release(dma->sem_txdma);
[0f49c0e]1904
1905        return ret;
1906}
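/* Example (illustrative sketch, not part of the driver): queue one packet
 * for transmission. "pkt" is a user-owned struct grspw_pkt whose data/dlen
 * (and optionally hdr/hlen) fields have already been filled in.
 *
 *   struct grspw_list lst;
 *
 *   pkt->next = NULL;
 *   lst.head = lst.tail = pkt;
 *   if (grspw_dma_tx_send(dma, 0, &lst, 1) != 0)
 *           ; // -1: channel lock failed, 1: DMA channel is stopped
 */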
1907
1908int grspw_dma_tx_reclaim(void *c, int opts, struct grspw_list *pkts, int *count)
1909{
1910        struct grspw_dma_priv *dma = c;
1911        struct grspw_pkt *pkt, *lastpkt;
1912        int cnt, started;
1913
1914        /* Take DMA channel lock */
[0d31dcc]1915        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]1916            != RTEMS_SUCCESSFUL)
1917                return -1;
1918
1919        /* 1. Move transmitted packets to SENT List (SCHED->SENT) */
1920        started = dma->started;
1921        if ((started > 0) && ((opts & 1) == 0))
1922                grspw_tx_process_scheduled(dma);
1923
1924        /* Move all/count SENT packets to the caller's list (SENT->USER) */
1925        if (pkts) {
1926                if ((count == NULL) || (*count == -1) ||
1927                    (*count >= dma->sent_cnt)) {
1928                        /* Move all SENT Packets */
1929                        *pkts = dma->sent;
1930                        grspw_list_clr(&dma->sent);
1931                        if (count)
1932                                *count = dma->sent_cnt;
1933                        dma->sent_cnt = 0;
1934                } else {
1935                        /* Move a number of SENT Packets */
1936                        pkts->head = pkt = lastpkt = dma->sent.head;
1937                        cnt = 0;
1938                        while (cnt < *count) {
1939                                lastpkt = pkt;
1940                                pkt = pkt->next;
1941                                cnt++;
1942                        }
1943                        if (cnt > 0) {
1944                                pkts->tail = lastpkt;
1945                                grspw_list_remove_head_list(&dma->sent, pkts);
1946                                dma->sent_cnt -= cnt;
1947                        } else {
1948                                grspw_list_clr(pkts);
1949                        }
1950                }
1951        } else if (count) {
1952                *count = 0;
1953        }
1954
1955        /* 3. Schedule as many packets as possible (SEND->SCHED) */
[c442647f]1956        if ((started > 0) && ((opts & 2) == 0))
[0f49c0e]1957                grspw_tx_schedule_send(dma);
1958
1959        /* Unlock DMA channel */
[0d31dcc]1960        rtems_semaphore_release(dma->sem_txdma);
[0f49c0e]1961
1962        return (~started) & 1; /* signal DMA has been stopped */
1963}
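/* Example (illustrative sketch): take back all buffers from the SENT queue
 * and check the per-packet status flags set by grspw_tx_process_scheduled().
 *
 *   struct grspw_list lst;
 *   struct grspw_pkt *pkt;
 *   int count = -1; // -1: reclaim all packets in the SENT queue
 *
 *   if (grspw_dma_tx_reclaim(dma, 0, &lst, &count) >= 0) {
 *           for (pkt = lst.head; count > 0; count--, pkt = pkt->next) {
 *                   if (!(pkt->flags & TXPKT_FLAG_TX))
 *                           ; // never transmitted (e.g. DMA was stopped)
 *                   else if (pkt->flags & TXPKT_FLAG_LINKERR)
 *                           ; // link error during transmission
 *           }
 *   }
 */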
1964
[1ef9caa2]1965void grspw_dma_tx_count(void *c, int *send, int *sched, int *sent, int *hw)
[0f49c0e]1966{
1967        struct grspw_dma_priv *dma = c;
[1ef9caa2]1968        int sched_cnt, diff;
1969        unsigned int hwbd;
1970        struct grspw_txbd *tailbd;
1971
1972        /* Take device lock - Wait until we get semaphore.
1973         * The lock is taken so that the counters are in sync with each other
1974         * and that DMA descriptor table and tx_ring_tail is not being updated
1975         * during HW counter processing in this function.
1976         */
[0d31dcc]1977        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[1ef9caa2]1978            != RTEMS_SUCCESSFUL)
1979                return;
[0f49c0e]1980
1981        if (send)
1982                *send = dma->send_cnt;
[1ef9caa2]1983        sched_cnt = dma->tx_sched_cnt;
[0f49c0e]1984        if (sched)
[1ef9caa2]1985                *sched = sched_cnt;
[0f49c0e]1986        if (sent)
1987                *sent = dma->sent_cnt;
[1ef9caa2]1988        if (hw) {
1989                /* Calculate number of descriptors (processed by HW) between
1990                 * HW pointer and oldest SW pointer.
1991                 */
1992                hwbd = REG_READ(&dma->regs->txdesc);
1993                tailbd = dma->tx_ring_tail->bd;
1994                diff = ((hwbd - (unsigned int)tailbd) / GRSPW_TXBD_SIZE) &
1995                        (GRSPW_TXBD_NR - 1);
1996                /* Handle special case when HW and SW pointers are equal
1997                 * because all TX descriptors have been processed by HW.
1998                 */
1999                if ((diff == 0) && (sched_cnt == GRSPW_TXBD_NR) &&
2000                    ((BD_READ(&tailbd->ctrl) & GRSPW_TXBD_EN) == 0)) {
2001                        diff = GRSPW_TXBD_NR;
2002                }
2003                *hw = diff;
2004        }
2005
2006        /* Unlock DMA channel */
[0d31dcc]2007        rtems_semaphore_release(dma->sem_txdma);
[0f49c0e]2008}
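/* Example (illustrative sketch): poll the TX queue levels; any of the
 * pointers may be NULL if that counter is not of interest.
 *
 *   int send, sched, sent, hw;
 *
 *   grspw_dma_tx_count(dma, &send, &sched, &sent, &hw);
 *   printf("TX: %d waiting, %d scheduled (%d finished by HW), %d sent\n",
 *          send, sched, hw, sent);
 */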
2009
2010static inline int grspw_tx_wait_eval(struct grspw_dma_priv *dma)
2011{
2012        int send_val, sent_val;
2013
2014        if (dma->tx_wait.send_cnt >= (dma->send_cnt + dma->tx_sched_cnt))
2015                send_val = 1;
2016        else
2017                send_val = 0;
2018
2019        if (dma->tx_wait.sent_cnt <= dma->sent_cnt)
2020                sent_val = 1;
2021        else
2022                sent_val = 0;
2023
2024        /* AND or OR ? */
2025        if (dma->tx_wait.op == 0)
2026                return send_val & sent_val; /* AND */
2027        else
2028                return send_val | sent_val; /* OR */
2029}
2030
2031/* Block until the condition is met: send_cnt or fewer packets are queued
2032 * in the "Send and Scheduled" Qs, op (AND or OR), sent_cnt or more packets
2033 * "have been sent" (Sent Q).
2034 * If a link error occurs and the Stop on Link error is defined, this function
2035 * will also return to caller.
2036 */
2037int grspw_dma_tx_wait(void *c, int send_cnt, int op, int sent_cnt, int timeout)
2038{
2039        struct grspw_dma_priv *dma = c;
[9cb7e5d]2040        int ret, rc, initialized = 0;
[0f49c0e]2041
2042        if (timeout == 0)
2043                timeout = RTEMS_NO_TIMEOUT;
2044
2045check_condition:
2046
2047        /* Take DMA channel lock */
[0d31dcc]2048        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]2049            != RTEMS_SUCCESSFUL)
2050                return -1;
2051
2052        /* Check that no other thread is waiting; this driver only supports
2053         * one waiter at a time.
2054         */
[9cb7e5d]2055        if (initialized == 0 && dma->tx_wait.waiting) {
2056                ret = 3;
2057                goto out_release;
[0f49c0e]2058        }
2059
[9cb7e5d]2060        /* Stop if link error or similar (DMA stopped), abort */
[0f49c0e]2061        if (dma->started == 0) {
2062                ret = 1;
[9cb7e5d]2063                goto out_release;
[0f49c0e]2064        }
2065
2066        /* Set up Condition */
2067        dma->tx_wait.send_cnt = send_cnt;
2068        dma->tx_wait.op = op;
2069        dma->tx_wait.sent_cnt = sent_cnt;
2070
2071        if (grspw_tx_wait_eval(dma) == 0) {
2072                /* Prepare Wait */
[9cb7e5d]2073                initialized = 1;
[0f49c0e]2074                dma->tx_wait.waiting = 1;
2075
2076                /* Release DMA channel lock */
[0d31dcc]2077                rtems_semaphore_release(dma->sem_txdma);
[0f49c0e]2078
2079                /* Try to take the Wait lock; if this fails the link may
2080                 * have gone down or the user stopped this DMA channel
2081                 */
2082                rc = rtems_semaphore_obtain(dma->tx_wait.sem_wait, RTEMS_WAIT,
2083                                                timeout);
2084                if (rc == RTEMS_TIMEOUT) {
[9cb7e5d]2085                        ret = 2;
2086                        goto out;
[0f49c0e]2087                } else if (rc == RTEMS_UNSATISFIED ||
2088                           rc == RTEMS_OBJECT_WAS_DELETED) {
[9cb7e5d]2089                        ret = 1; /* sem was flushed/deleted, means DMA stop */
2090                        goto out;
2091                } else if (rc != RTEMS_SUCCESSFUL) {
2092                        /* Unknown Error */
2093                        ret = -1;
2094                        goto out;
2095                } else if (dma->started == 0) {
2096                        ret = 1;
2097                        goto out;
2098                }
[0f49c0e]2099
2100                /* Check condition once more */
2101                goto check_condition;
2102        }
2103
2104        ret = 0;
[9cb7e5d]2105
2106out_release:
[0f49c0e]2107        /* Unlock DMA channel */
[0d31dcc]2108        rtems_semaphore_release(dma->sem_txdma);
[0f49c0e]2109
[9cb7e5d]2110out:
2111        if (initialized)
2112                dma->tx_wait.waiting = 0;
[0f49c0e]2113        return ret;
2114}
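/* Example (illustrative sketch): block until every packet handed to the
 * driver has left the SEND and TX-SCHED queues (i.e. all have been moved to
 * the SENT queue). With sent_cnt=0 the "sent" condition is always true, so
 * op=0 (AND) makes the send_cnt=0 condition the only active one.
 *
 *   // 0=condition met, 1=DMA stopped, 2=timeout, 3=another waiter, -1=error
 *   int rc = grspw_dma_tx_wait(dma, 0, 0, 0, 100); // timeout: 100 ticks
 */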
2115
2116int grspw_dma_rx_recv(void *c, int opts, struct grspw_list *pkts, int *count)
2117{
2118        struct grspw_dma_priv *dma = c;
2119        struct grspw_pkt *pkt, *lastpkt;
2120        int cnt, started;
2121
2122        /* Take DMA channel lock */
[0d31dcc]2123        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]2124            != RTEMS_SUCCESSFUL)
2125                return -1;
2126
2127        /* 1. Move Scheduled packets to RECV List (SCHED->RECV) */
2128        started = dma->started;
2129        if (((opts & 1) == 0) && (started > 0))
2130                grspw_rx_process_scheduled(dma);
2131
2132        /* Move all RECV packets to the caller's list */
2133        if (pkts) {
2134                if ((count == NULL) || (*count == -1) ||
2135                    (*count >= dma->recv_cnt)) {
2136                        /* Move all Received packets */
2137                        *pkts = dma->recv;
2138                        grspw_list_clr(&dma->recv);
2139                        if (count)
2140                                *count = dma->recv_cnt;
2141                        dma->recv_cnt = 0;
2142                } else {
2143                        /* Move a number of RECV Packets */
2144                        pkts->head = pkt = lastpkt = dma->recv.head;
2145                        cnt = 0;
2146                        while (cnt < *count) {
2147                                lastpkt = pkt;
2148                                pkt = pkt->next;
2149                                cnt++;
2150                        }
2151                        if (cnt > 0) {
2152                                pkts->tail = lastpkt;
2153                                grspw_list_remove_head_list(&dma->recv, pkts);
2154                                dma->recv_cnt -= cnt;
2155                        } else {
2156                                grspw_list_clr(pkts);
2157                        }
2158                }
2159        } else if (count) {
2160                *count = 0;
2161        }
2162
2163        /* 3. Schedule as many free packet buffers as possible (READY->SCHED) */
2164        if (((opts & 2) == 0) && (started > 0))
2165                grspw_rx_schedule_ready(dma);
2166
2167        /* Unlock DMA channel */
[0d31dcc]2168        rtems_semaphore_release(dma->sem_rxdma);
[0f49c0e]2169
2170        return (~started) & 1;
2171}
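/* Example (illustrative sketch): fetch all received packets and inspect the
 * length and status flags set by grspw_rx_process_scheduled(). The process()
 * function is hypothetical.
 *
 *   struct grspw_list lst;
 *   struct grspw_pkt *pkt;
 *   int count = -1; // -1: take all packets in the RECV queue
 *
 *   if (grspw_dma_rx_recv(dma, 0, &lst, &count) >= 0) {
 *           for (pkt = lst.head; count > 0; count--, pkt = pkt->next) {
 *                   if (pkt->flags & RXPKT_FLAG_RX)
 *                           process(pkt->data, pkt->dlen);
 *           }
 *   }
 */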
2172
2173int grspw_dma_rx_prepare(void *c, int opts, struct grspw_list *pkts, int count)
2174{
2175        struct grspw_dma_priv *dma = c;
2176        int ret;
2177
2178        /* Take DMA channel lock */
[0d31dcc]2179        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]2180            != RTEMS_SUCCESSFUL)
2181                return -1;
2182
2183        if (dma->started == 0) {
2184                ret = 1;
2185                goto out;
2186        }
2187
2188        /* 1. Move Received packets to RECV List (SCHED->RECV) */
2189        if ((opts & 1) == 0)
2190                grspw_rx_process_scheduled(dma);
2191
2192        /* 2. Add the "free/ready" packet buffers to the READY List (USER->READY) */
2193        if (pkts && (count > 0)) {
2194                grspw_list_append_list(&dma->ready, pkts);
2195                dma->ready_cnt += count;
2196                if (dma->stats.ready_cnt_max < dma->ready_cnt)
2197                        dma->stats.ready_cnt_max = dma->ready_cnt;
2198        }
2199
2200        /* 3. Schedule as many packets as possible (READY->SCHED) */
2201        if ((opts & 2) == 0)
2202                grspw_rx_schedule_ready(dma);
2203
2204        ret = 0;
2205out:
2206        /* Unlock DMA channel */
[0d31dcc]2207        rtems_semaphore_release(dma->sem_rxdma);
[0f49c0e]2208
2209        return ret;
2210}
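/* Example (illustrative sketch): hand NUM empty packet buffers to the
 * driver for reception. The array "pkts[]" with pre-assigned data pointers
 * and the constant NUM are assumptions of this sketch.
 *
 *   struct grspw_list lst;
 *   int i;
 *
 *   for (i = 0; i < NUM - 1; i++)
 *           pkts[i].next = &pkts[i + 1];
 *   pkts[NUM - 1].next = NULL;
 *   lst.head = &pkts[0];
 *   lst.tail = &pkts[NUM - 1];
 *   grspw_dma_rx_prepare(dma, 0, &lst, NUM);
 */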
2211
[1ef9caa2]2212void grspw_dma_rx_count(void *c, int *ready, int *sched, int *recv, int *hw)
[0f49c0e]2213{
2214        struct grspw_dma_priv *dma = c;
[1ef9caa2]2215        int sched_cnt, diff;
2216        unsigned int hwbd;
2217        struct grspw_rxbd *tailbd;
2218
2219        /* Take device lock - Wait until we get semaphore.
2220         * The lock is taken so that the counters are in sync with each other
2221         * and that DMA descriptor table and rx_ring_tail is not being updated
2222         * during HW counter processing in this function.
2223         */
[0d31dcc]2224        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[1ef9caa2]2225            != RTEMS_SUCCESSFUL)
2226                return;
[0f49c0e]2227
2228        if (ready)
2229                *ready = dma->ready_cnt;
[1ef9caa2]2230        sched_cnt = dma->rx_sched_cnt;
[0f49c0e]2231        if (sched)
[1ef9caa2]2232                *sched = sched_cnt;
[0f49c0e]2233        if (recv)
2234                *recv = dma->recv_cnt;
[1ef9caa2]2235        if (hw) {
2236                /* Calculate number of descriptors (processed by HW) between
2237                 * HW pointer and oldest SW pointer.
2238                 */
2239                hwbd = REG_READ(&dma->regs->rxdesc);
2240                tailbd = dma->rx_ring_tail->bd;
2241                diff = ((hwbd - (unsigned int)tailbd) / GRSPW_RXBD_SIZE) &
2242                        (GRSPW_RXBD_NR - 1);
2243                /* Handle special case when HW and SW pointers are equal
2244                 * because all RX descriptors have been processed by HW.
2245                 */
2246                if ((diff == 0) && (sched_cnt == GRSPW_RXBD_NR) &&
2247                    ((BD_READ(&tailbd->ctrl) & GRSPW_RXBD_EN) == 0)) {
2248                        diff = GRSPW_RXBD_NR;
2249                }
2250                *hw = diff;
2251        }
2252
2253        /* Unlock DMA channel */
[0d31dcc]2254        rtems_semaphore_release(dma->sem_rxdma);
[0f49c0e]2255}
2256
2257static inline int grspw_rx_wait_eval(struct grspw_dma_priv *dma)
2258{
2259        int ready_val, recv_val;
2260
2261        if (dma->rx_wait.ready_cnt >= (dma->ready_cnt + dma->rx_sched_cnt))
2262                ready_val = 1;
2263        else
2264                ready_val = 0;
2265
2266        if (dma->rx_wait.recv_cnt <= dma->recv_cnt)
2267                recv_val = 1;
2268        else
2269                recv_val = 0;
2270
2271        /* AND or OR ? */
2272        if (dma->rx_wait.op == 0)
2273                return ready_val & recv_val; /* AND */
2274        else
2275                return ready_val | recv_val; /* OR */
2276}
2277
2278/* Block until the condition is met: recv_cnt or more packets are queued in
2279 * the RECV Q, op (AND or OR), ready_cnt or fewer packet buffers are
2280 * available in the "READY and Scheduled" Qs.
2281 * If a link error occurs and the Stop on Link error is defined, this function
2282 * will also return to caller, however with an error.
2283 */
2284int grspw_dma_rx_wait(void *c, int recv_cnt, int op, int ready_cnt, int timeout)
2285{
2286        struct grspw_dma_priv *dma = c;
[9cb7e5d]2287        int ret, rc, initialized = 0;
[0f49c0e]2288
2289        if (timeout == 0)
2290                timeout = RTEMS_NO_TIMEOUT;
2291
2292check_condition:
2293
2294        /* Take DMA channel lock */
[0d31dcc]2295        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]2296            != RTEMS_SUCCESSFUL)
2297                return -1;
2298
2299        /* Check that no other thread is waiting; this driver only supports
2300         * one waiter at a time.
2301         */
[9cb7e5d]2302        if (initialized == 0 && dma->rx_wait.waiting) {
2303                ret = 3;
2304                goto out_release;
[0f49c0e]2305        }
2306
[9cb7e5d]2307        /* Stop if link error or similar (DMA stopped), abort */
[0f49c0e]2308        if (dma->started == 0) {
2309                ret = 1;
[9cb7e5d]2310                goto out_release;
[0f49c0e]2311        }
2312
2313        /* Set up Condition */
2314        dma->rx_wait.recv_cnt = recv_cnt;
2315        dma->rx_wait.op = op;
2316        dma->rx_wait.ready_cnt = ready_cnt;
2317
2318        if (grspw_rx_wait_eval(dma) == 0) {
2319                /* Prepare Wait */
[9cb7e5d]2320                initialized = 1;
[0f49c0e]2321                dma->rx_wait.waiting = 1;
2322
2323                /* Release channel lock */
[0d31dcc]2324                rtems_semaphore_release(dma->sem_rxdma);
[0f49c0e]2325
2326                /* Try to take the Wait lock; if this fails the link may
2327                 * have gone down or the user stopped this DMA channel
2328                 */
2329                rc = rtems_semaphore_obtain(dma->rx_wait.sem_wait, RTEMS_WAIT,
2330                                           timeout);
2331                if (rc == RTEMS_TIMEOUT) {
[9cb7e5d]2332                        ret = 2;
2333                        goto out;
[0f49c0e]2334                } else if (rc == RTEMS_UNSATISFIED ||
2335                           rc == RTEMS_OBJECT_WAS_DELETED) {
[9cb7e5d]2336                        ret = 1; /* sem was flushed/deleted, means DMA stop */
2337                        goto out;
2338                } else if (rc != RTEMS_SUCCESSFUL) {
2339                        /* Unknown Error */
2340                        ret = -1;
2341                        goto out;
2342                } else if (dma->started == 0) {
2343                        ret = 1;
2344                        goto out;
2345                }
[0f49c0e]2346
2347                /* Check condition once more */
2348                goto check_condition;
2349        }
[9cb7e5d]2350
[0f49c0e]2351        ret = 0;
2352
[9cb7e5d]2353out_release:
[0f49c0e]2354        /* Unlock DMA channel */
[0d31dcc]2355        rtems_semaphore_release(dma->sem_rxdma);
[0f49c0e]2356
[9cb7e5d]2357out:
2358        if (initialized)
2359                dma->rx_wait.waiting = 0;
[0f49c0e]2360        return ret;
2361}
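/* Example (illustrative sketch): block until at least one packet waits in
 * the RECV queue. Passing a huge ready_cnt makes the "ready" condition
 * always true, so op=0 (AND) leaves recv_cnt=1 as the only active condition.
 *
 *   // 0=condition met, 1=DMA stopped, 2=timeout, 3=another waiter, -1=error
 *   int rc = grspw_dma_rx_wait(dma, 1, 0, 0x7fffffff, 10); // 10 ticks
 */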
2362
2363int grspw_dma_config(void *c, struct grspw_dma_config *cfg)
2364{
2365        struct grspw_dma_priv *dma = c;
2366
2367        if (dma->started || !cfg)
2368                return -1;
2369
[77856f6]2370        if (cfg->flags & ~(DMAFLAG_MASK | DMAFLAG2_MASK))
[0f49c0e]2371                return -1;
2372
2373        /* Update Configuration */
2374        memcpy(&dma->cfg, cfg, sizeof(*cfg));
2375
2376        return 0;
2377}
2378
2379void grspw_dma_config_read(void *c, struct grspw_dma_config *cfg)
2380{
2381        struct grspw_dma_priv *dma = c;
2382
2383        /* Copy Current Configuration */
2384        memcpy(cfg, &dma->cfg, sizeof(*cfg));
2385}
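/* Example (illustrative sketch): reconfigure a stopped channel to generate
 * an RX interrupt for every 8th received packet and to allow packet spill:
 *
 *   struct grspw_dma_config cfg;
 *
 *   grspw_dma_config_read(dma, &cfg);
 *   cfg.flags &= ~DMAFLAG_NO_SPILL;
 *   cfg.rx_irq_en_cnt = 8;
 *   if (grspw_dma_config(dma, &cfg) != 0)
 *           ; // rejected: channel started or invalid flags
 */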
2386
2387void grspw_dma_stats_read(void *c, struct grspw_dma_stats *sts)
2388{
2389        struct grspw_dma_priv *dma = c;
2390
2391        memcpy(sts, &dma->stats, sizeof(dma->stats));
2392}
2393
2394void grspw_dma_stats_clr(void *c)
2395{
2396        struct grspw_dma_priv *dma = c;
2397
2398        /* Clear most of the statistics */
2399        memset(&dma->stats, 0, sizeof(dma->stats));
2400
2401        /* Init proper default values so that comparisons will work the
2402         * first time.
2403         */
2404        dma->stats.send_cnt_min = 0x3fffffff;
2405        dma->stats.tx_sched_cnt_min = 0x3fffffff;
2406        dma->stats.ready_cnt_min = 0x3fffffff;
2407        dma->stats.rx_sched_cnt_min = 0x3fffffff;
2408}
2409
2410int grspw_dma_start(void *c)
2411{
2412        struct grspw_dma_priv *dma = c;
2413        struct grspw_dma_regs *dregs = dma->regs;
2414        unsigned int ctrl;
[fec8288]2415        SPIN_IRQFLAGS(irqflags);
[0f49c0e]2416
2417        if (dma->started)
2418                return 0;
2419
2420        /* Initialize Software Structures:
2421         *  - Clear all Queues
2422         *  - init BD ring
2423         *  - init IRQ counter
2424         *  - clear statistics counters
2425         *  - init wait structures and semaphores
2426         */
2427        grspw_dma_reset(dma);
2428
2429        /* RX&RD and TX is not enabled until user fills SEND and READY Queue
2430         * with SpaceWire Packet buffers. So we do not have to worry about
2431         * IRQs for this channel just yet. However other DMA channels
2432         * may be active.
2433         *
2434         * Some functionality that is not changed during started mode is set up
2435         * once and for all here:
2436         *
2437         *   - RX MAX Packet length
2438         *   - TX Descriptor base address to first BD in TX ring (not enabled)
2439         *   - RX Descriptor base address to first BD in RX ring (not enabled)
2440         *   - IRQs (TX DMA, RX DMA, DMA ERROR)
2441         *   - Strip PID
2442         *   - Strip Address
2443         *   - No Spill
2444         *   - Receiver Enable
2445         *   - disable on link error (LE)
2446         *
2447         * Note that the address register and the address enable bit in DMACTRL
2448         * register must be left untouched, they are configured on a GRSPW
2449         * core level.
2450         *
2451         * Note that the receiver is enabled here, but since descriptors are
2452         * not enabled the GRSPW core may stop/pause RX (if NS bit set) until
2453         * descriptors are enabled or it may ignore RX packets (NS=0) until
2454         * descriptors are enabled (writing RD bit).
2455         */
2456        REG_WRITE(&dregs->txdesc, dma->tx_bds_hwa);
2457        REG_WRITE(&dregs->rxdesc, dma->rx_bds_hwa);
2458
2459        /* MAX Packet length */
2460        REG_WRITE(&dma->regs->rxmax, dma->cfg.rxmaxlen);
2461
2462        ctrl =  GRSPW_DMACTRL_AI | GRSPW_DMACTRL_PS | GRSPW_DMACTRL_PR |
2463                GRSPW_DMACTRL_TA | GRSPW_DMACTRL_RA | GRSPW_DMACTRL_RE |
2464                (dma->cfg.flags & DMAFLAG_MASK) << GRSPW_DMACTRL_NS_BIT;
[49cf776e]2465        if (dma->core->dis_link_on_err & LINKOPTS_DIS_ONERR)
[0f49c0e]2466                ctrl |= GRSPW_DMACTRL_LE;
[77856f6]2467        if (dma->cfg.rx_irq_en_cnt != 0 || dma->cfg.flags & DMAFLAG2_RXIE)
[0f49c0e]2468                ctrl |= GRSPW_DMACTRL_RI;
[77856f6]2469        if (dma->cfg.tx_irq_en_cnt != 0 || dma->cfg.flags & DMAFLAG2_TXIE)
[0f49c0e]2470                ctrl |= GRSPW_DMACTRL_TI;
[6ecad1d]2471        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
2472        ctrl |= REG_READ(&dma->regs->ctrl) & GRSPW_DMACTRL_EN;
[0f49c0e]2473        REG_WRITE(&dregs->ctrl, ctrl);
[6ecad1d]2474        SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
[0f49c0e]2475
2476        dma->started = 1; /* open up other DMA interfaces */
2477
2478        return 0;
2479}
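/* Typical bring-up ordering (illustrative sketch): configuration is only
 * accepted while the channel is stopped, and buffers are handed to the
 * driver after start:
 *
 *   grspw_dma_config(dma, &cfg);                  // while stopped
 *   grspw_dma_start(dma);
 *   grspw_dma_rx_prepare(dma, 0, &rxlst, nbufs);  // enable RX descriptors
 *   grspw_dma_tx_send(dma, 0, &txlst, npkts);     // enable TX descriptors
 */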
2480
2481STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma)
2482{
[fec8288]2483        SPIN_IRQFLAGS(irqflags);
[0f49c0e]2484
2485        if (dma->started == 0)
2486                return;
2487        dma->started = 0;
2488
[fec8288]2489        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
[0f49c0e]2490        grspw_hw_dma_stop(dma);
[fec8288]2491        SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
[0f49c0e]2492
2493        /* From here on no more packets will be sent; however,
2494         * there may still exist scheduled packets that have been
2495         * sent, and packets in the SEND Queue waiting for free
2496         * descriptors. All packets are moved to the SENT Queue
2497         * so that the user may get its buffers back; the user
2498         * must check the TXPKT_FLAG_TX flag in order to determine
2499         * whether a packet was sent or not.
2500         */
2501
2502        /* Retrieve all sent packets from the scheduled queue */
2503        grspw_tx_process_scheduled(dma);
2504
2505        /* Move un-sent packets in the SEND and SCHED queues to the
2506         * SENT Queue (they are never marked sent).
2507         */
2508        if (!grspw_list_is_empty(&dma->tx_sched)) {
2509                grspw_list_append_list(&dma->sent, &dma->tx_sched);
2510                grspw_list_clr(&dma->tx_sched);
2511                dma->sent_cnt += dma->tx_sched_cnt;
2512                dma->tx_sched_cnt = 0;
2513        }
2514        if (!grspw_list_is_empty(&dma->send)) {
2515                grspw_list_append_list(&dma->sent, &dma->send);
2516                grspw_list_clr(&dma->send);
2517                dma->sent_cnt += dma->send_cnt;
2518                dma->send_cnt = 0;
2519        }
2520
2521        /* Similar for RX */
2522        grspw_rx_process_scheduled(dma);
2523        if (!grspw_list_is_empty(&dma->rx_sched)) {
2524                grspw_list_append_list(&dma->recv, &dma->rx_sched);
2525                grspw_list_clr(&dma->rx_sched);
2526                dma->recv_cnt += dma->rx_sched_cnt;
2527                dma->rx_sched_cnt = 0;
2528        }
2529        if (!grspw_list_is_empty(&dma->ready)) {
2530                grspw_list_append_list(&dma->recv, &dma->ready);
2531                grspw_list_clr(&dma->ready);
2532                dma->recv_cnt += dma->ready_cnt;
2533                dma->ready_cnt = 0;
2534        }
2535
2536        /* Throw out blocked threads */
2537        rtems_semaphore_flush(dma->rx_wait.sem_wait);
2538        rtems_semaphore_flush(dma->tx_wait.sem_wait);
2539}
2540
2541void grspw_dma_stop(void *c)
2542{
2543        struct grspw_dma_priv *dma = c;
2544
[eb5a42f6]2545        /* If DMA channel is closed we should not access the semaphore */
2546        if (!dma->open)
2547                return;
2548
[0f49c0e]2549        /* Take DMA Channel lock */
[0d31dcc]2550        if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
[0f49c0e]2551            != RTEMS_SUCCESSFUL)
2552                return;
[0d31dcc]2553        if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2554            != RTEMS_SUCCESSFUL) {
2555                rtems_semaphore_release(dma->sem_rxdma);
2556                return;
2557        }
[0f49c0e]2558
2559        grspw_dma_stop_locked(dma);
2560
[0d31dcc]2561        rtems_semaphore_release(dma->sem_txdma);
2562        rtems_semaphore_release(dma->sem_rxdma);
[0f49c0e]2563}
2564
2565/* Do general work, invoked indirectly from ISR */
2566static void grspw_work_shutdown_func(struct grspw_priv *priv)
2567{
2568        int i;
2569
2570        /* Link is down for some reason, and the user has configured
[9cb7e5d]2571         * that we stop all (open) DMA channels and throw out all their
2572         * blocked threads.
[0f49c0e]2573         */
2574        for (i=0; i<priv->hwsup.ndma_chans; i++)
2575                grspw_dma_stop(&priv->dma[i]);
2576        grspw_hw_stop(priv);
2577}
2578
2579/* Do DMA work on one channel, invoked indirectly from ISR */
[ab9b447]2580static void grspw_work_dma_func(struct grspw_dma_priv *dma, unsigned int msg)
[0f49c0e]2581{
[72ec13ef]2582        int tx_cond_true, rx_cond_true, rxtx;
[0f49c0e]2583
[eb5a42f6]2584        /* If DMA channel is closed we should not access the semaphore */
2585        if (dma->open == 0)
2586                return;
2587
[0f49c0e]2588        dma->stats.irq_cnt++;
2589
2590        /* Look at why we were woken up and clear the source */
[72ec13ef]2591        rxtx = 0;
2592        if (msg & WORK_DMA_RX_MASK)
2593                rxtx |= 1;
2594        if (msg & WORK_DMA_TX_MASK)
2595                rxtx |= 2;
2596        switch (grspw_dma_enable_int(dma, rxtx, 0)) {
2597        case 1:
2598                /* DMA stopped */
[0d31dcc]2599                return;
[72ec13ef]2600        case 2:
[0f49c0e]2601                /* DMA error -> Stop DMA channel (both RX and TX) */
[ab9b447]2602                if (msg & WORK_DMA_ER_MASK) {
2603                        /* DMA error and user wants work-task to handle error */
2604                        grspw_dma_stop(dma);
2605                        grspw_work_event(WORKTASK_EV_DMA_STOP, msg);
2606                }
[72ec13ef]2607                return;
2608        default:
2609                break;
2610        }
[94fb377b]2611        if (msg == 0)
2612                return;
[72ec13ef]2613
2614        rx_cond_true = 0;
2615        tx_cond_true = 0;
2616
[94fb377b]2617        if ((dma->cfg.flags & DMAFLAG2_IRQD_MASK) == DMAFLAG2_IRQD_BOTH) {
2618                /* In case both interrupt sources are disabled simultaneously
2619                 * by the ISR the re-enabling of the interrupt source must also
2620                 * do so to avoid missing interrupts. Both RX and TX process
2621                 * will be forced.
2622                 */
2623                msg |= WORK_DMA_RX_MASK | WORK_DMA_TX_MASK;
2624        }
2625
[72ec13ef]2626        if (msg & WORK_DMA_RX_MASK) {
2627                /* Do RX Work */
2628
2629                /* Take DMA channel RX lock */
2630                if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2631                    != RTEMS_SUCCESSFUL)
2632                        return;
2633
2634                dma->stats.rx_work_cnt++;
2635                grspw_rx_process_scheduled(dma);
2636                if (dma->started) {
2637                        dma->stats.rx_work_enabled +=
2638                                grspw_rx_schedule_ready(dma);
2639                        /* Check to see if condition for waking blocked
2640                         * USER task is fulfilled.
2641                         */
2642                        if (dma->rx_wait.waiting)
2643                                rx_cond_true = grspw_rx_wait_eval(dma);
[0f49c0e]2644                }
[72ec13ef]2645                rtems_semaphore_release(dma->sem_rxdma);
2646        }
2647
2648        if (msg & WORK_DMA_TX_MASK) {
2649                /* Do TX Work */
2650
2651                /* Take DMA channel TX lock */
2652                if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2653                    != RTEMS_SUCCESSFUL)
2654                        return;
2655
2656                dma->stats.tx_work_cnt++;
2657                grspw_tx_process_scheduled(dma);
2658                if (dma->started) {
2659                        dma->stats.tx_work_enabled +=
2660                                grspw_tx_schedule_send(dma);
2661                        /* Check to see if condition for waking blocked
2662                         * USER task is fulfilled.
2663                         */
2664                        if (dma->tx_wait.waiting)
2665                                tx_cond_true = grspw_tx_wait_eval(dma);
[0f49c0e]2666                }
[72ec13ef]2667                rtems_semaphore_release(dma->sem_txdma);
2668        }
[0f49c0e]2669
2670        if (rx_cond_true)
2671                rtems_semaphore_release(dma->rx_wait.sem_wait);
2672
2673        if (tx_cond_true)
2674                rtems_semaphore_release(dma->tx_wait.sem_wait);
2675}
2676
2677/* Work task receiving work from the work message queue posted from
2678 * the ISR.
2679 */
[ab9b447]2680void grspw_work_func(rtems_id msgQ)
[0f49c0e]2681{
[ab9b447]2682        unsigned int message = 0, msg;
[0f49c0e]2683        size_t size;
2684        struct grspw_priv *priv;
2685        int i;
2686
[ab9b447]2687        /* Wait for ISR to schedule work */
2688        while (rtems_message_queue_receive(msgQ, &message, &size,
2689               RTEMS_WAIT, RTEMS_NO_TIMEOUT) == RTEMS_SUCCESSFUL) {
2690                if (message & WORK_QUIT_TASK)
[0f49c0e]2691                        break;
2692
2693                /* Handle work */
2694                priv = priv_tab[message >> WORK_CORE_BIT];
[ab9b447]2695                if (message & WORK_SHUTDOWN) {
[0f49c0e]2696                        grspw_work_shutdown_func(priv);
[ab9b447]2697
2698                        grspw_work_event(WORKTASK_EV_SHUTDOWN, message);
2699                } else if (message & WORK_DMA_MASK) {
2700                        for (i = 0; i < priv->hwsup.ndma_chans; i++) {
2701                                msg = message &
2702                                      (WORK_CORE_MASK | WORK_DMA_CHAN_MASK(i));
2703                                if (msg)
2704                                        grspw_work_dma_func(&priv->dma[i], msg);
[0f49c0e]2705                        }
2706                }
[ab9b447]2707                message = 0;
[0f49c0e]2708        }
[ab9b447]2709
2710        if (message & WORK_FREE_MSGQ)
2711                rtems_message_queue_delete(msgQ);
2712
2713        grspw_work_event(WORKTASK_EV_QUIT, message);
[f004b2b8]2714        rtems_task_exit();
[0f49c0e]2715}
2716
2717STATIC void grspw_isr(void *data)
2718{
2719        struct grspw_priv *priv = data;
[ab9b447]2720        unsigned int dma_stat, stat, stat_clrmsk, ctrl, icctrl, timecode, irqs;
[56fc7809]2721        unsigned int rxirq, rxack, intto;
[ab9b447]2722        int i, handled = 0, call_user_int_isr;
[94fb377b]2723        unsigned int message = WORK_NONE, dma_en;
[fec8288]2724        SPIN_ISR_IRQFLAGS(irqflags);
[0f49c0e]2725
2726        /* Get Status from Hardware */
2727        stat = REG_READ(&priv->regs->status);
[a7cc0da9]2728        stat_clrmsk = stat & (GRSPW_STS_TO | GRSPW_STAT_ERROR) &
2729                        (GRSPW_STS_TO | priv->stscfg);
[0f49c0e]2730
2731        /* Make sure to put the timecode handling first in order to get the
2732         * smallest possible interrupt latency
2733         */
2734        if ((stat & GRSPW_STS_TO) && (priv->tcisr != NULL)) {
[56fc7809]2735                ctrl = REG_READ(&priv->regs->ctrl);
2736                if (ctrl & GRSPW_CTRL_TQ) {
2737                        /* Timecode received. Let custom function handle this */
2738                        timecode = REG_READ(&priv->regs->time) &
2739                                        (GRSPW_TIME_CTRL | GRSPW_TIME_TCNT);
2740                        (priv->tcisr)(priv->tcisr_arg, timecode);
2741                }
2742        }
2743
2744        /* Get Interrupt status from hardware */
2745        icctrl = REG_READ(&priv->regs->icctrl);
2746        if ((icctrl & GRSPW_ICCTRL_IRQSRC_MASK) && (priv->icisr != NULL)) {
2747                call_user_int_isr = 0;
2748                rxirq = rxack = intto = 0;
2749
2750                if ((icctrl & GRSPW_ICCTRL_IQ) &&
2751                    (rxirq = REG_READ(&priv->regs->icrx)) != 0)
2752                        call_user_int_isr = 1;
2753
2754                if ((icctrl & GRSPW_ICCTRL_AQ) &&
2755                    (rxack = REG_READ(&priv->regs->icack)) != 0)
2756                        call_user_int_isr = 1;
2757
2758                if ((icctrl & GRSPW_ICCTRL_TQ) &&
2759                    (intto = REG_READ(&priv->regs->ictimeout)) != 0)
2760                        call_user_int_isr = 1;
2761
2762                /* Let custom functions handle this POTENTIAL SPW interrupt. The
2763                 * user function is called even if no such IRQ has happened!
2764                 * User must make sure to clear all interrupts that have been
2765                 * handled from the three registers by writing a one.
2766                 */
2767                if (call_user_int_isr)
2768                        priv->icisr(priv->icisr_arg, rxirq, rxack, intto);
[0f49c0e]2769        }
2770
2771        /* An Error occurred? */
2772        if (stat & GRSPW_STAT_ERROR) {
2773                /* Wake Global WorkQ */
2774                handled = 1;
2775
2776                if (stat & GRSPW_STS_EE)
2777                        priv->stats.err_eeop++;
2778
2779                if (stat & GRSPW_STS_IA)
2780                        priv->stats.err_addr++;
2781
2782                if (stat & GRSPW_STS_PE)
2783                        priv->stats.err_parity++;
2784
[ac7da5bc]2785                if (stat & GRSPW_STS_DE)
2786                        priv->stats.err_disconnect++;
2787
[0f49c0e]2788                if (stat & GRSPW_STS_ER)
2789                        priv->stats.err_escape++;
2790
2791                if (stat & GRSPW_STS_CE)
2792                        priv->stats.err_credit++;
2793
2794                if (stat & GRSPW_STS_WE)
2795                        priv->stats.err_wsync++;
2796
[090016a]2797                if (((priv->dis_link_on_err >> 16) & stat) &&
2798                    (REG_READ(&priv->regs->ctrl) & GRSPW_CTRL_IE)) {
[0f49c0e]2799                        /* Disable the link, no more transfers are expected
2800                         * on any DMA channel.
2801                         */
2802                        SPIN_LOCK(&priv->devlock, irqflags);
2803                        ctrl = REG_READ(&priv->regs->ctrl);
2804                        REG_WRITE(&priv->regs->ctrl, GRSPW_CTRL_LD |
2805                                (ctrl & ~(GRSPW_CTRL_IE|GRSPW_CTRL_LS)));
2806                        SPIN_UNLOCK(&priv->devlock, irqflags);
2807                        /* Signal to work-thread to stop DMA and clean up */
2808                        message = WORK_SHUTDOWN;
2809                }
2810        }
2811
2812        /* Clear Status Flags */
2813        if (stat_clrmsk) {
2814                handled = 1;
2815                REG_WRITE(&priv->regs->status, stat_clrmsk);
2816        }
2817
2818        /* Did a DMA transfer or error occur? In that case disable further
2819         * IRQs from the DMA channel, then invoke the workQ.
2820         *
2821         * Note that the GI interrupt flag may not be available on older
2822         * designs (it was added together with multiple DMA channel support).
2823         */
2824        SPIN_LOCK(&priv->devlock, irqflags);
2825        for (i=0; i<priv->hwsup.ndma_chans; i++) {
2826                dma_stat = REG_READ(&priv->regs->dma[i].ctrl);
2827                /* Check for errors and whether packets have been sent or
2828                 * received, if the respective IRQs are enabled
2829                 */
[ab9b447]2830                irqs = (((dma_stat << 3) & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS))
2831                        | GRSPW_DMA_STATUS_ERROR) & dma_stat;
2832                if (!irqs)
2833                        continue;
2834
2835                handled = 1;
2836
2837                /* A DMA error has priority; if an error happens it is assumed
2838                 * that the common work queue stops the DMA operation for that
2839                 * channel and makes the DMA tasks exit from their waiting
2840                 * functions (both RX and TX tasks).
[94fb377b]2841                 *
2842                 * Disable further IRQs (until enabled again)
2843                 * from this DMA channel. Let the status
2844                 * bits remain so that they can be handled by
2845                 * the work function.
[ab9b447]2846                 */
2847                if (irqs & GRSPW_DMA_STATUS_ERROR) {
[94fb377b]2848                        REG_WRITE(&priv->regs->dma[i].ctrl, dma_stat &
2849                                ~(GRSPW_DMACTRL_RI | GRSPW_DMACTRL_TI |
2850                                  GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS |
2851                                  GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA |
2852                                  GRSPW_DMACTRL_AT));
[ab9b447]2853                        message |= WORK_DMA_ER(i);
2854                } else {
[94fb377b]2855                        /* determine if RX/TX interrupt source(s) shall remain
2856                         * enabled.
2857                         */
2858                        if (priv->dma[i].cfg.flags & DMAFLAG2_IRQD_SRC) {
2859                                dma_en = ~irqs >> 3;
2860                        } else {
2861                                dma_en = priv->dma[i].cfg.flags >>
2862                                 (DMAFLAG2_IRQD_BIT - GRSPW_DMACTRL_TI_BIT);
2863                        }
2864                        dma_en &= (GRSPW_DMACTRL_RI | GRSPW_DMACTRL_TI);
2865                        REG_WRITE(&priv->regs->dma[i].ctrl, dma_stat &
2866                                (~(GRSPW_DMACTRL_RI | GRSPW_DMACTRL_TI |
2867                                   GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS |
2868                                   GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA |
2869                                   GRSPW_DMACTRL_AT) | dma_en));
[ab9b447]2870                        message |= WORK_DMA(i, irqs >> GRSPW_DMACTRL_PS_BIT);
[0f49c0e]2871                }
2872        }
2873        SPIN_UNLOCK(&priv->devlock, irqflags);
2874
2875        if (handled != 0)
2876                priv->stats.irq_cnt++;
2877
2878        /* Schedule work by sending message to work thread */
[ab9b447]2879        if (message != WORK_NONE && priv->wc.msgisr) {
2880                int status;
[0f49c0e]2881                message |= WORK_CORE(priv->index);
[ab9b447]2882                /* The function interface is compatible with msgQSend() on
2883                 * purpose, but the user can also assign a custom function that
2884                 * handles DMA RX/TX operations as indicated by "message" and
2885                 * clears the handled bits before it is given to msgQSend().
2886                 */
2887                status = priv->wc.msgisr(priv->wc.msgisr_arg, &message, 4);
2888                if (status != RTEMS_SUCCESSFUL) {
[0f49c0e]2889                        printk("grspw_isr(%d): message fail %d (0x%x)\n",
[ab9b447]2890                                priv->index, status, message);
2891                }
[0f49c0e]2892        }
2893}
2894
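/* Illustrative sketches (hypothetical user code, not part of the driver)
 * of the callbacks invoked from grspw_isr() above. The names my_tcisr and
 * my_icisr are assumptions; the handlers are installed through the
 * driver's time-code/interrupt-code ISR registration API declared in
 * grspw_pkt.h. Both run in interrupt context and must be short and
 * non-blocking.
 */
#if 0
/* Time-code handler, called with the TIME register CTRL+TCNT bits */
static void my_tcisr(void *arg, int timecode)
{
        /* react to the received time-code here (interrupt context!) */
}

/* Interrupt-code handler, called with the (potentially pending) ICRX,
 * ICACK and ICTIMEOUT sources. All serviced interrupts must be cleared
 * by writing a one to the respective bits, as noted in grspw_isr().
 */
static void my_icisr(void *arg, unsigned int rxirq, unsigned int rxack,
                     unsigned int intto)
{
        /* handle, then acknowledge the serviced bits (interrupt context!) */
}
#endif
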
2895STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma)
2896{
2897        unsigned int ctrl;
2898        struct grspw_dma_regs *dregs = dma->regs;
2899
2900        ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN |
2901               GRSPW_DMACTRL_SP | GRSPW_DMACTRL_SA | GRSPW_DMACTRL_NS);
2902        ctrl |= GRSPW_DMACTRL_AT;
2903        REG_WRITE(&dregs->ctrl, ctrl);
2904}
2905
2906STATIC void grspw_hw_dma_softreset(struct grspw_dma_priv *dma)
2907{
2908        unsigned int ctrl;
2909        struct grspw_dma_regs *dregs = dma->regs;
2910
2911        ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN);
2912        REG_WRITE(&dregs->ctrl, ctrl);
2913
2914        REG_WRITE(&dregs->rxmax, DEFAULT_RXMAX);
2915        REG_WRITE(&dregs->txdesc, 0);
2916        REG_WRITE(&dregs->rxdesc, 0);
2917}
2918
2919/* Hardware Action:
2920 *  - stop DMA
2921 *  - do not bring down the link (RMAP may be active)
2922 *  - RMAP settings untouched (RMAP may be active)
2923 *  - port select untouched (RMAP may be active)
2924 *  - timecodes are disabled
2925 *  - IRQ generation disabled
2926 *  - status not cleared (let the user analyze it later if requested)
2927 *  - Node address / first DMA channel's node address
2928 *    is untouched (RMAP may be active)
2929 */
2930STATIC void grspw_hw_stop(struct grspw_priv *priv)
2931{
2932        int i;
2933        unsigned int ctrl;
[fec8288]2934        SPIN_IRQFLAGS(irqflags);
[0f49c0e]2935
2936        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
2937
2938        for (i=0; i<priv->hwsup.ndma_chans; i++)
2939                grspw_hw_dma_stop(&priv->dma[i]);
2940
2941        ctrl = REG_READ(&priv->regs->ctrl);
2942        REG_WRITE(&priv->regs->ctrl, ctrl & (
2943                GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS |
2944                GRSPW_CTRL_RE | GRSPW_CTRL_RD |
2945                GRSPW_CTRL_NP | GRSPW_CTRL_PS));
2946
2947        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
2948}
2949
2950/* Soft reset of GRSPW core registers */
2951STATIC void grspw_hw_softreset(struct grspw_priv *priv)
2952{
2953        int i;
[56fc7809]2954        unsigned int tmp;
[0f49c0e]2955
2956        for (i=0; i<priv->hwsup.ndma_chans; i++)
2957                grspw_hw_dma_softreset(&priv->dma[i]);
2958
2959        REG_WRITE(&priv->regs->status, 0xffffffff);
2960        REG_WRITE(&priv->regs->time, 0);
[56fc7809]2961        /* Clear all ICCTRL fields except the valuable reset values */
2962        tmp = REG_READ(&priv->regs->icctrl);
2963        tmp &= GRSPW_ICCTRL_INUM | GRSPW_ICCTRL_BIRQ | GRSPW_ICCTRL_TXIRQ;
2964        tmp |= GRSPW_ICCTRL_ID;
2965        REG_WRITE(&priv->regs->icctrl, tmp);
2966        REG_WRITE(&priv->regs->icrx, 0xffffffff);
2967        REG_WRITE(&priv->regs->icack, 0xffffffff);
2968        REG_WRITE(&priv->regs->ictimeout, 0xffffffff);
[0f49c0e]2969}
2970
2971int grspw_dev_count(void)
2972{
2973        return grspw_count;
2974}
2975
2976void grspw_initialize_user(void *(*devfound)(int), void (*devremove)(int,void*))
2977{
2978        int i;
2979        struct grspw_priv *priv;
2980
2981        /* Set new Device Found Handler */
2982        grspw_dev_add = devfound;
2983        grspw_dev_del = devremove;
2984
2985        if (grspw_initialized == 1 && grspw_dev_add) {
2986                /* Call callback for every previously found device */
2987                for (i=0; i<grspw_count; i++) {
2988                        priv = priv_tab[i];
2989                        if (priv)
2990                                priv->data = grspw_dev_add(i);
2991                }
2992        }
2993}
2994
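/* A minimal usage sketch (hypothetical user code, not part of the driver):
 * register callbacks so the user layer is notified of every GRSPW device.
 * The pointer returned by the devfound callback is stored as per-device
 * user data and handed back in the devremove callback.
 */
#if 0
static void *my_dev_found(int index)
{
        printf("grspw%d found\n", index);
        return NULL; /* per-device user data */
}

static void my_dev_removed(int index, void *data)
{
        printf("grspw%d removed\n", index);
}

static void my_init(void)
{
        grspw_initialize_user(my_dev_found, my_dev_removed);
}
#endif
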
[1b559e3]2995/* Get the number of clock cycles corresponding to at least 6.4 us */
2996static unsigned int grspw1_calc_timer64(int freq_khz)
2997{
2998        unsigned int timer64 = (freq_khz * 64 + 9999) / 10000;
2999        return timer64 & 0xfff;
3000}
3001
3002/* Get the number of clock cycles corresponding to at least 850 ns, minus 3 */
3003static unsigned int grspw1_calc_discon(int freq_khz)
3004{
3005        unsigned int discon = ((freq_khz * 85 + 99999) / 100000) - 3;
3006        return discon & 0x3ff;
3007}
3008
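/* Worked example: with a 40 MHz APB clock (freq_khz = 40000) the formulas
 * above give timer64 = (40000 * 64 + 9999) / 10000 = 256 clock cycles,
 * i.e. exactly 6.4 us, and discon = (40000 * 85 + 99999) / 100000 - 3 =
 * 34 - 3 = 31, i.e. an 850 ns disconnect timeout expressed as "cycles - 3".
 */
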
[0f49c0e]3009/******************* Driver manager interface ***********************/
3010
3011/* Driver prototypes */
3012static int grspw_common_init(void);
3013static int grspw2_init3(struct drvmgr_dev *dev);
3014
3015static struct drvmgr_drv_ops grspw2_ops =
3016{
3017        .init = {NULL,  NULL, grspw2_init3, NULL},
3018        .remove = NULL,
3019        .info = NULL
3020};
3021
3022static struct amba_dev_id grspw2_ids[] =
3023{
3024        {VENDOR_GAISLER, GAISLER_SPW}, /* not yet supported */
3025        {VENDOR_GAISLER, GAISLER_SPW2},
3026        {VENDOR_GAISLER, GAISLER_SPW2_DMA},
3027        {0, 0}          /* Mark end of table */
3028};
3029
3030static struct amba_drv_info grspw2_drv_info =
3031{
3032        {
3033                DRVMGR_OBJ_DRV,                 /* Driver */
3034                NULL,                           /* Next driver */
3035                NULL,                           /* Device list */
3036                DRIVER_AMBAPP_GAISLER_GRSPW2_ID,/* Driver ID */
3037                "GRSPW_PKT_DRV",                /* Driver Name */
3038                DRVMGR_BUS_TYPE_AMBAPP,         /* Bus Type */
3039                &grspw2_ops,
3040                NULL,                           /* Funcs */
3041                0,                              /* No devices yet */
3042                sizeof(struct grspw_priv),      /* Let DrvMgr alloc priv */
3043        },
3044        &grspw2_ids[0]
3045};
3046
3047void grspw2_register_drv (void)
3048{
3049        GRSPW_DBG("Registering GRSPW2 packet driver\n");
3050        drvmgr_drv_register(&grspw2_drv_info.general);
3051}
3052
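/* Usage sketch (hypothetical): when the driver is not pulled in through
 * the BSP's static driver manager configuration, it can be registered
 * manually before the driver manager initializes the AMBA bus.
 */
#if 0
void my_register_drivers(void)
{
        grspw2_register_drv();
}
#endif
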
3053static int grspw2_init3(struct drvmgr_dev *dev)
3054{
3055        struct grspw_priv *priv;
3056        struct amba_dev_info *ambadev;
3057        struct ambapp_core *pnpinfo;
[11f3b9a]3058        int i;
[56fc7809]3059        unsigned int ctrl, icctrl, numi;
[0f49c0e]3060        union drvmgr_key_value *value;
3061
3062        GRSPW_DBG("GRSPW[%d] on bus %s\n", dev->minor_drv,
3063                dev->parent->dev->name);
3064
[49caf22]3065        if (grspw_count >= GRSPW_MAX)
[0f49c0e]3066                return DRVMGR_ENORES;
3067
3068        priv = dev->priv;
3069        if (priv == NULL)
3070                return DRVMGR_NOMEM;
3071        priv->dev = dev;
3072
3073        /* If this is the first device, initialize the common part of the driver */
3074        if (grspw_common_init())
3075                return DRVMGR_FAIL;
3076
3077        /*** Now we take care of device initialization ***/
3078
3079        /* Get device information from AMBA PnP information */
3080        ambadev = (struct amba_dev_info *)dev->businfo;
3081        if (ambadev == NULL)
3082                return -1;
3083        pnpinfo = &ambadev->info;
3084        priv->irq = pnpinfo->irq;
3085        priv->regs = (struct grspw_regs *)pnpinfo->apb_slv->start;
3086
3087        /* Read Hardware Support from Control Register */
3088        ctrl = REG_READ(&priv->regs->ctrl);
3089        priv->hwsup.rmap = (ctrl & GRSPW_CTRL_RA) >> GRSPW_CTRL_RA_BIT;
3090        priv->hwsup.rmap_crc = (ctrl & GRSPW_CTRL_RC) >> GRSPW_CTRL_RC_BIT;
[c2011b47]3091        priv->hwsup.ccsds_crc = (ctrl & GRSPW_CTRL_CC) >> GRSPW_CTRL_CC_BIT;
[0f49c0e]3092        priv->hwsup.rx_unalign = (ctrl & GRSPW_CTRL_RX) >> GRSPW_CTRL_RX_BIT;
3093        priv->hwsup.nports = 1 + ((ctrl & GRSPW_CTRL_PO) >> GRSPW_CTRL_PO_BIT);
3094        priv->hwsup.ndma_chans = 1 + ((ctrl & GRSPW_CTRL_NCH) >> GRSPW_CTRL_NCH_BIT);
[56fc7809]3095        priv->hwsup.irq = ((ctrl & GRSPW_CTRL_ID) >> GRSPW_CTRL_ID_BIT);
3096        icctrl = REG_READ(&priv->regs->icctrl);
3097        numi = (icctrl & GRSPW_ICCTRL_NUMI) >> GRSPW_ICCTRL_NUMI_BIT;
3098        if (numi > 0)
3099                priv->hwsup.irq_num = 1 << (numi - 1);
3100        else
3101                priv->hwsup.irq_num = 0;
[0f49c0e]3102
3103        /* Construct hardware version identification */
3104        priv->hwsup.hw_version = pnpinfo->device << 16 | pnpinfo->apb_slv->ver;
3105
3106        if ((pnpinfo->device == GAISLER_SPW2) ||
3107            (pnpinfo->device == GAISLER_SPW2_DMA)) {
3108                priv->hwsup.strip_adr = 1; /* All GRSPW2 can strip Address */
3109                priv->hwsup.strip_pid = 1; /* All GRSPW2 can strip PID */
3110        } else {
[1b559e3]3111                unsigned int apb_hz, apb_khz;
3112
[0f49c0e]3113                /* Autodetect GRSPW1 features? */
3114                priv->hwsup.strip_adr = 0;
3115                priv->hwsup.strip_pid = 0;
[1b559e3]3116
3117                drvmgr_freq_get(dev, DEV_APB_SLV, &apb_hz);
3118                apb_khz = apb_hz / 1000;
3119
3120                REG_WRITE(&priv->regs->timer,
3121                        ((grspw1_calc_discon(apb_khz) & 0x3FF) << 12) |
3122                        (grspw1_calc_timer64(apb_khz) & 0xFFF));
[0f49c0e]3123        }
3124
[56fc7809]3125        /* Probe the width of the SpaceWire interrupt ISR timers. All have
3126         * the same width, so only the first is probed; if there is no timer
3127         * the result will be zero.
3128         */
3129        REG_WRITE(&priv->regs->icrlpresc, 0x7fffffff);
3130        ctrl = REG_READ(&priv->regs->icrlpresc);
3131        REG_WRITE(&priv->regs->icrlpresc, 0);
3132        priv->hwsup.itmr_width = 0;
3133        while (ctrl & 1) {
3134                priv->hwsup.itmr_width++;
3135                ctrl = ctrl >> 1;
3136        }
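        /* For example, a 16-bit wide prescaler reads back 0x0000ffff after
         * the all-ones write above, which yields itmr_width = 16.
         */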
3137
[0f49c0e]3138        /* Let user limit the number of DMA channels on this core to save
3139         * space. Only the first nDMA channels will be available.
3140         */
[4d3e70f4]3141        value = drvmgr_dev_key_get(priv->dev, "nDMA", DRVMGR_KT_INT);
[0f49c0e]3142        if (value && (value->i < priv->hwsup.ndma_chans))
3143                priv->hwsup.ndma_chans = value->i;
3144
3145        /* Allocate and init Memory for all DMA channels */
[11f3b9a]3146        priv->dma = grlib_calloc(priv->hwsup.ndma_chans, sizeof(*priv->dma));
[0f49c0e]3147        if (priv->dma == NULL)
3148                return DRVMGR_NOMEM;
3149        for (i=0; i<priv->hwsup.ndma_chans; i++) {
3150                priv->dma[i].core = priv;
3151                priv->dma[i].index = i;
3152                priv->dma[i].regs = &priv->regs->dma[i];
3153        }
3154
3155        /* Startup Action:
3156         *  - stop DMA
3157         *  - do not bring down the link (RMAP may be active)
3158         *  - RMAP settings untouched (RMAP may be active)
3159         *  - port select untouched (RMAP may be active)
3160         *  - timecodes are disabled
3161         *  - IRQ generation disabled
3162         *  - status cleared
3163         *  - Node address / first DMA channel's node address
3164         *    is untouched (RMAP may be active)
3165         */
3166        grspw_hw_stop(priv);
3167        grspw_hw_softreset(priv);
3168
3169        /* Register the device in the device table */
3170        priv->index = grspw_count;
3171        priv_tab[priv->index] = priv;
3172        grspw_count++;
3173
3174        /* Device name */
3175        sprintf(priv->devname, "grspw%d", priv->index);
3176
3177        /* Tell above layer about new device */
3178        if (grspw_dev_add)
3179                priv->data = grspw_dev_add(priv->index);
3180
3181        return DRVMGR_OK;
3182}
3183
3184/******************* Driver Implementation ***********************/
[ab9b447]3185/* Creates a MsgQ (optional) and spawns a worker task associated with the
3186 * message Q. The task can also be associated with a custom MsgQ if *pMsgQ
3187 * is non-zero.
3188 */
3189rtems_id grspw_work_spawn(int prio, int stack, rtems_id *pMsgQ, int msgMax)
3190{
3191        rtems_id tid;
3192        int created_msgq = 0;
[8acfa94]3193        static char work_name = 'A';
[ab9b447]3194
3195        if (pMsgQ == NULL)
3196                return OBJECTS_ID_NONE;
3197
3198        if (*pMsgQ == OBJECTS_ID_NONE) {
3199                if (msgMax <= 0)
3200                        msgMax = 32;
3201
3202                if (rtems_message_queue_create(
[8acfa94]3203                        rtems_build_name('S', 'G', 'Q', work_name),
[ab9b447]3204                        msgMax, 4, RTEMS_FIFO, pMsgQ) !=
3205                        RTEMS_SUCCESSFUL)
3206                        return OBJECTS_ID_NONE;
3207                created_msgq = 1;
3208        }
3209
3210        if (prio < 0)
3211                prio = grspw_work_task_priority; /* default prio */
3212        if (stack < 0x800)
3213                stack = RTEMS_MINIMUM_STACK_SIZE; /* default stack size */
3214
[8acfa94]3215        if (rtems_task_create(rtems_build_name('S', 'G', 'T', work_name),
[ab9b447]3216                prio, stack, RTEMS_PREEMPT | RTEMS_NO_ASR,
3217                RTEMS_NO_FLOATING_POINT, &tid) != RTEMS_SUCCESSFUL)
3218                tid = OBJECTS_ID_NONE;
3219        else if (rtems_task_start(tid, (rtems_task_entry)grspw_work_func, *pMsgQ) !=
3220                    RTEMS_SUCCESSFUL) {
3221                rtems_task_delete(tid);
3222                tid = OBJECTS_ID_NONE;
3223        }
3224
3225        if (tid == OBJECTS_ID_NONE && created_msgq) {
3226                rtems_message_queue_delete(*pMsgQ);
3227                *pMsgQ = OBJECTS_ID_NONE;
[8acfa94]3228        } else {
3229                if (++work_name > 'Z')
3230                        work_name = 'A';
[ab9b447]3231        }
3232        return tid;
3233}
3234
3235/* Free task associated with message queue and optionally also the message
3236 * queue itself. The message queue is deleted by the work task, and deletion
3237 * is therefore delayed until the work task resumes its execution.
3238 */
3239rtems_status_code grspw_work_free(rtems_id msgQ, int freeMsgQ)
3240{
3241        int msg = WORK_QUIT_TASK;
3242        if (freeMsgQ)
3243                msg |= WORK_FREE_MSGQ;
3244        return rtems_message_queue_send(msgQ, &msg, 4);
3245}
3246
3247void grspw_work_cfg(void *d, struct grspw_work_config *wc)
3248{
3249        struct grspw_priv *priv = (struct grspw_priv *)d;
3250
3251        if (wc == NULL)
3252                wc = &grspw_wc_def; /* use default config */
3253        priv->wc = *wc;
3254}
[0f49c0e]3255
[fec8288]3256#ifdef RTEMS_SMP
3257int grspw_isr_affinity(void *d, const cpu_set_t *cpus)
3258{
3259        return -1; /* The BSP supports only statically configured IRQ affinity */
3260}
3261#endif
3262
[0f49c0e]3263static int grspw_common_init(void)
3264{
3265        if (grspw_initialized == 1)
3266                return 0;
3267        if (grspw_initialized == -1)
3268                return -1;
3269        grspw_initialized = -1;
3270
3271        /* Device Semaphore created with count = 1 */
3272        if (rtems_semaphore_create(rtems_build_name('S', 'G', 'L', 'S'), 1,
3273            RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
3274            RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
3275            RTEMS_NO_PRIORITY_CEILING, 0, &grspw_sem) != RTEMS_SUCCESSFUL)
3276                return -1;
3277
3278        /* Work queue, work thread. Not created if the user disables it.
3279         * The user can disable it to save resources when interrupts are not used.
3280         */
3281        if (grspw_work_task_priority != -1) {
[ab9b447]3282                grspw_work_task = grspw_work_spawn(-1, 0,
3283                        (rtems_id *)&grspw_wc_def.msgisr_arg, 0);
3284                if (grspw_work_task == OBJECTS_ID_NONE)
3285                        return -2;
3286                grspw_wc_def.msgisr =
3287                        (grspw_msgqisr_t) rtems_message_queue_send;
3288        } else {
3289                grspw_wc_def.msgisr = NULL;
3290                grspw_wc_def.msgisr_arg = NULL;
3291        }
[0f49c0e]3292
3293        grspw_initialized = 1;
3294        return 0;
3295}