source: rtems/c/src/lib/libbsp/sparc/shared/tmtc/grtm.c @ c92f402c

Last change on this file was c92f402c, checked in by Daniel Hellstrom <daniel@…>, on 05/09/17 at 11:13:44

leon, grtm: SMP support by using spin-locks

1/* GRTM CCSDS Telemetry Encoder driver
2 *
3 * COPYRIGHT (c) 2007.
4 * Cobham Gaisler AB.
5 *
6 * The license and distribution terms for this file may be
7 * found in the file LICENSE in this distribution or at
8 * http://www.rtems.org/license/LICENSE.
9 */
10
11#include <bsp.h>
12#include <rtems/libio.h>
13#include <stdlib.h>
14#include <stdio.h>
15#include <string.h>
16#include <assert.h>
17#include <ctype.h>
18#include <malloc.h>
19#include <rtems/bspIo.h>
20
21#include <drvmgr/drvmgr.h>
22#include <ambapp.h>
23#include <drvmgr/ambapp_bus.h>
24#include <bsp/grtm.h>
25
26/* map via rtems_interrupt_lock_* API: */
27#define SPIN_DECLARE(lock) RTEMS_INTERRUPT_LOCK_MEMBER(lock)
28#define SPIN_INIT(lock, name) rtems_interrupt_lock_initialize(lock, name)
29#define SPIN_LOCK(lock, level) rtems_interrupt_lock_acquire_isr(lock, &level)
30#define SPIN_LOCK_IRQ(lock, level) rtems_interrupt_lock_acquire(lock, &level)
31#define SPIN_UNLOCK(lock, level) rtems_interrupt_lock_release_isr(lock, &level)
32#define SPIN_UNLOCK_IRQ(lock, level) rtems_interrupt_lock_release(lock, &level)
33#define SPIN_IRQFLAGS(k) rtems_interrupt_lock_context k
34#define SPIN_ISR_IRQFLAGS(k) SPIN_IRQFLAGS(k)
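
/*
 * Illustrative sketch (not part of the driver): how the SPIN_* wrappers
 * above are typically used. A lock member is declared in a per-device
 * structure, initialized once, taken with the *_IRQ variants from task
 * context and with the plain variants from ISR context. The names below
 * (struct example_priv, example_*) are made up for the example.
 */
struct example_priv {
	SPIN_DECLARE(lock);			/* spin-lock member */
	volatile int busy;
};

static void example_init(struct example_priv *p)
{
	SPIN_INIT(&p->lock, "example");		/* one-time initialization */
}

static void example_task_side(struct example_priv *p)
{
	SPIN_IRQFLAGS(irqflags);

	SPIN_LOCK_IRQ(&p->lock, irqflags);	/* mask IRQs and take the lock */
	p->busy = 1;
	SPIN_UNLOCK_IRQ(&p->lock, irqflags);
}

static void example_isr_side(struct example_priv *p)
{
	SPIN_ISR_IRQFLAGS(irqflags);

	SPIN_LOCK(&p->lock, irqflags);		/* ISR context: IRQs already masked */
	p->busy = 0;
	SPIN_UNLOCK(&p->lock, irqflags);
}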
35
36/*
37#define DEBUG
38#define DEBUGFUNCS
39*/
40
41#include <bsp/debug_defs.h>
42
43/* GRTM register map */
44struct grtm_regs {
45        volatile unsigned int   dma_ctrl;       /* DMA Control Register (0x00) */
46        volatile unsigned int   dma_status;     /* DMA Status Register (0x04) */
47        volatile unsigned int   dma_len;        /* DMA Length Register (0x08) */       
48        volatile unsigned int   dma_bd;         /* DMA Descriptor Pointer Register (0x0c) */
49
50        volatile unsigned int   dma_cfg;        /* DMA Configuration Register (0x10) */
51        volatile unsigned int   revision;       /* GRTM Revision Register (0x14) */
52
53        int unused0[(0x80-0x18)/4];
54
55        volatile unsigned int   ctrl;           /* TM Control Register (0x80) */
56        volatile unsigned int   status;         /* TM Status Register (0x84) */
57        volatile unsigned int   cfg;            /* TM Configuration Register (0x88) */
58        volatile unsigned int   size;           /* TM Size Register (0x8c) */
59
60        volatile unsigned int   phy;            /* TM Physical Layer Register (0x90) */
61        volatile unsigned int   code;           /* TM Coding Sub-Layer Register (0x94) */
62        volatile unsigned int   asmr;           /* TM Attached Synchronization Marker Register (0x98) */
63
64        int unused1;
65
66        volatile unsigned int   all_frm;        /* TM All Frames Generation Register (0xa0) */
67        volatile unsigned int   mst_frm;        /* TM Master Channel Frame Generation Register (0xa4) */
68        volatile unsigned int   idle_frm;       /* TM Idle Frame Generation Register (0xa8) */
69
70        int unused2[(0xc0-0xac)/4];
71
72        volatile unsigned int   fsh[4];         /* TM FSH/Insert Zone Registers (0xc0..0xcc) */
73
74        volatile unsigned int   ocf;            /* TM Operational Control Field Register (0xd0) */
75};
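
/*
 * Illustrative check (not part of the driver): the unusedN padding arrays
 * above are sized so that the registers land at the byte offsets given in
 * the comments. A sketch verifying a few of them with offsetof(); the
 * function name is made up for the example.
 */
#include <stddef.h>	/* offsetof(), for this illustrative check only */

static void grtm_example_check_regmap(void)
{
	assert(offsetof(struct grtm_regs, ctrl) == 0x80);
	assert(offsetof(struct grtm_regs, all_frm) == 0xa0);
	assert(offsetof(struct grtm_regs, fsh) == 0xc0);
	assert(offsetof(struct grtm_regs, ocf) == 0xd0);
}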
76
77/* DMA Control Register (0x00) */
78#define GRTM_DMA_CTRL_EN_BIT    0
79#define GRTM_DMA_CTRL_IE_BIT    1
80#define GRTM_DMA_CTRL_TXRST_BIT 2
81#define GRTM_DMA_CTRL_RST_BIT   3
82#define GRTM_DMA_CTRL_TFIE_BIT  4
83
84#define GRTM_DMA_CTRL_EN        (1<<GRTM_DMA_CTRL_EN_BIT)
85#define GRTM_DMA_CTRL_IE        (1<<GRTM_DMA_CTRL_IE_BIT)
86#define GRTM_DMA_CTRL_TXRST     (1<<GRTM_DMA_CTRL_TXRST_BIT)
87#define GRTM_DMA_CTRL_RST       (1<<GRTM_DMA_CTRL_RST_BIT)
88#define GRTM_DMA_CTRL_TFIE      (1<<GRTM_DMA_CTRL_TFIE_BIT)
89
90/* DMA Status Register (0x04) */
91#define GRTM_DMA_STS_TE_BIT     0
92#define GRTM_DMA_STS_TI_BIT     1
93#define GRTM_DMA_STS_TA_BIT     2
94#define GRTM_DMA_STS_TFF_BIT    3
95#define GRTM_DMA_STS_TFS_BIT    4
96
97#define GRTM_DMA_STS_TE         (1<<GRTM_DMA_STS_TE_BIT)
98#define GRTM_DMA_STS_TI         (1<<GRTM_DMA_STS_TI_BIT)
99#define GRTM_DMA_STS_TA         (1<<GRTM_DMA_STS_TA_BIT)
100#define GRTM_DMA_STS_TFF        (1<<GRTM_DMA_STS_TFF_BIT)
101#define GRTM_DMA_STS_TFS        (1<<GRTM_DMA_STS_TFS_BIT)
102#define GRTM_DMA_STS_ALL        0x1f
103
104/* DMA Length Register (0x08) */
105#define GRTM_DMA_LEN_LEN_BIT    0
106#define GRTM_DMA_LEN_LIM_BIT    16
107
108#define GRTM_DMA_LEN_LEN        (0x7ff<<GRTM_DMA_LEN_LEN_BIT)
109#define GRTM_DMA_LEN_LIM        (0x3ff<<GRTM_DMA_LEN_LIM_BIT)
110
111/* DMA Descriptor Pointer Register (0x0c) */
112#define GRTM_DMA_BD_INDEX_BIT   0
113#define GRTM_DMA_BD_BASE_BIT    10
114
115#define GRTM_DMA_BD_INDEX       (0x3ff<<GRTM_DMA_BD_INDEX_BIT)
116#define GRTM_DMA_BD_BASE        (0xfffffc<<GRTM_DMA_BD_BASE_BIT)
117
118/* DMA Configuration Register (0x10) */
119#define GRTM_DMA_CFG_BLKSZ_BIT  0
120#define GRTM_DMA_CFG_FIFOSZ_BIT 16
121
122#define GRTM_DMA_CFG_BLKSZ      (0xffff<<GRTM_DMA_CFG_BLKSZ_BIT)
123#define GRTM_DMA_CFG_FIFOSZ     (0xffff<<GRTM_DMA_CFG_FIFOSZ_BIT)
124
125/* TM Control Register (0x80) */
126#define GRTM_CTRL_EN_BIT        0
127
128#define GRTM_CTRL_EN            (1<<GRTM_CTRL_EN_BIT)
129
130/* TM Status Register (0x84) - Unused */
131
132/* TM Configuration Register (0x88) */
133#define GRTM_CFG_SC_BIT         0
134#define GRTM_CFG_SP_BIT         1
135#define GRTM_CFG_CE_BIT         2
136#define GRTM_CFG_NRZ_BIT        3
137#define GRTM_CFG_PSR_BIT        4
138#define GRTM_CFG_TE_BIT         5
139#define GRTM_CFG_RSDEP_BIT      6
140#define GRTM_CFG_RS_BIT         9
141#define GRTM_CFG_AASM_BIT       11
142#define GRTM_CFG_FECF_BIT       12
143#define GRTM_CFG_OCF_BIT        13
144#define GRTM_CFG_EVC_BIT        14
145#define GRTM_CFG_IDLE_BIT       15
146#define GRTM_CFG_FSH_BIT        16
147#define GRTM_CFG_MCG_BIT        17
148#define GRTM_CFG_IZ_BIT         18
149#define GRTM_CFG_FHEC_BIT       19
150#define GRTM_CFG_AOS_BIT        20
151#define GRTM_CFG_CIF_BIT        21
152#define GRTM_CFG_OCFB_BIT       22
153
154#define GRTM_CFG_SC             (1<<GRTM_CFG_SC_BIT)
155#define GRTM_CFG_SP             (1<<GRTM_CFG_SP_BIT)
156#define GRTM_CFG_CE             (1<<GRTM_CFG_CE_BIT)
157#define GRTM_CFG_NRZ            (1<<GRTM_CFG_NRZ_BIT)
158#define GRTM_CFG_PSR            (1<<GRTM_CFG_PSR_BIT)
159#define GRTM_CFG_TE             (1<<GRTM_CFG_TE_BIT)
160#define GRTM_CFG_RSDEP          (0x7<<GRTM_CFG_RSDEP_BIT)
161#define GRTM_CFG_RS             (0x3<<GRTM_CFG_RS_BIT)
162#define GRTM_CFG_AASM           (1<<GRTM_CFG_AASM_BIT)
163#define GRTM_CFG_FECF           (1<<GRTM_CFG_FECF_BIT)
164#define GRTM_CFG_OCF            (1<<GRTM_CFG_OCF_BIT)
165#define GRTM_CFG_EVC            (1<<GRTM_CFG_EVC_BIT)
166#define GRTM_CFG_IDLE           (1<<GRTM_CFG_IDLE_BIT)
167#define GRTM_CFG_FSH            (1<<GRTM_CFG_FSH_BIT)
168#define GRTM_CFG_MCG            (1<<GRTM_CFG_MCG_BIT)
169#define GRTM_CFG_IZ             (1<<GRTM_CFG_IZ_BIT)
170#define GRTM_CFG_FHEC           (1<<GRTM_CFG_FHEC_BIT)
171#define GRTM_CFG_AOS            (1<<GRTM_CFG_AOS_BIT)
172#define GRTM_CFG_CIF            (1<<GRTM_CFG_CIF_BIT)
173#define GRTM_CFG_OCFB           (1<<GRTM_CFG_OCFB_BIT)
174
175/* TM Size Register (0x8c) */
176#define GRTM_SIZE_BLKSZ_BIT     0
177#define GRTM_SIZE_FIFOSZ_BIT    8
178#define GRTM_SIZE_LEN_BIT       20
179
180#define GRTM_SIZE_BLKSZ         (0xff<<GRTM_SIZE_BLKSZ_BIT)
181#define GRTM_SIZE_FIFOSZ        (0xfff<<GRTM_SIZE_FIFOSZ_BIT)
182#define GRTM_SIZE_LEN           (0xfff<<GRTM_SIZE_LEN_BIT)
183
184/* TM Physical Layer Register (0x90) */
185#define GRTM_PHY_SUB_BIT        0
186#define GRTM_PHY_SCF_BIT        15
187#define GRTM_PHY_SYM_BIT        16
188#define GRTM_PHY_SF_BIT         31
189
190#define GRTM_PHY_SUB            (0x7fff<<GRTM_PHY_SUB_BIT)
191#define GRTM_PHY_SCF            (1<<GRTM_PHY_SCF_BIT)
192#define GRTM_PHY_SYM            (0x7fff<<GRTM_PHY_SYM_BIT)
193#define GRTM_PHY_SF             (1<<GRTM_PHY_SF_BIT)
194
195/* TM Coding Sub-Layer Register (0x94) */
196#define GRTM_CODE_SC_BIT        0
197#define GRTM_CODE_SP_BIT        1
198#define GRTM_CODE_CERATE_BIT    2
199#define GRTM_CODE_CE_BIT        5
200#define GRTM_CODE_NRZ_BIT       6
201#define GRTM_CODE_PSR_BIT       7
202#define GRTM_CODE_RS8_BIT       11
203#define GRTM_CODE_RSDEP_BIT     12
204#define GRTM_CODE_RS_BIT        15
205#define GRTM_CODE_AASM_BIT      16
206#define GRTM_CODE_CSEL_BIT      17
207
208#define GRTM_CODE_SC            (1<<GRTM_CODE_SC_BIT)
209#define GRTM_CODE_SP            (1<<GRTM_CODE_SP_BIT)
210#define GRTM_CODE_CERATE        (0x7<<GRTM_CODE_CERATE_BIT)
211#define GRTM_CODE_CE            (1<<GRTM_CODE_CE_BIT)
212#define GRTM_CODE_NRZ           (1<<GRTM_CODE_NRZ_BIT)
213#define GRTM_CODE_PSR           (1<<GRTM_CODE_PSR_BIT)
214#define GRTM_CODE_RS8           (1<<GRTM_CODE_RS8_BIT)
215#define GRTM_CODE_RSDEP         (0x7<<GRTM_CODE_RSDEP_BIT)
216#define GRTM_CODE_RS            (1<<GRTM_CODE_RS_BIT)
217#define GRTM_CODE_AASM          (1<<GRTM_CODE_AASM_BIT)
218#define GRTM_CODE_CSEL          (0x3<<GRTM_CODE_CSEL_BIT)
219
220/* TM Attached Synchronization Marker Register (0x98) */
221#define GRTM_ASM_BIT            0
222
223#define GRTM_ASM                0xffffffff
224
225/* TM All Frames Generation Register (0xa0) */
226#define GRTM_ALL_LEN_BIT        0
227#define GRTM_ALL_VER_BIT        12
228#define GRTM_ALL_FHEC_BIT       14
229#define GRTM_ALL_FECF_BIT       15
230#define GRTM_ALL_IZ_BIT         16
231#define GRTM_ALL_IZLEN_BIT      17
232
233#define GRTM_ALL_LEN            (0x7ff<<GRTM_ALL_LEN_BIT)
234#define GRTM_ALL_VER            (0x3<<GRTM_ALL_VER_BIT)
235#define GRTM_ALL_FHEC           (1<<GRTM_ALL_FHEC_BIT)
236#define GRTM_ALL_FECF           (1<<GRTM_ALL_FECF_BIT)
237#define GRTM_ALL_IZ             (1<<GRTM_ALL_IZ_BIT)
238#define GRTM_ALL_IZLEN          (0x1f<<GRTM_ALL_IZLEN_BIT)
239
240/* TM Master Channel Frame Generation Register (0xa4) */
241#define GRTM_MST_OW_BIT         0
242#define GRTM_MST_OCF_BIT        1
243#define GRTM_MST_FSH_BIT        2
244#define GRTM_MST_MC_BIT         3
245#define GRTM_MST_MCCNTR_BIT     24
246
247#define GRTM_MST_OW             (1<<GRTM_MST_OW_BIT)
248#define GRTM_MST_OCF            (1<<GRTM_MST_OCF_BIT)
249#define GRTM_MST_FSH            (1<<GRTM_MST_FSH_BIT)
250#define GRTM_MST_MC             (0xff<<GRTM_MST_MC_BIT)
251
252/* TM Idle Frame Generation Register (0xa8) */
253#define GRTM_IDLE_SCID_BIT      0
254#define GRTM_IDLE_VCID_BIT      10
255#define GRTM_IDLE_MC_BIT        16
256#define GRTM_IDLE_VCC_BIT       17
257#define GRTM_IDLE_FSH_BIT       18
258#define GRTM_IDLE_EVC_BIT       19
259#define GRTM_IDLE_OCF_BIT       20
260#define GRTM_IDLE_IDLE_BIT      21
261#define GRTM_IDLE_MCCNTR_BIT    24
262
263#define GRTM_IDLE_SCID          (0x3ff<<GRTM_IDLE_SCID_BIT)
264#define GRTM_IDLE_VCID          (0x3f<<GRTM_IDLE_VCID_BIT)
265#define GRTM_IDLE_MC            (1<<GRTM_IDLE_MC_BIT)
266#define GRTM_IDLE_VCC           (1<<GRTM_IDLE_VCC_BIT)
267#define GRTM_IDLE_FSH           (1<<GRTM_IDLE_FSH_BIT)
268#define GRTM_IDLE_EVC           (1<<GRTM_IDLE_EVC_BIT)
269#define GRTM_IDLE_OCF           (1<<GRTM_IDLE_OCF_BIT)
270#define GRTM_IDLE_IDLE          (1<<GRTM_IDLE_IDLE_BIT)
271#define GRTM_IDLE_MCCNTR        (0xff<<GRTM_IDLE_MCCNTR_BIT)
272
273/* TM FSH/Insert Zone Registers (0xc0..0xcc) */
274#define GRTM_FSH_DATA_BIT       0
275
276#define GRTM_FSH_DATA           0xffffffff
277
278
279/* TM Operational Control Field Register (0xd0) */
280#define GRTM_OCF_CLCW_BIT       0
281
282#define GRTM_OCF_CLCW           0xffffffff
283
284
285/* GRTM Revision 0 */
286#define GRTM_REV0_DMA_CTRL_TXRDY_BIT    5
287#define GRTM_REV0_DMA_CTRL_TXRDY        (1<<GRTM_REV0_DMA_CTRL_TXRDY_BIT)
288
289/* GRTM Revision 1 */
290#define GRTM_REV1_DMA_STS_TXRDY_BIT     6
291#define GRTM_REV1_DMA_STS_TXSTAT_BIT    7
292#define GRTM_REV1_DMA_STS_TXRDY         (1<<GRTM_REV1_DMA_STS_TXRDY_BIT)
293#define GRTM_REV1_DMA_STS_TXSTAT        (1<<GRTM_REV1_DMA_STS_TXSTAT_BIT)
294
295#define GRTM_REV1_REV_SREV_BIT          0
296#define GRTM_REV1_REV_MREV_BIT          8
297#define GRTM_REV1_REV_TIRQ_BIT          16
298#define GRTM_REV1_REV_SREV              (0xff<<GRTM_REV1_REV_SREV_BIT)
299#define GRTM_REV1_REV_MREV              (0xff<<GRTM_REV1_REV_MREV_BIT)
300#define GRTM_REV1_REV_TIRQ              (1<<GRTM_REV1_REV_TIRQ_BIT)
301
302
303/* GRTM transmit descriptor (0x400 Alignment need) */
304struct grtm_bd {
305        volatile unsigned int   ctrl;
306        unsigned int            address;
307};
308
309#define GRTM_BD_EN_BIT          0
310#define GRTM_BD_WR_BIT          1
311#define GRTM_BD_IE_BIT          2
312#define GRTM_BD_FECFB_BIT       3
313#define GRTM_BD_IZB_BIT         4
314#define GRTM_BD_FHECB_BIT       5
315#define GRTM_BD_OCFB_BIT        6
316#define GRTM_BD_FSHB_BIT        7
317#define GRTM_BD_MCB_BIT         8
318#define GRTM_BD_VCE_BIT         9
319#define GRTM_BD_TS_BIT          14
320#define GRTM_BD_UE_BIT          15
321
322#define GRTM_BD_EN              (1<<GRTM_BD_EN_BIT)
323#define GRTM_BD_WR              (1<<GRTM_BD_WR_BIT)
324#define GRTM_BD_IE              (1<<GRTM_BD_IE_BIT)
325#define GRTM_BD_FECFB           (1<<GRTM_BD_FECFB_BIT)
326#define GRTM_BD_IZB             (1<<GRTM_BD_IZB_BIT)
327#define GRTM_BD_FHECB           (1<<GRTM_BD_FHECB_BIT)
328#define GRTM_BD_OCFB            (1<<GRTM_BD_OCFB_BIT)
329#define GRTM_BD_FSHB            (1<<GRTM_BD_FSHB_BIT)
330#define GRTM_BD_MCB             (1<<GRTM_BD_MCB_BIT)
331#define GRTM_BD_VCE             (1<<GRTM_BD_VCE_BIT)
332#define GRTM_BD_TS              (1<<GRTM_BD_TS_BIT)
333#define GRTM_BD_UE              (1<<GRTM_BD_UE_BIT)
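
/*
 * Illustrative sketch (not part of the driver): filling in one transmit
 * descriptor the way grtm_schedule_ready() further down does it. The
 * function name is made up; 'dma_adr' is assumed to already be a bus
 * address as seen by the GRTM core.
 */
static void grtm_example_fill_bd(struct grtm_bd *bd, unsigned int dma_adr,
				 int last_in_table, int want_irq)
{
	unsigned int ctrl = GRTM_BD_EN;

	if (last_in_table)
		ctrl |= GRTM_BD_WR;	/* wrap back to the first descriptor */
	if (want_irq)
		ctrl |= GRTM_BD_IE;	/* raise a TX interrupt when sent */

	bd->address = dma_adr;		/* payload address first ... */
	bd->ctrl = ctrl;		/* ... then enable the descriptor */
}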
334
335/* Load register */
336
337#define READ_REG(address)       (*(volatile unsigned int *)address)
338
339/* Driver functions */
340static rtems_device_driver grtm_initialize(rtems_device_major_number  major, rtems_device_minor_number  minor, void *arg);
341static rtems_device_driver grtm_open(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
342static rtems_device_driver grtm_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
343static rtems_device_driver grtm_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
344static rtems_device_driver grtm_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
345static rtems_device_driver grtm_ioctl(rtems_device_major_number major, rtems_device_minor_number minor, void *arg);
346
347#define GRTM_DRIVER_TABLE_ENTRY { grtm_initialize, grtm_open, grtm_close, grtm_read, grtm_write, grtm_ioctl }
348
349static rtems_driver_address_table grtm_driver = GRTM_DRIVER_TABLE_ENTRY;
350
351/* Structure that connects a BD with a software frame */
352struct grtm_ring {
353        struct grtm_ring        *next;
354        struct grtm_bd          *bd;
355        struct grtm_frame       *frm;
356};
357
358struct grtm_priv {
359        struct drvmgr_dev       *dev;           /* Driver manager device */
360        char                    devName[32];    /* Device Name */
361        struct grtm_regs        *regs;
362        int                     irq;
363        int                     minor;
364        int                     subrev;         /* GRTM Revision */
365        SPIN_DECLARE(devlock);                  /* spin-lock ISR protection */
366
367        int                     open;
368        int                     running;
369
370        struct grtm_bd          *bds;
371        void                    *_bds;
372
373        /* Interrupt generation */
374        int                     enable_cnt_curr;/* Down counter, when 0 the interrupt bit is set for next descriptor */
375        volatile int            handling_transmission;  /* Tells the ISR if a user task is actively changing descriptors/queues */
376
377        struct grtm_ring        *_ring;         /* Root of ring */
378        struct grtm_ring        *ring;          /* Next ring to use for new frames to be transmitted */
379        struct grtm_ring        *ring_end;      /* Oldest activated ring used */
380
381        /* Collections of frames: ready to be sent, scheduled for transmission,
382         * and sent frames waiting for the user to reclaim
383         */
384        struct grtm_list        ready;          /* Frames Waiting for free BDs */
385        struct grtm_list        scheduled;      /* Frames in BDs being transmitted */
386        struct grtm_list        sent;           /* Sent Frames waiting for user to reclaim and reuse */
387
388        /* Number of frames in the lists */
389        int                     ready_cnt;      /* Number of ready frames */
390        int                     scheduled_cnt;  /* Number of scheduled frames */
391        int                     sent_cnt;       /* Number of sent frames */
392
393        struct grtm_ioc_hw      hw_avail;       /* Hardware support available */
394        struct grtm_ioc_config  config;
395        struct grtm_ioc_stats   stats;
396
397        rtems_id                sem_tx;
398};
399
400/* Prototypes */
401static void *grtm_memalign(unsigned int boundary, unsigned int length, void *realbuf);
402static void grtm_hw_reset(struct grtm_priv *pDev);
403static void grtm_interrupt(void *arg);
404
405/* Common Global Variables */
406static rtems_id grtm_dev_sem;
407static int grtm_driver_io_registered = 0;
408static rtems_device_major_number grtm_driver_io_major = 0;
409
410/******************* Driver manager interface ***********************/
411
412/* Driver prototypes */
413static int grtm_register_io(rtems_device_major_number *m);
414static int grtm_device_init(struct grtm_priv *pDev);
415
416static int grtm_init2(struct drvmgr_dev *dev);
417static int grtm_init3(struct drvmgr_dev *dev);
418
419static struct drvmgr_drv_ops grtm_ops =
420{
421        {NULL, grtm_init2, grtm_init3, NULL},
422        NULL,
423        NULL
424};
425
426static struct amba_dev_id grtm_ids[] =
427{
428        {VENDOR_GAISLER, GAISLER_GRTM},
429        {0, 0}          /* Mark end of table */
430};
431
432static struct amba_drv_info grtm_drv_info =
433{
434        {
435                DRVMGR_OBJ_DRV,                 /* Driver */
436                NULL,                           /* Next driver */
437                NULL,                           /* Device list */
438                DRIVER_AMBAPP_GAISLER_GRTM_ID,  /* Driver ID */
439                "GRTM_DRV",                     /* Driver Name */
440                DRVMGR_BUS_TYPE_AMBAPP,         /* Bus Type */
441                &grtm_ops,
442                NULL,                           /* Funcs */
443                0,                              /* No devices yet */
444                0,
445        },
446        &grtm_ids[0]
447};
448
449void grtm_register_drv (void)
450{
451        DBG("Registering GRTM driver\n");
452        drvmgr_drv_register(&grtm_drv_info.general);
453}
454
455static int grtm_init2(struct drvmgr_dev *dev)
456{
457        struct grtm_priv *priv;
458
459        DBG("GRTM[%d] on bus %s\n", dev->minor_drv, dev->parent->dev->name);
460        priv = dev->priv = malloc(sizeof(struct grtm_priv));
461        if ( !priv )
462                return DRVMGR_NOMEM;
463        memset(priv, 0, sizeof(*priv));
464        priv->dev = dev;
465
466        /* This core will not find other cores, so the rest of the setup waits until init3() */
467
468        return DRVMGR_OK;
469}
470
471static int grtm_init3(struct drvmgr_dev *dev)
472{
473        struct grtm_priv *priv;
474        char prefix[32];
475        rtems_status_code status;
476
477        priv = dev->priv;
478
479        /* Do initialization */
480
481        if ( grtm_driver_io_registered == 0) {
482                /* Register the I/O driver only once for all cores */
483                if ( grtm_register_io(&grtm_driver_io_major) ) {
484                        /* Failed to register I/O driver */
485                        dev->priv = NULL;
486                        return DRVMGR_FAIL;
487                }
488
489                grtm_driver_io_registered = 1;
490        }
491
492        /* I/O system registered and initialized
493         * Now we take care of device initialization.
494         */
495        if ( grtm_device_init(priv) ) {
496                return DRVMGR_FAIL;
497        }
498
499        /* Get Filesystem name prefix */
500        prefix[0] = '\0';
501        if ( drvmgr_get_dev_prefix(dev, prefix) ) {
502                /* Failed to get prefix, make sure of a unique FS name
503                 * by using the driver minor.
504                 */
505                sprintf(priv->devName, "/dev/grtm%d", dev->minor_drv);
506        } else {
507                /* Got special prefix, this means we have a bus prefix
508                 * And we should use our "bus minor"
509                 */
510                sprintf(priv->devName, "/dev/%sgrtm%d", prefix, dev->minor_bus);
511        }
512
513        SPIN_INIT(&priv->devlock, priv->devName);
514
515        /* Register Device */
516        status = rtems_io_register_name(priv->devName, grtm_driver_io_major, dev->minor_drv);
517        if (status != RTEMS_SUCCESSFUL) {
518                return DRVMGR_FAIL;
519        }
520
521        return DRVMGR_OK;
522}
523
524/******************* Driver Implementation ***********************/
525
526static int grtm_register_io(rtems_device_major_number *m)
527{
528        rtems_status_code r;
529
530        if ((r = rtems_io_register_driver(0, &grtm_driver, m)) == RTEMS_SUCCESSFUL) {
531                DBG("GRTM driver successfully registered, major: %d\n", *m);
532        } else {
533                switch(r) {
534                case RTEMS_TOO_MANY:
535                        printk("GRTM rtems_io_register_driver failed: RTEMS_TOO_MANY\n");
536                        return -1;
537                case RTEMS_INVALID_NUMBER: 
538                        printk("GRTM rtems_io_register_driver failed: RTEMS_INVALID_NUMBER\n");
539                        return -1;
540                case RTEMS_RESOURCE_IN_USE:
541                        printk("GRTM rtems_io_register_driver failed: RTEMS_RESOURCE_IN_USE\n");
542                        return -1;
543                default:
544                        printk("GRTM rtems_io_register_driver failed\n");
545                        return -1;
546                }
547        }
548        return 0;
549}
550
551static int grtm_device_init(struct grtm_priv *pDev)
552{
553        struct amba_dev_info *ambadev;
554        struct ambapp_core *pnpinfo;
555        union drvmgr_key_value *value;
556
557        /* Get device information from AMBA PnP information */
558        ambadev = (struct amba_dev_info *)pDev->dev->businfo;
559        if ( ambadev == NULL ) {
560                return -1;
561        }
562        pnpinfo = &ambadev->info;
563        pDev->irq = pnpinfo->irq;
564        pDev->regs = (struct grtm_regs *)pnpinfo->apb_slv->start;
565        pDev->minor = pDev->dev->minor_drv;
566        pDev->open = 0;
567        pDev->running = 0;
568
569        /* Create binary TX semaphore with count = 0 */
570        if ( rtems_semaphore_create(rtems_build_name('G', 'R', 'M', '0' + pDev->minor),
571                0,
572                RTEMS_FIFO|RTEMS_SIMPLE_BINARY_SEMAPHORE|RTEMS_NO_INHERIT_PRIORITY|\
573                RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
574                0,
575                &pDev->sem_tx) != RTEMS_SUCCESSFUL ) {
576                return -1;
577        }
578
579        /* Allocate Memory for Buffer Descriptor Table, or let user provide a custom
580         * address.
581         */
582        value = drvmgr_dev_key_get(pDev->dev, "bdTabAdr", DRVMGR_KT_POINTER);
583        if ( value ) {
584                pDev->bds = (struct grtm_bd *)value->ptr;
585                pDev->_bds = (void *)value->ptr;       
586        } else {
587                pDev->bds = (struct grtm_bd *)grtm_memalign(0x400, 0x400, &pDev->_bds);
588        }
589        if ( !pDev->bds ) {
590                DBG("GRTM: Failed to allocate descriptor table\n");
591                return -1;
592        }
593        memset(pDev->bds, 0, 0x400);
594
595        pDev->_ring = malloc(sizeof(struct grtm_ring) * 128);
596        if ( !pDev->_ring ) {
597                return -1;
598        }
599
600        /* Reset Hardware before attaching IRQ handler */
601        grtm_hw_reset(pDev);
602
603        /* Read sub-revision number from the revision register */
604        pDev->subrev = (READ_REG(&pDev->regs->revision) & GRTM_REV1_REV_SREV)
605                        >> GRTM_REV1_REV_SREV_BIT;
606
607        return 0;
608}
609
610
611static inline void grtm_list_clr(struct grtm_list *list)
612{
613        list->head = NULL;
614        list->tail = NULL;
615}
616
617static void grtm_hw_reset(struct grtm_priv *pDev)
618{
619        /* Reset Core */
620        pDev->regs->dma_ctrl = GRTM_DMA_CTRL_RST;
621}
622
623static void grtm_hw_get_implementation(struct grtm_priv *pDev, struct grtm_ioc_hw *hwcfg)
624{
625        unsigned int cfg = READ_REG(&pDev->regs->cfg);
626
627        hwcfg->cs       = (cfg & GRTM_CFG_SC)   ? 1:0;
628        hwcfg->sp       = (cfg & GRTM_CFG_SP)   ? 1:0;
629        hwcfg->ce       = (cfg & GRTM_CFG_CE)   ? 1:0;
630        hwcfg->nrz      = (cfg & GRTM_CFG_NRZ)  ? 1:0;
631        hwcfg->psr      = (cfg & GRTM_CFG_PSR)  ? 1:0;
632        hwcfg->te       = (cfg & GRTM_CFG_TE)   ? 1:0;
633        hwcfg->rsdep    = (cfg & GRTM_CFG_RSDEP)>>GRTM_CFG_RSDEP_BIT;
634        hwcfg->rs       = (cfg & GRTM_CFG_RS)>>GRTM_CFG_RS_BIT;
635        hwcfg->aasm     = (cfg & GRTM_CFG_AASM) ? 1:0;
636        hwcfg->fecf     = (cfg & GRTM_CFG_FECF) ? 1:0;
637        hwcfg->ocf      = (cfg & GRTM_CFG_OCF)  ? 1:0;
638        hwcfg->evc      = (cfg & GRTM_CFG_EVC)  ? 1:0;
639        hwcfg->idle     = (cfg & GRTM_CFG_IDLE) ? 1:0;
640        hwcfg->fsh      = (cfg & GRTM_CFG_FSH)  ? 1:0;
641        hwcfg->mcg      = (cfg & GRTM_CFG_MCG)  ? 1:0;
642        hwcfg->iz       = (cfg & GRTM_CFG_IZ)   ? 1:0;
643        hwcfg->fhec     = (cfg & GRTM_CFG_FHEC) ? 1:0;
644        hwcfg->aos      = (cfg & GRTM_CFG_AOS)  ? 1:0;
645        hwcfg->cif      = (cfg & GRTM_CFG_CIF)  ? 1:0;
646        hwcfg->ocfb     = (cfg & GRTM_CFG_OCFB) ? 1:0;
647
648        cfg = READ_REG(&pDev->regs->dma_cfg);
649        hwcfg->blk_size = (cfg & GRTM_DMA_CFG_BLKSZ) >> GRTM_DMA_CFG_BLKSZ_BIT;
650        hwcfg->fifo_size= (cfg & GRTM_DMA_CFG_FIFOSZ) >> GRTM_DMA_CFG_FIFOSZ_BIT;
651}
652
653
654/* TODO: Implement proper default calculation from hardware configuration */
655static void grtm_hw_get_default_modes(struct grtm_ioc_config *cfg, struct grtm_ioc_hw *hwcfg)
656{
657        cfg->mode = GRTM_MODE_TM;
658        cfg->frame_length = 223;
659        cfg->limit = 0; /* Make driver auto configure it on START, user may override with non-zero value */
660        cfg->as_marker = 0x1ACFFC1D;
661
662        /* Physical */
663        cfg->phy_subrate = 1;
664        cfg->phy_symbolrate = 1;
665        cfg->phy_opts = 0;
666
667        /* Coding Layer */
668        cfg->code_rsdep = 1;
669        cfg->code_ce_rate = 0;
670        cfg->code_csel = 0;
671        cfg->code_opts = 0;
672
673        /* All Frame Generation */
674        cfg->all_izlen = 0;
675        cfg->all_opts = GRTM_IOC_ALL_FECF;
676
677        /* Master Channel Frame Generation */
678        if ( hwcfg->mcg ) {
679                cfg->mf_opts = GRTM_IOC_MF_MC;
680        } else {
681                cfg->mf_opts = 0;
682        }
683
684        /* Idle Frame Generation */
685        cfg->idle_scid = 0;
686        cfg->idle_vcid = 0;
687        if ( hwcfg->idle ) {
688                cfg->idle_opts = GRTM_IOC_IDLE_EN;
689        } else {
690                cfg->idle_opts = 0;
691        }
692
693        /* Interrupt options */
694        cfg->blocking = 0;      /* non-blocking mode is default */
695        cfg->enable_cnt = 16;   /* generate interrupt every 16 descriptors */
696        cfg->isr_desc_proc = 1; /* Let interrupt handler do descriptor processing */
697        cfg->timeout = RTEMS_NO_TIMEOUT;
698       
699}
700
701static void *grtm_memalign(unsigned int boundary, unsigned int length, void *realbuf)
702{
703        *(int *)realbuf = (int)malloc(length+boundary);
704        DBG("GRTM: Allocated %d (0x%x) bytes, requested: %d\n",length+boundary,length+boundary,length);
705        return (void *)(((*(unsigned int *)realbuf)+boundary) & ~(boundary-1));
706}
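
/*
 * Illustrative usage (not part of the driver): grtm_memalign() over-allocates
 * by 'boundary' bytes, stores the raw malloc() pointer in *realbuf (so it
 * could be freed later) and returns the address rounded up to the next
 * boundary. For example, if malloc() returns 0x40001234 and boundary is
 * 0x400, the returned pointer is (0x40001234 + 0x400) & ~0x3ff = 0x40001400.
 * The helper below mirrors the BD-table allocation in grtm_device_init();
 * its name is made up for the example.
 */
static struct grtm_bd *grtm_example_alloc_bdtab(void **orig)
{
	/* 1 KiB-aligned, 1 KiB-large descriptor table */
	return (struct grtm_bd *)grtm_memalign(0x400, 0x400, orig);
}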
707
708static int grtm_hw_set_config(struct grtm_priv *pDev, struct grtm_ioc_config *cfg, struct grtm_ioc_hw *hwcfg)
709{
710        struct grtm_regs *regs = pDev->regs;
711        unsigned int tmp;
712        unsigned int limit;
713
714        if ( cfg->limit == 0 ) {
715                /* Calculate Limit */
716                if ( cfg->frame_length > hwcfg->blk_size ) {
717                        limit = hwcfg->blk_size*2;
718                } else {
719                        limit = cfg->frame_length;
720                }
721        } else {
722                /* Use user configured limit */
723                limit = cfg->limit;
724        }
725
726        /* Frame Length and Limit */
727        regs->dma_len = (((limit-1) << GRTM_DMA_LEN_LIM_BIT) & GRTM_DMA_LEN_LIM)|
728                        (((cfg->frame_length-1) << GRTM_DMA_LEN_LEN_BIT) & GRTM_DMA_LEN_LEN);
729
730        /* Physical layer options */
731        tmp =   (cfg->phy_opts & (GRTM_IOC_PHY_SCF|GRTM_IOC_PHY_SF)) |
732                (((cfg->phy_symbolrate-1)<<GRTM_PHY_SYM_BIT) & GRTM_PHY_SYM) | (((cfg->phy_subrate-1)<<GRTM_PHY_SUB_BIT) & GRTM_PHY_SUB);
733        regs->phy = tmp;
734
735        /* Coding Sub-layer Options */
736        tmp =   (cfg->code_opts & GRTM_IOC_CODE_ALL) | ((cfg->code_csel<<GRTM_CODE_CSEL_BIT) & GRTM_CODE_CSEL) |
737                (((cfg->code_rsdep-1)<<GRTM_CODE_RSDEP_BIT) & GRTM_CODE_RSDEP) | ((cfg->code_ce_rate<<GRTM_CODE_CERATE_BIT) & GRTM_CODE_CERATE);
738        regs->code = tmp;
739
740        /* Attached synchronization marker register */
741        regs->asmr = cfg->as_marker;
742
743        /* All Frames Generation */
744        tmp =   ((cfg->all_opts & GRTM_IOC_ALL_ALL)<<14) |
745                ((cfg->all_izlen<<GRTM_ALL_IZLEN_BIT) & GRTM_ALL_IZLEN) |
746                ((cfg->mode<<GRTM_ALL_VER_BIT) & GRTM_ALL_VER);
747        regs->all_frm = tmp;
748
749        /* Master Frame Generation */
750        regs->mst_frm = cfg->mf_opts & GRTM_IOC_MF_ALL;
751
752        /* Idle frame Generation */
753        tmp =   ((cfg->idle_opts & GRTM_IOC_IDLE_ALL) << 16) |
754                ((cfg->idle_vcid << GRTM_IDLE_VCID_BIT) & GRTM_IDLE_VCID) |
755                ((cfg->idle_scid << GRTM_IDLE_SCID_BIT) & GRTM_IDLE_SCID);
756        regs->idle_frm = tmp;
757
758        return 0;
759}
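
/*
 * Worked example for the limit/length calculation in grtm_hw_set_config()
 * above (illustrative; assumes a DMA block size of 128 bytes reported by
 * the hardware): with the defaults frame_length = 223 and limit = 0 (auto),
 * frame_length > blk_size, so limit = 2 * 128 = 256. The value written to
 * dma_len is then
 *
 *   ((256 - 1) << 16) | ((223 - 1) << 0) = 0x00ff00de
 *
 * i.e. both the LIM and LEN fields hold "value - 1".
 */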
760
761static int grtm_start(struct grtm_priv *pDev)
762{
763        struct grtm_regs *regs = pDev->regs;
764        int i;
765        struct grtm_ioc_config *cfg = &pDev->config;
766        unsigned int txrdy;
767
768        /* Clear Descriptors */
769        memset(pDev->bds,0,0x400);
770       
771        /* Clear stats */
772        memset(&pDev->stats,0,sizeof(struct grtm_ioc_stats));
773       
774        /* Init Descriptor Ring */
775        memset(pDev->_ring,0,sizeof(struct grtm_ring)*128);
776        for(i=0;i<127;i++){
777                pDev->_ring[i].next = &pDev->_ring[i+1];
778                pDev->_ring[i].bd = &pDev->bds[i];
779                pDev->_ring[i].frm = NULL;
780        }
781        pDev->_ring[127].next = &pDev->_ring[0];
782        pDev->_ring[127].bd = &pDev->bds[127];
783        pDev->_ring[127].frm = NULL;
784
785        pDev->ring = &pDev->_ring[0];
786        pDev->ring_end = &pDev->_ring[0];
787
788        /* Clear Scheduled, Ready and Sent list */
789        grtm_list_clr(&pDev->ready);
790        grtm_list_clr(&pDev->scheduled);
791        grtm_list_clr(&pDev->sent);
792
793        /* Software init */
794        pDev->handling_transmission = 0;
795       
796        /* Reset the transmitter */
797        regs->dma_ctrl = GRTM_DMA_CTRL_TXRST;
798        regs->dma_ctrl = 0;     /* Leave Reset */
799
800        /* Clear old interrupts */
801        regs->dma_status = GRTM_DMA_STS_ALL;
802
803        /* Set Descriptor Pointer Base register to point to first descriptor */
804        drvmgr_translate_check(pDev->dev, CPUMEM_TO_DMA, (void *)pDev->bds,
805                                (void **)&regs->dma_bd, 0x400);
806
807        /* Set hardware options as defined by config */
808        if ( grtm_hw_set_config(pDev, cfg, &pDev->hw_avail) ) {
809                return RTEMS_IO_ERROR;
810        }
811
812        /* Enable TM Transmitter */
813        regs->ctrl = GRTM_CTRL_EN;
814
815        /* Short delay before polling the transmitter status */
816        i=1000;
817        while( i > 0 ) {
818                asm volatile ("nop"::);
819                i--;
820        }
821
822        /* Check transmitter startup OK */
823        i = 1000000;
824        do {
825                /* Location of TXRDY Bit is different for different revisions */
826                if ( pDev->subrev == 0 ) {
827                        txrdy = READ_REG(&regs->dma_ctrl) &
828                                GRTM_REV0_DMA_CTRL_TXRDY;
829                } else {
830                        txrdy = READ_REG(&regs->dma_status) &
831                                GRTM_REV1_DMA_STS_TXRDY;
832                }
833                if (txrdy != 0)
834                        break;
835
836                asm volatile ("nop"::);
837        } while ( --i > 0 );
838        if ( i == 0 ) {
839                /* Reset Failed */
840                DBG("GRTM: start: Resetting transmitter failed (%d)\n",i);
841                return RTEMS_IO_ERROR;
842        }
843        DBG("GRTM: reset time %d\n",i);
844
845        /* Everything is configured, the TM transmitter is started
846         * and idle frames have been sent.
847         */
848
849        /* Mark running before enabling the DMA transmitter */
850        pDev->running = 1;
851
852        /* Enable interrupts (Error and DMA TX) */
853        regs->dma_ctrl = GRTM_DMA_CTRL_IE;
854
855        DBG("GRTM: STARTED\n");
856
857        return RTEMS_SUCCESSFUL;
858}
859
860static void grtm_stop(struct grtm_priv *pDev)
861{
862        struct grtm_regs *regs = pDev->regs;
863
864        /* Disable the transmitter & Interrupts */
865        regs->dma_ctrl = 0;
866       
867        /* Clear any pending interrupt  */
868        regs->dma_status = GRTM_DMA_STS_ALL;
869
870        DBG("GRTM: STOPPED\n");
871
872        /* Flush semaphore in case a thread is stuck waiting for TX Interrupts */
873        rtems_semaphore_flush(pDev->sem_tx);
874}
875
876static rtems_device_driver grtm_open(
877        rtems_device_major_number major,
878        rtems_device_minor_number minor,
879        void *arg)
880{
881        struct grtm_priv *pDev;
882        struct drvmgr_dev *dev;
883
884        FUNCDBG();
885
886        if ( drvmgr_get_dev(&grtm_drv_info.general, minor, &dev) ) {
887                DBG("Wrong minor %d\n", minor);
888                return RTEMS_INVALID_NUMBER;
889        }
890        pDev = (struct grtm_priv *)dev->priv;
891       
892        /* Wait until we get semaphore */
893        if ( rtems_semaphore_obtain(grtm_dev_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT) != RTEMS_SUCCESSFUL ){
894                return RTEMS_INTERNAL_ERROR;
895        }
896
897        /* Is device in use? */
898        if ( pDev->open ){
899                rtems_semaphore_release(grtm_dev_sem);
900                return RTEMS_RESOURCE_IN_USE;
901        }
902       
903        /* Mark device taken */
904        pDev->open = 1;
905       
906        rtems_semaphore_release(grtm_dev_sem);
907       
908        DBG("grtm_open: OPENED minor %d (pDev: 0x%x)\n",pDev->minor,(unsigned int)pDev);
909       
910        /* Set defaults */
911        pDev->config.timeout = RTEMS_NO_TIMEOUT;        /* no timeout (wait forever) */
912        pDev->config.blocking = 0;                      /* polling mode */
913       
914        pDev->running = 0;                              /* not in running mode yet */
915
916        memset(&pDev->config,0,sizeof(pDev->config));
917       
918        /* The core has been reset when we execute here, so it is possible
919         * to read out what HW is implemented from core.
920         */
921        grtm_hw_get_implementation(pDev, &pDev->hw_avail);
922
923        /* Get default modes */
924        grtm_hw_get_default_modes(&pDev->config,&pDev->hw_avail);
925       
926        return RTEMS_SUCCESSFUL;
927}
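
/*
 * Illustrative application-side sketch (not part of the driver): open the
 * device registered above, enable blocking reclaim, start the transmitter,
 * queue a single frame and reclaim it again. The device name assumes no bus
 * prefix, the payload buffer is provided by the caller, and error handling
 * is reduced to early returns; the function name is made up for the example.
 */
#include <fcntl.h>	/* open()  - for this illustrative sketch only */
#include <sys/ioctl.h>	/* ioctl() - for this illustrative sketch only */
#include <unistd.h>	/* close() - for this illustrative sketch only */

static int grtm_example_send_one(unsigned int *payload_buf)
{
	struct grtm_frame frame;
	struct grtm_list chain;
	int fd;

	fd = open("/dev/grtm0", O_RDWR);
	if (fd < 0)
		return -1;

	if (ioctl(fd, GRTM_IOC_SET_BLOCKING_MODE, GRTM_BLKMODE_BLK) < 0)
		return -1;
	if (ioctl(fd, GRTM_IOC_START, 0) < 0)
		return -1;

	/* Build a one-frame chain and hand it to the driver (-> READY queue) */
	memset(&frame, 0, sizeof(frame));
	frame.payload = payload_buf;
	frame.next = NULL;
	chain.head = &frame;
	chain.tail = &frame;
	if (ioctl(fd, GRTM_IOC_SEND, &chain) < 0)
		return -1;

	/* Block until the frame has been transmitted, then take it back */
	if (ioctl(fd, GRTM_IOC_RECLAIM, &chain) < 0)
		return -1;

	ioctl(fd, GRTM_IOC_STOP, 0);
	close(fd);
	return 0;
}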
928
929static rtems_device_driver grtm_close(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
930{
931        struct grtm_priv *pDev;
932        struct drvmgr_dev *dev;
933
934        FUNCDBG();
935
936        if ( drvmgr_get_dev(&grtm_drv_info.general, minor, &dev) ) {
937                return RTEMS_INVALID_NUMBER;
938        }
939        pDev = (struct grtm_priv *)dev->priv;
940
941        if ( pDev->running ){
942                drvmgr_interrupt_unregister(dev, 0, grtm_interrupt, pDev);
943                grtm_stop(pDev);
944                pDev->running = 0;
945        }
946
947        /* Reset core */
948        grtm_hw_reset(pDev);
949
950        /* Clear descriptor area just for sure */
951        memset(pDev->bds, 0, 0x400);
952       
953        /* Mark not open */
954        pDev->open = 0;
955
956        return RTEMS_SUCCESSFUL;
957}
958
959static rtems_device_driver grtm_read(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
960{
961        FUNCDBG();
962        return RTEMS_NOT_IMPLEMENTED;
963}
964
965static rtems_device_driver grtm_write(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
966{
967        FUNCDBG();
968        return RTEMS_NOT_IMPLEMENTED;
969}
970
971/* Scans the descriptor table for scheduled frames that have been sent,
972 * and moves these frames from the head of the scheduled queue to the
973 * tail of the sent queue.
974 *
975 * Also, for all frames the status is updated.
976 *
977 * Return Value
978 * Number of frames freed.
979 */
980static int grtm_free_sent(struct grtm_priv *pDev)
981{
982        struct grtm_ring *curr;
983        struct grtm_frame *last_frm, *first_frm;
984        int freed_frame_cnt=0;
985        unsigned int ctrl;
986
987        curr = pDev->ring_end;
988
989        /* Step into TX ring to find sent frames */
990        if ( !curr->frm ){
991                /* No scheduled frames, abort */
992                return 0;
993        }
994
995        /* There have been frames scheduled ==> scheduled frames may have been
996         * transmitted and need to be collected.
997         */
998
999        first_frm = curr->frm;
1000
1001        /* Loop until first enabled unsent frame is found.
1002         * An unused descriptor is indicated by an unassigned frm field.
1003         */
1004        while ( curr->frm && !((ctrl=READ_REG(&curr->bd->ctrl)) & GRTM_BD_EN) ){
1005                /* Handle one sent Frame */
1006               
1007                /* Remember last handled frame so that insertion/removal from
1008                 * frames lists go fast.
1009                 */
1010                last_frm = curr->frm;
1011               
1012                /* 1. Set flags to indicate error(s) and other information */
1013                last_frm->flags |= GRTM_FLAGS_SENT; /* Mark sent */
1014               
1015                /* Update Stats */
1016                pDev->stats.frames_sent++;
1017   
1018                /* Did packet encounter link error? */
1019                if ( ctrl & GRTM_BD_UE ) {
1020                        pDev->stats.err_underrun++;
1021                        last_frm->flags |= GRRM_FLAGS_ERR;
1022                }
1023
1024                curr->frm = NULL; /* Mark unused */
1025
1026                /* Increment */
1027                curr = curr->next;
1028                freed_frame_cnt++;
1029        }
1030
1031        /* 1. Remove all handled frames from scheduled queue
1032         * 2. Put all handled frames into sent queue
1033         */
1034        if ( freed_frame_cnt > 0 ){
1035
1036                /* Save TX ring position */
1037                pDev->ring_end = curr;
1038
1039                /* Remove all sent frames from scheduled list */
1040                if ( pDev->scheduled.tail == last_frm ){
1041                        /* All scheduled frames sent... */
1042                        pDev->scheduled.head = NULL;
1043                        pDev->scheduled.tail = NULL;
1044                }else{
1045                        pDev->scheduled.head = last_frm->next;
1046                }
1047                last_frm->next = NULL;
1048
1049                /* Put all sent frames into "Sent queue" for user to
1050                 * collect, later on.
1051                 */
1052                if ( !pDev->sent.head ){
1053                        /* Sent queue empty */
1054                        pDev->sent.head = first_frm;
1055                        pDev->sent.tail = last_frm;
1056                }else{
1057                        pDev->sent.tail->next = first_frm;
1058                        pDev->sent.tail = last_frm;
1059                }
1060        }
1061        return freed_frame_cnt;
1062}
1063
1064
1065/* Moves as many frames from the ready queue to the scheduled queue as there
 1066 * are free descriptors for. The free descriptors are then assigned one frame
1067 * each and enabled for transmission.
1068 *
1069 * Return Value
1070 * Returns number of frames moved from ready to scheduled queue
1071 */
1072static int grtm_schedule_ready(struct grtm_priv *pDev)
1073{
1074        int cnt;
1075        unsigned int ctrl, dmactrl;
1076        struct grtm_ring *curr_bd;
1077        struct grtm_frame *curr_frm, *last_frm;
1078
1079        if ( !pDev->ready.head ){
1080                return 0;
1081        }
1082
1083        cnt=0;
1084        curr_frm = pDev->ready.head;
1085        curr_bd = pDev->ring;
1086        while( !curr_bd->frm ){
1087                /* Assign frame to descriptor */
1088                curr_bd->frm = curr_frm;
1089
1090                /* Prepare descriptor address. Three cases:
1091                 *  - GRTM core on same bus as CPU ==> no translation (Address used by CPU = address used by GRTM)
1092                 *  - GRTM core on remote bus, and payload address given as used by CPU ==> Translation needed
1093                 *  - GRTM core on remote bus, and payload address given as used by GRTM ==> no translation  [ USER does custom translation]
1094                 */
1095                if ( curr_frm->flags & (GRTM_FLAGS_TRANSLATE|GRTM_FLAGS_TRANSLATE_AND_REMEMBER) ) {
1096                        /* Do translation */
1097                        drvmgr_translate(pDev->dev, CPUMEM_TO_DMA, (void *)curr_frm->payload, (void **)&curr_bd->bd->address);
1098                        if ( curr_frm->flags & GRTM_FLAGS_TRANSLATE_AND_REMEMBER ) {
1099                                if ( curr_frm->payload != (unsigned int *)curr_bd->bd->address ) {
1100                                        /* Translation needed */
1101                                        curr_frm->flags &= ~GRTM_FLAGS_TRANSLATE_AND_REMEMBER;
1102                                        curr_frm->flags |= GRTM_FLAGS_TRANSLATE;
1103                                } else {
1104                                        /* No translation needed */
1105                                        curr_frm->flags &= ~(GRTM_FLAGS_TRANSLATE|GRTM_FLAGS_TRANSLATE_AND_REMEMBER);
1106                                }
1107                        }
1108                } else {
1109                        /* Custom translation or no translation needed */
1110                        curr_bd->bd->address = (unsigned int)curr_frm->payload;
1111                }
1112
1113                ctrl = GRTM_BD_EN;
1114                if ( curr_bd->next == pDev->_ring ){
1115                        ctrl |= GRTM_BD_WR; /* Wrap around */
1116                }
1117                /* Apply user options/flags */
1118                ctrl |= (curr_frm->flags & GRTM_FLAGS_MASK);
1119
1120                /* Is this Frame going to be an interrupt Frame? */
1121                if ( (--pDev->enable_cnt_curr) <= 0 ){
1122                        if ( pDev->config.enable_cnt == 0 ){
1123                                pDev->enable_cnt_curr = 0x3fffffff;
1124                        }else{
1125                                pDev->enable_cnt_curr = pDev->config.enable_cnt;
1126                                ctrl |= GRTM_BD_IE;
1127                        }
1128                }
1129
1130                /* Enable descriptor */
1131                curr_bd->bd->ctrl = ctrl;
1132
1133                last_frm = curr_frm;
1134                curr_bd = curr_bd->next;
1135                cnt++;
1136               
1137                /* Get Next Frame from Ready Queue */
1138                if ( curr_frm == pDev->ready.tail ){
1139                        /* Handled all in ready queue. */
1140                        curr_frm = NULL;
1141                        break;
1142                }
1143                curr_frm = curr_frm->next;
1144        }
1145       
1146        /* Have any frames been scheduled? */
1147        if ( cnt > 0 ){
1148                /* Make last frame mark end of chain, probably pointless... */
1149                last_frm->next = NULL;
1150
1151                /* Insert scheduled packets into scheduled queue */
1152                if ( !pDev->scheduled.head ){
1153                        /* empty scheduled queue */
1154                        pDev->scheduled.head = pDev->ready.head;
1155                        pDev->scheduled.tail = last_frm;
1156                }else{
1157                        pDev->scheduled.tail->next = pDev->ready.head;
1158                        pDev->scheduled.tail = last_frm;
1159                }
1160
1161                /* Remove scheduled packets from ready queue */
1162                pDev->ready.head = curr_frm;
1163                if ( !curr_frm ){
1164                        pDev->ready.tail = NULL;
1165                }
1166
1167                /* Update TX ring position */
1168                pDev->ring = curr_bd;
1169
1170                /* Make hardware aware of the newly enabled descriptors */
1171                dmactrl = READ_REG(&pDev->regs->dma_ctrl);
1172                dmactrl &= ~(GRTM_DMA_CTRL_TXRST | GRTM_DMA_CTRL_RST);
1173                dmactrl |= GRTM_DMA_CTRL_EN;
1174                pDev->regs->dma_ctrl = dmactrl;
1175        }
1176
1177        return cnt;
1178}
1179
1180static void grtm_tx_process(struct grtm_priv *pDev)
1181{
1182        int num;
1183
1184        /* Free used descriptors and put the sent frames into the "Sent queue"
1185         *   (SCHEDULED->SENT)
1186         */
1187        num = grtm_free_sent(pDev);
1188        pDev->scheduled_cnt -= num;
1189        pDev->sent_cnt += num;
1190
1191        /* Use all available free descriptors there are frames for
1192         * in the ready queue.
1193         *   (READY->SCHEDULED)
1194         */
1195        if (pDev->running) {
1196                num = grtm_schedule_ready(pDev);
1197                pDev->ready_cnt -= num;
1198                pDev->scheduled_cnt += num;
1199        }
1200}
1201
1202/*
 1203 * The TX lock protects user tasks from the ISR. If a TX DMA interrupt occurs
 1204 * while a user task is processing the TX DMA descriptors, the ISR ignores the
 1205 * request and does not process the DMA table, since the user task will do that
 1206 * anyway. In SMP, when a user task enters the TX DMA processing while the ISR
 1207 * (on another CPU) is already processing it, the user task will loop waiting
 1208 * for the ISR to complete.
1209 */
1210static int grtm_request_txlock(struct grtm_priv *pDev, int block)
1211{
1212        SPIN_IRQFLAGS(irqflags);
1213        int got_lock = 0;
1214
1215        do {
1216                SPIN_LOCK_IRQ(&pDev->devlock, irqflags);
1217                if (pDev->handling_transmission == 0) {
1218                        pDev->handling_transmission = 1;
1219                        got_lock = 1;
1220                }
1221                SPIN_UNLOCK_IRQ(&pDev->devlock, irqflags);
1222        } while (!got_lock && block);
1223
1224        return got_lock;
1225}
1226
1227static inline int grtm_request_txlock_isr(struct grtm_priv *pDev)
1228{
1229        SPIN_ISR_IRQFLAGS(irqflags);
1230        int got_lock = 0;
1231
1232        SPIN_LOCK(&pDev->devlock, irqflags);
1233        if (pDev->handling_transmission == 0) {
1234                pDev->handling_transmission = 1;
1235                got_lock = 1;
1236        }
1237        SPIN_UNLOCK(&pDev->devlock, irqflags);
1238
1239        return got_lock;
1240}
1241
1242static inline void grtm_release_txlock(struct grtm_priv *pDev)
1243{
1244        pDev->handling_transmission = 0;
1245}
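
/*
 * Illustrative sketch (not part of the driver): the call pattern used around
 * the TX DMA processing. A task blocks until it owns the flag, while an ISR
 * only tries once and skips the processing if a task already owns it. The
 * function names are made up for the example.
 */
static void grtm_example_task_tx(struct grtm_priv *pDev)
{
	grtm_request_txlock(pDev, 1);	/* spin until we own the TX lock */
	grtm_tx_process(pDev);		/* SCHEDULED->SENT, READY->SCHEDULED */
	grtm_release_txlock(pDev);
}

static void grtm_example_isr_tx(struct grtm_priv *pDev)
{
	if (grtm_request_txlock_isr(pDev)) {	/* never spin in ISR context */
		grtm_tx_process(pDev);
		grtm_release_txlock(pDev);
	}
}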
1246
1247static rtems_device_driver grtm_ioctl(rtems_device_major_number major, rtems_device_minor_number minor, void *arg)
1248{
1249        struct grtm_priv *pDev;
1250        struct drvmgr_dev *dev;
1251        rtems_libio_ioctl_args_t *ioarg = (rtems_libio_ioctl_args_t *)arg;
1252        unsigned int *data;
1253        int status;
1254        struct grtm_ioc_config *cfg;
1255        struct grtm_ioc_hw_status *hwregs;
1256        struct grtm_list *chain;
1257        struct grtm_frame *curr;
1258        struct grtm_ioc_hw *hwimpl;
1259        struct grtm_ioc_stats *stats;
1260        int num,ret;
1261
1262        FUNCDBG();
1263
1264        if ( drvmgr_get_dev(&grtm_drv_info.general, minor, &dev) ) {
1265                return RTEMS_INVALID_NUMBER;
1266        }
1267        pDev = (struct grtm_priv *)dev->priv;
1268
1269        if (!ioarg)
1270                return RTEMS_INVALID_NAME;
1271
1272        data = ioarg->buffer;
1273        ioarg->ioctl_return = 0;
1274        switch(ioarg->command) {
1275                case GRTM_IOC_START:
1276                if ( pDev->running ) {
1277                        return RTEMS_RESOURCE_IN_USE; /* EBUSY */
1278                }
1279                if ( (status=grtm_start(pDev)) != RTEMS_SUCCESSFUL ){
1280                        return status;
1281                }
1282                /* Register ISR & Enable interrupt */
1283                drvmgr_interrupt_register(dev, 0, "grtm", grtm_interrupt, pDev);
1284
1285                /* Read and write are now open... */
1286                break;
1287
1288                case GRTM_IOC_STOP:
1289                if ( !pDev->running ) {
1290                        return RTEMS_RESOURCE_IN_USE;
1291                }
1292
1293                /* Disable interrupts */
1294                drvmgr_interrupt_unregister(dev, 0, grtm_interrupt, pDev);
1295                grtm_stop(pDev);
1296                pDev->running = 0;
1297                break;
1298
1299                case GRTM_IOC_ISSTARTED:
1300                if ( !pDev->running ) {
1301                        return RTEMS_RESOURCE_IN_USE;
1302                }
1303                break;
1304
1305                case GRTM_IOC_SET_BLOCKING_MODE:
1306                if ( (unsigned int)data > GRTM_BLKMODE_BLK ) {
1307                        return RTEMS_INVALID_NAME;
1308                }
1309                DBG("GRTM: Set blocking mode: %d\n",(unsigned int)data);
1310                pDev->config.blocking = (unsigned int)data;
1311                break;
1312
1313                case GRTM_IOC_SET_TIMEOUT:
1314                DBG("GRTM: Timeout: %d\n",(unsigned int)data);
1315                pDev->config.timeout = (rtems_interval)data;
1316                break;
1317
1318                case GRTM_IOC_SET_CONFIG:
1319                cfg = (struct grtm_ioc_config *)data;
1320                if ( !cfg ) {
1321                        return RTEMS_INVALID_NAME;
1322                }
1323               
1324                if ( pDev->running ) {
1325                        return RTEMS_RESOURCE_IN_USE;
1326                }
1327
1328                pDev->config = *cfg;
1329                break;
1330
1331                case GRTM_IOC_GET_STATS:
1332                stats = (struct grtm_ioc_stats *)data;
1333                if ( !stats ) {
1334                        return RTEMS_INVALID_NAME;
1335                }
1336                memcpy(stats,&pDev->stats,sizeof(struct grtm_ioc_stats));
1337                break;
1338
1339                case GRTM_IOC_CLR_STATS:
1340                memset(&pDev->stats,0,sizeof(struct grtm_ioc_stats));
1341                break;
1342
1343                case GRTM_IOC_GET_CONFIG:
1344                cfg = (struct grtm_ioc_config *)data;
1345                if ( !cfg ) {
1346                        return RTEMS_INVALID_NAME;
1347                }
1348
1349                *cfg = pDev->config;
1350                break;
1351
1352                case GRTM_IOC_GET_OCFREG:
1353                if ( !pDev->hw_avail.ocf ) {
1354                        /* Hardware does not implement the OCF register */
1355                        return RTEMS_NOT_DEFINED;
1356                }
1357                if ( !data ) {
1358                        return RTEMS_INVALID_NAME;
1359                }
1360                *(unsigned int **)data = (unsigned int *)&pDev->regs->ocf;
1361                break;
1362
1363                case GRTM_IOC_GET_HW_IMPL:
1364                hwimpl = (struct grtm_ioc_hw *)data;
1365                if ( !hwimpl ) {
1366                        return RTEMS_INVALID_NAME;
1367                }
1368                *hwimpl = pDev->hw_avail;
1369                break;
1370
1371                case GRTM_IOC_GET_HW_STATUS:
1372                hwregs = (struct grtm_ioc_hw_status *)data;
1373                if ( !hwregs ) {
1374                        return RTEMS_INVALID_NAME;
1375                }
1376                /* We disable interrupt in order to get a snapshot of the registers */
1377/* TODO: implement hwregs */
1378                break;
1379
1380                /* Put a chain of frames at the back of the "Ready frames" queue. This
1381                 * triggers the driver to put frames from the Ready queue into unused
1382                 * available descriptors. (Ready -> Scheduled)
1383                 */
1384
1385                case GRTM_IOC_SEND:
1386                if ( !pDev->running ){
1387                        return RTEMS_RESOURCE_IN_USE;
1388                }
1389
1390                /* Get pointer to frame chain wished be sent */
1391                chain = (struct grtm_list *)ioarg->buffer;
1392                if ( !chain ){
1393                        /* No new frames to send ==> just trigger the hardware
1394                         * to transmit frames already placed in the ready queue.
1395                         * If someone else is processing the DMA we ignore the
1396                         * request.
1397                         */
1398                        if (grtm_request_txlock(pDev, 0)) {
1399                                grtm_tx_process(pDev);
1400                                grtm_release_txlock(pDev);
1401                        }
1402                        break;
1403                }
1404                if ( !chain->tail || !chain->head ){
1405                        return RTEMS_INVALID_NAME;
1406                }
1407
1408                DBG("GRTM_SEND: head: 0x%x, tail: 0x%x\n",chain->head,chain->tail);
1409
1410                /* Mark ready frames unsent by clearing GRTM_FLAGS_SENT of all frames */
1411
1412                num = 0;
1413                curr = chain->head;
1414                while(curr != chain->tail){
1415                        curr->flags = curr->flags & ~(GRTM_FLAGS_SENT|GRRM_FLAGS_ERR);
1416                        curr = curr->next;
1417                        num++;
1418                }
1419                curr->flags = curr->flags & ~(GRTM_FLAGS_SENT|GRRM_FLAGS_ERR);
1420                num++;
1421
1422                /* wait until we get the device lock */
1423                grtm_request_txlock(pDev, 1);
1424
1425                /* 1. Put frames into ready queue
1426                 *    (New Frames->READY)
1427                 */
1428                if ( pDev->ready.head ){
1429                        /* Frames already on ready queue (no free descriptors previously) ==>
1430                         * Put frames at end of ready queue
1431                         */
1432                        pDev->ready.tail->next = chain->head;
1433                        pDev->ready.tail = chain->tail;
1434                        chain->tail->next = NULL;
1435                }else{
1436                        /* All frames is put into the ready queue for later processing */
1437                        pDev->ready.head = chain->head;
1438                        pDev->ready.tail = chain->tail;
1439                        chain->tail->next = NULL;
1440                }
1441                pDev->ready_cnt += num; /* Added 'num' frames to ready queue */
1442
1443                /* 2. SCHEDULED->SENT
1444                 * 3. READY->SCHEDULED
1445                 */
1446                grtm_tx_process(pDev);
1447                grtm_release_txlock(pDev);
1448                break;
1449
1450                /* Take all available sent frames from the "Sent frames" queue.
1451                 * If no frames has been sent, the thread may get blocked if in blocking
1452                 * mode. The blocking mode is not available if driver is not in running mode.
1453                 *
1454                 * Note this ioctl may return success even if the driver is not in STARTED mode.
1455                 * This is because in case of a error (link error of similar) and the driver switch
1456                 * from START to STOP mode we must still be able to get our frames back.
1457                 *
1458                 * Note in case the driver fails to send a frame for some reason (link error),
1459                 * the sent flag is set to 0 indicating a failure.
1460                 *
1461                 */
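		/* Illustrative usage sketch (not part of the driver and not compiled):
		 * how an application might reclaim frames after transmission. 'fd' is
		 * assumed to be an open GRTM device descriptor and the driver is
		 * assumed to be configured in blocking mode with a timeout.
		 *
		 *   struct grtm_list chain;
		 *   struct grtm_frame *frm;
		 *
		 *   if ( ioctl(fd, GRTM_IOC_RECLAIM, &chain) == 0 ) {
		 *           for (frm = chain.head; frm; frm = frm->next) {
		 *                   if ( frm->flags & GRTM_FLAGS_SENT ) {
		 *                           // frame transmitted, buffer may be reused
		 *                   } else {
		 *                           // transmission failed (e.g. link error)
		 *                   }
		 *           }
		 *   }
		 */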
		case GRTM_IOC_RECLAIM:
		/* Get pointer to where the reclaimed chain should be placed */
		chain = (struct grtm_list *)ioarg->buffer;
		if ( !chain ){
			return RTEMS_INVALID_NAME;
		}

		/* Lock out the interrupt handler */
		grtm_request_txlock(pDev, 1);

		do {
			/* Process the descriptor table and populate it with
			 * new buffers:
			 *    * SCHEDULED->SENT
			 *    * READY->SCHEDULED
			 */
			grtm_tx_process(pDev);

			/* Are there any frames on the sent queue waiting to be
			 * reclaimed?
			 */

			if ( !pDev->sent.head ){
				/* No frames to reclaim - no frames in the sent queue.
				 * Instead, block the thread until frames have been
				 * sent, if in blocking mode.
				 */
				if ( pDev->running && pDev->config.blocking ){
					ret = rtems_semaphore_obtain(pDev->sem_tx,RTEMS_WAIT,pDev->config.timeout);
					if ( ret == RTEMS_TIMEOUT ) {
						grtm_release_txlock(pDev);
						return RTEMS_TIMEOUT;
					} else if ( ret == RTEMS_SUCCESSFUL ) {
						/* There might be frames available, go check */
						continue;
					} else {
						/* any error (driver closed, internal error etc.) */
						grtm_release_txlock(pDev);
						return RTEMS_UNSATISFIED;
					}

				}else{
					/* non-blocking mode, we quit */
					chain->head = NULL;
					chain->tail = NULL;
					/* do not lock out interrupt handler any more */
					grtm_release_txlock(pDev);
					return RTEMS_TIMEOUT;
				}
			}else{
				/* Take all sent frames from the sent queue to the user-space queue */
				chain->head = pDev->sent.head;
				chain->tail = pDev->sent.tail;
				chain->tail->next = NULL; /* Just to be sure */

				/* Mark the sent queue empty */
				grtm_list_clr(&pDev->sent);
				pDev->sent_cnt = 0;

				DBG("TX_RECLAIM: head: 0x%x, tail: 0x%x\n",chain->head,chain->tail);
				break;
			}

		}while(1);

		/* do not lock out interrupt handler any more */
		grtm_release_txlock(pDev);
		break;

		default:
		return RTEMS_NOT_DEFINED;
	}
	return RTEMS_SUCCESSFUL;
}

static void grtm_interrupt(void *arg)
{
	struct grtm_priv *pDev = arg;
	struct grtm_regs *regs = pDev->regs;
	unsigned int status;

	/* Clear interrupt by reading it */
	status = READ_REG(&regs->dma_status);

	/* Spurious Interrupt? */
	if ( !pDev->running || !status)
		return;

	regs->dma_status = status;

	if ( status & GRTM_DMA_STS_TFF ){
		pDev->stats.err_transfer_frame++;
	}

	if ( status & GRTM_DMA_STS_TA ){
		pDev->stats.err_ahb++;
	}

	if ( status & GRTM_DMA_STS_TE ){
		pDev->stats.err_tx++;
	}

	if ( status & GRTM_DMA_STS_TI ){

		if ( pDev->config.isr_desc_proc) {
			if (grtm_request_txlock_isr(pDev)) {
				grtm_tx_process(pDev);
				grtm_release_txlock(pDev);
			}

#if 0
			if ( (pDev->config.blocking==GRTM_BLKMODE_COMPLETE) && pDev->timeout ){
				/* Signal to thread only if enough data is available */
				if ( pDev->wait_for_frames > grtm_data_avail(pDev) ){
					/* Not enough data available */
					goto procceed_processing_interrupts;
				}

				/* A sufficient number of frames have been transmitted,
				 * which means that the waiting thread should be woken up.
				 */
				rtems_semaphore_release(pDev->sem_tx);
			}
#endif
		}

		if ( pDev->config.blocking == GRTM_BLKMODE_BLK ) {
			/* Blocking mode */

#if 0
			/* Disable further interrupts until handled by the waiting task. */
			regs->dma_ctrl = READ_REG(&regs->dma_ctrl) & ~GRTM_DMA_CTRL_IE;
#endif

			/* Signal the semaphore to wake a thread waiting in ioctl(SEND|RECLAIM) */
			rtems_semaphore_release(pDev->sem_tx);
		}

	}
#if 0
procceed_processing_interrupts:
	;
#endif
}

static rtems_device_driver grtm_initialize(
  rtems_device_major_number major,
  rtems_device_minor_number unused,
  void *arg
  )
{
	/* Device Semaphore created with count = 1 */
	if ( rtems_semaphore_create(rtems_build_name('G', 'R', 'T', 'M'),
		1,
		RTEMS_FIFO|RTEMS_NO_INHERIT_PRIORITY|RTEMS_LOCAL|RTEMS_NO_PRIORITY_CEILING,
		0,
		&grtm_dev_sem) != RTEMS_SUCCESSFUL ) {
		return RTEMS_INTERNAL_ERROR;
	}

	return RTEMS_SUCCESSFUL;
}