source: rtems/cpukit/score/cpu/mips/rtems/score/cpu.h @ 143696a
/**
 *  @file
 *
 *  @brief MIPS CPU Dependent Header File
 */

/*
 *  Conversion to MIPS port by Alan Cudmore <alanc@linuxstart.com> and
 *           Joel Sherrill <joel@OARcorp.com>.
 *
 *    These changes made the code conditional on standard cpp predefines,
 *    merged the mips1 and mips3 code sequences as much as possible,
 *    and moved some of the assembly code to C.  Alan did much of the
 *    initial analysis and rework.  Joel took over from there and
 *    wrote the JMR3904 BSP so this could be tested.  Joel also
 *    added the new interrupt vectoring support in libcpu and
 *    tried to better support the various interrupt controllers.
 *
 */

/*
 *  Original MIP64ORION port by Craig Lebakken <craigl@transition.com>
 *           COPYRIGHT (c) 1996 by Transition Networks Inc.
 *
 *    To anyone who acknowledges that this file is provided "AS IS"
 *    without any express or implied warranty:
 *      permission to use, copy, modify, and distribute this file
 *      for any purpose is hereby granted without fee, provided that
 *      the above copyright notice and this notice appears in all
 *      copies, and that the name of Transition Networks not be used in
 *      advertising or publicity pertaining to distribution of the
 *      software without specific, written prior permission.
 *      Transition Networks makes no representations about the suitability
 *      of this software for any purpose.
 *
 *  COPYRIGHT (c) 1989-2012.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

/**
 *  @defgroup ScoreCPU CPU
 *
 *  @ingroup Score
 *
 */
/**@{*/

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/types.h>
#include <rtems/score/mips.h>

/* conditional compilation parameters */

/*
 *  Should the calls to _Thread_Enable_dispatch be inlined?
 *
 *  If TRUE, then they are inlined.
 *  If FALSE, then a subroutine call is made.
 *
 *  Basically this is an example of the classic trade-off of size
 *  versus speed.  Inlining the call (TRUE) typically increases the
 *  size of RTEMS while speeding up the enabling of dispatching.
 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
 *  only be 0 or 1 unless you are in an interrupt handler and that
 *  interrupt handler invokes the executive.]  When not inlined,
 *  something calls _Thread_Enable_dispatch which in turn calls
 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
 *  one subroutine call is avoided entirely.
 */

#define CPU_INLINE_ENABLE_DISPATCH       FALSE

/*
 *  Does RTEMS manage a dedicated interrupt stack in software?
 *
 *  If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
 *  If FALSE, nothing is done.
 *
 *  If the CPU supports a dedicated interrupt stack in hardware,
 *  then it is generally the responsibility of the BSP to allocate it
 *  and set it up.
 *
 *  If the CPU does not support a dedicated interrupt stack, then
 *  the porter has two options: (1) execute interrupt handlers on the
 *  stack of the interrupted task, or (2) have RTEMS manage a dedicated
 *  interrupt stack.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates and internally manages the vector table.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table.
 *
 *  MIPS Specific Information:
 *
 *  Up to and including RTEMS 4.10, the MIPS port used simple vectored
 *  interrupts.  This was changed to the PIC model after 4.10.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/*
 *  Does this CPU have hardware support for a dedicated interrupt stack?
 *
 *  If TRUE, then it must be installed during initialization.
 *  If FALSE, then no installation is performed.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 */

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

/*
 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
 *
 *  If TRUE, then the memory is allocated during initialization.
 *  If FALSE, then no memory is allocated by RTEMS for this purpose.
 *
 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE.
 */

#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER 1


/*
 *  Does the CPU have hardware floating point?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
 *
 *  If there is an FP coprocessor such as the i387 or mc68881, then
 *  the answer is TRUE.
 *
 *  The macro name "MIPS_HAS_FPU" should be made CPU specific.
 *  It indicates whether or not this CPU model has FP support.  For
 *  example, it would be possible to have an i386_nofp CPU model
 *  which sets this to FALSE to indicate that you have an i386 without
 *  an i387 and wish to leave floating point support out of RTEMS.
 */

#if ( MIPS_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE
#else
#define CPU_HARDWARE_FP     FALSE
#endif

/*
 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
 *
 *  So far, the only CPU in which this option has been used is the
 *  HP PA-RISC.  The HP C compiler and gcc both implicitly use the
 *  floating point registers to perform integer multiplies.  If
 *  a function which you would not expect to utilize the FP unit DOES,
 *  then one cannot easily predict which tasks will use the FP hardware.
 *  In this case, this option should be TRUE.
 *
 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
 *
 *  MIPS Note: It appears that GCC can implicitly generate FPU
 *  instructions when you least expect them, so make all tasks
 *  floating point.
 */

#define CPU_ALL_TASKS_ARE_FP CPU_HARDWARE_FP

/*
 *  Should the IDLE task have a floating point context?
 *
 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
 *  and it has a floating point context which is switched in and out.
 *  If FALSE, then the IDLE task does not have a floating point context.
 *
 *  Setting this to TRUE negatively impacts the time required to preempt
 *  the IDLE task from an interrupt because the floating point context
 *  must be saved as part of the preemption.
 */

#define CPU_IDLE_TASK_IS_FP      FALSE

/*
 *  Should the saving of the floating point registers be deferred
 *  until a context switch is made to another different floating point
 *  task?
 *
 *  If TRUE, then the floating point context will not be stored until
 *  necessary.  It will remain in the floating point registers and not
 *  disturbed until another floating point task is switched to.
 *
 *  If FALSE, then the floating point context is saved when a floating
 *  point task is switched out and restored when the next floating point
 *  task is restored.  The state of the floating point registers between
 *  those two operations is not specified.
 *
 *  If the floating point context does NOT have to be saved as part of
 *  interrupt dispatching, then it should be safe to set this to TRUE.
 *
 *  Setting this flag to TRUE results in using a different algorithm
 *  for deciding when to save and restore the floating point context.
 *  The deferred FP switch algorithm minimizes the number of times
 *  the FP context is saved and restored.  The FP context is not saved
 *  until a context switch is made to another, different FP task.
 *  Thus in a system with only one FP task, the FP context will never
 *  be saved or restored.
 */

#define CPU_USE_DEFERRED_FP_SWITCH       TRUE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Internal_threads_Idle_thread_body
 *  must be provided and is the default IDLE thread body instead of
 *  _Internal_threads_Idle_thread_body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 *
 *  This is intended to allow for supporting processors which have
 *  a low power or idle mode.  When the IDLE thread is executed, then
 *  the CPU can be powered down.
 *
 *  The order of precedence for selecting the IDLE thread body is:
 *
 *    1.  BSP provided
 *    2.  CPU dependent (if provided)
 *    3.  generic (if no BSP and no CPU dependent)
 */

/* we can use the low power wait instruction for the IDLE thread */
#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE

/*
 *  Does the stack grow up (toward higher addresses) or down
 *  (toward lower addresses)?
 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
 */

/* our stack grows down */
#define CPU_STACK_GROWS_UP               FALSE

/*
 *  The following is the variable attribute used to force alignment
 *  of critical RTEMS structures.  On some processors it may make
 *  sense to have these aligned on tighter boundaries than
 *  the minimum requirements of the compiler in order to have as
 *  much of the critical data area as possible in a cache line.
 *
 *  The placement of this macro in the declaration of the variables
 *  is based on the syntactic requirements of the GNU C
 *  "__attribute__" extension.  For example with GNU C, use
 *  the following to force a structure to a 32 byte boundary.
 *
 *      __attribute__ ((aligned (32)))
 *
 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
 *         To benefit from using this, the data must be heavily
 *         used so it will stay in the cache and used frequently enough
 *         in the executive to justify turning this on.
 */

/* our cache line size is 16 bytes */
#if __GNUC__
#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (16)))
#else
#define CPU_STRUCTURE_ALIGNMENT
#endif

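/*
 *  As an illustration only (not part of this header; the type and
 *  variable names are hypothetical), a structure whose data should
 *  start on a cache line can be declared with the macro above:
 *
 *      typedef struct {
 *        uint32_t bit_map[ 16 ];
 *      } Example_bit_map;
 *
 *      static Example_bit_map _Example_map CPU_STRUCTURE_ALIGNMENT;
 *
 *  With GCC this expands to __attribute__ ((aligned (16))), placing the
 *  object on a 16-byte (cache line) boundary; with other compilers the
 *  macro is empty and the default alignment applies.
 */
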
#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE

/*
 *  Define what is required to specify how the network to host conversion
 *  routines are handled.
 */

/* __MIPSEB__ or __MIPSEL__ is defined by GCC based on -EB or -EL command line options */
#if defined(__MIPSEB__)
#define CPU_BIG_ENDIAN                           TRUE
#define CPU_LITTLE_ENDIAN                        FALSE
#elif defined(__MIPSEL__)
#define CPU_BIG_ENDIAN                           FALSE
#define CPU_LITTLE_ENDIAN                        TRUE
#else
#error "Unknown endianness"
#endif

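/*
 *  A minimal sketch of how these macros are typically consumed; the
 *  helper name below is hypothetical and not part of this header.  On a
 *  little-endian MIPS build the value is byte swapped (CPU_swap_u32 is
 *  defined later in this file), on a big-endian build it is returned
 *  unchanged:
 *
 *      static inline uint32_t example_htonl( uint32_t value )
 *      {
 *      #if ( CPU_LITTLE_ENDIAN == TRUE )
 *        return CPU_swap_u32( value );
 *      #else
 *        return value;
 *      #endif
 *      }
 */
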
/*
 *  The following defines the number of bits actually used in the
 *  interrupt field of the task mode.  How those bits map to the
 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
 */

#define CPU_MODES_INTERRUPT_MASK   0x000000ff

#define CPU_SIZEOF_POINTER 4

#define CPU_PER_CPU_CONTROL_SIZE 0

/*
 *  Processor defined structures
 *
 *  Example structures include the descriptor tables from the i386
 *  and the processor control structure on the i960ca.
 */

/* may need to put some structures here.  */

/*
 * Contexts
 *
 *  Generally there are 2 types of context to save.
 *     1. Interrupt registers to save
 *     2. Task level registers to save
 *
 *  This means we have the following 3 context items:
 *     1. task level context stuff::  Context_Control
 *     2. floating point task stuff:: Context_Control_fp
 *     3. special interrupt level context :: Context_Control_interrupt
 *
 *  On some processors, it is cost-effective to save only the callee
 *  preserved registers during a task context switch.  This means
 *  that the ISR code needs to save those registers which do not
 *  persist across function calls.  It is not mandatory to make this
 *  distinction between the caller/callee saved registers for the
 *  purpose of minimizing context saved during task switch and on interrupts.
 *  If the cost of saving extra registers is minimal, simplicity is the
 *  choice.  Save the same context on interrupt entry as for tasks in
 *  this case.
 *
 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
 *  care should be used in designing the context area.
 *
 *  On some CPUs with hardware floating point support, the Context_Control_fp
 *  structure will not be used or it simply consists of an array of a
 *  fixed number of bytes.  This is done when the floating point context
 *  is dumped by a "FP save context" type instruction and the format
 *  is not really defined by the CPU.  In this case, there is no need
 *  to figure out the exact format -- only the size.  Of course, although
 *  this is enough information for RTEMS, it is probably not enough for
 *  a debugger such as gdb.  But that is another problem.
 */

#ifndef ASM

typedef struct {
  /* There is no CPU specific per-CPU state */
} CPU_Per_CPU_control;

/* WARNING: If this structure is modified, the constants in cpu.h must be updated. */
#if (__mips == 1) || (__mips == 32)
#define __MIPS_REGISTER_TYPE     uint32_t
#define __MIPS_FPU_REGISTER_TYPE uint32_t
#elif __mips == 3
#define __MIPS_REGISTER_TYPE     uint64_t
#define __MIPS_FPU_REGISTER_TYPE uint64_t
#else
#error "mips register size: unknown architecture level!!"
#endif
typedef struct {
    __MIPS_REGISTER_TYPE s0;
    __MIPS_REGISTER_TYPE s1;
    __MIPS_REGISTER_TYPE s2;
    __MIPS_REGISTER_TYPE s3;
    __MIPS_REGISTER_TYPE s4;
    __MIPS_REGISTER_TYPE s5;
    __MIPS_REGISTER_TYPE s6;
    __MIPS_REGISTER_TYPE s7;
    __MIPS_REGISTER_TYPE sp;
    __MIPS_REGISTER_TYPE fp;
    __MIPS_REGISTER_TYPE ra;
    __MIPS_REGISTER_TYPE c0_sr;
    __MIPS_REGISTER_TYPE c0_epc;
} Context_Control;

#define _CPU_Context_Get_SP( _context ) \
  (uintptr_t) (_context)->sp

/* WARNING: If this structure is modified, the constants in cpu.h
 *          must also be updated.
 */

typedef struct {
#if ( CPU_HARDWARE_FP == TRUE )
    __MIPS_FPU_REGISTER_TYPE fp0;
    __MIPS_FPU_REGISTER_TYPE fp1;
    __MIPS_FPU_REGISTER_TYPE fp2;
    __MIPS_FPU_REGISTER_TYPE fp3;
    __MIPS_FPU_REGISTER_TYPE fp4;
    __MIPS_FPU_REGISTER_TYPE fp5;
    __MIPS_FPU_REGISTER_TYPE fp6;
    __MIPS_FPU_REGISTER_TYPE fp7;
    __MIPS_FPU_REGISTER_TYPE fp8;
    __MIPS_FPU_REGISTER_TYPE fp9;
    __MIPS_FPU_REGISTER_TYPE fp10;
    __MIPS_FPU_REGISTER_TYPE fp11;
    __MIPS_FPU_REGISTER_TYPE fp12;
    __MIPS_FPU_REGISTER_TYPE fp13;
    __MIPS_FPU_REGISTER_TYPE fp14;
    __MIPS_FPU_REGISTER_TYPE fp15;
    __MIPS_FPU_REGISTER_TYPE fp16;
    __MIPS_FPU_REGISTER_TYPE fp17;
    __MIPS_FPU_REGISTER_TYPE fp18;
    __MIPS_FPU_REGISTER_TYPE fp19;
    __MIPS_FPU_REGISTER_TYPE fp20;
    __MIPS_FPU_REGISTER_TYPE fp21;
    __MIPS_FPU_REGISTER_TYPE fp22;
    __MIPS_FPU_REGISTER_TYPE fp23;
    __MIPS_FPU_REGISTER_TYPE fp24;
    __MIPS_FPU_REGISTER_TYPE fp25;
    __MIPS_FPU_REGISTER_TYPE fp26;
    __MIPS_FPU_REGISTER_TYPE fp27;
    __MIPS_FPU_REGISTER_TYPE fp28;
    __MIPS_FPU_REGISTER_TYPE fp29;
    __MIPS_FPU_REGISTER_TYPE fp30;
    __MIPS_FPU_REGISTER_TYPE fp31;
    uint32_t fpcs;
#endif
} Context_Control_fp;

/*
 *  This struct reflects the stack frame employed in ISR_Handler.  Note
 *  that the ISR routine saves some of the registers to this frame for
 *  all interrupts and exceptions.  Other registers are saved only on
 *  exceptions, while others are not touched at all.  The untouched
 *  registers are not normally disturbed by high-level language
 *  programs so they can be accessed when required.
 *
 *  The registers and their ordering in this struct must directly
 *  correspond to the layout and ordering of registers shown in iregdef.h,
 *  as cpu_asm.S uses those definitions to fill the stack frame.
 *  This struct provides access to the stack frame for C code.
 *
 *  Similarly, this structure is used by debugger stubs and exception
 *  processing routines so be careful when changing the format.
 *
 *  NOTE: The comments with this structure and cpu_asm.S should be kept
 *        in sync.  When in doubt, look in the code to see if the
 *        registers you're interested in are actually treated as expected.
 *        The order of the first portion of this structure follows the
 *        order of registers expected by gdb.
 */

typedef struct
{
  __MIPS_REGISTER_TYPE  r0;       /*  0 -- NOT FILLED IN */
  __MIPS_REGISTER_TYPE  at;       /*  1 -- saved always */
  __MIPS_REGISTER_TYPE  v0;       /*  2 -- saved always */
  __MIPS_REGISTER_TYPE  v1;       /*  3 -- saved always */
  __MIPS_REGISTER_TYPE  a0;       /*  4 -- saved always */
  __MIPS_REGISTER_TYPE  a1;       /*  5 -- saved always */
  __MIPS_REGISTER_TYPE  a2;       /*  6 -- saved always */
  __MIPS_REGISTER_TYPE  a3;       /*  7 -- saved always */
  __MIPS_REGISTER_TYPE  t0;       /*  8 -- saved always */
  __MIPS_REGISTER_TYPE  t1;       /*  9 -- saved always */
  __MIPS_REGISTER_TYPE  t2;       /* 10 -- saved always */
  __MIPS_REGISTER_TYPE  t3;       /* 11 -- saved always */
  __MIPS_REGISTER_TYPE  t4;       /* 12 -- saved always */
  __MIPS_REGISTER_TYPE  t5;       /* 13 -- saved always */
  __MIPS_REGISTER_TYPE  t6;       /* 14 -- saved always */
  __MIPS_REGISTER_TYPE  t7;       /* 15 -- saved always */
  __MIPS_REGISTER_TYPE  s0;       /* 16 -- saved on exceptions */
  __MIPS_REGISTER_TYPE  s1;       /* 17 -- saved on exceptions */
  __MIPS_REGISTER_TYPE  s2;       /* 18 -- saved on exceptions */
  __MIPS_REGISTER_TYPE  s3;       /* 19 -- saved on exceptions */
  __MIPS_REGISTER_TYPE  s4;       /* 20 -- saved on exceptions */
  __MIPS_REGISTER_TYPE  s5;       /* 21 -- saved on exceptions */
  __MIPS_REGISTER_TYPE  s6;       /* 22 -- saved on exceptions */
  __MIPS_REGISTER_TYPE  s7;       /* 23 -- saved on exceptions */
  __MIPS_REGISTER_TYPE  t8;       /* 24 -- saved always */
  __MIPS_REGISTER_TYPE  t9;       /* 25 -- saved always */
  __MIPS_REGISTER_TYPE  k0;       /* 26 -- NOT FILLED IN, kernel tmp reg */
  __MIPS_REGISTER_TYPE  k1;       /* 27 -- NOT FILLED IN, kernel tmp reg */
  __MIPS_REGISTER_TYPE  gp;       /* 28 -- saved always */
  __MIPS_REGISTER_TYPE  sp;       /* 29 -- saved on exceptions NOT RESTORED */
  __MIPS_REGISTER_TYPE  fp;       /* 30 -- saved always */
  __MIPS_REGISTER_TYPE  ra;       /* 31 -- saved always */
  __MIPS_REGISTER_TYPE  c0_sr;    /* 32 -- saved always, some bits are */
                                  /*    manipulated per-thread          */
  __MIPS_REGISTER_TYPE  mdlo;     /* 33 -- saved always */
  __MIPS_REGISTER_TYPE  mdhi;     /* 34 -- saved always */
  __MIPS_REGISTER_TYPE  badvaddr; /* 35 -- saved on exceptions, read-only */
  __MIPS_REGISTER_TYPE  cause;    /* 36 -- saved on exceptions NOT restored */
  __MIPS_REGISTER_TYPE  epc;      /* 37 -- saved always, read-only register */
                                  /*        but logically restored */
  __MIPS_FPU_REGISTER_TYPE f0;    /* 38 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f1;    /* 39 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f2;    /* 40 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f3;    /* 41 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f4;    /* 42 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f5;    /* 43 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f6;    /* 44 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f7;    /* 45 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f8;    /* 46 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f9;    /* 47 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f10;   /* 48 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f11;   /* 49 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f12;   /* 50 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f13;   /* 51 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f14;   /* 52 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f15;   /* 53 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f16;   /* 54 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f17;   /* 55 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f18;   /* 56 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f19;   /* 57 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f20;   /* 58 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f21;   /* 59 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f22;   /* 60 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f23;   /* 61 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f24;   /* 62 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f25;   /* 63 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f26;   /* 64 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f27;   /* 65 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f28;   /* 66 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f29;   /* 67 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f30;   /* 68 -- saved if FP enabled */
  __MIPS_FPU_REGISTER_TYPE f31;   /* 69 -- saved if FP enabled */
  __MIPS_REGISTER_TYPE     fcsr;  /* 70 -- saved on exceptions */
                                  /*    (oddly not documented on MGV) */
  __MIPS_REGISTER_TYPE     feir;  /* 71 -- saved on exceptions */
                                  /*    (oddly not documented on MGV) */

  /* GDB does not seem to care about anything past this point */

  __MIPS_REGISTER_TYPE  tlbhi;    /* 72 -- NOT FILLED IN, doesn't exist on */
                                  /*         all MIPS CPUs (at least MGV) */
#if __mips == 1
  __MIPS_REGISTER_TYPE  tlblo;    /* 73 -- NOT FILLED IN, doesn't exist on */
                                  /*         all MIPS CPUs (at least MGV) */
#endif
#if  (__mips == 3) || (__mips == 32)
  __MIPS_REGISTER_TYPE  tlblo0;   /* 73 -- NOT FILLED IN, doesn't exist on */
                                  /*         all MIPS CPUs (at least MGV) */
#endif

  __MIPS_REGISTER_TYPE  inx;      /* 74 -- NOT FILLED IN, doesn't exist on */
                                  /*         all MIPS CPUs (at least MGV) */
  __MIPS_REGISTER_TYPE  rand;     /* 75 -- NOT FILLED IN, doesn't exist on */
                                  /*         all MIPS CPUs (at least MGV) */
  __MIPS_REGISTER_TYPE  ctxt;     /* 76 -- NOT FILLED IN, doesn't exist on */
                                  /*         all MIPS CPUs (at least MGV) */
  __MIPS_REGISTER_TYPE  exctype;  /* 77 -- NOT FILLED IN (not enough info) */
  __MIPS_REGISTER_TYPE  mode;     /* 78 -- NOT FILLED IN (not enough info) */
  __MIPS_REGISTER_TYPE  prid;     /* 79 -- NOT FILLED IN (no need to do so) */
  __MIPS_REGISTER_TYPE  tar;      /* 80 -- target address register, filled on exceptions */
  /* end of __mips == 1 so NREGS == 81 */
#if  (__mips == 3) || (__mips == 32)
  __MIPS_REGISTER_TYPE  tlblo1;   /* 81 -- NOT FILLED IN */
  __MIPS_REGISTER_TYPE  pagemask; /* 82 -- NOT FILLED IN */
  __MIPS_REGISTER_TYPE  wired;    /* 83 -- NOT FILLED IN */
  __MIPS_REGISTER_TYPE  count;    /* 84 -- NOT FILLED IN */
  __MIPS_REGISTER_TYPE  compare;  /* 85 -- NOT FILLED IN */
  __MIPS_REGISTER_TYPE  config;   /* 86 -- NOT FILLED IN */
  __MIPS_REGISTER_TYPE  lladdr;   /* 87 -- NOT FILLED IN */
  __MIPS_REGISTER_TYPE  watchlo;  /* 88 -- NOT FILLED IN */
  __MIPS_REGISTER_TYPE  watchhi;  /* 89 -- NOT FILLED IN */
  __MIPS_REGISTER_TYPE  ecc;      /* 90 -- NOT FILLED IN */
  __MIPS_REGISTER_TYPE  cacheerr; /* 91 -- NOT FILLED IN */
  __MIPS_REGISTER_TYPE  taglo;    /* 92 -- NOT FILLED IN */
  __MIPS_REGISTER_TYPE  taghi;    /* 93 -- NOT FILLED IN */
  __MIPS_REGISTER_TYPE  errpc;    /* 94 -- NOT FILLED IN */
  __MIPS_REGISTER_TYPE  xctxt;    /* 95 -- NOT FILLED IN */
  /* end of __mips == 3 so NREGS == 96 */
#endif

} CPU_Interrupt_frame;

typedef CPU_Interrupt_frame CPU_Exception_frame;

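/*
 *  Purely as an illustration (not part of this header; the function name
 *  is hypothetical), C code that is handed one of these frames -- for
 *  example a BSP exception hook -- can read the interesting CP0 values
 *  directly from the members defined above, printed here as 32-bit
 *  values:
 *
 *      #include <rtems/bspIo.h>
 *
 *      void example_exception_hook( const CPU_Exception_frame *frame )
 *      {
 *        printk(
 *          "EPC 0x%08x CAUSE 0x%08x BADVADDR 0x%08x SR 0x%08x\n",
 *          (unsigned) frame->epc,
 *          (unsigned) frame->cause,
 *          (unsigned) frame->badvaddr,
 *          (unsigned) frame->c0_sr
 *        );
 *      }
 */
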
/*
 *  This variable is optional.  It is used on CPUs on which it is difficult
 *  to generate an "uninitialized" FP context.  It is filled in by
 *  _CPU_Initialize and copied into the task's FP context area during
 *  _CPU_Context_Initialize.
 */

SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;

/*
 *  Nothing prevents the porter from declaring more CPU specific variables.
 */

/* XXX: if needed, put more variables here */

/*
 *  The size of the floating point context area.  On some CPUs this
 *  will not be a "sizeof" because the format of the floating point
 *  area is not defined -- only the size is.  This is usually on
 *  CPUs with a "floating point save context" instruction.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/*
 *  Amount of extra stack (above minimum stack size) required by
 *  system initialization thread.  Remember that in a multiprocessor
 *  system the system initialization thread becomes the MP server thread.
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

/*
 *  Should be large enough to run all RTEMS tests.  This ensures
 *  that a "reasonable" small application should not have any problems.
 */

#define CPU_STACK_MINIMUM_SIZE          (8 * 1024)

/*
 *  CPU's worst alignment requirement for data types on a byte boundary.  This
 *  alignment does not take into account the requirements for the stack.
 */

#define CPU_ALIGNMENT              8

/*
 *  This number corresponds to the byte alignment requirement for the
 *  heap handler.  This alignment requirement may be stricter than that
 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
 *  common for the heap to follow the same alignment requirement as
 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
 *  then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for memory
 *  buffers allocated by the partition manager.  This alignment requirement
 *  may be stricter than that for the data types alignment specified by
 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for the
 *  stack.  This alignment requirement may be stricter than that for the
 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
 *  is strict enough for the stack, then this should be set to 0.
 *
 *  NOTE:  This must be a power of 2, either 0 or greater than CPU_ALIGNMENT.
 */

#define CPU_STACK_ALIGNMENT        CPU_ALIGNMENT

void mips_vector_exceptions( CPU_Interrupt_frame *frame );

/*
 *  ISR handler macros
 */

/*
 *  Declare the function, present in the shared libcpu directory,
 *  that returns the processor dependent interrupt mask.
 */

uint32_t mips_interrupt_mask( void );

/*
 *  Disable all interrupts for an RTEMS critical section.  The previous
 *  level is returned in _level.
 */

#define _CPU_ISR_Disable( _level ) \
  do { \
    unsigned int _scratch; \
    mips_get_sr( _scratch ); \
    mips_set_sr( _scratch & ~SR_INTERRUPT_ENABLE_BITS ); \
    _level = _scratch & SR_INTERRUPT_ENABLE_BITS; \
  } while(0)

/*
 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
 *  This indicates the end of an RTEMS critical section.  The parameter
 *  _level is not modified.
 */

#define _CPU_ISR_Enable( _level )  \
  do { \
    unsigned int _scratch; \
    mips_get_sr( _scratch ); \
    mips_set_sr( (_scratch & ~SR_INTERRUPT_ENABLE_BITS) | (_level & SR_INTERRUPT_ENABLE_BITS) ); \
  } while(0)

/*
 *  This temporarily restores interrupts to _level before immediately
 *  disabling them again.  This is used to divide long RTEMS critical
 *  sections into two or more parts.  The parameter _level is not
 *  modified.
 */

#define _CPU_ISR_Flash( _xlevel ) \
  do { \
    unsigned int _scratch2 = _xlevel; \
    _CPU_ISR_Enable( _scratch2 ); \
    _CPU_ISR_Disable( _scratch2 ); \
    _xlevel = _scratch2; \
  } while(0)

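/*
 *  A minimal usage sketch of the three macros above (illustrative only;
 *  the counter variables are hypothetical and not part of this header).
 *  A long critical section is split in two so that pending interrupts
 *  can be serviced at the flash point:
 *
 *      uint32_t level;
 *
 *      _CPU_ISR_Disable( level );
 *      first_shared_counter++;
 *      _CPU_ISR_Flash( level );
 *      second_shared_counter++;
 *      _CPU_ISR_Enable( level );
 *
 *  The value left in level is the set of SR interrupt enable bits that
 *  were active before the section was entered; it must be passed back
 *  to _CPU_ISR_Enable unchanged.
 */
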
/*
 *  Map interrupt level in task mode onto the hardware that the CPU
 *  actually provides.  Currently, interrupt levels which do not
 *  map onto the CPU in a generic fashion are undefined.  Someday,
 *  it would be nice if these were "mapped" by the application
 *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
 *  8 - 255 would be available for bsp/application specific meaning.
 *  This could be used to manage a programmable interrupt controller
 *  via the rtems_task_mode directive.
 *
 *  On the MIPS, 0 is all on.  Non-zero is all off.  This only
 *  manipulates the IEC.
 */

uint32_t   _CPU_ISR_Get_level( void );  /* in cpu.c */

void _CPU_ISR_Set_level( uint32_t   );  /* in cpu.c */

/* end of ISR handler macros */

/* Context handler macros */

/*
 *  Initialize the context to a state suitable for starting a
 *  task after a context restore operation.  Generally, this
 *  involves:
 *
 *     - setting a starting address
 *     - preparing the stack
 *     - preparing the stack and frame pointers
 *     - setting the proper interrupt level in the context
 *     - initializing the floating point context
 *
 *  This routine generally does not set any unnecessary register
 *  in the context.  The state of the "general data" registers is
 *  undefined at task start time.
 *
 *  NOTE: The is_fp parameter is TRUE if the thread is to be a floating
 *        point thread.  This is typically only used on CPUs where the
 *        FPU may be easily disabled by software such as on the SPARC
 *        where the PSR contains an enable FPU bit.
 *
 *  The per-thread status register holds the interrupt enable, FP enable
 *  and global interrupt enable for that thread.  It means each thread can
 *  enable its own set of interrupts.  If interrupts are disabled, RTEMS
 *  can still dispatch via blocking calls.  This is the function of the
 *  "Interrupt Level", and on the MIPS, it controls the IEC bit and all
 *  the hardware interrupts as defined in the SR.  Software ints
 *  are automatically enabled for all threads, as they will only occur under
 *  program control anyhow.  Besides, the interrupt level parm is only 8 bits,
 *  and controlling the software ints plus the others would require 9.
 *
 *  If the Interrupt Level is 0, all ints are on.  Otherwise, the
 *  Interrupt Level should supply a bit pattern to impose on the SR
 *  interrupt bits; bit 0 applies to the mips1 IEC bit/mips3 EXL&IE, bits 1 thru 6
 *  apply to the SR register Intr bits from bit 10 thru bit 15.  Bit 7 of
 *  the Interrupt Level parameter is unused at this time.
 *
 *  These are the only per-thread SR bits, the others are maintained
 *  globally & explicitly preserved by the Context Switch code in cpu_asm.S
 */


#if (__mips == 3) || (__mips == 32)
#define _INTON          SR_IE
#if __mips_fpr==64
#define _EXTRABITS      SR_FR
#else
#define _EXTRABITS      0
#endif /* __mips_fpr==64 */
#endif /* __mips == 3 || __mips == 32 */
#if __mips == 1
#define _INTON          SR_IEC
#define _EXTRABITS      0  /* make sure we're in user mode on MIPS1 processors */
#endif /* __mips == 1 */


void _CPU_Context_Initialize(
  Context_Control  *the_context,
  uintptr_t        *stack_base,
  uint32_t          size,
  uint32_t          new_level,
  void             *entry_point,
  bool              is_fp,
  void             *tls_area
);


/*
 *  This routine is responsible for somehow restarting the currently
 *  executing task.  If you are lucky, then all that is necessary
 *  is restoring the context.  Otherwise, there will need to be
 *  a special assembly routine which does something special in this
 *  case.  Context_Restore should work most of the time.  It will
 *  not work if restarting self conflicts with the stack frame
 *  assumptions of restoring a context.
 */

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

/*
 *  The purpose of this macro is to allow the initial pointer into
 *  a floating point context area (used to save the floating point
 *  context) to be at an arbitrary place in the floating point
 *  context area.
 *
 *  This is necessary because some FP units are designed to have
 *  their context saved as a stack which grows into lower addresses.
 *  Other FP units can be saved by simply moving registers into offsets
 *  from the base of the context area.  Finally some FP units provide
 *  a "dump context" instruction which could fill in from high to low
 *  or low to high based on the whim of the CPU designers.
 */

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/*
 *  This routine initializes the FP context area passed to it.
 *  There are a few standard ways in which to initialize the
 *  floating point context.  The code included for this macro assumes
 *  that this is a CPU in which an "initial" FP context was saved into
 *  _CPU_Null_fp_context and it simply copies it to the destination
 *  context passed to it.
 *
 *  Other models include (1) not doing anything, and (2) putting
 *  a "null FP status word" in the correct place in the FP context.
 */

#if ( CPU_HARDWARE_FP == TRUE )
#define _CPU_Context_Initialize_fp( _destination ) \
  { \
   *(*(_destination)) = _CPU_Null_fp_context; \
  }
#endif

/* end of Context handler macros */

/* Fatal Error manager macros */

/*
 *  This routine copies _error into a known place -- typically a stack
 *  location or a register, optionally disables interrupts, and
 *  halts/stops the CPU.
 */

#define _CPU_Fatal_halt( _source, _error ) \
  do { \
    unsigned int _level; \
    _CPU_ISR_Disable(_level); \
    (void)_level; \
    loop: goto loop; \
  } while (0)


extern void mips_break( int error );

/* Bitfield handler macros */

/*
 *  This routine sets _output to the bit number of the first bit
 *  set in _value.  _value is of CPU dependent type Priority_bit_map_Word.
 *  This type may be either 16 or 32 bits wide although only the 16
 *  least significant bits will be used.
 *
 *  There are a number of variations in using a "find first bit" type
 *  instruction.
 *
 *    (1) What happens when run on a value of zero?
 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
 *    (3) The numbering may be zero or one based.
 *    (4) The "find first bit" instruction may search from MSB or LSB.
 *
 *  RTEMS guarantees that (1) will never happen so it is not a concern.
 *  (2), (3), (4) are handled by the macros _CPU_Priority_mask() and
 *  _CPU_Priority_bits_index().  These three form a set of routines
 *  which must logically operate together.  Bits in the _value are
 *  set and cleared based on masks built by _CPU_Priority_mask().
 *  The basic major and minor values calculated by _Priority_Major()
 *  and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
 *  to properly range between the values returned by the "find first bit"
 *  instruction.  This makes it possible for _Priority_Get_highest() to
 *  calculate the major and directly index into the minor table.
 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
 *  is the first bit found.
 *
 *  This entire "find first bit" and mapping process depends heavily
 *  on the manner in which a priority is broken into a major and minor
 *  components with the major being the 4 MSB of a priority and minor
 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
 *  to the lowest priority.
 *
 *  If your CPU does not have a "find first bit" instruction, then
 *  there are ways to make do without it.  Here are a handful of ways
 *  to implement this in software:
 *
 *    - a series of 16 bit test instructions
 *    - a "binary search using if's"
 *    - _number = 0
 *      if _value > 0x00ff
 *        _value >>= 8
 *        _number = 8;
 *
 *      if _value > 0x000f
 *        _value >>= 4
 *        _number += 4
 *
 *      _number += bit_set_table[ _value ]
 *
 *    where bit_set_table[ 16 ] has values which indicate the first
 *      bit set
 */

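/*
 *  As a sketch only (this port uses the generic bitfield code selected
 *  below, so nothing like this is compiled here), the table-driven
 *  software fallback described above could look like the following; the
 *  names are hypothetical:
 *
 *      static const unsigned char example_bit_set_table[ 16 ] = {
 *        0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
 *      };
 *
 *      static inline unsigned int example_find_first_bit( uint16_t value )
 *      {
 *        unsigned int number = 0;
 *
 *        if ( value > 0x00ff ) {
 *          value >>= 8;
 *          number = 8;
 *        }
 *        if ( value > 0x000f ) {
 *          value >>= 4;
 *          number += 4;
 *        }
 *        return number + example_bit_set_table[ value ];
 *      }
 *
 *  This returns the bit number of the most significant set bit, counting
 *  from the least significant bit as bit 0; real port macros must also
 *  apply whatever numbering convention _CPU_Priority_bits_index() expects.
 */
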
#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
#define CPU_USE_GENERIC_BITFIELD_DATA TRUE

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    (_output) = 0;   /* do something to prevent warnings */ \
  }

#endif

/* end of Bitfield handler macros */

/*
 *  This routine builds the mask which corresponds to the bit fields
 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
 *  for that routine.
 */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )

#endif

/*
 *  This routine translates the bit numbers returned by
 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
 *  a major or minor component of a priority.  See the discussion
 *  for that routine.
 */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)

#endif

/* end of Priority handler macros */

/* functions */

/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Install_interrupt_stack
 *
 *  This routine installs the hardware interrupt stack pointer.
 *
 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
 *         is TRUE.
 */

void _CPU_Install_interrupt_stack( void );

/*
 *  _CPU_Thread_Idle_body
 *
 *  This routine is the CPU dependent IDLE thread body.
 *
 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
 *         is TRUE.
 */

void *_CPU_Thread_Idle_body( uintptr_t ignored );

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */

void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);

static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
  /* TODO */
}

static inline void _CPU_Context_validate( uintptr_t pattern )
{
  while (1) {
    /* TODO */
  }
}

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

/*  The following routine swaps the endian format of an unsigned int.
 *  It must be static because it is referenced indirectly.
 *
 *  This version will work on any processor, but if there is a better
 *  way for your CPU PLEASE use it.  The most common way to do this is to:
 *
 *     swap least significant two bytes with 16-bit rotate
 *     swap upper and lower 16-bits
 *     swap most significant two bytes with 16-bit rotate
 *
 *  Some CPUs have special instructions which swap a 32-bit quantity in
 *  a single instruction (e.g. i486).  It is probably best to avoid
 *  an "endian swapping control bit" in the CPU.  One good reason is
 *  that interrupts would probably have to be disabled to ensure that
 *  an interrupt does not try to access the same "chunk" with the wrong
 *  endian.  Another good reason is that on some CPUs, the endian bit
 *  changes the endianness for ALL fetches -- both code and data -- so
 *  the code will be fetched incorrectly.
 */

static inline uint32_t CPU_swap_u32(
  uint32_t value
)
{
  uint32_t   byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value        & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return( swapped );
}

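/*
 *  As a sketch of the rotate-style approach described above (illustrative
 *  only; the helper name is hypothetical and this port uses the generic
 *  shift-and-mask version above).  The first statement swaps the two
 *  bytes inside each halfword, the second exchanges the halfwords:
 *
 *      static inline uint32_t example_swap_u32_rotate( uint32_t value )
 *      {
 *        uint32_t swapped;
 *
 *        swapped = ( ( value & 0x00ff00ffU ) << 8 ) |
 *                  ( ( value & 0xff00ff00U ) >> 8 );
 *        swapped = ( swapped << 16 ) | ( swapped >> 16 );
 *        return swapped;
 *      }
 *
 *  A compiler for a CPU with rotate instructions will usually turn the
 *  halfword exchange into a single 16-bit rotate.
 */
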
#define CPU_swap_u16( value ) \
  (((value&0xff) << 8) | ((value >> 8)&0xff))

typedef uint32_t CPU_Counter_ticks;

CPU_Counter_ticks _CPU_Counter_read( void );

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}

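/*
 *  A short usage sketch (illustrative only; the function being measured
 *  is hypothetical).  Elapsed ticks should be computed with
 *  _CPU_Counter_difference rather than by comparing raw values, since
 *  the counter is free running and may wrap:
 *
 *      CPU_Counter_ticks begin, end, elapsed;
 *
 *      begin = _CPU_Counter_read();
 *      do_something_to_measure();
 *      end = _CPU_Counter_read();
 *      elapsed = _CPU_Counter_difference( end, begin );
 *
 *  The difference is computed with unsigned arithmetic, so a single wrap
 *  of the 32-bit counter between the two reads is handled correctly.
 */
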
#endif /* ASM */

#ifdef __cplusplus
}
#endif

/**@}*/
#endif /* _RTEMS_SCORE_CPU_H */