source: rtems/c/src/exec/score/cpu/mips64orion/cpu.h @ e2d79559

Last change on this file since e2d79559 was e2d79559, checked in by Joel Sherrill <joel.sherrill@…>, on 04/09/97 at 14:05:50

Added ka9q tcpip stack and network driver for the gen68360. This effort
was done based on the 3.6.0 release and had to be autoconf'ed locally.
It is turned on if the bsp enables it and it is not explicitly disabled
via the configure option --disable-tcpip. As many warnings as possible
were removed locally after the code was merged. Only the gen68360
and mvme136 bsps were compiled this way.

The ka9q port and network driver were submitted by Eric Norum
(eric@…).

The network demo programs are not included in the tree at this point.

  • Property mode set to 100644
File size: 31.0 KB
/*  cpu.h
 *
 *  This include file contains information pertaining to the IDT 4650
 *  processor.
 *
 *  Author:     Craig Lebakken <craigl@transition.com>
 *
 *  COPYRIGHT (c) 1996 by Transition Networks Inc.
 *
 *  To anyone who acknowledges that this file is provided "AS IS"
 *  without any express or implied warranty:
 *      permission to use, copy, modify, and distribute this file
 *      for any purpose is hereby granted without fee, provided that
 *      the above copyright notice and this notice appears in all
 *      copies, and that the name of Transition Networks not be used in
 *      advertising or publicity pertaining to distribution of the
 *      software without specific, written prior permission.
 *      Transition Networks makes no representations about the suitability
 *      of this software for any purpose.
 *
 *  Derived from c/src/exec/score/cpu/no_cpu/cpu.h:
 *
 *  COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
 *  On-Line Applications Research Corporation (OAR).
 *  All rights assigned to U.S. Government, 1994.
 *
 *  This material may be reproduced by or for the U.S. Government pursuant
 *  to the copyright license under the clause at DFARS 252.227-7013.  This
 *  notice must appear in all copies of this file and its derivatives.
 *
 *  $Id$
 */
/* @(#)cpu.h       08/29/96     1.7 */

#ifndef __CPU_h
#define __CPU_h

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/mips64orion.h>       /* pick up machine definitions */
#ifndef ASM
#include <rtems/score/mipstypes.h>
#endif

extern int mips_disable_interrupts( void );
extern void mips_enable_interrupts( int _level );
extern int mips_disable_global_interrupts( void );
extern void mips_enable_global_interrupts( void );
extern void mips_fatal_error ( int error );

/* conditional compilation parameters */

/*
 *  Should the calls to _Thread_Enable_dispatch be inlined?
 *
 *  If TRUE, then they are inlined.
 *  If FALSE, then a subroutine call is made.
 *
 *  Basically this is an example of the classic trade-off of size
 *  versus speed.  Inlining the call (TRUE) typically increases the
 *  size of RTEMS while speeding up the enabling of dispatching.
 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
 *  only be 0 or 1 unless you are in an interrupt handler and that
 *  interrupt handler invokes the executive.]  When not inlined,
 *  something calls _Thread_Enable_dispatch which in turn calls
 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
 *  one subroutine call is avoided entirely.
 */

#define CPU_INLINE_ENABLE_DISPATCH       TRUE

/*
 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
 *  be unrolled one time?  If unrolled, each iteration of the loop examines
 *  two "nodes" on the chain being searched.  Otherwise, only one node
 *  is examined per iteration.
 *
 *  If TRUE, then the loops are unrolled.
 *  If FALSE, then the loops are not unrolled.
 *
 *  The primary factor in making this decision is the cost of disabling
 *  and enabling interrupts (_ISR_Flash) versus the cost of the rest of the
 *  body of the loop.  On some CPUs, the flash is more expensive than
 *  one iteration of the loop body.  In this case, it might be desirable
 *  to unroll the loop.  It is important to note that on some CPUs, this
 *  code is the longest interrupt disable period in RTEMS.  So it is
 *  necessary to strike a balance when setting this parameter.
 */

#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE

/*
 *  Does RTEMS manage a dedicated interrupt stack in software?
 *
 *  If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
 *  If FALSE, nothing is done.
 *
 *  If the CPU supports a dedicated interrupt stack in hardware,
 *  then it is generally the responsibility of the BSP to allocate it
 *  and set it up.
 *
 *  If the CPU does not support a dedicated interrupt stack, then
 *  the porter has two options: (1) execute interrupts on the
 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
 *  interrupt stack.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE

/*
 *  Does this CPU have hardware support for a dedicated interrupt stack?
 *
 *  If TRUE, then it must be installed during initialization.
 *  If FALSE, then no installation is performed.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 */

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

/*
 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
 *
 *  If TRUE, then the memory is allocated during initialization.
 *  If FALSE, then nothing is done.
 *
 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
 *  or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
 */

#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

/*
 *  Does the CPU have hardware floating point?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
 *
 *  If there is a FP coprocessor such as the i387 or mc68881, then
 *  the answer is TRUE.
 *
 *  The macro name "MIPS64ORION_HAS_FPU" should be made CPU specific.
 *  It indicates whether or not this CPU model has FP support.  For
 *  example, it would be possible to have an i386_nofp CPU model
 *  which sets this to FALSE to indicate that you have an i386 without
 *  an i387 and wish to leave floating point support out of RTEMS.
 */

#if ( MIPS64ORION_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE
#else
#define CPU_HARDWARE_FP     FALSE
#endif

/*
 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
 *
 *  So far, the only CPU in which this option has been used is the
 *  HP PA-RISC.  The HP C compiler and gcc both implicitly use the
 *  floating point registers to perform integer multiplies.  If
 *  a function which you would not expect to use the FP unit DOES,
 *  then one cannot easily predict which tasks will use the FP hardware.
 *  In this case, this option should be TRUE.
 *
 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
 */

#define CPU_ALL_TASKS_ARE_FP    FALSE

/*
 *  Should the IDLE task have a floating point context?
 *
 *  If TRUE, then the IDLE task is created as an RTEMS_FLOATING_POINT task
 *  and it has a floating point context which is switched in and out.
 *  If FALSE, then the IDLE task does not have a floating point context.
 *
 *  Setting this to TRUE negatively impacts the time required to preempt
 *  the IDLE task from an interrupt because the floating point context
 *  must be saved as part of the preemption.
 */

#define CPU_IDLE_TASK_IS_FP      FALSE

/*
 *  Should the saving of the floating point registers be deferred
 *  until a context switch is made to another different floating point
 *  task?
 *
 *  If TRUE, then the floating point context will not be stored until
 *  necessary.  It will remain in the floating point registers and not
 *  be disturbed until another floating point task is switched to.
 *
 *  If FALSE, then the floating point context is saved when a floating
 *  point task is switched out and restored when the next floating point
 *  task is restored.  The state of the floating point registers between
 *  those two operations is not specified.
 *
 *  If the floating point context does NOT have to be saved as part of
 *  interrupt dispatching, then it should be safe to set this to TRUE.
 *
 *  Setting this flag to TRUE results in using a different algorithm
 *  for deciding when to save and restore the floating point context.
 *  The deferred FP switch algorithm minimizes the number of times
 *  the FP context is saved and restored.  The FP context is not saved
 *  until a context switch is made to another, different FP task.
 *  Thus in a system with only one FP task, the FP context will never
 *  be saved or restored.
 */

#define CPU_USE_DEFERRED_FP_SWITCH       TRUE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Internal_threads_Idle_thread_body
 *  must be provided and is the default IDLE thread body instead of
 *  _Internal_threads_Idle_thread_body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 *
 *  This is intended to allow for supporting processors which have
 *  a low power or idle mode.  When the IDLE thread is executed, then
 *  the CPU can be powered down.
 *
 *  The order of precedence for selecting the IDLE thread body is:
 *
 *    1.  BSP provided
 *    2.  CPU dependent (if provided)
 *    3.  generic (if no BSP and no CPU dependent)
 */

/* we can use the low power wait instruction for the IDLE thread */
#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE

/*
 *  Does the stack grow up (toward higher addresses) or down
 *  (toward lower addresses)?
 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
 */

/* our stack grows down */
#define CPU_STACK_GROWS_UP               FALSE

/*
 *  The following is the variable attribute used to force alignment
 *  of critical RTEMS structures.  On some processors it may make
 *  sense to have these aligned on tighter boundaries than
 *  the minimum requirements of the compiler in order to have as
 *  much of the critical data area as possible in a cache line.
 *
 *  The placement of this macro in the declaration of the variables
 *  is based on the syntactic requirements of the GNU C
 *  "__attribute__" extension.  For example with GNU C, use
 *  the following to force a structure to a 32 byte boundary.
 *
 *      __attribute__ ((aligned (32)))
 *
 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
 *         To benefit from using this, the data must be heavily
 *         used so it will stay in the cache and used frequently enough
 *         in the executive to justify turning this on.
 */

/* our cache line size is 16 bytes */
#if __GNUC__
#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (16)))
#else
#define CPU_STRUCTURE_ALIGNMENT
#endif
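
/*
 *  Illustrative sketch (not part of the original header): how the
 *  alignment macro above is typically placed in a declaration.  The
 *  variable name below is hypothetical.
 *
 *      volatile unsigned32 _A_heavily_used_table[ 4 ] CPU_STRUCTURE_ALIGNMENT;
 */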

/*
 *  Define what is required to specify how the network to host conversion
 *  routines are handled.
 */

#define CPU_CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES FALSE
#define CPU_BIG_ENDIAN                           TRUE
#define CPU_LITTLE_ENDIAN                        FALSE

/*
 *  The following defines the number of bits actually used in the
 *  interrupt field of the task mode.  How those bits map to the
 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
 */

#define CPU_MODES_INTERRUPT_MASK   0x00000001

/*
 *  Processor defined structures
 *
 *  Example structures include the descriptor tables from the i386
 *  and the processor control structure on the i960ca.
 */

/* may need to put some structures here.  */

/*
 * Contexts
 *
 *  Generally there are 2 types of context to save.
 *     1. Interrupt registers to save
 *     2. Task level registers to save
 *
 *  This means we have the following 3 context items:
 *     1. task level context stuff::  Context_Control
 *     2. floating point task stuff:: Context_Control_fp
 *     3. special interrupt level context :: Context_Control_interrupt
 *
 *  On some processors, it is cost-effective to save only the callee
 *  preserved registers during a task context switch.  This means
 *  that the ISR code needs to save those registers which do not
 *  persist across function calls.  It is not mandatory to make this
 *  distinction between the caller/callee saved registers for the
 *  purpose of minimizing context saved during task switch and on interrupts.
 *  If the cost of saving extra registers is minimal, simplicity is the
 *  choice.  Save the same context on interrupt entry as for tasks in
 *  this case.
 *
 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
 *  care should be used in designing the context area.
 *
 *  On some CPUs with hardware floating point support, the Context_Control_fp
 *  structure will not be used or it simply consists of an array of a
 *  fixed number of bytes.   This is done when the floating point context
 *  is dumped by a "FP save context" type instruction and the format
 *  is not really defined by the CPU.  In this case, there is no need
 *  to figure out the exact format -- only the size.  Of course, although
 *  this is enough information for RTEMS, it is probably not enough for
 *  a debugger such as gdb.  But that is another problem.
 */

/* WARNING: If this structure is modified, the constants in cpu.h must be updated. */
typedef struct {
    unsigned64 s0;
    unsigned64 s1;
    unsigned64 s2;
    unsigned64 s3;
    unsigned64 s4;
    unsigned64 s5;
    unsigned64 s6;
    unsigned64 s7;
    unsigned64 sp;
    unsigned64 fp;
    unsigned64 ra;
    unsigned64 c0_sr;
    unsigned64 c0_epc;
} Context_Control;

/* WARNING: If this structure is modified, the constants in cpu.h must be updated. */
typedef struct {
    unsigned32      fp0;
    unsigned32      fp1;
    unsigned32      fp2;
    unsigned32      fp3;
    unsigned32      fp4;
    unsigned32      fp5;
    unsigned32      fp6;
    unsigned32      fp7;
    unsigned32      fp8;
    unsigned32      fp9;
    unsigned32      fp10;
    unsigned32      fp11;
    unsigned32      fp12;
    unsigned32      fp13;
    unsigned32      fp14;
    unsigned32      fp15;
    unsigned32      fp16;
    unsigned32      fp17;
    unsigned32      fp18;
    unsigned32      fp19;
    unsigned32      fp20;
    unsigned32      fp21;
    unsigned32      fp22;
    unsigned32      fp23;
    unsigned32      fp24;
    unsigned32      fp25;
    unsigned32      fp26;
    unsigned32      fp27;
    unsigned32      fp28;
    unsigned32      fp29;
    unsigned32      fp30;
    unsigned32      fp31;
} Context_Control_fp;

typedef struct {
    unsigned32 special_interrupt_register;
} CPU_Interrupt_frame;


/*
 *  The following table contains the information required to configure
 *  the mips processor specific parameters.
 */

typedef struct {
  void       (*pretasking_hook)( void );
  void       (*predriver_hook)( void );
  void       (*postdriver_hook)( void );
  void       (*idle_task)( void );
  boolean      do_zero_of_workspace;
  unsigned32   interrupt_stack_size;
  unsigned32   extra_mpci_receive_server_stack;
  void *     (*stack_allocate_hook)( unsigned32 );
  void       (*stack_free_hook)( void* );
  /* end of fields required on all CPUs */

  unsigned32   some_other_cpu_dependent_info;
}   rtems_cpu_table;
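
/*
 *  Illustrative sketch (not part of the original header): how a BSP
 *  might fill in the table above.  All values and the variable name
 *  are hypothetical; a real BSP supplies its own.
 */

#if 0
rtems_cpu_table Cpu_table = {
  NULL,        /* pretasking_hook */
  NULL,        /* predriver_hook */
  NULL,        /* postdriver_hook */
  NULL,        /* idle_task (NULL selects the default IDLE body) */
  TRUE,        /* do_zero_of_workspace */
  4096,        /* interrupt_stack_size in bytes */
  0,           /* extra_mpci_receive_server_stack */
  NULL,        /* stack_allocate_hook */
  NULL,        /* stack_free_hook */
  0            /* some_other_cpu_dependent_info */
};
#endif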

/*
 *  This variable is optional.  It is used on CPUs on which it is difficult
 *  to generate an "uninitialized" FP context.  It is filled in by
 *  _CPU_Initialize and copied into the task's FP context area during
 *  _CPU_Context_Initialize.
 */

SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;

/*
 *  On some CPUs, RTEMS supports a software managed interrupt stack.
 *  This stack is allocated by the Interrupt Manager and the switch
 *  is performed in _ISR_Handler.  These variables contain pointers
 *  to the lowest and highest addresses in the chunk of memory allocated
 *  for the interrupt stack.  Since it is unknown whether the stack
 *  grows up or down (in general), this gives the CPU dependent
 *  code the option of picking the version it wants to use.
 *
 *  NOTE: These two variables are required if the macro
 *        CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
 */

SCORE_EXTERN void               *_CPU_Interrupt_stack_low;
SCORE_EXTERN void               *_CPU_Interrupt_stack_high;

/*
 *  With some compilation systems, it is difficult if not impossible to
 *  call a high-level language routine from assembly language.  This
 *  is especially true of commercial Ada compilers and name mangling
 *  C++ ones.  This variable can be optionally defined by the CPU porter
 *  and contains the address of the routine _Thread_Dispatch.  This
 *  can make it easier to invoke that routine at the end of the interrupt
 *  sequence (if a dispatch is necessary).
 */

SCORE_EXTERN void           (*_CPU_Thread_dispatch_pointer)();

/*
 *  Nothing prevents the porter from declaring more CPU specific variables.
 */

/* XXX: if needed, put more variables here */

/*
 *  The size of the floating point context area.  On some CPUs this
 *  will not be a "sizeof" because the format of the floating point
 *  area is not defined -- only the size is.  This is usually on
 *  CPUs with a "floating point save context" instruction.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/*
 *  Amount of extra stack (above minimum stack size) required by
 *  system initialization thread.  Remember that in a multiprocessor
 *  system the system initialization thread becomes the MP server thread.
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

/*
 *  This defines the number of entries in the ISR_Vector_table managed
 *  by RTEMS.
 */

#define CPU_INTERRUPT_NUMBER_OF_VECTORS      8
#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER  (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)

/*
 *  Should be large enough to run all RTEMS tests.  This ensures
 *  that a "reasonable" small application should not have any problems.
 */

#define CPU_STACK_MINIMUM_SIZE          (2048*sizeof(unsigned32))
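/* i.e. 8K bytes, since unsigned32 is a 4-byte type */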

/*
 *  CPU's worst alignment requirement for data types on a byte boundary.  This
 *  alignment does not take into account the requirements for the stack.
 */

#define CPU_ALIGNMENT              8

/*
 *  This number corresponds to the byte alignment requirement for the
 *  heap handler.  This alignment requirement may be stricter than that
 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
 *  common for the heap to follow the same alignment requirement as
 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
 *  then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for memory
 *  buffers allocated by the partition manager.  This alignment requirement
 *  may be stricter than that for the data types alignment specified by
 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for the
 *  stack.  This alignment requirement may be stricter than that for the
 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
 *  is strict enough for the stack, then this should be set to 0.
 *
 *  NOTE:  This must be a power of 2, either 0 or greater than CPU_ALIGNMENT.
 */

#define CPU_STACK_ALIGNMENT        CPU_ALIGNMENT

/* ISR handler macros */

/*
 *  Disable all interrupts for an RTEMS critical section.  The previous
 *  level is returned in _int_level.
 */

#define _CPU_ISR_Disable( _int_level ) \
  do{ \
        _int_level = mips_disable_interrupts(); \
  }while(0)

/*
 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
 *  This indicates the end of an RTEMS critical section.  The parameter
 *  _level is not modified.
 */

#define _CPU_ISR_Enable( _level )  \
  do{ \
        mips_enable_interrupts(_level); \
  }while(0)

/*
 *  This temporarily restores interrupts to _xlevel before immediately
 *  disabling them again.  This is used to divide long RTEMS critical
 *  sections into two or more parts.  The parameter _xlevel is not
 *  modified.
 */

#define _CPU_ISR_Flash( _xlevel ) \
  do{ \
        int _scratch; \
        _CPU_ISR_Enable( _xlevel ); \
        _CPU_ISR_Disable( _scratch ); \
  }while(0)
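
/*
 *  Illustrative sketch (not part of the original header): how the three
 *  macros above typically compose an RTEMS critical section.  The
 *  function name is hypothetical.
 */

#if 0
static void _Example_critical_section( void )
{
  unsigned32 _level;

  _CPU_ISR_Disable( _level );      /* enter the critical section */
    /* ... first part of the critical work ... */
  _CPU_ISR_Flash( _level );        /* briefly re-enable, then disable again */
    /* ... second part of the critical work ... */
  _CPU_ISR_Enable( _level );       /* leave the critical section */
}
#endif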

/*
 *  Map interrupt level in task mode onto the hardware that the CPU
 *  actually provides.  Currently, interrupt levels which do not
 *  map onto the CPU in a generic fashion are undefined.  Someday,
 *  it would be nice if these were "mapped" by the application
 *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
 *  8 - 255 would be available for bsp/application specific meaning.
 *  This could be used to manage a programmable interrupt controller
 *  via the rtems_task_mode directive.
 */
extern void _CPU_ISR_Set_level( unsigned32 _new_level );

unsigned32 _CPU_ISR_Get_level( void );

/* end of ISR handler macros */

/* Context handler macros */

/*
 *  Initialize the context to a state suitable for starting a
 *  task after a context restore operation.  Generally, this
 *  involves:
 *
 *     - setting a starting address
 *     - preparing the stack
 *     - preparing the stack and frame pointers
 *     - setting the proper interrupt level in the context
 *     - initializing the floating point context
 *
 *  This routine generally does not set any unnecessary register
 *  in the context.  The state of the "general data" registers is
 *  undefined at task start time.
 *
 *  NOTE: The _is_fp parameter is TRUE if the thread is to be a floating
 *        point thread.  This is typically only used on CPUs where the
 *        FPU may be easily disabled by software such as on the SPARC
 *        where the PSR contains an enable FPU bit.
 */

#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
                                 _isr, _entry_point, _is_fp ) \
  { \
        unsigned32 _stack_tmp = (unsigned32)(_stack_base) + (_size) - CPU_STACK_ALIGNMENT; \
        _stack_tmp &= ~(CPU_STACK_ALIGNMENT - 1); \
        (_the_context)->sp = _stack_tmp; \
        (_the_context)->fp = _stack_tmp; \
        (_the_context)->ra = (unsigned64)_entry_point; \
        (_the_context)->c0_sr = 0; \
  }
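
/*
 *  For example (illustrative values only): with _stack_base = 0x80010000,
 *  _size = 0x2000 and CPU_STACK_ALIGNMENT = 8, _stack_tmp is
 *  0x80012000 - 8 = 0x80011FF8, already 8-byte aligned, so sp and fp
 *  both start at 0x80011FF8.
 */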

/*
 *  This routine is responsible for somehow restarting the currently
 *  executing task.  If you are lucky, then all that is necessary
 *  is restoring the context.  Otherwise, there will need to be
 *  a special assembly routine which does something special in this
 *  case.  Context_Restore should work most of the time.  It will
 *  not work if restarting self conflicts with the stack frame
 *  assumptions of restoring a context.
 */

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

/*
 *  The purpose of this macro is to allow the initial pointer into
 *  a floating point context area (used to save the floating point
 *  context) to be at an arbitrary place in the floating point
 *  context area.
 *
 *  This is necessary because some FP units are designed to have
 *  their context saved as a stack which grows into lower addresses.
 *  Other FP units can be saved by simply moving registers into offsets
 *  from the base of the context area.  Finally some FP units provide
 *  a "dump context" instruction which could fill in from high to low
 *  or low to high based on the whim of the CPU designers.
 */

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/*
 *  This routine initializes the FP context area passed to it.
 *  There are a few standard ways in which to initialize the
 *  floating point context.  The code included for this macro assumes
 *  that this is a CPU in which an "initial" FP context was saved into
 *  _CPU_Null_fp_context and it simply copies it to the destination
 *  context passed to it.
 *
 *  Other models include (1) not doing anything, and (2) putting
 *  a "null FP status word" in the correct place in the FP context.
 */

#define _CPU_Context_Initialize_fp( _destination ) \
  { \
   *((Context_Control_fp *) *((void **) _destination)) = _CPU_Null_fp_context; \
  }

/* end of Context handler macros */

/* Fatal Error manager macros */

/*
 *  This routine copies _error into a known place -- typically a stack
 *  location or a register, optionally disables interrupts, and
 *  halts/stops the CPU.
 */

#define _CPU_Fatal_halt( _error ) \
  { \
    mips_disable_global_interrupts(); \
    mips_fatal_error(_error); \
  }

/* end of Fatal Error manager macros */

/* Bitfield handler macros */

/*
 *  This routine sets _output to the bit number of the first bit
 *  set in _value.  _value is of CPU dependent type Priority_Bit_map_control.
 *  This type may be either 16 or 32 bits wide although only the 16
 *  least significant bits will be used.
 *
 *  There are a number of variables in using a "find first bit" type
 *  instruction.
 *
 *    (1) What happens when run on a value of zero?
 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
 *    (3) The numbering may be zero or one based.
 *    (4) The "find first bit" instruction may search from MSB or LSB.
 *
 *  RTEMS guarantees that (1) will never happen so it is not a concern.
 *  (2), (3), (4) are handled by the macros _CPU_Priority_mask() and
 *  _CPU_Priority_bits_index().  These three form a set of routines
 *  which must logically operate together.  Bits in the _value are
 *  set and cleared based on masks built by _CPU_Priority_mask().
 *  The basic major and minor values calculated by _Priority_Major()
 *  and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
 *  to properly range between the values returned by the "find first bit"
 *  instruction.  This makes it possible for _Priority_Get_highest() to
 *  calculate the major and directly index into the minor table.
 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
 *  is the first bit found.
 *
 *  This entire "find first bit" and mapping process depends heavily
 *  on the manner in which a priority is broken into major and minor
 *  components with the major being the 4 MSB of a priority and minor
 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
 *  to the lowest priority.
 *
 *  If your CPU does not have a "find first bit" instruction, then
 *  there are ways to make do without it.  Here are a handful of ways
 *  to implement this in software:
 *
 *    - a series of 16 bit test instructions
 *    - a "binary search using if's"
 *    - _number = 0
 *      if _value > 0x00ff
 *        _value >>= 8
 *        _number = 8;
 *
 *      if _value > 0x000f
 *        _value >>= 4
 *        _number += 4
 *
 *      _number += bit_set_table[ _value ]
 *
 *    where bit_set_table[ 16 ] has values which indicate the first
 *      bit set
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
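
/*
 *  Illustrative sketch (not part of the original header): a C rendering
 *  of the software "find first bit" fallback described above.  It is not
 *  used by this port since the generic bitfield code is enabled; the
 *  names are hypothetical.
 */

#if 0
static inline unsigned int _Example_find_first_bit( unsigned int _value )
{
  /* reported bit number for each possible remaining nibble value */
  static const unsigned char bit_set_table[ 16 ] = {
    0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
  };
  unsigned int _number = 0;

  if ( _value > 0x00ff ) {   /* answer lies in the upper byte */
    _value >>= 8;
    _number = 8;
  }
  if ( _value > 0x000f ) {   /* answer lies in the upper nibble */
    _value >>= 4;
    _number += 4;
  }
  return _number + bit_set_table[ _value ];
}
#endif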

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    (_output) = 0;   /* do something to prevent warnings */ \
  }

#endif

/* end of Bitfield handler macros */

/*
 *  This routine builds the mask which corresponds to the bit fields
 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
 *  for that routine.
 */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )

#endif

/*
 *  This routine translates the bit numbers returned by
 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
 *  a major or minor component of a priority.  See the discussion
 *  for that routine.
 */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)

#endif

/* end of Priority handler macros */

/* functions */

/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(
  rtems_cpu_table  *cpu_table,
  void      (*thread_dispatch)
);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.
 */

void _CPU_ISR_install_raw_handler(
  unsigned32  vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  unsigned32  vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Install_interrupt_stack
 *
 *  This routine installs the hardware interrupt stack pointer.
 *
 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
 *         is TRUE.
 */

void _CPU_Install_interrupt_stack( void );

/*
 *  _CPU_Internal_threads_Idle_thread_body
 *
 *  This routine is the CPU dependent IDLE thread body.
 *
 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
 *         is TRUE.
 */

void _CPU_Thread_Idle_body( void );

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 */

void _CPU_Context_restore(
  Context_Control *new_context
);

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

void _CPU_Context_save_fp(
  void **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */

void _CPU_Context_restore_fp(
  void **fp_context_ptr
);

/*  The following routine swaps the endian format of an unsigned int.
 *  It must be static because it is referenced indirectly.
 *
 *  This version will work on any processor, but if there is a better
 *  way for your CPU PLEASE use it.  The most common way to do this is to:
 *
 *     swap least significant two bytes with 16-bit rotate
 *     swap upper and lower 16-bits
 *     swap most significant two bytes with 16-bit rotate
 *
 *  Some CPUs have special instructions which swap a 32-bit quantity in
 *  a single instruction (e.g. i486).  It is probably best to avoid
 *  an "endian swapping control bit" in the CPU.  One good reason is
 *  that interrupts would probably have to be disabled to ensure that
 *  an interrupt does not try to access the same "chunk" with the wrong
 *  endian.  Another good reason is that on some CPUs, the endian bit
 *  changes the endianness of ALL fetches -- both code and data -- so the
 *  code will be fetched incorrectly.
 */

static inline unsigned int CPU_swap_u32(
  unsigned int value
)
{
  unsigned32 byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value        & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return( swapped );
}
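
/* e.g. CPU_swap_u32( 0x12345678 ) returns 0x78563412 */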

/*
 *  Miscellaneous prototypes
 *
 *  NOTE:  The names should have mips64orion in them.
 */

void disable_int( unsigned32 mask );
void enable_int( unsigned32 mask );

#ifdef __cplusplus
}
#endif

#endif