source: rtems/cpukit/score/cpu/h8300/rtems/score/cpu.h @ 143696a

/**
 * @file
 *
 * @brief Hitachi H8300 CPU Department Source
 *
 * This include file contains information pertaining to the H8300
 * processor.
 */

/*
 *  COPYRIGHT (c) 1989-2006.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/types.h>
#include <rtems/score/h8300.h>
#ifndef ASM
  #include <rtems/bspIo.h>
#endif

/* conditional compilation parameters */

/*
 *  Should the calls to _Thread_Enable_dispatch be inlined?
 *
 *  If TRUE, then they are inlined.
 *  If FALSE, then a subroutine call is made.
 *
 *  Basically this is an example of the classic trade-off of size
 *  versus speed.  Inlining the call (TRUE) typically increases the
 *  size of RTEMS while speeding up the enabling of dispatching.
 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
 *  only be 0 or 1 unless you are in an interrupt handler and that
 *  interrupt handler invokes the executive.]  When not inlined,
 *  something calls _Thread_Enable_dispatch which in turn calls
 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
 *  one subroutine call is avoided entirely.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define CPU_INLINE_ENABLE_DISPATCH       FALSE

/*
 *  Should this target use 16 or 32 bit object Ids?
 *
 */
#define RTEMS_USE_16_BIT_OBJECT

/*
 *  Does RTEMS manage a dedicated interrupt stack in software?
 *
 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
 *  If FALSE, nothing is done.
 *
 *  If the CPU supports a dedicated interrupt stack in hardware,
 *  then it is generally the responsibility of the BSP to allocate it
 *  and set it up.
 *
 *  If the CPU does not support a dedicated interrupt stack, then
 *  the porter has two options: (1) execute interrupts on the
 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
 *  interrupt stack.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE

/*
 *  Does the CPU follow the simple vectored interrupt model?
 *
 *  If TRUE, then RTEMS allocates the vector table it internally manages.
 *  If FALSE, then the BSP is assumed to allocate and manage the vector
 *  table.
 *
 *  H8300 Specific Information:
 *
 *  XXX document implementation including references if appropriate
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS TRUE

/*
 *  Does this CPU have hardware support for a dedicated interrupt stack?
 *
 *  If TRUE, then it must be installed during initialization.
 *  If FALSE, then no installation is performed.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

/*
 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
 *
 *  If TRUE, then the memory is allocated during initialization.
 *  If FALSE, then the interrupt stack is not allocated by RTEMS and
 *  must be provided by the CPU port or the BSP.
 *
 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define CPU_ALLOCATE_INTERRUPT_STACK TRUE

/*
 *  Does the CPU have hardware floating point?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
 *
 *  If there is a FP coprocessor such as the i387 or mc68881, then
 *  the answer is TRUE.
 *
 *  The macro name "H8300_HAS_FPU" should be made CPU specific.
 *  It indicates whether or not this CPU model has FP support.  For
 *  example, it would be possible to have an i386_nofp CPU model
 *  which sets this to FALSE to indicate that you have an i386 without
 *  an i387 and wish to leave floating point support out of RTEMS.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define CPU_HARDWARE_FP     FALSE

/*
 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
 *
 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define CPU_ALL_TASKS_ARE_FP     FALSE

/*
 *  Should the IDLE task have a floating point context?
 *
 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
 *  and it has a floating point context which is switched in and out.
 *  If FALSE, then the IDLE task does not have a floating point context.
 *
 *  Setting this to TRUE negatively impacts the time required to preempt
 *  the IDLE task from an interrupt because the floating point context
 *  must be saved as part of the preemption.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define CPU_IDLE_TASK_IS_FP      FALSE

/*
 *  Should the saving of the floating point registers be deferred
 *  until a context switch is made to another different floating point
 *  task?
 *
 *  If TRUE, then the floating point context will not be stored until
 *  necessary.  It will remain in the floating point registers and not
 *  disturbed until another floating point task is switched to.
 *
 *  If FALSE, then the floating point context is saved when a floating
 *  point task is switched out and restored when the next floating point
 *  task is restored.  The state of the floating point registers between
 *  those two operations is not specified.
 *
 *  If the floating point context does NOT have to be saved as part of
 *  interrupt dispatching, then it should be safe to set this to TRUE.
 *
 *  Setting this flag to TRUE results in using a different algorithm
 *  for deciding when to save and restore the floating point context.
 *  The deferred FP switch algorithm minimizes the number of times
 *  the FP context is saved and restored.  The FP context is not saved
 *  until a context switch is made to another, different FP task.
 *  Thus in a system with only one FP task, the FP context will never
 *  be saved or restored.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define CPU_USE_DEFERRED_FP_SWITCH       TRUE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Internal_threads_Idle_thread_body
 *  must be provided and is the default IDLE thread body instead of
 *  _Internal_threads_Idle_thread_body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 *
 *  This is intended to allow for supporting processors which have
 *  a low power or idle mode.  When the IDLE thread is executed, then
 *  the CPU can be powered down.
 *
 *  The order of precedence for selecting the IDLE thread body is:
 *
 *    1.  BSP provided
 *    2.  CPU dependent (if provided)
 *    3.  generic (if no BSP and no CPU dependent)
 *
 *  H8300 Specific Information:
 *
 *  XXX
 *  The port initially called a BSP dependent routine called
 *  IDLE_Monitor.  The idle task body can be overridden by
 *  the BSP in newer versions of RTEMS.
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE

/*
 *  Does the stack grow up (toward higher addresses) or down
 *  (toward lower addresses)?
 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define CPU_STACK_GROWS_UP               FALSE

/*
 *  The following is the variable attribute used to force alignment
 *  of critical RTEMS structures.  On some processors it may make
 *  sense to have these aligned on tighter boundaries than
 *  the minimum requirements of the compiler in order to have as
 *  much of the critical data area as possible in a cache line.
 *
 *  The placement of this macro in the declaration of the variables
 *  is based on the syntactic requirements of the GNU C
 *  "__attribute__" extension.  For example with GNU C, use
 *  the following to force a structure to a 32 byte boundary.
 *
 *      __attribute__ ((aligned (32)))
 *
 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
 *         To benefit from using this, the data must be heavily
 *         used so it will stay in the cache and used frequently enough
 *         in the executive to justify turning this on.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define CPU_STRUCTURE_ALIGNMENT
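
/*
 *  Illustrative sketch only (not part of this port): on a CPU with a
 *  32-byte cache line the macro above could be defined as the GNU C
 *  attribute shown in the comment and then placed in a declaration,
 *  for example:
 *
 *      #define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))
 *      CPU_STRUCTURE_ALIGNMENT Priority_bit_map_Word _Priority_Bit_map[16];
 *
 *  The "_Priority_Bit_map" declaration is only an example of where such
 *  an attribute would appear.  The H8300 port leaves the macro empty,
 *  so no extra alignment is requested.
 */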

#define CPU_TIMESTAMP_USE_STRUCT_TIMESPEC TRUE

/*
 *  Define what is required to specify how the network to host conversion
 *  routines are handled.
 */

#define CPU_BIG_ENDIAN                           TRUE
#define CPU_LITTLE_ENDIAN                        FALSE

/*
 *  The following defines the number of bits actually used in the
 *  interrupt field of the task mode.  How those bits map to the
 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define CPU_MODES_INTERRUPT_MASK   0x00000001

#define CPU_PER_CPU_CONTROL_SIZE 0

/*
 *  Processor defined structures required for cpukit/score.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

/* may need to put some structures here.  */

/*
 * Contexts
 *
 *  Generally there are 2 types of context to save.
 *     1. Interrupt registers to save
 *     2. Task level registers to save
 *
 *  This means we have the following 3 context items:
 *     1. task level context stuff::  Context_Control
 *     2. floating point task stuff:: Context_Control_fp
 *     3. special interrupt level context :: Context_Control_interrupt
 *
 *  On some processors, it is cost-effective to save only the callee
 *  preserved registers during a task context switch.  This means
 *  that the ISR code needs to save those registers which do not
 *  persist across function calls.  It is not mandatory to make this
 *  distinction between the caller/callee saved registers for the
 *  purpose of minimizing context saved during task switch and on interrupts.
 *  If the cost of saving extra registers is minimal, simplicity is the
 *  choice.  Save the same context on interrupt entry as for tasks in
 *  this case.
 *
 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
 *  care should be used in designing the context area.
 *
 *  On some CPUs with hardware floating point support, the Context_Control_fp
 *  structure will not be used or it may simply consist of an array of a
 *  fixed number of bytes.  This is done when the floating point context
 *  is dumped by a "FP save context" type instruction and the format
 *  is not really defined by the CPU.  In this case, there is no need
 *  to figure out the exact format -- only the size.  Of course, although
 *  this is enough information for RTEMS, it is probably not enough for
 *  a debugger such as gdb.  But that is another problem.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#ifndef ASM

typedef struct {
  /* There is no CPU specific per-CPU state */
} CPU_Per_CPU_control;

#define nogap __attribute__ ((packed))

typedef struct {
    uint16_t    ccr nogap;
    void        *er7 nogap;
    void        *er6 nogap;
    uint32_t    er5 nogap;
    uint32_t    er4 nogap;
    uint32_t    er3 nogap;
    uint32_t    er2 nogap;
    uint32_t    er1 nogap;
    uint32_t    er0 nogap;
    uint32_t    xxx nogap;
} Context_Control;

#define _CPU_Context_Get_SP( _context ) \
  (_context)->er7
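
/*
 *  Usage sketch (illustrative only): given a context filled in by
 *  _CPU_Context_Initialize() below, _CPU_Context_Get_SP() simply reads
 *  the saved er7, i.e. the task's stack pointer:
 *
 *      Context_Control ctx;
 *      void *sp;
 *
 *      _CPU_Context_Initialize( &ctx, stack_base, stack_size,
 *                               0, entry, FALSE, NULL );
 *      sp = _CPU_Context_Get_SP( &ctx );
 *
 *  "stack_base", "stack_size" and "entry" are placeholders supplied by
 *  the caller.
 */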

typedef struct {
    double      some_float_register[2];
} Context_Control_fp;

typedef struct {
    uint32_t   special_interrupt_register;
} CPU_Interrupt_frame;

/*
 *  This variable is optional.  It is used on CPUs on which it is difficult
 *  to generate an "uninitialized" FP context.  It is filled in by
 *  _CPU_Initialize and copied into the task's FP context area during
 *  _CPU_Context_Initialize.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;

/*
 *  Nothing prevents the porter from declaring more CPU specific variables.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

/* XXX: if needed, put more variables here */

/*
 *  The size of the floating point context area.  On some CPUs this
 *  will not be a "sizeof" because the format of the floating point
 *  area is not defined -- only the size is.  This is usually on
 *  CPUs with a "floating point save context" instruction.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#endif /* ASM */

/*
 *  Amount of extra stack (above minimum stack size) required by
 *  system initialization thread.  Remember that in a multiprocessor
 *  system the system initialization thread becomes the MP server thread.
 *
 *  H8300 Specific Information:
 *
 *  It is highly unlikely the H8300 will get used in a multiprocessor system.
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

/*
 *  This defines the number of entries in the ISR_Vector_table managed
 *  by RTEMS.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define CPU_INTERRUPT_NUMBER_OF_VECTORS      64
#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER  (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  Should be large enough to run all RTEMS tests.  This ensures
 *  that a "reasonable" small application should not have any problems.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define CPU_STACK_MINIMUM_SIZE          (1536)

#if defined(__H8300H__) || defined(__H8300S__) || defined(__H8300SX__)
  #define CPU_SIZEOF_POINTER 4
#else
  #define CPU_SIZEOF_POINTER 2
#endif

/*
 *  CPU's worst alignment requirement for data types on a byte boundary.  This
 *  alignment does not take into account the requirements for the stack.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define CPU_ALIGNMENT              8

/*
 *  This number corresponds to the byte alignment requirement for the
 *  heap handler.  This alignment requirement may be stricter than that
 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
 *  common for the heap to follow the same alignment requirement as
 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
 *  then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for memory
 *  buffers allocated by the partition manager.  This alignment requirement
 *  may be stricter than that for the data types alignment specified by
 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for the
 *  stack.  This alignment requirement may be stricter than that for the
 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
 *  is strict enough for the stack, then this should be set to 0.
 *
 *  NOTE:  This must be either 0 or a power of 2 greater than CPU_ALIGNMENT.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define CPU_STACK_ALIGNMENT        2

/*
 *  ISR handler macros
 */

/*
 *  Support routine to initialize the RTEMS vector table after it is allocated.
 */

#define _CPU_Initialize_vectors()

/* Cope with the brain-dead version of GCC distributed with the Hitachi
   HIView Tools.  Note: this requires ISR_Level to be uint16_t or the
   assembler croaks.
*/

#if (__GNUC__ == 2 && __GNUC_MINOR__ == 7 )

/*
 *  Disable all interrupts for an RTEMS critical section.  The previous
 *  level is returned in _level.
 */

#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    __asm__ volatile( "stc.w ccr, @-er7 ;\n orc #0xC0,ccr ;\n mov.w @er7+,%0" : "=r" (_isr_cookie) ); \
  } while (0)

/*
 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
 *  This indicates the end of an RTEMS critical section.  The parameter
 *  _level is not modified.
 */

#define _CPU_ISR_Enable( _isr_cookie )  \
  do { \
    __asm__ volatile( "mov.w %0,@-er7 ;\n ldc.w @er7+, ccr" :  : "r" (_isr_cookie) ); \
  } while (0)

/*
 *  This temporarily restores the interrupt to _level before immediately
 *  disabling them again.  This is used to divide long RTEMS critical
 *  sections into two or more parts.  The parameter _level is not
 *  modified.
 */

#define _CPU_ISR_Flash( _isr_cookie ) \
  do { \
    __asm__ volatile( "mov.w %0,@-er7 ;\n ldc.w @er7+, ccr ;\n orc #0xC0,ccr" :  : "r" (_isr_cookie) ); \
  } while (0)

/* end of ISR handler macros */

#else /* modern gcc version */

/*
 *  Disable all interrupts for an RTEMS critical section.  The previous
 *  level is returned in _level.
 *
 *  H8300 Specific Information:
 *
 *  TODO: As of 8 October 2014, this method is not implemented for the SX.
 */

#if defined(__H8300H__) || defined(__H8300S__)
#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    unsigned char __ccr; \
    __asm__ volatile( "stc ccr, %0 ; orc #0x80,ccr " \
             : "=m" (__ccr) /* : "0" (__ccr) */ ); \
    (_isr_cookie) = __ccr; \
  } while (0)
#else
#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    (_isr_cookie) = 0; \
  } while (0)
#endif

/*
 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
 *  This indicates the end of an RTEMS critical section.  The parameter
 *  _level is not modified.
 *
 *  H8300 Specific Information:
 *
 *  TODO: As of 8 October 2014, this method is not implemented for the SX.
 */

#if defined(__H8300H__) || defined(__H8300S__)
#define _CPU_ISR_Enable( _isr_cookie )  \
  do { \
    unsigned char __ccr = (unsigned char) (_isr_cookie); \
    __asm__ volatile( "ldc %0, ccr" :  : "m" (__ccr) ); \
  } while (0)
#else
#define _CPU_ISR_Enable( _isr_cookie ) \
  do { \
    (_isr_cookie) = (_isr_cookie); \
  } while (0)
#endif

/*
 *  This temporarily restores the interrupt to _level before immediately
 *  disabling them again.  This is used to divide long RTEMS critical
 *  sections into two or more parts.  The parameter _level is not
 *  modified.
 *
 *  H8300 Specific Information:
 *
 *  TODO: As of 8 October 2014, this method is not implemented for the SX.
 */

#if defined(__H8300H__) || defined(__H8300S__)
#define _CPU_ISR_Flash( _isr_cookie ) \
  do { \
    unsigned char __ccr = (unsigned char) (_isr_cookie); \
    __asm__ volatile( "ldc %0, ccr ; orc #0x80,ccr " :  : "m" (__ccr) ); \
  } while (0)
#else
#define _CPU_ISR_Flash( _isr_cookie ) \
  do { \
    _CPU_ISR_Enable( _isr_cookie ); \
    _CPU_ISR_Disable( _isr_cookie ); \
  } while (0)
#endif

#endif /* end of old gcc */

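/*
 *  Usage sketch (illustrative only, not part of the port API): a typical
 *  RTEMS style critical section built from the macros above.  The
 *  variable name "level" is arbitrary; ISR_Level is the score type
 *  normally used for the cookie.
 *
 *      ISR_Level level;
 *
 *      _CPU_ISR_Disable( level );
 *        ... update data shared with interrupt handlers ...
 *      _CPU_ISR_Flash( level );
 *        ... second part of a long critical section ...
 *      _CPU_ISR_Enable( level );
 */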

/*
 *  Map interrupt level in task mode onto the hardware that the CPU
 *  actually provides.  Currently, interrupt levels which do not
 *  map onto the CPU in a generic fashion are undefined.  Someday,
 *  it would be nice if these were "mapped" by the application
 *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
 *  8 - 255 would be available for bsp/application specific meaning.
 *  This could be used to manage a programmable interrupt controller
 *  via the rtems_task_mode directive.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define _CPU_ISR_Set_level( _new_level ) \
  { \
    if ( _new_level ) __asm__ volatile ( "orc #0x80,ccr\n" ); \
    else              __asm__ volatile ( "andc #0x7f,ccr\n" ); \
  }

#ifndef ASM

uint32_t   _CPU_ISR_Get_level( void );

/* end of ISR handler macros */

/* Context handler macros */

/*
 *  Initialize the context to a state suitable for starting a
 *  task after a context restore operation.  Generally, this
 *  involves:
 *
 *     - setting a starting address
 *     - preparing the stack
 *     - preparing the stack and frame pointers
 *     - setting the proper interrupt level in the context
 *     - initializing the floating point context
 *
 *  This routine generally does not set any unnecessary register
 *  in the context.  The state of the "general data" registers is
 *  undefined at task start time.
 *
 *  NOTE: The _is_fp parameter is TRUE if the thread is to be a floating
 *        point thread.  This is typically only used on CPUs where the
 *        FPU may be easily disabled by software such as on the SPARC
 *        where the PSR contains an enable FPU bit.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define CPU_CCR_INTERRUPTS_ON  0x80
#define CPU_CCR_INTERRUPTS_OFF 0x00

#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
                                   _isr, _entry_point, _is_fp, _tls_area ) \
  /* Locate Me */ \
  do { \
    uintptr_t   _stack; \
    \
    if ( (_isr) ) (_the_context)->ccr = CPU_CCR_INTERRUPTS_OFF; \
    else          (_the_context)->ccr = CPU_CCR_INTERRUPTS_ON; \
    \
    (void) _is_fp; /* to eliminate set but not used warning */ \
    _stack = ((uintptr_t)(_stack_base)) + (_size) - 4; \
    *((proc_ptr *)(_stack)) = (_entry_point); \
     (_the_context)->er7     = (void *) _stack; \
     (_the_context)->er6     = (void *) _stack; \
     (_the_context)->er5     = 0; \
     (_the_context)->er4     = 1; \
     (_the_context)->er3     = 2; \
  } while (0)

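/*
 *  Worked example (numbers are illustrative only): with
 *  _stack_base = 0x40000, _size = 0x1000 and _isr = 0, the macro above
 *  stores _entry_point at address 0x40FFC (4 bytes below the top of the
 *  stack), points er7 (stack pointer) and er6 (frame pointer) at
 *  0x40FFC, and sets ccr to CPU_CCR_INTERRUPTS_ON so the task starts
 *  with interrupts enabled.  The context restore code is then expected
 *  to pop that address and begin execution at the entry point.
 */
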

/*
 *  This routine is responsible for somehow restarting the currently
 *  executing task.  If you are lucky, then all that is necessary
 *  is restoring the context.  Otherwise, there will need to be
 *  a special assembly routine which does something special in this
 *  case.  Context_Restore should work most of the time.  It will
 *  not work if restarting self conflicts with the stack frame
 *  assumptions of restoring a context.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

/*
 *  The purpose of this macro is to allow the initial pointer into
 *  a floating point context area (used to save the floating point
 *  context) to be at an arbitrary place in the floating point
 *  context area.
 *
 *  This is necessary because some FP units are designed to have
 *  their context saved as a stack which grows into lower addresses.
 *  Other FP units can be saved by simply moving registers into offsets
 *  from the base of the context area.  Finally some FP units provide
 *  a "dump context" instruction which could fill in from high to low
 *  or low to high based on the whim of the CPU designers.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) (_base) + (_offset) )

/*
 *  This routine initializes the FP context area passed to it.
 *  There are a few standard ways in which to initialize the
 *  floating point context.  The code included for this macro assumes
 *  that this is a CPU in which an "initial" FP context was saved into
 *  _CPU_Null_fp_context and it simply copies it to the destination
 *  context passed to it.
 *
 *  Other models include (1) not doing anything, and (2) putting
 *  a "null FP status word" in the correct place in the FP context.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define _CPU_Context_Initialize_fp( _destination ) \
  { \
   *(*(_destination)) = _CPU_Null_fp_context; \
  }

/* end of Context handler macros */

/* Fatal Error manager macros */

/*
 *  This routine copies _error into a known place -- typically a stack
 *  location or a register, optionally disables interrupts, and
 *  halts/stops the CPU.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define _CPU_Fatal_halt( _source, _error ) \
        printk("Fatal Error %d.%d Halted\n",_source, _error); \
        for(;;)

/* end of Fatal Error manager macros */

/* Bitfield handler macros */

/*
 *  This routine sets _output to the bit number of the first bit
 *  set in _value.  _value is of CPU dependent type Priority_bit_map_Word.
 *  This type may be either 16 or 32 bits wide although only the 16
 *  least significant bits will be used.
 *
 *  There are a number of variables to consider when using a "find first
 *  bit" type instruction.
 *
 *    (1) What happens when run on a value of zero?
 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
 *    (3) The numbering may be zero or one based.
 *    (4) The "find first bit" instruction may search from MSB or LSB.
 *
 *  RTEMS guarantees that (1) will never happen so it is not a concern.
 *  (2), (3), and (4) are handled by the macros _CPU_Priority_Mask() and
 *  _CPU_Priority_bits_index().  These three form a set of routines
 *  which must logically operate together.  Bits in the _value are
 *  set and cleared based on masks built by _CPU_Priority_Mask().
 *  The basic major and minor values calculated by _Priority_Major()
 *  and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
 *  to properly range between the values returned by the "find first bit"
 *  instruction.  This makes it possible for _Priority_Get_highest() to
 *  calculate the major and directly index into the minor table.
 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
 *  is the first bit found.
 *
 *  This entire "find first bit" and mapping process depends heavily
 *  on the manner in which a priority is broken into a major and minor
 *  components with the major being the 4 MSB of a priority and minor
 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
 *  to the lowest priority.
 *
 *  If your CPU does not have a "find first bit" instruction, then
 *  there are ways to make do without it.  Here are a handful of ways
 *  to implement this in software:
 *
 *    - a series of 16 bit test instructions
 *    - a "binary search using if's"
 *    - _number = 0
 *      if _value > 0x00ff
 *        _value >>= 8
 *        _number = 8;
 *
 *      if _value > 0x000f
 *        _value >>= 4
 *        _number += 4
 *
 *      _number += bit_set_table[ _value ]
 *
 *    where bit_set_table[ 16 ] has values which indicate the first
 *      bit set.  A sketch of this approach appears after the macros below.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
#define CPU_USE_GENERIC_BITFIELD_DATA TRUE

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    (_output) = 0;   /* do something to prevent warnings */ \
  }

#endif
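
/*
 *  Sketch only (the generic bitfield code selected above is what this
 *  port actually uses): the table driven software "find first bit"
 *  outlined in the comment, written out in C.  It returns the
 *  LSB-relative index of the most significant set bit, so companion
 *  definitions of _CPU_Priority_Mask() and _CPU_Priority_bits_index()
 *  would have to use the same numbering.
 *
 *      static const unsigned char bit_set_table[16] = {
 *        0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
 *      };
 *
 *      #define _CPU_Bitfield_Find_first_bit( _value, _output ) \
 *        { \
 *          unsigned int __v = (_value); \
 *          unsigned int __n = 0; \
 *          \
 *          if ( __v > 0x00ff ) { __v >>= 8; __n = 8; } \
 *          if ( __v > 0x000f ) { __v >>= 4; __n += 4; } \
 *          (_output) = __n + bit_set_table[ __v ]; \
 *        }
 *
 *  For example, a _value of 0x8000 yields 15 and a _value of 0x0001
 *  yields 0.  RTEMS guarantees _value is never zero, so entry 0 of the
 *  table is never consulted.
 */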

/* end of Bitfield handler macros */

/*
 *  This routine builds the mask which corresponds to the bit fields
 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
 *  for that routine.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )

#endif

/*
 *  This routine translates the bit numbers returned by
 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
 *  a major or minor component of a priority.  See the discussion
 *  for that routine.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)

#endif

/* end of Priority handler macros */

/* functions */

/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

void _CPU_Initialize(void);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);
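
/*
 *  Usage sketch (vector number and handler name are hypothetical):
 *  install a handler for vector 25 and remember the previous one.
 *
 *      proc_ptr old_handler;
 *
 *      _CPU_ISR_install_vector(
 *        25,
 *        (proc_ptr) my_timer_isr,
 *        &old_handler
 *      );
 */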

/*
 *  _CPU_Install_interrupt_stack
 *
 *  This routine installs the hardware interrupt stack pointer.
 *
 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
 *         is TRUE.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

void _CPU_Install_interrupt_stack( void );

/*
 *  _CPU_Thread_Idle_body
 *
 *  This routine is the CPU dependent IDLE thread body.
 *
 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
 *         is TRUE.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

void *_CPU_Thread_Idle_body( uint32_t );

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 *
 *  H8300 Specific Information:
 *
 *  XXX
 */

void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);

static inline void _CPU_Context_volatile_clobber( uintptr_t pattern )
{
  /* TODO */
}

static inline void _CPU_Context_validate( uintptr_t pattern )
{
  while (1) {
    /* TODO */
  }
}

/* FIXME */
typedef CPU_Interrupt_frame CPU_Exception_frame;

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

/*  The following routine swaps the endian format of an unsigned int.
 *  It must be static because it is referenced indirectly.
 *
 *  This version will work on any processor, but if there is a better
 *  way for your CPU PLEASE use it.  The most common way to do this is to:
 *
 *     swap least significant two bytes with 16-bit rotate
 *     swap upper and lower 16-bits
 *     swap most significant two bytes with 16-bit rotate
 *
 *  Some CPUs have special instructions which swap a 32-bit quantity in
 *  a single instruction (e.g. i486).  It is probably best to avoid
 *  an "endian swapping control bit" in the CPU.  One good reason is
 *  that interrupts would probably have to be disabled to ensure that
 *  an interrupt does not try to access the same "chunk" with the wrong
 *  endian.  Another good reason is that on some CPUs, the endian bit
 *  changes the endianness for ALL fetches -- both code and data -- so
 *  the code will be fetched incorrectly.
 *
 *  H8300 Specific Information:
 *
 *  This is the generic implementation.
 */

static inline uint32_t   CPU_swap_u32(
  uint32_t   value
)
{
  uint32_t   byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value        & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return( swapped );
}

#define CPU_swap_u16( value ) \
  ((((value) & 0xff) << 8) | (((value) >> 8) & 0xff))
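
/*
 *  Examples (values shown for illustration):
 *
 *      CPU_swap_u32( 0x12345678 ) == 0x78563412
 *      CPU_swap_u16( 0x1234 )     == 0x3412
 */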

typedef uint32_t CPU_Counter_ticks;

CPU_Counter_ticks _CPU_Counter_read( void );

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}
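
/*
 *  Usage sketch (illustrative only): timing a short code sequence with
 *  the CPU counter.  _CPU_Counter_difference() hides any wrap-around
 *  handling the port may need.
 *
 *      CPU_Counter_ticks start, elapsed;
 *
 *      start = _CPU_Counter_read();
 *      ... code being timed ...
 *      elapsed = _CPU_Counter_difference( _CPU_Counter_read(), start );
 */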

/* to be provided by the BSP */
extern void H8BD_Install_IRQ(
  uint32_t      vector,
  proc_ptr      new_handler,
  proc_ptr      *old_handler );

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif