source: rtems/cpukit/score/cpu/epiphany/include/rtems/score/cpu.h @ 27bbc05

Last change on this file since 27bbc05 was 27bbc05, checked in by Sebastian Huber <sebastian.huber@…>, on Aug 2, 2018 at 12:49:01 PM

score: Remove CPU_PARTITION_ALIGNMENT

Use the CPU_SIZEOF_POINTER alignment instead. The internal alignment
requirement is defined by the use of Chain_Node (consisting of two
pointers) to manage the free chain of partitions.

It seems that previously the condition

CPU_PARTITION_ALIGNMENT >= sizeof(Chain_Node)

was true on all CPU ports. Now, we need an additional check.

Update #3482.
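As an illustration only, the relationship the free chain relies on (a Chain_Node consists of exactly two pointers) could be expressed as a compile-time check along these lines, e.g. with the RTEMS_STATIC_ASSERT() macro; this is a sketch, not necessarily the exact check added by this change:

  RTEMS_STATIC_ASSERT(
    sizeof( Chain_Node ) == 2 * CPU_SIZEOF_POINTER,
    PARTITION_BUFFER_ALIGNMENT_FITS_THE_FREE_CHAIN_NODE
  );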

/**
 * @file rtems/score/cpu.h
 */

/*
 *
 * Copyright (c) 2015 University of York.
 * Hesham ALMatary <hmka501@york.ac.uk>
 *
 * COPYRIGHT (c) 1989-1999.
 * On-Line Applications Research Corporation (OAR).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _EPIPHANY_CPU_H
#define _EPIPHANY_CPU_H

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/basedefs.h>
#include <rtems/score/epiphany.h> /* pick up machine definitions */
#ifndef ASM
#include <rtems/bspIo.h>
#include <stdint.h>
#include <stdio.h> /* for printk */
#endif

/* conditional compilation parameters */

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 *
 */

#define CPU_ISR_PASSES_FRAME_POINTER TRUE

/*
 *  Does the CPU have hardware floating point?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
 *
 *  If there is a FP coprocessor such as the i387 or mc68881, then
 *  the answer is TRUE.
 *
 *  The macro name "epiphany_HAS_FPU" should be made CPU specific.
 *  It indicates whether or not this CPU model has FP support.  For
 *  example, it would be possible to have an i386_nofp CPU model
 *  which set this to false to indicate that you have an i386 without
 *  an i387 and wish to leave floating point support out of RTEMS.
 *
 *  The CPU_SOFTWARE_FP is used to indicate whether or not there
 *  is software implemented floating point that must be context
 *  switched.  The determination of whether or not this applies
 *  is very tool specific and the state saved/restored is also
 *  compiler specific.
 *
 *  epiphany Specific Information:
 *
 *  At this time there are no implementations of Epiphany that are
 *  expected to implement floating point.
 */

#define CPU_HARDWARE_FP     FALSE
#define CPU_SOFTWARE_FP     FALSE

/*
 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
 *
 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
 *
 */

#define CPU_ALL_TASKS_ARE_FP     FALSE

/*
 *  Should the IDLE task have a floating point context?
 *
 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
 *  and it has a floating point context which is switched in and out.
 *  If FALSE, then the IDLE task does not have a floating point context.
 *
 *  Setting this to TRUE negatively impacts the time required to preempt
 *  the IDLE task from an interrupt because the floating point context
 *  must be saved as part of the preemption.
 *
 */

#define CPU_IDLE_TASK_IS_FP      FALSE

/*
 *  Should the saving of the floating point registers be deferred
 *  until a context switch is made to another different floating point
 *  task?
 *
 *  If TRUE, then the floating point context will not be stored until
 *  necessary.  It will remain in the floating point registers and not
 *  disturbed until another floating point task is switched to.
 *
 *  If FALSE, then the floating point context is saved when a floating
 *  point task is switched out and restored when the next floating point
 *  task is restored.  The state of the floating point registers between
 *  those two operations is not specified.
 *
 *  If the floating point context does NOT have to be saved as part of
 *  interrupt dispatching, then it should be safe to set this to TRUE.
 *
 *  Setting this flag to TRUE results in using a different algorithm
 *  for deciding when to save and restore the floating point context.
 *  The deferred FP switch algorithm minimizes the number of times
 *  the FP context is saved and restored.  The FP context is not saved
 *  until a context switch is made to another, different FP task.
 *  Thus in a system with only one FP task, the FP context will never
 *  be saved or restored.
 *
 */

#define CPU_USE_DEFERRED_FP_SWITCH       FALSE

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH FALSE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Thread_Idle_body
 *  must be provided and is used as the default IDLE thread body
 *  instead of the generic implementation.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 *
 *  This is intended to allow for supporting processors which have
 *  a low power or idle mode.  When the IDLE thread is executed, then
 *  the CPU can be powered down.
 *
 *  The order of precedence for selecting the IDLE thread body is:
 *
 *    1.  BSP provided
 *    2.  CPU dependent (if provided)
 *    3.  generic (if no BSP and no CPU dependent)
 *
 */

#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE

/*
 *  Does the stack grow up (toward higher addresses) or down
 *  (toward lower addresses)?
 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
 *
 */

#define CPU_STACK_GROWS_UP               FALSE

/* FIXME: Is this the right value? */
#define CPU_CACHE_LINE_BYTES 64

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )
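
/*
 *  Illustration only (hypothetical variable name): CPU_STRUCTURE_ALIGNMENT
 *  is intended to be attached to data that should start on a cache line
 *  boundary, for example:
 *
 *  @code
 *  static uint32_t _Example_shared_buffer[ 16 ] CPU_STRUCTURE_ALIGNMENT;
 *  @endcode
 */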

/*
 *  The following defines the number of bits actually used in the
 *  interrupt field of the task mode.  How those bits map to the
 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
 *
 */

#define CPU_MODES_INTERRUPT_MASK   0x00000001

/*
 *  Processor defined structures required for cpukit/score.
 */

/*
 * Contexts
 *
 *  Generally there are 2 types of context to save.
 *     1. Interrupt registers to save
 *     2. Task level registers to save
 *
 *  This means we have the following 3 context items:
 *     1. task level context stuff::  Context_Control
 *     2. floating point task stuff:: Context_Control_fp
 *     3. special interrupt level context :: Context_Control_interrupt
 *
 *  On some processors, it is cost-effective to save only the callee
 *  preserved registers during a task context switch.  This means
 *  that the ISR code needs to save those registers which do not
 *  persist across function calls.  It is not mandatory to make this
 *  distinction between the caller/callee saved registers for the
 *  purpose of minimizing context saved during task switch and on interrupts.
 *  If the cost of saving extra registers is minimal, simplicity is the
 *  choice.  Save the same context on interrupt entry as for tasks in
 *  this case.
 *
 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
 *  care should be used in designing the context area.
 *
 *  On some CPUs with hardware floating point support, the Context_Control_fp
 *  structure will not be used, or it simply consists of an array of a
 *  fixed number of bytes.   This is done when the floating point context
 *  is dumped by a "FP save context" type instruction and the format
 *  is not really defined by the CPU.  In this case, there is no need
 *  to figure out the exact format -- only the size.  Of course, although
 *  this is enough information for RTEMS, it is probably not enough for
 *  a debugger such as gdb.  But that is another problem.
 *
 *
 */
#ifndef ASM

typedef struct {
  uint32_t  r[64];

  uint32_t status;
  uint32_t config;
  uint32_t iret;

#ifdef RTEMS_SMP
    /**
     * @brief On SMP configurations the thread context must contain a boolean
     * indicator to signal if this context is executing on a processor.
     *
     * This field must be updated during a context switch.  The context switch
     * to the heir must wait until the heir context indicates that it is no
     * longer executing on a processor.  The context switch must also check if
     * a thread dispatch is necessary to honor updates of the heir thread for
     * this processor.  This indicator must be updated using an atomic test and
     * set operation to ensure that at most one processor uses the heir
     * context at the same time.
     *
     * @code
     * void _CPU_Context_switch(
     *   Context_Control *executing,
     *   Context_Control *heir
     * )
     * {
     *   save( executing );
     *
     *   executing->is_executing = false;
     *   memory_barrier();
     *
     *   if ( test_and_set( &heir->is_executing ) ) {
     *     do {
     *       Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();
     *
     *       if ( cpu_self->dispatch_necessary ) {
     *         heir = _Thread_Get_heir_and_make_it_executing( cpu_self );
     *       }
     *     } while ( test_and_set( &heir->is_executing ) );
     *   }
     *
     *   restore( heir );
     * }
     * @endcode
     */
    volatile bool is_executing;
#endif
} Context_Control;

#define _CPU_Context_Get_SP( _context ) \
  (_context)->r[13]

typedef struct {
  /** FPU registers are listed here */
  double  some_float_register;
} Context_Control_fp;

typedef Context_Control CPU_Interrupt_frame;

/*
 *  The size of the floating point context area.  On some CPUs this
 *  will not be a "sizeof" because the format of the floating point
 *  area is not defined -- only the size is.  This is usually on
 *  CPUs with a "floating point save context" instruction.
 *
 *  epiphany Specific Information:
 *
 */

#define CPU_CONTEXT_FP_SIZE  0

/*
 *  Amount of extra stack (above minimum stack size) required by
 *  MPCI receive server thread.  Remember that in a multiprocessor
 *  system this thread must exist and be able to process all directives.
 *
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

/*
 *  Should be large enough to run all RTEMS tests.  This ensures
 *  that a "reasonable" small application should not have any problems.
 *
 */

#define CPU_STACK_MINIMUM_SIZE  4096

/*
 *  CPU's worst alignment requirement for data types on a byte boundary.  This
 *  alignment does not take into account the requirements for the stack.
 *
 */

#define CPU_ALIGNMENT 8

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */
#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  This number corresponds to the byte alignment requirement for the
 *  heap handler.  This alignment requirement may be stricter than that
 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
 *  common for the heap to follow the same alignment requirement as
 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
 *  then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2 although it should be
 *         a multiple of 2 greater than or equal to 2.  The requirement
 *         to be a multiple of 2 is because the heap uses the least
 *         significant bit of the front and back flags to indicate
 *         whether a block is in use or free, so block lengths must not
 *         place length data in that bit.
 *
 *         On byte oriented architectures, CPU_HEAP_ALIGNMENT normally will
 *         have to be greater than or equal to CPU_ALIGNMENT to ensure that
 *         elements allocated from the heap meet all restrictions.
 *
 */

#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
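
/*
 *  Illustration only (not part of the port): the "multiple of 2" requirement
 *  described above could be checked at compile time, e.g. with the
 *  RTEMS_STATIC_ASSERT() macro from <rtems/score/basedefs.h>:
 *
 *  @code
 *  RTEMS_STATIC_ASSERT(
 *    CPU_HEAP_ALIGNMENT >= 2 && CPU_HEAP_ALIGNMENT % 2 == 0,
 *    CPU_HEAP_ALIGNMENT_must_be_even
 *  );
 *  @endcode
 */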

/*
 *  This number corresponds to the byte alignment requirement for the
 *  stack.  This alignment requirement may be stricter than that for the
 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
 *  is strict enough for the stack, then this should be set to 0.
 *
 *  NOTE:  This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
 *
 */

#define CPU_STACK_ALIGNMENT        8

#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES

/* ISR handler macros */

/*
 *  Support routine to initialize the RTEMS vector table after it is allocated.
 *
 *  Epiphany Specific Information:
 *
 *  XXX document implementation including references if appropriate
 */

#define _CPU_Initialize_vectors()

/*
 *  Disable all interrupts for an RTEMS critical section.  The previous
 *  level is returned in _level.
 *
 */

static inline uint32_t epiphany_interrupt_disable( void )
{
  uint32_t sr;
  __asm__ __volatile__ ("movfs %[sr], status \n" : [sr] "=r" (sr):);
  __asm__ __volatile__("gid \n");
  return sr;
}

static inline void epiphany_interrupt_enable(uint32_t level)
{
  __asm__ __volatile__("gie \n");
  __asm__ __volatile__ ("movts status, %[level] \n" :: [level] "r" (level):);
}

#define _CPU_ISR_Disable( _level ) \
    _level = epiphany_interrupt_disable()

/*
 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
 *  This indicates the end of an RTEMS critical section.  The parameter
 *  _level is not modified.
 *
 */

#define _CPU_ISR_Enable( _level )  \
  epiphany_interrupt_enable( _level )

/*
 *  This temporarily restores interrupts to _level before immediately
 *  disabling them again.  This is used to divide long RTEMS critical
 *  sections into two or more parts.  The parameter _level is not
 *  modified.
 *
 */

#define _CPU_ISR_Flash( _level ) \
  do{ \
      if ( (_level & 0x2) != 0 ) \
        _CPU_ISR_Enable( _level ); \
      epiphany_interrupt_disable(); \
    } while(0)

RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
{
  return ( level & 0x2 ) != 0;
}
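
/*
 *  Illustration only (hypothetical code): the typical use of the macros
 *  above to protect a short critical section and to split a long one:
 *
 *  @code
 *  uint32_t level;
 *
 *  _CPU_ISR_Disable( level );
 *  // ... first part of the critical section ...
 *  _CPU_ISR_Flash( level );   // briefly re-enable to the previous level
 *  // ... second part of the critical section ...
 *  _CPU_ISR_Enable( level );
 *  @endcode
 */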

/*
 *  Map interrupt level in task mode onto the hardware that the CPU
 *  actually provides.  Currently, interrupt levels which do not
 *  map onto the CPU in a generic fashion are undefined.  Someday,
 *  it would be nice if these were "mapped" by the application
 *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
 *  8 - 255 would be available for bsp/application specific meaning.
 *  This could be used to manage a programmable interrupt controller
 *  via the rtems_task_mode directive.
 *
 *  The get routine usually must be implemented as a subroutine.
 *
 */

void _CPU_ISR_Set_level( uint32_t level );

uint32_t _CPU_ISR_Get_level( void );
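
/*
 *  Illustration only (hypothetical code): with CPU_MODES_INTERRUPT_MASK set
 *  to 0x00000001, the task mode interrupt level is either 0 or 1.  By RTEMS
 *  convention level 0 typically means interrupts enabled and a non-zero
 *  level means interrupts disabled:
 *
 *  @code
 *  uint32_t level = _CPU_ISR_Get_level();
 *
 *  _CPU_ISR_Set_level( 1 );   // mask interrupts
 *  _CPU_ISR_Set_level( 0 );   // enable interrupts again
 *  @endcode
 */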

/* end of ISR handler macros */

/* Context handler macros */

/*
 *  Initialize the context to a state suitable for starting a
 *  task after a context restore operation.  Generally, this
 *  involves:
 *
 *     - setting a starting address
 *     - preparing the stack
 *     - preparing the stack and frame pointers
 *     - setting the proper interrupt level in the context
 *     - initializing the floating point context
 *
 *  This routine generally does not set any unnecessary register
 *  in the context.  The state of the "general data" registers is
 *  undefined at task start time.
 *
 *  NOTE: The is_fp parameter is TRUE if the thread is to be a floating
 *        point thread.  This is typically only used on CPUs where the
 *        FPU may be easily disabled by software such as on the SPARC
 *        where the PSR contains an enable FPU bit.
 *
 */

/**
 * @brief Account for GCC red-zone
 *
 * The following macro is used when initializing a task's stack
 * to account for the GCC red zone.
 */

#define EPIPHANY_GCC_RED_ZONE_SIZE 128

/**
 * @brief Initializes the CPU context.
 *
 * The following steps are performed:
 *  - setting a starting address
 *  - preparing the stack
 *  - preparing the stack and frame pointers
 *  - setting the proper interrupt level in the context
 *
 * @param[in] context points to the context area
 * @param[in] stack_area_begin is the low address of the allocated stack area
 * @param[in] stack_area_size is the size of the stack area in bytes
 * @param[in] new_level is the interrupt level for the task
 * @param[in] entry_point is the task's entry point
 * @param[in] is_fp is set to @c true if the task is a floating point task
 * @param[in] tls_area is the thread-local storage (TLS) area
 */
void _CPU_Context_Initialize(
  Context_Control *context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);
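
/*
 *  Illustration only: the implementation lives in the port's cpu.c.  An
 *  initializer of this kind typically derives the initial stack pointer
 *  from the top of the stack area, reserving the GCC red zone and honoring
 *  CPU_STACK_ALIGNMENT, roughly as sketched below (register assignments
 *  other than the stack pointer are deliberately omitted):
 *
 *  @code
 *  uintptr_t stack_high = (uintptr_t) stack_area_begin + stack_area_size;
 *  uintptr_t stack_ptr  = ( stack_high - EPIPHANY_GCC_RED_ZONE_SIZE )
 *                           & ~( (uintptr_t) CPU_STACK_ALIGNMENT - 1 );
 *
 *  context->r[13] = (uint32_t) stack_ptr;   // see _CPU_Context_Get_SP()
 *  @endcode
 */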

/*
 *  This routine is responsible for somehow restarting the currently
 *  executing task.  If you are lucky, then all that is necessary
 *  is restoring the context.  Otherwise, there will need to be
 *  a special assembly routine which does something special in this
 *  case.  Context_Restore should work most of the time.  It will
 *  not work if restarting self conflicts with the stack frame
 *  assumptions of restoring a context.
 *
 */

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) )

#define _CPU_Context_Initialize_fp( _destination ) \
  memset( *( _destination ), 0, CPU_CONTEXT_FP_SIZE );

/* end of Context handler macros */

/* Fatal Error manager macros */

/*
 *  This routine copies _error into a known place -- typically a stack
 *  location or a register, optionally disables interrupts, and
 *  halts/stops the CPU.
 *
 */

#include <inttypes.h>

#define _CPU_Fatal_halt(_source, _error ) \
          printk("Fatal Error %d.%" PRIu32 " Halted\n",_source, _error); \
          asm("trap 3" :: "r" (_error)); \
          for(;;)

/* end of Fatal Error manager macros */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

#endif /* ASM */

/**
 * Size of a pointer.
 *
 * This must be an integer literal that can be used by the assembler.  This
 * value will be used to calculate offsets of structure members.  These
 * offsets will be used in assembler code.
 */
#define CPU_SIZEOF_POINTER 4
#define CPU_EXCEPTION_FRAME_SIZE 260

#define CPU_MAXIMUM_PROCESSORS 32

#ifndef ASM

typedef struct {
  uint32_t r[62];
  uint32_t status;
  uint32_t config;
  uint32_t iret;
} CPU_Exception_frame;
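
/*
 *  Illustration only: CPU_EXCEPTION_FRAME_SIZE above is intended to match
 *  the size of this structure (62 + 3 registers of 4 bytes each = 260
 *  bytes).  A compile-time check of that relationship could look like:
 *
 *  @code
 *  RTEMS_STATIC_ASSERT(
 *    sizeof( CPU_Exception_frame ) == CPU_EXCEPTION_FRAME_SIZE,
 *    CPU_Exception_frame_size_matches_the_define
 *  );
 *  @endcode
 */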

/**
 * @brief Prints the exception frame via printk().
 *
 * @see rtems_fatal() and RTEMS_FATAL_SOURCE_EXCEPTION.
 */
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );


/* end of Priority handler macros */

/* functions */

/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 *
 */

void _CPU_Initialize(
  void
);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.
 *
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 *
 *  Epiphany Specific Information:
 *
 *  XXX document implementation including references if appropriate
 */

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr   new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Thread_Idle_body
 *
 *  This routine is the CPU dependent IDLE thread body.
 *
 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
 *         is TRUE.
 *
 */

void *_CPU_Thread_Idle_body( uintptr_t ignored );

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 *
 *  epiphany Specific Information:
 *
 *  Please see the comments in the .c file for a description of how
 *  this function works. There are several things to be aware of.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 *
 */

void _CPU_Context_save_fp(
  void **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 *
 */

void _CPU_Context_restore_fp(
  void **fp_context_ptr
);

/*  The following routine swaps the endian format of an unsigned int.
 *  It must be static because it is referenced indirectly.
 *
 *  This version will work on any processor, but if there is a better
 *  way for your CPU PLEASE use it.  The most common way to do this is to:
 *
 *     swap least significant two bytes with 16-bit rotate
 *     swap upper and lower 16-bits
 *     swap most significant two bytes with 16-bit rotate
 *
 *  Some CPUs have special instructions which swap a 32-bit quantity in
 *  a single instruction (e.g. i486).  It is probably best to avoid
 *  an "endian swapping control bit" in the CPU.  One good reason is
 *  that interrupts would probably have to be disabled to ensure that
 *  an interrupt does not try to access the same "chunk" with the wrong
 *  endian.  Another good reason is that on some CPUs, the endian bit
 *  changes the endianness for ALL fetches -- both code and data -- so
 *  the code will be fetched incorrectly.
 *
 */

static inline unsigned int CPU_swap_u32(
  unsigned int value
)
{
  uint32_t   byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value        & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return( swapped );
}
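
/*
 *  Illustration only: GCC provides __builtin_bswap32(), so an equivalent
 *  (hypothetically named) helper could be written as a single builtin call;
 *  the portable shift-and-mask version above remains the reference:
 *
 *  @code
 *  static inline unsigned int CPU_swap_u32_builtin( unsigned int value )
 *  {
 *    return __builtin_bswap32( value );
 *  }
 *  @endcode
 */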

#define CPU_swap_u16( value ) \
  (((value&0xff) << 8) | ((value >> 8)&0xff))

typedef uint32_t CPU_Counter_ticks;

uint32_t _CPU_Counter_frequency( void );

CPU_Counter_ticks _CPU_Counter_read( void );

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}
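
/*
 *  Illustration only (hypothetical code): measuring a short interval with
 *  the CPU counter interface declared above:
 *
 *  @code
 *  CPU_Counter_ticks begin = _CPU_Counter_read();
 *  // ... code to be measured ...
 *  CPU_Counter_ticks end   = _CPU_Counter_read();
 *  CPU_Counter_ticks delta = _CPU_Counter_difference( end, begin );
 *  uint64_t nanoseconds    = (uint64_t) delta * 1000000000
 *                              / _CPU_Counter_frequency();
 *  @endcode
 */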

/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif