source: rtems/bsps/powerpc/include/libcpu/powerpc-utility.h @ a2dad96

Last change on this file since a2dad96 was 2afb22b, checked in by Chris Johns <chrisj@…>, on 12/23/17 at 07:18:56

Remove make preinstall

A speciality of the RTEMS build system was the make preinstall step. It
copied header files from arbitrary locations into the build tree. The
header files were included via the -Bsome/build/tree/path GCC command
line option.

This has at least seven problems:

  • The make preinstall step itself needs time and disk space.
  • Errors in header files show up in the build tree copy. This makes it hard for editors to open the right file to fix the error.
  • There is no clear relationship between source and build tree header files. This makes an audit of the build process difficult.
  • The visibility of all header files in the build tree makes it difficult to enforce API barriers. For example, it is discouraged to use BSP-specifics in the cpukit.
  • An introduction of a new build system is difficult.
  • Include paths specified by the -B option are system headers. This may suppress warnings.
  • The parallel build had sporadic failures on some hosts.

This patch removes the make preinstall step. All installed header
files are moved to dedicated include directories in the source tree.
Let @RTEMS_CPU@ be the target architecture, e.g. arm, powerpc, sparc,
etc. Let @RTEMS_BSP_FAMILY@ be a BSP family base directory, e.g.
erc32, imx, qoriq, etc.

The new cpukit include directories are:

  • cpukit/include
  • cpukit/score/cpu/@RTEMS_CPU@/include
  • cpukit/libnetworking

The new BSP include directories are:

  • bsps/include
  • bsps/@RTEMS_CPU@/include
  • bsps/@RTEMS_CPU@/@RTEMS_BSP_FAMILY@/include

There are build tree include directories for generated files.

The include directory order favours the most general header files, i.e. it
is not possible to override general header files via the include path
order.

The "bootstrap -p" option was removed. The new "bootstrap -H" option
should be used to regenerate the "headers.am" files.

Update #3254.

/**
 * @file
 *
 * @ingroup powerpc_shared
 *
 * @brief General purpose assembler macros, linker command file support and
 * some inline functions for direct register access.
 */

/*
 * Copyright (c) 2008-2015 embedded brains GmbH.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * access function for Device Control Registers inspired by "ppc405common.h"
 * from Michael Hamel ADInstruments May 2008
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

/**
 * @defgroup powerpc_shared Shared PowerPC Code
 */

#ifndef __LIBCPU_POWERPC_UTILITY_H
#define __LIBCPU_POWERPC_UTILITY_H

#if !defined(ASM)
  #include <rtems.h>
#endif

#include <rtems/score/cpu.h>
#include <rtems/powerpc/registers.h>
#include <rtems/powerpc/powerpc.h>

#ifdef __cplusplus
extern "C" {
#endif

#if !defined(ASM)

#include <rtems/bspIo.h>
#include <rtems/system.h>

#include <libcpu/cpuIdent.h>

#define LINKER_SYMBOL(sym) extern char sym [];

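/*
 * Illustrative usage sketch (not part of the original header), assuming a
 * symbol such as bsp_section_text_begin is defined by the linker command
 * file:
 *
 *   LINKER_SYMBOL(bsp_section_text_begin)
 *
 *   uintptr_t text_begin = (uintptr_t) bsp_section_text_begin;
 *
 * The macro only declares the symbol; its address is assigned by the linker.
 */
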
/**
 * @brief Read one byte from @a src.
 */
static inline uint8_t ppc_read_byte(const volatile void *src)
{
  uint8_t value;

  __asm__ volatile (
    "lbz %0, 0(%1)"
    : "=r" (value)
    : "b" (src)
  );

  return value;
}

/**
 * @brief Read one half word from @a src.
 */
static inline uint16_t ppc_read_half_word(const volatile void *src)
{
  uint16_t value;

  __asm__ volatile (
    "lhz %0, 0(%1)"
    : "=r" (value)
    : "b" (src)
  );

  return value;
}

/**
 * @brief Read one word from @a src.
 */
static inline uint32_t ppc_read_word(const volatile void *src)
{
  uint32_t value;

  __asm__ volatile (
    "lwz %0, 0(%1)"
    : "=r" (value)
    : "b" (src)
  );

  return value;
}

/**
 * @brief Write one byte @a value to @a dest.
 */
static inline void ppc_write_byte(uint8_t value, volatile void *dest)
{
  __asm__ volatile (
    "stb %0, 0(%1)"
    :
    : "r" (value), "b" (dest)
  );
}

/**
 * @brief Write one half word @a value to @a dest.
 */
static inline void ppc_write_half_word(uint16_t value, volatile void *dest)
{
  __asm__ volatile (
    "sth %0, 0(%1)"
    :
    : "r" (value), "b" (dest)
  );
}

/**
 * @brief Write one word @a value to @a dest.
 */
static inline void ppc_write_word(uint32_t value, volatile void *dest)
{
  __asm__ volatile (
    "stw %0, 0(%1)"
    :
    : "r" (value), "b" (dest)
  );
}

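/*
 * Illustrative usage sketch (not part of the original header): set a bit in
 * a memory-mapped device register.  The register address and bit value are
 * hypothetical.
 *
 *   volatile void *reg = (volatile void *) 0xf0000000;
 *   uint32_t val;
 *
 *   val = ppc_read_word(reg);
 *   val |= 0x1;
 *   ppc_write_word(val, reg);
 */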

static inline void *ppc_stack_pointer(void)
{
  void *sp;

  __asm__ volatile (
    "mr %0, 1"
    : "=r" (sp)
  );

  return sp;
}

static inline void ppc_set_stack_pointer(void *sp)
{
  __asm__ volatile (
    "mr 1, %0"
    :
    : "r" (sp)
  );
}

static inline void *ppc_link_register(void)
{
  void *lr;

  __asm__ volatile (
    "mflr %0"
    : "=r" (lr)
  );

  return lr;
}

static inline void ppc_set_link_register(void *lr)
{
  __asm__ volatile (
    "mtlr %0"
    :
    : "r" (lr)
  );
}

static inline uint32_t ppc_machine_state_register(void)
{
  uint32_t msr;

  __asm__ volatile (
    "mfmsr %0"
    : "=r" (msr)
  );

  return msr;
}

static inline void ppc_set_machine_state_register(uint32_t msr)
{
  __asm__ volatile (
    "mtmsr %0"
    :
    : "r" (msr)
  );
}

static inline void ppc_synchronize_data(void)
{
  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile ("sync");
}

static inline void ppc_light_weight_synchronize(void)
{
  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile ("lwsync");
}

static inline void ppc_synchronize_instructions(void)
{
  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile ("isync");
}

static inline void ppc_enforce_in_order_execution_of_io(void)
{
  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile ("eieio");
}

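/*
 * Illustrative usage sketch (not part of the original header): order two
 * device register writes with eieio and make them visible before continuing.
 * The addresses and values are hypothetical.
 *
 *   ppc_write_word(0x1, (volatile void *) 0xf0000000);
 *   ppc_enforce_in_order_execution_of_io();
 *   ppc_write_word(0x2, (volatile void *) 0xf0000004);
 *   ppc_synchronize_data();
 */
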
static inline void ppc_data_cache_block_flush(void *addr)
{
  __asm__ volatile (
    "dcbf 0, %0"
    :
    : "r" (addr)
    : "memory"
  );
}

static inline void ppc_data_cache_block_flush_2(
  void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbf %0, %1"
    :
    : "b" (base), "r" (offset)
    : "memory"
  );
}

static inline void ppc_data_cache_block_invalidate(void *addr)
{
  __asm__ volatile (
    "dcbi 0, %0"
    :
    : "r" (addr)
    : "memory"
  );
}

static inline void ppc_data_cache_block_invalidate_2(
  void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbi %0, %1"
    :
    : "b" (base), "r" (offset)
    : "memory"
  );
}

static inline void ppc_data_cache_block_store(const void *addr)
{
  __asm__ volatile (
    "dcbst 0, %0"
    :
    : "r" (addr)
  );
}

static inline void ppc_data_cache_block_store_2(
  const void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbst %0, %1"
    :
    : "b" (base), "r" (offset)
  );
}

static inline void ppc_data_cache_block_touch(const void *addr)
{
  __asm__ volatile (
    "dcbt 0, %0"
    :
    : "r" (addr)
  );
}

static inline void ppc_data_cache_block_touch_2(
  const void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbt %0, %1"
    :
    : "b" (base), "r" (offset)
  );
}

static inline void ppc_data_cache_block_touch_for_store(const void *addr)
{
  __asm__ volatile (
    "dcbtst 0, %0"
    :
    : "r" (addr)
  );
}

static inline void ppc_data_cache_block_touch_for_store_2(
  const void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbtst %0, %1"
    :
    : "b" (base), "r" (offset)
  );
}

static inline void ppc_data_cache_block_clear_to_zero(void *addr)
{
  __asm__ volatile (
    "dcbz 0, %0"
    :
    : "r" (addr)
    : "memory"
  );
}

static inline void ppc_data_cache_block_clear_to_zero_2(
  void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbz %0, %1"
    :
    : "b" (base), "r" (offset)
    : "memory"
  );
}

static inline void ppc_instruction_cache_block_invalidate(void *addr)
{
  __asm__ volatile (
    "icbi 0, %0"
    :
    : "r" (addr)
  );
}

static inline void ppc_instruction_cache_block_invalidate_2(
  void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "icbi %0, %1"
    :
    : "b" (base), "r" (offset)
  );
}

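/*
 * Illustrative usage sketch (not part of the original header): write back
 * and invalidate a buffer cache line by cache line.  The 32-byte line size
 * is only an assumption; use the line size of the actual CPU.
 *
 *   static void flush_buffer(void *buf, size_t n)
 *   {
 *     uintptr_t offset;
 *
 *     for (offset = 0; offset < n; offset += 32) {
 *       ppc_data_cache_block_flush_2(buf, offset);
 *     }
 *
 *     ppc_synchronize_data();
 *   }
 */
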
/**
 * @brief Enables external exceptions.
 *
 * You can use this function to enable the external exceptions and restore the
 * machine state with ppc_external_exceptions_disable() later.
 */
static inline uint32_t ppc_external_exceptions_enable(void)
{
  uint32_t current_msr;
  uint32_t new_msr;

  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile (
    "mfmsr %0;"
    "ori %1, %0, 0x8000;"
    "mtmsr %1"
    : "=r" (current_msr), "=r" (new_msr)
  );

  return current_msr;
}

/**
 * @brief Restores machine state.
 *
 * @see ppc_external_exceptions_enable()
 */
static inline void ppc_external_exceptions_disable(uint32_t msr)
{
  ppc_set_machine_state_register(msr);

  RTEMS_COMPILER_MEMORY_BARRIER();
}

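/*
 * Illustrative usage sketch (not part of the original header): temporarily
 * enable external exceptions and restore the previous machine state
 * afterwards.
 *
 *   uint32_t msr = ppc_external_exceptions_enable();
 *
 *   ... code that runs with external exceptions enabled ...
 *
 *   ppc_external_exceptions_disable(msr);
 */
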
static inline uint32_t ppc_count_leading_zeros(uint32_t value)
{
  uint32_t count;

  __asm__ (
    "cntlzw %0, %1;"
    : "=r" (count)
    : "r" (value)
  );

  return count;
}

/*
 *  Simple spin delay in microsecond units for device drivers.
 *  This is very dependent on the clock speed of the target.
 */

#if defined(mpx8xx) || defined(mpc860) || defined(mpc821)
/* Wonderful BookE doesn't have mftb/mftbu; they only
 * define the TBRU/TBRL SPRs so we use these. Luckily,
 * we run in supervisor mode so that should work on
 * all CPUs. In user mode we'd have a problem...
 * 2007/11/30, T.S.
 *
 * OTOH, PSIM currently lacks support for reading
 * SPRs 268/269. You need GDB patch sim/2376 to avoid
 * a crash...
 * OTOH, the MPC8xx does not allow reading the time base registers via mfspr.
 * We NEED a mftb to access the time base.
 * 2009/10/30 Th. D.
 */
#define CPU_Get_timebase_low( _value ) \
    __asm__ volatile( "mftb  %0" : "=r" (_value) )
#else
#define CPU_Get_timebase_low( _value ) \
    __asm__ volatile( "mfspr %0,268" : "=r" (_value) )
#endif

/* Must be provided for rtems_bsp_delay to work */
extern uint32_t bsp_clicks_per_usec;

#define rtems_bsp_delay( _microseconds ) \
  do { \
    uint32_t   start, ticks, now; \
    CPU_Get_timebase_low( start ); \
    ticks = (_microseconds) * bsp_clicks_per_usec; \
    do \
      CPU_Get_timebase_low( now ); \
    while (now - start < ticks); \
  } while (0)

#define rtems_bsp_delay_in_bus_cycles( _cycles ) \
  do { \
    uint32_t   start, now; \
    CPU_Get_timebase_low( start ); \
    do \
      CPU_Get_timebase_low( now ); \
    while (now - start < (_cycles)); \
  } while (0)

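/*
 * Illustrative usage sketch (not part of the original header): busy-wait for
 * ten microseconds.  This only works if the BSP initialized
 * bsp_clicks_per_usec with the number of time base ticks per microsecond.
 *
 *   rtems_bsp_delay( 10 );
 */
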
/*
 *  Routines to access the decrementer register
 */

#define PPC_Set_decrementer( _clicks ) \
  do { \
    __asm__ volatile( "mtdec %0" : : "r" ((_clicks)) ); \
  } while (0)

#define PPC_Get_decrementer( _clicks ) \
    __asm__ volatile( "mfdec  %0" : "=r" (_clicks) )

/*
 *  Routines to access the time base register
 */

static inline uint64_t PPC_Get_timebase_register( void )
{
  uint32_t tbr_low;
  uint32_t tbr_high;
  uint32_t tbr_high_old;
  uint64_t tbr;

  /* Reread the upper word until it is stable, i.e. no carry from the lower
     word occurred between the two reads */
  do {
#if defined(mpx8xx) || defined(mpc860) || defined(mpc821)
/* See comment above (CPU_Get_timebase_low) */
    __asm__ volatile( "mftbu %0" : "=r" (tbr_high_old));
    __asm__ volatile( "mftb  %0" : "=r" (tbr_low));
    __asm__ volatile( "mftbu %0" : "=r" (tbr_high));
#else
    __asm__ volatile( "mfspr %0, 269" : "=r" (tbr_high_old));
    __asm__ volatile( "mfspr %0, 268" : "=r" (tbr_low));
    __asm__ volatile( "mfspr %0, 269" : "=r" (tbr_high));
#endif
  } while ( tbr_high_old != tbr_high );

  tbr = tbr_high;
  tbr <<= 32;
  tbr |= tbr_low;
  return tbr;
}

static inline void PPC_Set_timebase_register(uint64_t tbr)
{
  uint32_t tbr_low;
  uint32_t tbr_high;

  tbr_low = (uint32_t) tbr;
  tbr_high = (uint32_t) (tbr >> 32);
  __asm__ volatile( "mtspr 284, %0" : : "r" (tbr_low));
  __asm__ volatile( "mtspr 285, %0" : : "r" (tbr_high));
}

static inline uint32_t ppc_decrementer_register(void)
{
  uint32_t dec;

  PPC_Get_decrementer(dec);

  return dec;
}

static inline void ppc_set_decrementer_register(uint32_t dec)
{
  PPC_Set_decrementer(dec);
}

/**
 * @brief Preprocessor magic for stringification of @a x.
 */
#define PPC_STRINGOF(x) #x

/**
 * @brief Returns the value of the Special Purpose Register with number @a spr.
 *
 * @note This macro uses a GNU C extension.
 */
#define PPC_SPECIAL_PURPOSE_REGISTER(spr) \
  ({ \
    uint32_t val; \
    __asm__ volatile (\
      "mfspr %0, " PPC_STRINGOF(spr) \
      : "=r" (val) \
    ); \
    val;\
  } )

/**
 * @brief Sets the Special Purpose Register with number @a spr to the value in
 * @a val.
 */
#define PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val) \
  do { \
    __asm__ volatile (\
      "mtspr " PPC_STRINGOF(spr) ", %0" \
      : \
      : "r" (val) \
    ); \
  } while (0)

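/*
 * Illustrative usage sketch (not part of the original header): read and
 * write a special purpose register by name.  BOOKE_PIR is used here only
 * because it appears elsewhere in this header; any SPR name or number known
 * to the assembler works.
 *
 *   uint32_t pir = PPC_SPECIAL_PURPOSE_REGISTER( BOOKE_PIR );
 *   PPC_SET_SPECIAL_PURPOSE_REGISTER( BOOKE_PIR, pir );
 */
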
/**
 * @brief Sets in the Special Purpose Register with number @a spr all bits
 * which are set in @a bits.
 *
 * Interrupts are disabled throughout this operation.
 */
#define PPC_SET_SPECIAL_PURPOSE_REGISTER_BITS(spr, bits) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    _ISR_Local_disable(level); \
    val = PPC_SPECIAL_PURPOSE_REGISTER(spr); \
    val |= mybits; \
    PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val); \
    _ISR_Local_enable(level); \
  } while (0)

/**
 * @brief Sets in the Special Purpose Register with number @a spr all bits
 * which are set in @a bits.  The previous register value will be masked with
 * @a mask.
 *
 * Interrupts are disabled throughout this operation.
 */
#define PPC_SET_SPECIAL_PURPOSE_REGISTER_BITS_MASKED(spr, bits, mask) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    uint32_t mymask = mask; \
    _ISR_Local_disable(level); \
    val = PPC_SPECIAL_PURPOSE_REGISTER(spr); \
    val &= ~mymask; \
    val |= mybits; \
    PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val); \
    _ISR_Local_enable(level); \
  } while (0)

/**
 * @brief Clears in the Special Purpose Register with number @a spr all bits
 * which are set in @a bits.
 *
 * Interrupts are disabled throughout this operation.
 */
#define PPC_CLEAR_SPECIAL_PURPOSE_REGISTER_BITS(spr, bits) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    _ISR_Local_disable(level); \
    val = PPC_SPECIAL_PURPOSE_REGISTER(spr); \
    val &= ~mybits; \
    PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val); \
    _ISR_Local_enable(level); \
  } while (0)

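/*
 * Illustrative usage sketch (not part of the original header): replace a
 * bit field in an SPR.  The SPR name SOME_SPR, the mask and the bits are
 * hypothetical.
 *
 *   PPC_SET_SPECIAL_PURPOSE_REGISTER_BITS_MASKED( SOME_SPR, 0x2, 0x3 );
 *
 * This clears all bits set in the mask 0x3 and then sets the bits 0x2, with
 * interrupts disabled during the read-modify-write sequence.
 */
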
/**
 * @brief Returns the value of the Thread Management Register with number @a tmr.
 *
 * @note This macro uses a GNU C extension.
 */
#define PPC_THREAD_MGMT_REGISTER(tmr) \
  ({ \
    uint32_t val; \
    __asm__ volatile (\
      "mftmr %0, " PPC_STRINGOF(tmr) \
      : "=r" (val) \
    ); \
    val;\
  } )

/**
 * @brief Sets the Thread Management Register with number @a tmr to the value in
 * @a val.
 */
#define PPC_SET_THREAD_MGMT_REGISTER(tmr, val) \
  do { \
    __asm__ volatile (\
      "mttmr " PPC_STRINGOF(tmr) ", %0" \
      : \
      : "r" (val) \
    ); \
  } while (0)

/**
 * @brief Returns the value of the Device Control Register with number @a dcr.
 *
 * The PowerPC 4XX family has Device Control Registers.
 *
 * @note This macro uses a GNU C extension.
 */
#define PPC_DEVICE_CONTROL_REGISTER(dcr) \
  ({ \
    uint32_t val; \
    __asm__ volatile (\
      "mfdcr %0, " PPC_STRINGOF(dcr) \
      : "=r" (val) \
    ); \
    val;\
  } )

/**
 * @brief Sets the Device Control Register with number @a dcr to the value in
 * @a val.
 *
 * The PowerPC 4XX family has Device Control Registers.
 */
#define PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val) \
  do { \
    __asm__ volatile (\
      "mtdcr " PPC_STRINGOF(dcr) ", %0" \
      : \
      : "r" (val) \
    ); \
  } while (0)

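/*
 * Illustrative usage sketch (not part of the original header), for the
 * PowerPC 4XX family only: read and modify a Device Control Register.  The
 * DCR number 0x0c and the bit value are hypothetical.
 *
 *   uint32_t val = PPC_DEVICE_CONTROL_REGISTER( 0x0c );
 *   PPC_SET_DEVICE_CONTROL_REGISTER( 0x0c, val | 0x1 );
 */
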
/**
 * @brief Sets in the Device Control Register with number @a dcr all bits
 * which are set in @a bits.
 *
 * Interrupts are disabled throughout this operation.
 */
#define PPC_SET_DEVICE_CONTROL_REGISTER_BITS(dcr, bits) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    _ISR_Local_disable(level); \
    val = PPC_DEVICE_CONTROL_REGISTER(dcr); \
    val |= mybits; \
    PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val); \
    _ISR_Local_enable(level); \
  } while (0)

/**
 * @brief Sets in the Device Control Register with number @a dcr all bits
 * which are set in @a bits.  The previous register value will be masked with
 * @a mask.
 *
 * Interrupts are disabled throughout this operation.
 */
#define PPC_SET_DEVICE_CONTROL_REGISTER_BITS_MASKED(dcr, bits, mask) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    uint32_t mymask = mask; \
    _ISR_Local_disable(level); \
    val = PPC_DEVICE_CONTROL_REGISTER(dcr); \
    val &= ~mymask; \
    val |= mybits; \
    PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val); \
    _ISR_Local_enable(level); \
  } while (0)

/**
 * @brief Clears in the Device Control Register with number @a dcr all bits
 * which are set in @a bits.
 *
 * Interrupts are disabled throughout this operation.
 */
#define PPC_CLEAR_DEVICE_CONTROL_REGISTER_BITS(dcr, bits) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    _ISR_Local_disable(level); \
    val = PPC_DEVICE_CONTROL_REGISTER(dcr); \
    val &= ~mybits; \
    PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val); \
    _ISR_Local_enable(level); \
  } while (0)

static inline uint32_t ppc_time_base(void)
{
  uint32_t val;

  CPU_Get_timebase_low(val);

  return val;
}

static inline void ppc_set_time_base(uint32_t val)
{
  PPC_SET_SPECIAL_PURPOSE_REGISTER(TBWL, val);
}

static inline uint32_t ppc_time_base_upper(void)
{
  return PPC_SPECIAL_PURPOSE_REGISTER(TBRU);
}

static inline void ppc_set_time_base_upper(uint32_t val)
{
  PPC_SET_SPECIAL_PURPOSE_REGISTER(TBWU, val);
}

static inline uint64_t ppc_time_base_64(void)
{
  return PPC_Get_timebase_register();
}

static inline void ppc_set_time_base_64(uint64_t val)
{
  PPC_Set_timebase_register(val);
}

static inline uint32_t ppc_alternate_time_base(void)
{
  return PPC_SPECIAL_PURPOSE_REGISTER(FSL_EIS_ATBL);
}

static inline uint32_t ppc_alternate_time_base_upper(void)
{
  return PPC_SPECIAL_PURPOSE_REGISTER(FSL_EIS_ATBU);
}

static inline uint64_t ppc_alternate_time_base_64(void)
{
  uint32_t atbl;
  uint32_t atbu_0;
  uint32_t atbu_1;

  do {
    atbu_0 = ppc_alternate_time_base_upper();
    atbl = ppc_alternate_time_base();
    atbu_1 = ppc_alternate_time_base_upper();
  } while (atbu_0 != atbu_1);

  return (((uint64_t) atbu_1) << 32) | ((uint64_t) atbl);
}

static inline uint32_t ppc_processor_id(void)
{
  return PPC_SPECIAL_PURPOSE_REGISTER(BOOKE_PIR);
}

static inline void ppc_set_processor_id(uint32_t val)
{
  PPC_SET_SPECIAL_PURPOSE_REGISTER(BOOKE_PIR, val);
}

static inline uint32_t ppc_fsl_system_version(void)
{
  return PPC_SPECIAL_PURPOSE_REGISTER(FSL_EIS_SVR);
}

static inline uint32_t ppc_fsl_system_version_cid(uint32_t svr)
{
  return (svr >> 28) & 0xf;
}

static inline uint32_t ppc_fsl_system_version_sid(uint32_t svr)
{
  return (svr >> 16) & 0xfff;
}

static inline uint32_t ppc_fsl_system_version_proc(uint32_t svr)
{
  return (svr >> 12) & 0xf;
}

static inline uint32_t ppc_fsl_system_version_mfg(uint32_t svr)
{
  return (svr >> 8) & 0xf;
}

static inline uint32_t ppc_fsl_system_version_mjrev(uint32_t svr)
{
  return (svr >> 4) & 0xf;
}

static inline uint32_t ppc_fsl_system_version_mnrev(uint32_t svr)
{
  return (svr >> 0) & 0xf;
}

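/*
 * Illustrative usage sketch (not part of the original header): decode the
 * Freescale system version register into some of its fields.
 *
 *   uint32_t svr = ppc_fsl_system_version();
 *   uint32_t sid = ppc_fsl_system_version_sid( svr );
 *   uint32_t mjrev = ppc_fsl_system_version_mjrev( svr );
 *   uint32_t mnrev = ppc_fsl_system_version_mnrev( svr );
 */
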
void ppc_code_copy(void *dest, const void *src, size_t n);

/* FIXME: Do not use this function */
void printBAT(int bat, uint32_t upper, uint32_t lower);

/* FIXME: Do not use this function */
void ShowBATS(void);

#endif /* ifndef ASM */

#if defined(ASM)
#include <rtems/asm.h>

.macro LA reg, addr
#if defined(__powerpc64__)
        lis     \reg, (\addr)@highest
        ori     \reg, \reg, (\addr)@higher
        rldicr  \reg, \reg, 32, 31
        oris    \reg, \reg, (\addr)@h
        ori     \reg, \reg, (\addr)@l
#else
        lis     \reg, (\addr)@h
        ori     \reg, \reg, (\addr)@l
#endif
.endm

.macro LA32 reg, addr
        lis     \reg, (\addr)@h
        ori     \reg, \reg, (\addr)@l
.endm

.macro LWI reg, value
        lis     \reg, (\value)@h
        ori     \reg, \reg, (\value)@l
.endm

.macro LW reg, addr
        lis     \reg, \addr@ha
        lwz     \reg, \addr@l(\reg)
.endm

/*
 * Tests the bits in reg1 against the bits set in mask.  A match is indicated
 * by EQ = 0 in CR0.  A mismatch is indicated by EQ = 1 in CR0.  The register
 * reg2 is used to load the mask.
 */
.macro  TSTBITS reg1, reg2, mask
        LWI     \reg2, \mask
        and     \reg1, \reg1, \reg2
        cmplw   \reg1, \reg2
.endm

.macro  SETBITS reg1, reg2, mask
        LWI     \reg2, \mask
        or      \reg1, \reg1, \reg2
.endm

.macro  CLRBITS reg1, reg2, mask
        LWI     \reg2, \mask
        andc    \reg1, \reg1, \reg2
.endm

.macro GLOBAL_FUNCTION name
        .global \name
        .type \name, @function
\name:
.endm

/*
 * Obtain interrupt mask
 */
.macro GET_INTERRUPT_MASK mask
        lis     \mask, _PPC_INTERRUPT_DISABLE_MASK@h
        ori     \mask, \mask, _PPC_INTERRUPT_DISABLE_MASK@l
.endm

/*
 * Disables all asynchronous exceptions (interrupts) which may cause a context
 * switch.
 */
.macro INTERRUPT_DISABLE level, mask
        mfmsr   \level
        GET_INTERRUPT_MASK mask=\mask
        andc    \mask, \level, \mask
        mtmsr   \mask
.endm

/*
 * Restore previous machine state.
 */
.macro INTERRUPT_ENABLE level
        mtmsr   \level
.endm

.macro SET_SELF_CPU_CONTROL reg_0, reg_1
#if defined(RTEMS_SMP)
        /* Use Book E Processor ID Register (PIR) */
        mfspr   \reg_0, 286
        slwi    \reg_0, \reg_0, PER_CPU_CONTROL_SIZE_LOG2
#if defined(__powerpc64__)
        LA      \reg_1, _Per_CPU_Information
        add     \reg_0, \reg_0, \reg_1
#else
        addis   \reg_0, \reg_0, _Per_CPU_Information@ha
        addi    \reg_0, \reg_0, _Per_CPU_Information@l
#endif
        mtspr   PPC_PER_CPU_CONTROL_REGISTER, \reg_0
#endif
.endm

.macro GET_SELF_CPU_CONTROL reg
#if defined(RTEMS_SMP)
        mfspr   \reg, PPC_PER_CPU_CONTROL_REGISTER
#else
        lis     \reg, _Per_CPU_Information@h
        ori     \reg, \reg, _Per_CPU_Information@l
#endif
.endm

#define LINKER_SYMBOL(sym) .extern sym

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif /* __LIBCPU_POWERPC_UTILITY_H */