Changeset 61bd0301 in rtems


Ignore:
Timestamp:
Jun 14, 2000, 3:52:24 PM (21 years ago)
Author:
Joel Sherrill <joel.sherrill@…>
Branches:
4.10, 4.11, 4.8, 4.9, 5, master
Children:
ac815430
Parents:
e4d7169f
Message:

Moved PowerPC cache management code to libcpu. Also compiled
mpc8xx libcpu support for the first time and removed includes
of bsp.h, references to BSP_Configuration, and Cpu_table. All
of these can be obtained directly from RTEMS now.

Files:
12 edited

Legend:

Unmodified
Added
Removed
  • c/src/exec/score/cpu/powerpc/rtems/score/ppc.h

    re4d7169f r61bd0301  
    384384#error "Undefined power of 2 for PPC_CACHE_ALIGNMENT"
    385385#endif
    386 
    387 #ifndef ASM
    388 
    389 /*
    390  * CACHE MANAGER: The following functions are CPU-specific.
    391  * They provide the basic implementation for the rtems_* cache
    392  * management routines. If a given function has no meaning for the CPU,
    393  * it does nothing by default.
    394  *
    395  * FIXME: Some functions simply have not been implemented.
    396  */
    397  
    398 #if defined(ppc603)                     /* And possibly others */
    399 #define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
    400 #define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
    401 
    402 /* Helpful macros */
    403 #define PPC_Get_HID0( _value ) \
    404   do { \
    405       _value = 0;        /* to avoid warnings */ \
    406       asm volatile( \
    407           "mfspr %0, 0x3f0;"     /* get HID0 */ \
    408           "isync" \
    409           : "=r" (_value) \
    410           : "0" (_value) \
    411       ); \
    412   } while (0)
    413 
    414 #define PPC_Set_HID0( _value ) \
    415   do { \
    416       asm volatile( \
    417           "isync;" \
    418           "mtspr 0x3f0, %0;"     /* load HID0 */ \
    419           "isync" \
    420           : "=r" (_value) \
    421           : "0" (_value) \
    422       ); \
    423   } while (0)
    424 
    425 static inline void _CPU_enable_data_cache (
    426         void )
    427 {
    428   unsigned32 value;
    429   PPC_Get_HID0( value );
    430   value |= 0x00004000;        /* set DCE bit */
    431   PPC_Set_HID0( value );
    432 }
    433 
    434 static inline void _CPU_disable_data_cache (
    435         void )
    436 {
    437   unsigned32 value;
    438   PPC_Get_HID0( value );
    439   value &= 0xFFFFBFFF;        /* clear DCE bit */
    440   PPC_Set_HID0( value );
    441 }
    442 
    443 static inline void _CPU_enable_inst_cache (
    444         void )
    445 {
    446   unsigned32 value;
    447   PPC_Get_HID0( value );
    448   value |= 0x00008000;       /* Set ICE bit */
    449   PPC_Set_HID0( value );
    450 }
    451 
    452 static inline void _CPU_disable_inst_cache (
    453         void )
    454 {
    455   unsigned32 value;
    456   PPC_Get_HID0( value );
    457   value &= 0xFFFF7FFF;       /* Clear ICE bit */
    458   PPC_Set_HID0( value );
    459 }
    460 
    461 #elif ( defined(mpc860) || defined(mpc821) )
    462 
    463 #define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
    464 #define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
    465 
    466 #define mtspr(_spr,_reg)   __asm__ volatile ( "mtspr %0, %1\n" : : "i" ((_spr)), "r" ((_reg)) )
    467 #define isync   __asm__ volatile ("isync\n"::)
    468 
    469 static inline void _CPU_flush_1_data_cache_line(
    470         const void * _address )
    471 {
    472   register const void *__address = _address;
    473   asm volatile ( "dcbf 0,%0" :: "r" (__address) );
    474 }
    475 
    476 static inline void _CPU_invalidate_1_data_cache_line(
    477         const void * _address )
    478 {
    479   register const void *__address = _address;
    480   asm volatile ( "dcbi 0,%0" :: "r" (__address) );
    481 }
    482 
    483 static inline void _CPU_flush_entire_data_cache ( void ) {}
    484 static inline void _CPU_invalidate_entire_data_cache ( void ) {}
    485 static inline void _CPU_freeze_data_cache ( void ) {}
    486 static inline void _CPU_unfreeze_data_cache ( void ) {}
    487 
    488 static inline void _CPU_enable_data_cache (
    489         void )
    490 {
    491   unsigned32 r1;
    492   r1 = (0x2<<24);
    493   mtspr( 568, r1 );
    494   isync;
    495 }
    496 
    497 static inline void _CPU_disable_data_cache (
    498         void )
    499 {
    500   unsigned32 r1;
    501   r1 = (0x4<<24);
    502   mtspr( 568, r1 );
    503   isync;
    504 }
    505 
    506 static inline void _CPU_invalidate_1_inst_cache_line(
    507         const void * _address )
    508 {
    509   register const void *__address = _address;
    510   asm volatile ( "icbi 0,%0" :: "r" (__address) );
    511 }
    512 
    513 static inline void _CPU_invalidate_entire_inst_cache ( void ) {}
    514 static inline void _CPU_freeze_inst_cache ( void ) {}
    515 static inline void _CPU_unfreeze_inst_cache ( void ) {}
    516 
    517 static inline void _CPU_enable_inst_cache (
    518         void )
    519 {
    520   unsigned32 r1;
    521   r1 = (0x2<<24);
    522   mtspr( 560, r1 );
    523   isync;
    524 }
    525 
    526 static inline void _CPU_disable_inst_cache (
    527         void )
    528 {
    529   unsigned32 r1;
    530   r1 = (0x4<<24);
    531   mtspr( 560, r1 );
    532   isync;
    533 }
    534 #endif
    535 
    536 #endif  /* !ASM */
    537386
    538387/*
  • c/src/exec/score/cpu/powerpc/shared/ppc.h

    re4d7169f r61bd0301  
    384384#error "Undefined power of 2 for PPC_CACHE_ALIGNMENT"
    385385#endif
    386 
    387 #ifndef ASM
    388 
    389 /*
    390  * CACHE MANAGER: The following functions are CPU-specific.
    391  * They provide the basic implementation for the rtems_* cache
    392  * management routines. If a given function has no meaning for the CPU,
    393  * it does nothing by default.
    394  *
    395  * FIXME: Some functions simply have not been implemented.
    396  */
    397  
    398 #if defined(ppc603)                     /* And possibly others */
    399 #define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
    400 #define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
    401 
    402 /* Helpful macros */
    403 #define PPC_Get_HID0( _value ) \
    404   do { \
    405       _value = 0;        /* to avoid warnings */ \
    406       asm volatile( \
    407           "mfspr %0, 0x3f0;"     /* get HID0 */ \
    408           "isync" \
    409           : "=r" (_value) \
    410           : "0" (_value) \
    411       ); \
    412   } while (0)
    413 
    414 #define PPC_Set_HID0( _value ) \
    415   do { \
    416       asm volatile( \
    417           "isync;" \
    418           "mtspr 0x3f0, %0;"     /* load HID0 */ \
    419           "isync" \
    420           : "=r" (_value) \
    421           : "0" (_value) \
    422       ); \
    423   } while (0)
    424 
    425 static inline void _CPU_enable_data_cache (
    426         void )
    427 {
    428   unsigned32 value;
    429   PPC_Get_HID0( value );
    430   value |= 0x00004000;        /* set DCE bit */
    431   PPC_Set_HID0( value );
    432 }
    433 
    434 static inline void _CPU_disable_data_cache (
    435         void )
    436 {
    437   unsigned32 value;
    438   PPC_Get_HID0( value );
    439   value &= 0xFFFFBFFF;        /* clear DCE bit */
    440   PPC_Set_HID0( value );
    441 }
    442 
    443 static inline void _CPU_enable_inst_cache (
    444         void )
    445 {
    446   unsigned32 value;
    447   PPC_Get_HID0( value );
    448   value |= 0x00008000;       /* Set ICE bit */
    449   PPC_Set_HID0( value );
    450 }
    451 
    452 static inline void _CPU_disable_inst_cache (
    453         void )
    454 {
    455   unsigned32 value;
    456   PPC_Get_HID0( value );
    457   value &= 0xFFFF7FFF;       /* Clear ICE bit */
    458   PPC_Set_HID0( value );
    459 }
    460 
    461 #elif ( defined(mpc860) || defined(mpc821) )
    462 
    463 #define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
    464 #define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
    465 
    466 #define mtspr(_spr,_reg)   __asm__ volatile ( "mtspr %0, %1\n" : : "i" ((_spr)), "r" ((_reg)) )
    467 #define isync   __asm__ volatile ("isync\n"::)
    468 
    469 static inline void _CPU_flush_1_data_cache_line(
    470         const void * _address )
    471 {
    472   register const void *__address = _address;
    473   asm volatile ( "dcbf 0,%0" :: "r" (__address) );
    474 }
    475 
    476 static inline void _CPU_invalidate_1_data_cache_line(
    477         const void * _address )
    478 {
    479   register const void *__address = _address;
    480   asm volatile ( "dcbi 0,%0" :: "r" (__address) );
    481 }
    482 
    483 static inline void _CPU_flush_entire_data_cache ( void ) {}
    484 static inline void _CPU_invalidate_entire_data_cache ( void ) {}
    485 static inline void _CPU_freeze_data_cache ( void ) {}
    486 static inline void _CPU_unfreeze_data_cache ( void ) {}
    487 
    488 static inline void _CPU_enable_data_cache (
    489         void )
    490 {
    491   unsigned32 r1;
    492   r1 = (0x2<<24);
    493   mtspr( 568, r1 );
    494   isync;
    495 }
    496 
    497 static inline void _CPU_disable_data_cache (
    498         void )
    499 {
    500   unsigned32 r1;
    501   r1 = (0x4<<24);
    502   mtspr( 568, r1 );
    503   isync;
    504 }
    505 
    506 static inline void _CPU_invalidate_1_inst_cache_line(
    507         const void * _address )
    508 {
    509   register const void *__address = _address;
    510   asm volatile ( "icbi 0,%0" :: "r" (__address) );
    511 }
    512 
    513 static inline void _CPU_invalidate_entire_inst_cache ( void ) {}
    514 static inline void _CPU_freeze_inst_cache ( void ) {}
    515 static inline void _CPU_unfreeze_inst_cache ( void ) {}
    516 
    517 static inline void _CPU_enable_inst_cache (
    518         void )
    519 {
    520   unsigned32 r1;
    521   r1 = (0x2<<24);
    522   mtspr( 560, r1 );
    523   isync;
    524 }
    525 
    526 static inline void _CPU_disable_inst_cache (
    527         void )
    528 {
    529   unsigned32 r1;
    530   r1 = (0x4<<24);
    531   mtspr( 560, r1 );
    532   isync;
    533 }
    534 #endif
    535 
    536 #endif  /* !ASM */
    537386
    538387/*
  • c/src/lib/libcpu/powerpc/configure.in

    re4d7169f r61bd0301  
    7171mpc6xx/wrapup/Makefile
    7272shared/Makefile
     73shared/include/Makefile
     74shared/src/Makefile
    7375wrapup/Makefile)
  • c/src/lib/libcpu/powerpc/mpc8xx/clock/clock.c

    re4d7169f r61bd0301  
    3737 */
    3838
    39 #include <bsp.h>
     39#include <rtems.h>
    4040#include <clockdrv.h>
    4141#include <rtems/libio.h>
     
    4343#include <stdlib.h>                     /* for atexit() */
    4444#include <mpc8xx.h>
    45 
    46 extern rtems_cpu_table           Cpu_table;             /* owned by BSP */
    4745
    4846volatile rtems_unsigned32 Clock_driver_ticks;
     
    7977  Clock_driver_ticks = 0;
    8078 
    81   pit_value = (BSP_Configuration.microseconds_per_tick *
    82                Cpu_table.clicks_per_usec) - 1 ;
     79  pit_value = (rtems_configuration_get_microseconds_per_tick() *
     80               rtems_cpu_configuration_get_clicks_per_usec()) - 1 ;
    8381 
    8482  if (pit_value > 0xffff) {           /* pit is only 16 bits long */
    8583    rtems_fatal_error_occurred(-1);
    8684  } 
    87   if (BSP_Configuration.ticks_per_timeslice) {
    88    
    89     /*
    90      * initialize the interval here
    91      * First tick is set to right amount of time in the future
    92      * Future ticks will be incremented over last value set
    93      * in order to provide consistent clicks in the face of
    94      * interrupt overhead
    95      */
    96    
    97     rtems_interrupt_catch(clock_isr, PPC_IRQ_LVL0, &previous_isr);
    98    
    99     m8xx.sccr &= ~(1<<24);
    100     m8xx.pitc = pit_value;
    101    
    102     /* set PIT irq level, enable PIT, PIT interrupts */
    103     /*  and clear int. status */
    104     m8xx.piscr = M8xx_PISCR_PIRQ(0) |
    105       M8xx_PISCR_PTE | M8xx_PISCR_PS | M8xx_PISCR_PIE;
     85
     86  /*
     87   * initialize the interval here
     88   * First tick is set to right amount of time in the future
     89   * Future ticks will be incremented over last value set
     90   * in order to provide consistent clicks in the face of
     91   * interrupt overhead
     92   */
     93 
     94  rtems_interrupt_catch(clock_isr, PPC_IRQ_LVL0, &previous_isr);
     95 
     96  m8xx.sccr &= ~(1<<24);
     97  m8xx.pitc = pit_value;
     98 
     99  /* set PIT irq level, enable PIT, PIT interrupts */
     100  /*  and clear int. status */
     101  m8xx.piscr = M8xx_PISCR_PIRQ(0) |
     102    M8xx_PISCR_PTE | M8xx_PISCR_PS | M8xx_PISCR_PIE;
    106103   
    107104#ifdef EPPCBUG_SMC1
    108     simask_copy = m8xx.simask | M8xx_SIMASK_LVM0;
     105  simask_copy = m8xx.simask | M8xx_SIMASK_LVM0;
    109106#endif /* EPPCBUG_SMC1 */
    110     m8xx.simask |= M8xx_SIMASK_LVM0;
    111   }
     107  m8xx.simask |= M8xx_SIMASK_LVM0;
    112108  atexit(Clock_exit);
    113109}
     
    134130Clock_exit(void)
    135131{
    136   if ( BSP_Configuration.ticks_per_timeslice ) {
    137     /* disable PIT and PIT interrupts */
    138     m8xx.piscr &= ~(M8xx_PISCR_PTE | M8xx_PISCR_PIE);
    139    
    140     (void) set_vector(0, PPC_IRQ_LVL0, 1);
    141   }
     132  /* disable PIT and PIT interrupts */
     133  m8xx.piscr &= ~(M8xx_PISCR_PTE | M8xx_PISCR_PIE);
     134 
     135  (void) set_vector(0, PPC_IRQ_LVL0, 1);
    142136}
    143137
  • c/src/lib/libcpu/powerpc/mpc8xx/console-generic/console-generic.c

    re4d7169f r61bd0301  
    4747 */
    4848
    49 #include <bsp.h>
     49#include <rtems.h>
    5050#include <rtems/libio.h>
    5151#include <mpc8xx.h>
    5252#include <mpc8xx/console.h>
     53#include <mpc8xx/cpm.h>
    5354#include <stdlib.h>
    5455#include <unistd.h>
     
    400401    if ((RxBd[SCC2_MINOR]->status & M8xx_BD_EMPTY) == 0) {
    401402      rtems_invalidate_multiple_data_cache_lines(
    402         RxBd[SCC2_MINOR]->buffer,
     403        (const void *) RxBd[SCC2_MINOR]->buffer,
    403404        RxBd[SCC2_MINOR]->length );
    404405      nb_overflow = rtems_termios_enqueue_raw_characters(
     
    443444    if ((RxBd[SCC3_MINOR]->status & M8xx_BD_EMPTY) == 0) {
    444445      rtems_invalidate_multiple_data_cache_lines(
    445         RxBd[SCC3_MINOR]->buffer,
     446        (const void *) RxBd[SCC3_MINOR]->buffer,
    446447        RxBd[SCC3_MINOR]->length );
    447448      nb_overflow = rtems_termios_enqueue_raw_characters(
     
    485486    if ((RxBd[SCC4_MINOR]->status & M8xx_BD_EMPTY) == 0) {
    486487      rtems_invalidate_multiple_data_cache_lines(
    487         RxBd[SCC4_MINOR]->buffer,
     488        (const void *) RxBd[SCC4_MINOR]->buffer,
    488489        RxBd[SCC4_MINOR]->length );
    489490      nb_overflow = rtems_termios_enqueue_raw_characters(
     
    527528    if ((RxBd[SMC1_MINOR]->status & M8xx_BD_EMPTY) == 0) {
    528529      rtems_invalidate_multiple_data_cache_lines(
    529         RxBd[SMC1_MINOR]->buffer,
     530        (const void *) RxBd[SMC1_MINOR]->buffer,
    530531        RxBd[SMC1_MINOR]->length );
    531532      nb_overflow = rtems_termios_enqueue_raw_characters(
     
    569570    if ((RxBd[SMC2_MINOR]->status & M8xx_BD_EMPTY) == 0) {
    570571      rtems_invalidate_multiple_data_cache_lines(
    571         RxBd[SMC2_MINOR]->buffer,
     572        (const void *) RxBd[SMC2_MINOR]->buffer,
    572573        RxBd[SMC2_MINOR]->length );
    573574      nb_overflow = rtems_termios_enqueue_raw_characters(
     
    984985    return -1;
    985986  }
    986   _CPU_Data_Cache_Block_Invalidate( RxBd[minor]->buffer );
     987  rtems_invalidate_multiple_data_cache_lines(
     988    (const void *) RxBd[minor]->buffer,
     989    RxBd[minor]->length
     990  );
    987991  c = ((char *)RxBd[minor]->buffer)[0];
    988992  RxBd[minor]->status = M8xx_BD_EMPTY | M8xx_BD_WRAP;
     
    10201024      continue;
    10211025    txBuf[minor] = *buf++;
    1022     _CPU_Data_Cache_Block_Flush( &txBuf[minor] );
     1026    rtems_flush_multiple_data_cache_lines(
     1027       (const void *) TxBd[minor]->buffer,
     1028       TxBd[minor]->length
     1029    );
    10231030    TxBd[minor]->buffer = &txBuf[minor];
    10241031    TxBd[minor]->length = 1;
  • c/src/lib/libcpu/powerpc/mpc8xx/cpm/cp.c

    re4d7169f r61bd0301  
    1313 */
    1414
    15 #include <bsp.h>
    16 #include <rtems/rtems/intr.h>
    17 #include <rtems/error.h>
     15#include <rtems.h>
     16#include <mpc8xx.h>
     17#include <mpc8xx/cpm.h>
    1818
    1919/*
    2020 * Send a command to the CPM RISC processer
    2121 */
     22
    2223void m8xx_cp_execute_cmd( unsigned16 command )
    2324{
  • c/src/lib/libcpu/powerpc/mpc8xx/cpm/dpram.c

    re4d7169f r61bd0301  
    1414 */
    1515
    16 #include <bsp.h>
    17 #include <rtems/rtems/intr.h>
    18 #include <rtems/error.h>
     16#include <rtems.h>
     17#include <mpc8xx.h>
     18#include <mpc8xx/cpm.h>
    1919
    2020/*
  • c/src/lib/libcpu/powerpc/mpc8xx/include/cpm.h

    re4d7169f r61bd0301  
    1919#endif
    2020
    21 #include <bsp.h>
    22 
    2321/* Functions */
    2422
  • c/src/lib/libcpu/powerpc/mpc8xx/include/mmu.h

    re4d7169f r61bd0301  
    1818extern "C" {
    1919#endif
    20 
    21 #include <bsp.h>
    2220
    2321/*
  • c/src/lib/libcpu/powerpc/mpc8xx/mmu/mmu.c

    re4d7169f r61bd0301  
    1212 */
    1313
    14 #include <bsp.h>
     14#include <rtems.h>
     15#include <mpc8xx.h>
    1516#include <mpc8xx/mmu.h>
    1617
  • c/src/lib/libcpu/powerpc/mpc8xx/timer/timer.c

    re4d7169f r61bd0301  
    4444 */
    4545
    46 #include <bsp.h>
    4746#include <rtems.h>
    4847#include <mpc8xx.h>
    49 
    50 extern rtems_cpu_table           Cpu_table;             /* owned by BSP */
    5148
    5249static volatile rtems_unsigned32 Timer_starting;
     
    8784
    8885  else {
    89     if ( total < Cpu_table.timer_least_valid ) {
     86    if ( total < rtems_cpu_configuration_get_timer_least_valid() ) {
    9087      return 0;            /* below timer resolution */
    9188    }
    92     return (total - Cpu_table.timer_average_overhead);
     89    return (total - rtems_cpu_configuration_get_timer_average_overhead());
    9390  }
    9491}
  • cpukit/score/cpu/powerpc/rtems/score/ppc.h

    re4d7169f r61bd0301  
    384384#error "Undefined power of 2 for PPC_CACHE_ALIGNMENT"
    385385#endif
    386 
    387 #ifndef ASM
    388 
    389 /*
    390  * CACHE MANAGER: The following functions are CPU-specific.
    391  * They provide the basic implementation for the rtems_* cache
    392  * management routines. If a given function has no meaning for the CPU,
    393  * it does nothing by default.
    394  *
    395  * FIXME: Some functions simply have not been implemented.
    396  */
    397  
    398 #if defined(ppc603)                     /* And possibly others */
    399 #define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
    400 #define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
    401 
    402 /* Helpful macros */
    403 #define PPC_Get_HID0( _value ) \
    404   do { \
    405       _value = 0;        /* to avoid warnings */ \
    406       asm volatile( \
    407           "mfspr %0, 0x3f0;"     /* get HID0 */ \
    408           "isync" \
    409           : "=r" (_value) \
    410           : "0" (_value) \
    411       ); \
    412   } while (0)
    413 
    414 #define PPC_Set_HID0( _value ) \
    415   do { \
    416       asm volatile( \
    417           "isync;" \
    418           "mtspr 0x3f0, %0;"     /* load HID0 */ \
    419           "isync" \
    420           : "=r" (_value) \
    421           : "0" (_value) \
    422       ); \
    423   } while (0)
    424 
    425 static inline void _CPU_enable_data_cache (
    426         void )
    427 {
    428   unsigned32 value;
    429   PPC_Get_HID0( value );
    430   value |= 0x00004000;        /* set DCE bit */
    431   PPC_Set_HID0( value );
    432 }
    433 
    434 static inline void _CPU_disable_data_cache (
    435         void )
    436 {
    437   unsigned32 value;
    438   PPC_Get_HID0( value );
    439   value &= 0xFFFFBFFF;        /* clear DCE bit */
    440   PPC_Set_HID0( value );
    441 }
    442 
    443 static inline void _CPU_enable_inst_cache (
    444         void )
    445 {
    446   unsigned32 value;
    447   PPC_Get_HID0( value );
    448   value |= 0x00008000;       /* Set ICE bit */
    449   PPC_Set_HID0( value );
    450 }
    451 
    452 static inline void _CPU_disable_inst_cache (
    453         void )
    454 {
    455   unsigned32 value;
    456   PPC_Get_HID0( value );
    457   value &= 0xFFFF7FFF;       /* Clear ICE bit */
    458   PPC_Set_HID0( value );
    459 }
    460 
    461 #elif ( defined(mpc860) || defined(mpc821) )
    462 
    463 #define _CPU_DATA_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
    464 #define _CPU_INST_CACHE_ALIGNMENT PPC_CACHE_ALIGNMENT
    465 
    466 #define mtspr(_spr,_reg)   __asm__ volatile ( "mtspr %0, %1\n" : : "i" ((_spr)), "r" ((_reg)) )
    467 #define isync   __asm__ volatile ("isync\n"::)
    468 
    469 static inline void _CPU_flush_1_data_cache_line(
    470         const void * _address )
    471 {
    472   register const void *__address = _address;
    473   asm volatile ( "dcbf 0,%0" :: "r" (__address) );
    474 }
    475 
    476 static inline void _CPU_invalidate_1_data_cache_line(
    477         const void * _address )
    478 {
    479   register const void *__address = _address;
    480   asm volatile ( "dcbi 0,%0" :: "r" (__address) );
    481 }
    482 
    483 static inline void _CPU_flush_entire_data_cache ( void ) {}
    484 static inline void _CPU_invalidate_entire_data_cache ( void ) {}
    485 static inline void _CPU_freeze_data_cache ( void ) {}
    486 static inline void _CPU_unfreeze_data_cache ( void ) {}
    487 
    488 static inline void _CPU_enable_data_cache (
    489         void )
    490 {
    491   unsigned32 r1;
    492   r1 = (0x2<<24);
    493   mtspr( 568, r1 );
    494   isync;
    495 }
    496 
    497 static inline void _CPU_disable_data_cache (
    498         void )
    499 {
    500   unsigned32 r1;
    501   r1 = (0x4<<24);
    502   mtspr( 568, r1 );
    503   isync;
    504 }
    505 
    506 static inline void _CPU_invalidate_1_inst_cache_line(
    507         const void * _address )
    508 {
    509   register const void *__address = _address;
    510   asm volatile ( "icbi 0,%0" :: "r" (__address) );
    511 }
    512 
    513 static inline void _CPU_invalidate_entire_inst_cache ( void ) {}
    514 static inline void _CPU_freeze_inst_cache ( void ) {}
    515 static inline void _CPU_unfreeze_inst_cache ( void ) {}
    516 
    517 static inline void _CPU_enable_inst_cache (
    518         void )
    519 {
    520   unsigned32 r1;
    521   r1 = (0x2<<24);
    522   mtspr( 560, r1 );
    523   isync;
    524 }
    525 
    526 static inline void _CPU_disable_inst_cache (
    527         void )
    528 {
    529   unsigned32 r1;
    530   r1 = (0x4<<24);
    531   mtspr( 560, r1 );
    532   isync;
    533 }
    534 #endif
    535 
    536 #endif  /* !ASM */
    537386
    538387/*
Note: See TracChangeset for help on using the changeset viewer.