Changeset cf1f72e in rtems for cpukit


Timestamp:
Jun 13, 2000, 9:53:38 PM
Author:
Joel Sherrill <joel.sherrill@…>
Branches:
4.10, 4.11, 4.8, 4.9, 5, master
Children:
7d52750
Parents:
f0b11d63
Message:

Moved i386 and m68k cache management code to libcpu. Everything
is now an implementation of the prototypes in rtems/rtems/cache.h.
The libcpu/i386/wrapup directory is no longer needed.
The PowerPC still needs the same treatment.
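
The public cache manager prototypes live in <rtems/rtems/cache.h>; after this change each port's libcpu code supplies the _CPU_* primitives (as in the headers below) and implements those prototypes on top of them. A minimal sketch of that layering, for orientation only; apart from rtems_flush_entire_data_cache() and the _CPU_* names visible in this diff, the details are illustrative, not the actual libcpu sources:

    #include <rtems/rtems/cache.h>

    /*
     * Illustrative only -- not the actual libcpu implementation.  The public
     * entry point defers to the port-specific primitive when the port defines
     * a data cache line size.
     */
    void rtems_flush_entire_data_cache( void )
    {
    #if defined(_CPU_DATA_CACHE_ALIGNMENT)
      _CPU_flush_entire_data_cache( 0 );
    #endif
    }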

Location:
cpukit
Files:
1 deleted
5 edited

  • cpukit/libcsupport/src/malloc.c

    rf0b11d63 → rcf1f72e

     }
     
    -
    -/*
    - *  rtems_cache_aligned_malloc
    - *
    - *  DESCRIPTION:
    - *
    - *  This function is used to allocate storage that spans an
    - *  integral number of cache blocks.
    - */
    -RTEMS_INLINE_ROUTINE void * rtems_cache_aligned_malloc (
    -  size_t nbytes
    -)
    -{
    -  /*
    -   * Arrange to have the user storage start on the first cache
    -   * block beyond the header.
    -   */
    -  return (void *) ((((unsigned long) malloc( nbytes + _CPU_DATA_CACHE_ALIGNMENT - 1 ))
    -                     + _CPU_DATA_CACHE_ALIGNMENT - 1 ) & (~(_CPU_DATA_CACHE_ALIGNMENT - 1)) );
    -}
    -
    -#endif
    -
    +#endif
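
The removed rtems_cache_aligned_malloc() over-allocates by one cache line and then rounds the returned address up to the next _CPU_DATA_CACHE_ALIGNMENT boundary. A small worked sketch of that round-up arithmetic (round_up() is an illustrative helper, not part of the changeset; the alignment must be a power of two):

    #include <stdint.h>

    /* Round addr up to the next multiple of align (a power of two). */
    static uintptr_t round_up( uintptr_t addr, uintptr_t align )
    {
      return ( addr + align - 1 ) & ~( align - 1 );
    }

    /* With a 16-byte line: round_up( 0x1001, 16 ) == 0x1010 and
       round_up( 0x1010, 16 ) == 0x1010. */
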
  • cpukit/rtems/include/rtems.h

    rf0b11d63 → rcf1f72e

     #include <rtems/rtems/tasks.h>
     #include <rtems/rtems/intr.h>
    +#include <rtems/rtems/cache.h>
     #include <rtems/rtems/clock.h>
     #include <rtems/extension.h>
  • cpukit/rtems/src/Makefile.am

    rf0b11d63 → rcf1f72e

         dpmemident.c dpmeminternal2external.c
     
    -STD_C_FILES = attr.c cache.c $(TASK_C_FILES) $(RATEMON_C_FILES) $(INTR_C_FILES) \
    +STD_C_FILES = attr.c $(TASK_C_FILES) $(RATEMON_C_FILES) $(INTR_C_FILES) \
         $(CLOCK_C_FILES) $(TIMER_C_FILES) $(SEMAPHORE_C_FILES) \
         $(MESSAGE_QUEUE_C_FILES) $(EVENT_C_FILES) $(SIGNAL_C_FILES) \
  • cpukit/score/cpu/i386/rtems/score/i386.h

    rf0b11d63 → rcf1f72e

     }
     
    -/*
    - * Disable the entire cache
    - */
    -void _CPU_disable_cache() {
    -  cr0 regCr0;
    -
    -  regCr0.i = i386_get_cr0();
    -  regCr0.cr0.page_level_cache_disable = 1;
    -  regCr0.cr0.no_write_through = 1;
    -  i386_set_cr0( regCr0.i );
    -  rtems_flush_entire_data_cache();
    -}
    -
    -/*
    - * Enable the entire cache
    - */
    -static inline void _CPU_enable_cache() {
    -  cr0 regCr0;
    -
    -  regCr0.i = i386_get_cr0();
    -  regCr0.cr0.page_level_cache_disable = 0;
    -  regCr0.cr0.no_write_through = 0;
    -  i386_set_cr0( regCr0.i );
    -  /*rtems_flush_entire_data_cache();*/
    -}
    -
    -/*
    - * CACHE MANAGER: The following functions are CPU-specific.
    - * They provide the basic implementation for the rtems_* cache
    - * management routines. If a given function has no meaning for the CPU,
    - * it does nothing by default.
    - *
    - * FIXME: Definitions for I386_CACHE_ALIGNMENT are missing above for
    - *        each CPU. The routines below should be implemented per CPU,
    - *        to accommodate the capabilities of each.
    - */
    -
    -/* FIXME: I don't belong here. */
    -#define I386_CACHE_ALIGNMENT 16
    -
    -#if defined(I386_CACHE_ALIGNMENT)
    -#define _CPU_DATA_CACHE_ALIGNMENT I386_CACHE_ALIGNMENT
    -#define _CPU_INST_CACHE_ALIGNMENT I386_CACHE_ALIGNMENT
    -
    -static inline void _CPU_flush_1_data_cache_line (const void * d_addr) {}
    -static inline void _CPU_invalidate_1_data_cache_line (const void * d_addr) {}
    -static inline void _CPU_freeze_data_cache (void) {}
    -static inline void _CPU_unfreeze_data_cache (void) {}
    -static inline void _CPU_invalidate_1_inst_cache_line (const void * d_addr) {}
    -static inline void _CPU_freeze_inst_cache (void) {}
    -static inline void _CPU_unfreeze_inst_cache (void) {}
    -
    -static inline void _CPU_flush_entire_data_cache (
    -  const void * d_addr )
    -{
    -  asm ("wbinvd");
    -}
    -static inline void _CPU_invalidate_entire_data_cache (
    -  const void * d_addr )
    -{
    -  asm ("invd");
    -}
    -
    -static inline void _CPU_enable_data_cache ( void )
    -{
    -  _CPU_enable_cache();
    -}
    -
    -static inline void _CPU_disable_data_cache ( void )
    -{
    -  _CPU_disable_cache();
    -}
    -
    -static inline void _CPU_invalidate_entire_inst_cache (
    -  const void * i_addr )
    -{
    -  asm ("invd");
    -}
    -
    -static inline void _CPU_enable_inst_cache ( void )
    -{
    -  _CPU_enable_cache();
    -}
    -
    -static inline void _CPU_disable_inst_cache ( void )
    -{
    -  _CPU_disable_cache();
    -}
    -#endif
    -
    -
     /* routines */
     
  • cpukit/score/cpu/m68k/rtems/score/m68k.h

    rf0b11d63 → rcf1f72e

     
     
    -/*
    - *  Since the cacr is common to all mc680x0, provide macros
    - *  for masking values in that register.
    - */
    -
    -/*
    - *  Used to clear bits in the cacr.
    - */
    -#define _CPU_CACR_AND(mask)                                        \
    -  {                                                                \
    -  register unsigned long _value = mask;                            \
    -  register unsigned long _ctl = 0;                                 \
    -  asm volatile ( "movec %%cacr, %0;           /* read the cacr */  \
    -                  andl %2, %0;                /* and with _val */  \
    -                  movec %1, %%cacr"           /* write the cacr */ \
    -   : "=d" (_ctl) : "0" (_ctl), "d" (_value) : "%%cc" );            \
    -  }
    -
    -/*
    - *  Used to set bits in the cacr.
    - */
    -#define _CPU_CACR_OR(mask)                                         \
    -  {                                                                \
    -  register unsigned long _value = mask;                            \
    -  register unsigned long _ctl = 0;                                 \
    -  asm volatile ( "movec %%cacr, %0;           /* read the cacr */  \
    -                  orl %2, %0;                 /* or with _val */   \
    -                  movec %1, %%cacr"           /* write the cacr */ \
    -   : "=d" (_ctl) : "0" (_ctl), "d" (_value) : "%%cc" );            \
    -  }
    -
    -/*
    - * CACHE MANAGER: The following functions are CPU-specific.
    - * They provide the basic implementation for the rtems_* cache
    - * management routines. If a given function has no meaning for the CPU,
    - * it does nothing by default.
    - */
    -#if ( defined(__mc68020__) || defined(__mc68030__) )
    -#define M68K_INST_CACHE_ALIGNMENT 16
    -
    -#if defined(__mc68030__)
    -#define M68K_DATA_CACHE_ALIGNMENT 16
    -
    -/* Only the mc68030 has a data cache; it is writethrough only. */
    -
    -static inline void _CPU_flush_1_data_cache_line ( const void * d_addr ) {}
    -static inline void _CPU_flush_entire_data_cache ( const void * d_addr ) {}
    -
    -static inline void _CPU_invalidate_1_data_cache_line (
    -  const void * d_addr )
    -{
    -  void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
    -  asm volatile ( "movec %0, %%caar" :: "a" (p_address) );      /* write caar */
    -  _CPU_CACR_OR( 0x00000400 );
    -}
    -
    -static inline void _CPU_invalidate_entire_data_cache ( void )
    -{
    -  _CPU_CACR_OR( 0x00000800 );
    -}
    -
    -static inline void _CPU_freeze_data_cache ( void )
    -{
    -  _CPU_CACR_OR( 0x00000200 );
    -}
    -
    -static inline void _CPU_unfreeze_data_cache ( void )
    -{
    -  _CPU_CACR_AND( 0xFFFFFDFF );
    -}
    -
    -static inline void _CPU_enable_data_cache ( void )
    -{
    -  _CPU_CACR_OR( 0x00000100 );
    -}
    -
    -static inline void _CPU_disable_data_cache ( void )
    -{
    -  _CPU_CACR_AND( 0xFFFFFEFF );
    -}
    -#endif
    -
    -/* Both the 68020 and 68030 have instruction caches */
    -
    -static inline void _CPU_invalidate_1_inst_cache_line (
    -  const void * d_addr )
    -{
    -  void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
    -  asm volatile ( "movec %0, %%caar" :: "a" (p_address) );      /* write caar */
    -  _CPU_CACR_OR( 0x00000004 );
    -}
    -
    -static inline void _CPU_invalidate_entire_inst_cache ( void )
    -{
    -  _CPU_CACR_OR( 0x00000008 );
    -}
    -
    -static inline void _CPU_freeze_inst_cache ( void )
    -{
    -  _CPU_CACR_OR( 0x00000002 );
    -}
    -
    -static inline void _CPU_unfreeze_inst_cache ( void )
    -{
    -  _CPU_CACR_AND( 0xFFFFFFFD );
    -}
    -
    -static inline void _CPU_enable_inst_cache ( void )
    -{
    -  _CPU_CACR_OR( 0x00000001 );
    -}
    -
    -static inline void _CPU_disable_inst_cache ( void )
    -{
    -  _CPU_CACR_AND( 0xFFFFFFFE );
    -}
    -
    -#elif ( defined(__mc68040__) || defined (__mc68060__) )
    -
    -#define M68K_INST_CACHE_ALIGNMENT 16
    -#define M68K_DATA_CACHE_ALIGNMENT 16
    -
    -/* Cannot be frozen */
    -static inline void _CPU_freeze_data_cache ( void ) {}
    -static inline void _CPU_unfreeze_data_cache ( void ) {}
    -static inline void _CPU_freeze_inst_cache ( void ) {}
    -static inline void _CPU_unfreeze_inst_cache ( void ) {}
    -
    -static inline void _CPU_flush_1_data_cache_line (
    -  const void * d_addr )
    -{
    -  void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
    -  asm volatile ( "cpushl %%dc,(%0)" :: "a" (p_address) );
    -}
    -
    -static inline void _CPU_invalidate_1_data_cache_line (
    -  const void * d_addr )
    -{
    -  void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
    -  asm volatile ( "cinvl %%dc,(%0)" :: "a" (p_address) );
    -}
    -
    -static inline void _CPU_flush_entire_data_cache ( void )
    -{
    -  asm volatile ( "cpusha %%dc" :: );
    -}
    -
    -static inline void _CPU_invalidate_entire_data_cache ( void )
    -{
    -  asm volatile ( "cinva %%dc" :: );
    -}
    -
    -static inline void _CPU_enable_data_cache ( void )
    -{
    -  _CPU_CACR_OR( 0x80000000 );
    -}
    -
    -static inline void _CPU_disable_data_cache ( void )
    -{
    -  _CPU_CACR_AND( 0x7FFFFFFF );
    -}
    -
    -static inline void _CPU_invalidate_1_inst_cache_line (
    -  const void * i_addr )
    -{
    -  void * p_address = (void *) _CPU_virtual_to_physical( i_addr );
    -  asm volatile ( "cinvl %%ic,(%0)" :: "a" (p_address) );
    -}
    -
    -static inline void _CPU_invalidate_entire_inst_cache ( void )
    -{
    -  asm volatile ( "cinva %%ic" :: );
    -}
    -
    -static inline void _CPU_enable_inst_cache ( void )
    -{
    -  _CPU_CACR_OR( 0x00008000 );
    -}
    -
    -static inline void _CPU_disable_inst_cache ( void )
    -{
    -  _CPU_CACR_AND( 0xFFFF7FFF );
    -}
    -#endif
    -
    -#if defined(M68K_DATA_CACHE_ALIGNMENT)
    -#define _CPU_DATA_CACHE_ALIGNMENT M68K_DATA_CACHE_ALIGNMENT
    -#endif
    -
    -#if defined(M68K_INST_CACHE_ALIGNMENT)
    -#define _CPU_INST_CACHE_ALIGNMENT M68K_INST_CACHE_ALIGNMENT
    -#endif
    -
     #endif  /* !ASM */
     
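
The magic constants passed to _CPU_CACR_OR()/_CPU_CACR_AND() above select individual bits of the 680x0 cache control register. As a reading aid only (these symbolic names are not part of the changeset; the bit meanings follow the MC68020/MC68030 CACR layout, with the 68040/68060 enable bits at positions 15 and 31 as used in the second branch above):

    /* Illustrative names for the CACR bits used in the removed code. */
    #define M68K_CACR_EI   0x00000001  /* enable instruction cache          */
    #define M68K_CACR_FI   0x00000002  /* freeze instruction cache          */
    #define M68K_CACR_CEI  0x00000004  /* clear entry in instruction cache  */
    #define M68K_CACR_CI   0x00000008  /* clear instruction cache           */
    #define M68K_CACR_ED   0x00000100  /* enable data cache (68030)         */
    #define M68K_CACR_FD   0x00000200  /* freeze data cache (68030)         */
    #define M68K_CACR_CED  0x00000400  /* clear entry in data cache (68030) */
    #define M68K_CACR_CD   0x00000800  /* clear data cache (68030)          */

    /* e.g. _CPU_enable_inst_cache() above expands to _CPU_CACR_OR( M68K_CACR_EI ). */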