Changeset cf1f72e in rtems


Ignore:
Timestamp:
06/13/00 21:53:38 (23 years ago)
Author:
Joel Sherrill <joel.sherrill@…>
Branches:
4.10, 4.11, 4.8, 4.9, 5, master
Children:
7d52750
Parents:
f0b11d63
Message:

Moved i386 and m68k cache management code to libcpu. Everything
now is an implementation of the prototypes in rtems/rtems/cache.h.
The libcpu/i386/wrapup directory is no longer needed.
The PowerPC needs this done to it.

Files:
8 added
2 deleted
19 edited
2 moved

Legend:

Unmodified
Added
Removed
  • c/src/exec/libcsupport/src/malloc.c

    rf0b11d63 rcf1f72e  
    420420}
    421421
    422 
    423 /*
    424  *  rtems_cache_aligned_malloc
    425  *
    426  *  DESCRIPTION:
    427  *
    428  *  This function is used to allocate storage that spans an
    429  *  integral number of cache blocks.
    430  */
    431 RTEMS_INLINE_ROUTINE void * rtems_cache_aligned_malloc (
    432   size_t nbytes
    433 )
    434 {
    435   /*
    436    * Arrange to have the user storage start on the first cache
    437    * block beyond the header.
    438    */
    439   return (void *) ((((unsigned long) malloc( nbytes + _CPU_DATA_CACHE_ALIGNMENT - 1 ))
    440                                                                         + _CPU_DATA_CACHE_ALIGNMENT - 1 ) &(~(_CPU_DATA_CACHE_ALIGNMENT - 1)) );
    441 }
    442 
    443 #endif
    444 
     422#endif
  • c/src/exec/rtems/include/rtems.h

    rf0b11d63 rcf1f72e  
    4343#include <rtems/rtems/tasks.h>
    4444#include <rtems/rtems/intr.h>
     45#include <rtems/rtems/cache.h>
    4546#include <rtems/rtems/clock.h>
    4647#include <rtems/extension.h>
  • c/src/exec/rtems/src/Makefile.am

    rf0b11d63 rcf1f72e  
    5151    dpmemident.c dpmeminternal2external.c
    5252
    53 STD_C_FILES = attr.c cache.c $(TASK_C_FILES) $(RATEMON_C_FILES) $(INTR_C_FILES) \
     53STD_C_FILES = attr.c $(TASK_C_FILES) $(RATEMON_C_FILES) $(INTR_C_FILES) \
    5454    $(CLOCK_C_FILES) $(TIMER_C_FILES) $(SEMAPHORE_C_FILES) \
    5555    $(MESSAGE_QUEUE_C_FILES) $(EVENT_C_FILES) $(SIGNAL_C_FILES) \
  • c/src/exec/score/cpu/i386/rtems/score/i386.h

    rf0b11d63 rcf1f72e  
    186186}
    187187
    188 /*
    189  * Disable the entire cache
    190  */
    191 void _CPU_disable_cache() {
    192   cr0 regCr0;
    193 
    194   regCr0.i = i386_get_cr0();
    195   regCr0.cr0.page_level_cache_disable = 1;
    196   regCr0.cr0.no_write_through = 1;
    197   i386_set_cr0( regCr0.i );
    198   rtems_flush_entire_data_cache();
    199 }
    200 
    201 /*
    202  * Enable the entire cache
    203  */
    204 static inline void _CPU_enable_cache() {
    205   cr0 regCr0;
    206 
    207   regCr0.i = i386_get_cr0();
    208   regCr0.cr0.page_level_cache_disable = 0;
    209   regCr0.cr0.no_write_through = 0;
    210   i386_set_cr0( regCr0.i );
    211   /*rtems_flush_entire_data_cache();*/
    212 }
    213 
    214 /*
    215  * CACHE MANAGER: The following functions are CPU-specific.
    216  * They provide the basic implementation for the rtems_* cache
    217  * management routines. If a given function has no meaning for the CPU,
    218  * it does nothing by default.
    219  *
    220  * FIXME: Definitions for I386_CACHE_ALIGNMENT are missing above for
    221  *        each CPU. The routines below should be implemented per CPU,
    222  *        to accomodate the capabilities of each.
    223  */
    224 
    225 /* FIXME: I don't belong here. */
    226 #define I386_CACHE_ALIGNMENT 16
    227 
    228 #if defined(I386_CACHE_ALIGNMENT)
    229 #define _CPU_DATA_CACHE_ALIGNMENT I386_CACHE_ALIGNMENT
    230 #define _CPU_INST_CACHE_ALIGNEMNT I386_CACHE_ALIGNMENT
    231 
    232 static inline void _CPU_flush_1_data_cache_line (const void * d_addr) {}
    233 static inline void _CPU_invalidate_1_data_cache_line (const void * d_addr) {}
    234 static inline void _CPU_freeze_data_cache (void) {}
    235 static inline void _CPU_unfreeze_data_cache (void) {}
    236 static inline void _CPU_invalidate_1_inst_cache_line const void * d_addr() {}
    237 static inline void _CPU_freeze_inst_cache (void) {}
    238 static inline void _CPU_unfreeze_inst_cache (void) {}
    239 
    240 static inline void _CPU_flush_entire_data_cache (
    241   const void * d_addr )
    242 {
    243   asm ("wbinvd");
    244 }
    245 static inline void _CPU_invalidate_entire_data_cache (
    246   const void * d_addr )
    247 {
    248   asm ("invd");
    249 }
    250 
    251 static inline void _CPU_enable_data_cache (
    252         void )
    253 {
    254         _CPU_enable_cache();
    255 }
    256 
    257 static inline void _CPU_disable_data_cache (
    258         void )
    259 {
    260         _CPU_disable_cache();
    261 }
    262 
    263 static inline void _CPU_invalidate_entire_inst_cache (
    264   const void * i_addr )
    265 {
    266   asm ("invd");
    267 }
    268 
    269 static inline void _CPU_enable_inst_cache (
    270         void )
    271 {
    272         _CPU_enable_cache();
    273 }
    274 
    275 static inline void _CPU_disable_inst_cache (
    276         void )
    277 {
    278         _CPU_disable_cache();
    279 }
    280 #endif
    281 
    282 
    283188/* routines */
    284189
  • c/src/exec/score/cpu/m68k/rtems/score/m68k.h

    rf0b11d63 rcf1f72e  
    374374
    375375
    376 /* 
    377  *  Since the cacr is common to all mc680x0, provide macros
    378  *  for masking values in that register.
    379  */
    380 
    381 /*
    382  *  Used to clear bits in the cacr.
    383  */
    384 #define _CPU_CACR_AND(mask)                                        \
    385   {                                                                \
    386   register unsigned long _value = mask;                            \
    387   register unsigned long _ctl = 0;                                 \
    388   asm volatile ( "movec %%cacr, %0;           /* read the cacr */  \
    389                   andl %2, %0;                /* and with _val */  \
    390                   movec %1, %%cacr"           /* write the cacr */ \
    391    : "=d" (_ctl) : "0" (_ctl), "d" (_value) : "%%cc" );            \
    392   }
    393 
    394 
    395 /* 
    396  *  Used to set bits in the cacr.
    397  */
    398 #define _CPU_CACR_OR(mask)                                         \
    399         {                                                                \
    400   register unsigned long _value = mask;                            \
    401   register unsigned long _ctl = 0;                                 \
    402   asm volatile ( "movec %%cacr, %0;           /* read the cacr */  \
    403                   orl %2, %0;                 /* or with _val */   \
    404                   movec %1, %%cacr"           /* write the cacr */ \
    405    : "=d" (_ctl) : "0" (_ctl), "d" (_value) : "%%cc" );            \
    406   }
    407 
    408    
    409 /*
    410  * CACHE MANAGER: The following functions are CPU-specific.
    411  * They provide the basic implementation for the rtems_* cache
    412  * management routines. If a given function has no meaning for the CPU,
    413  * it does nothing by default.
    414  */
    415 #if ( defined(__mc68020__) || defined(__mc68030__) )
    416 #define M68K_INST_CACHE_ALIGNMENT 16
    417 
    418 #if defined(__mc68030__)
    419 #define M68K_DATA_CACHE_ALIGNMENT 16
    420 
    421 /* Only the mc68030 has a data cache; it is writethrough only. */
    422 
    423 static inline void _CPU_flush_1_data_cache_line ( const void * d_addr ) {}
    424 static inline void _CPU_flush_entire_data_cache ( const void * d_addr ) {}
    425 
    426 static inline void _CPU_invalidate_1_data_cache_line (
    427   const void * d_addr )
    428 {
    429   void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
    430   asm volatile ( "movec %0, %%caar" :: "a" (p_address) );      /* write caar */
    431   _CPU_CACR_OR(0x00000400);
    432 }
    433 
    434 static inline void _CPU_invalidate_entire_data_cache (
    435         void )
    436 {
    437   _CPU_CACR_OR( 0x00000800 );
    438 }
    439 
    440 static inline void _CPU_freeze_data_cache (
    441         void )
    442 {
    443   _CPU_CACR_OR( 0x00000200 );
    444 }
    445 
    446 static inline void _CPU_unfreeze_data_cache (
    447         void )
    448 {
    449   _CPU_CACR_AND( 0xFFFFFDFF );
    450 }
    451 
    452 static inline void _CPU_enable_data_cache (     void )
    453 {
    454   _CPU_CACR_OR( 0x00000100 );
    455 }
    456 static inline void _CPU_disable_data_cache (    void )
    457 {
    458   _CPU_CACR_AND( 0xFFFFFEFF );
    459 }
    460 #endif
    461 
    462 
    463 /* Both the 68020 and 68030 have instruction caches */
    464 
    465 static inline void _CPU_invalidate_1_inst_cache_line (
    466   const void * d_addr )
    467 {
    468   void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
    469   asm volatile ( "movec %0, %%caar" :: "a" (p_address) );      /* write caar */
    470   _CPU_CACR_OR( 0x00000004 );
    471 }
    472 
    473 static inline void _CPU_invalidate_entire_inst_cache (
    474         void )
    475 {
    476   _CPU_CACR_OR( 0x00000008 );
    477 }
    478 
    479 static inline void _CPU_freeze_inst_cache (
    480         void )
    481 {
    482   _CPU_CACR_OR( 0x00000002);
    483 }
    484 
    485 static inline void _CPU_unfreeze_inst_cache (
    486         void )
    487 {
    488   _CPU_CACR_AND( 0xFFFFFFFD );
    489 }
    490 
    491 static inline void _CPU_enable_inst_cache (     void )
    492 {
    493   _CPU_CACR_OR( 0x00000001 );
    494 }
    495 
    496 static inline void _CPU_disable_inst_cache (    void )
    497 {
    498   _CPU_CACR_AND( 0xFFFFFFFE );
    499 }
    500 
    501 
    502 #elif ( defined(__mc68040__) || defined (__mc68060__) )
    503 
    504 #define M68K_INST_CACHE_ALIGNMENT 16
    505 #define M68K_DATA_CACHE_ALIGNMENT 16
    506 
    507 /* Cannot be frozen */
    508 static inline void _CPU_freeze_data_cache ( void ) {}
    509 static inline void _CPU_unfreeze_data_cache ( void ) {}
    510 static inline void _CPU_freeze_inst_cache ( void ) {}
    511 static inline void _CPU_unfreeze_inst_cache ( void ) {}
    512 
    513 static inline void _CPU_flush_1_data_cache_line (
    514   const void * d_addr )
    515 {
    516   void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
    517   asm volatile ( "cpushl %%dc,(%0)" :: "a" (p_address) );
    518 }
    519 
    520 static inline void _CPU_invalidate_1_data_cache_line (
    521   const void * d_addr )
    522 {
    523   void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
    524   asm volatile ( "cinvl %%dc,(%0)" :: "a" (p_address) );
    525 }
    526 
    527 static inline void _CPU_flush_entire_data_cache (
    528         void )
    529 {
    530         asm volatile ( "cpusha %%dc" :: );
    531 }
    532 
    533 static inline void _CPU_invalidate_entire_data_cache (
    534         void )
    535 {
    536         asm volatile ( "cinva %%dc" :: );
    537 }
    538 
    539 static inline void _CPU_enable_data_cache (
    540         void )
    541 {
    542   _CPU_CACR_OR( 0x80000000 );
    543 }
    544 
    545 static inline void _CPU_disable_data_cache (
    546         void )
    547 {
    548   _CPU_CACR_AND( 0x7FFFFFFF );
    549 }
    550 
    551 static inline void _CPU_invalidate_1_inst_cache_line (
    552   const void * i_addr )
    553 {
    554   void * p_address = (void *)  _CPU_virtual_to_physical( i_addr );
    555   asm volatile ( "cinvl %%ic,(%0)" :: "a" (p_address) );
    556 }
    557 
    558 static inline void _CPU_invalidate_entire_inst_cache (
    559         void )
    560 {
    561                 asm volatile ( "cinva %%ic" :: );
    562 }
    563 
    564 static inline void _CPU_enable_inst_cache (
    565         void )
    566 {
    567   _CPU_CACR_OR( 0x00008000 );
    568 }
    569 
    570 static inline void _CPU_disable_inst_cache (
    571         void )
    572 {
    573         _CPU_CACR_AND( 0xFFFF7FFF );
    574 }
    575 #endif
    576 
    577 
    578 #if defined(M68K_DATA_CACHE_ALIGNMENT)
    579 #define _CPU_DATA_CACHE_ALIGNMENT M68K_DATA_CACHE_ALIGNMENT
    580 #endif
    581 
    582 #if defined(M68K_INST_CACHE_ALIGNMENT)
    583 #define _CPU_INST_CACHE_ALIGNMENT M68K_INST_CACHE_ALIGNMENT
    584 #endif
    585 
    586 
    587376#endif  /* !ASM */
    588377
  • c/src/lib/libbsp/i386/i386ex/wrapup/Makefile.am

    rf0b11d63 rcf1f72e  
    1414
    1515# bummer; have to use $foreach since % pattern subst rules only replace 1x
    16 OBJS = $(foreach piece, $(BSP_FILES), $(wildcard ../$(piece)/$(ARCH)/*.o))
     16OBJS = $(foreach piece, $(BSP_FILES), $(wildcard ../$(piece)/$(ARCH)/*.o)) \
     17       $(wildcard ../../../../libcpu/i386/$(ARCH)/*.o)
    1718LIB = $(ARCH)/libbsp.a
    1819
  • c/src/lib/libbsp/i386/pc386/wrapup/Makefile.am

    rf0b11d63 rcf1f72e  
    1414
    1515# bummer; have to use $foreach since % pattern subst rules only replace 1x
    16 OBJS = $(foreach piece, $(BSP_FILES), $(wildcard ../$(piece)/$(ARCH)/*.o))
     16OBJS = $(foreach piece, $(BSP_FILES), $(wildcard ../$(piece)/$(ARCH)/*.o)) \
     17       $(wildcard ../../../../libcpu/i386/$(ARCH)/*.o)
    1718LIB = $(ARCH)/libbsp.a
    1819
  • c/src/lib/libbsp/i386/ts_386ex/wrapup/Makefile.am

    rf0b11d63 rcf1f72e  
    1414
    1515# bummer; have to use $foreach since % pattern subst rules only replace 1x
    16 OBJS = $(foreach piece, $(BSP_FILES), $(wildcard ../$(piece)/$(ARCH)/*.o))
     16OBJS = $(foreach piece, $(BSP_FILES), $(wildcard ../$(piece)/$(ARCH)/*.o)) \
     17       $(wildcard ../../../../libcpu/i386/$(ARCH)/*.o)
    1718LIB = $(ARCH)/libbsp.a
    1819
  • c/src/lib/libbsp/m68k/mvme167/wrapup/Makefile.am

    rf0b11d63 rcf1f72e  
    1414# bummer; have to use $foreach since % pattern subst rules only replace 1x
    1515OBJS = $(foreach piece, $(BSP_PIECES), $(wildcard ../$(piece)/$(ARCH)/*.o)) \
     16    $(wildcard ../../../../libcpu/$(RTEMS_CPU)/shared/*/$(ARCH)/*.o) \
    1617    $(wildcard ../../../../libcpu/$(RTEMS_CPU)/$(RTEMS_CPU_MODEL)/fpsp/$(ARCH)/fpsp.rel)
    1718
  • c/src/lib/libc/malloc.c

    rf0b11d63 rcf1f72e  
    420420}
    421421
    422 
    423 /*
    424  *  rtems_cache_aligned_malloc
    425  *
    426  *  DESCRIPTION:
    427  *
    428  *  This function is used to allocate storage that spans an
    429  *  integral number of cache blocks.
    430  */
    431 RTEMS_INLINE_ROUTINE void * rtems_cache_aligned_malloc (
    432   size_t nbytes
    433 )
    434 {
    435   /*
    436    * Arrange to have the user storage start on the first cache
    437    * block beyond the header.
    438    */
    439   return (void *) ((((unsigned long) malloc( nbytes + _CPU_DATA_CACHE_ALIGNMENT - 1 ))
    440                                                                         + _CPU_DATA_CACHE_ALIGNMENT - 1 ) &(~(_CPU_DATA_CACHE_ALIGNMENT - 1)) );
    441 }
    442 
    443 #endif
    444 
     422#endif
  • c/src/lib/libcpu/i386/Makefile.am

    rf0b11d63 rcf1f72e  
    66ACLOCAL_AMFLAGS = -I $(RTEMS_TOPdir)/aclocal
    77
    8 LIBNAME = libcpu
    9 LIB = $(ARCH)/$(LIBNAME).a
     8VPATH = @srcdir@:@srcdir@/../shared/src
    109
    11 C_FILES = cpu.c displayCpu.c page.c
     10C_FILES = cache.c cache_aligned_malloc.c cache_manager.c displayCpu.c idt.c page.c
    1211C_O_FILES = $(C_FILES:%.c=$(ARCH)/%.o)
    1312
    14 H_FILES = cpu.h registers.h cpuModel.h
     13H_FILES = cache_.h
     14INSTALLED_H_FILES = cpu.h registers.h cpuModel.h
    1515
    16 S_FILES = cpu_asm.S cpuModel.S
     16S_FILES = cpuModel.S idtr.S
    1717S_O_FILES = $(S_FILES:%.S=$(ARCH)/%.o)
    1818
     
    2222include $(top_srcdir)/../../../../../automake/lib.am
    2323
    24 $(LIB): $(OBJS)
    25         $(make-library)
     24AM_CPPFLAGS += -I$(srcdir)
    2625
    2726$(PROJECT_INCLUDE)/libcpu:
     
    3130        $(INSTALL_DATA) $< $@
    3231
    33 $(PROJECT_RELEASE)/lib/$(LIBNAME)$(LIB_VARIANT).a: $(LIB)
     32$(PROJECT_INCLUDE)/libcpu/cache.h: $(top_srcdir)/../shared/include/cache.h
    3433        $(INSTALL_DATA) $< $@
    3534
    3635PREINSTALL_FILES += $(PROJECT_INCLUDE)/libcpu \
    37     $(H_FILES:%=$(PROJECT_INCLUDE)/libcpu/%)
     36    $(PROJECT_INCLUDE)/libcpu/cache.h \
     37    $(INSTALLED_H_FILES:%=$(PROJECT_INCLUDE)/libcpu/%)
    3838
    39 TMPINSTALL_FILES += $(PROJECT_RELEASE)/lib/$(LIBNAME)$(LIB_VARIANT).a
     39all-local: $(ARCH) $(PREINSTALL_FILES) $(OBJS)
    4040
    41 all-local: $(ARCH) $(PREINSTALL_FILES) $(OBJS) $(LIB) $(TMPINSTALL_FILES)
    42 
    43 .PRECIOUS: $(LIB)
    44 
    45 EXTRA_DIST = cpu.c cpu.h cpuModel.S cpuModel.h cpu_asm.S displayCpu.c page.c \
    46     registers.h
     41EXTRA_DIST = cache.c cache_.h cpu.h cpuModel.S cpuModel.h \
     42    displayCpu.c idt.c idtr.S page.c registers.h
    4743
    4844include $(top_srcdir)/../../../../../automake/local.am
  • c/src/lib/libcpu/m68k/Makefile.am

    rf0b11d63 rcf1f72e  
    66ACLOCAL_AMFLAGS = -I $(RTEMS_TOPdir)/aclocal
    77
    8 SUBDIRS = m68040
     8if shared
     9SHARED_LIB = shared
     10endif
     11
     12if m68040
     13CPU_SUBDIR = m68040
     14endif
     15
     16SUBDIRS = $(SHARED_LIB) $(CPU_SUBDIR)
    917
    1018include $(top_srcdir)/../../../../../automake/subdirs.am
  • c/src/lib/libcpu/m68k/configure.in

    rf0b11d63 rcf1f72e  
    2727RTEMS_CHECK_BSP_CACHE(RTEMS_BSP)
    2828
     29AM_CONDITIONAL(shared, test "$RTEMS_CPU_MODEL" = "m68020" \
     30|| test "$RTEMS_CPU_MODEL" = "m68020" \
     31|| test "$RTEMS_CPU_MODEL" = "m68030" \
     32|| test "$RTEMS_CPU_MODEL" = "m68lc040" \
     33|| test "$RTEMS_CPU_MODEL" = "m68040" \
     34|| test "$RTEMS_CPU_MODEL" = "m68060" )
     35
    2936AM_CONDITIONAL(m68040, test "$RTEMS_CPU_MODEL" = "m68040")
    3037
     
    3239AC_OUTPUT(
    3340Makefile
     41shared/Makefile
     42shared/cache/Makefile
    3443m68040/Makefile
    3544m68040/fpsp/Makefile)
  • c/src/lib/libcpu/shared/src/cache_manager.c

    rf0b11d63 rcf1f72e  
    1 /*  cache.c
    2  *
     1/*
    32 *  Cache Manager
    43 *
     
    1110 *
    1211 * 
    13  *  The functions in this file define the API to the RTEMS Cache Manager and
     12 *  The functions in this file implement the API to the RTEMS Cache Manager and
    1413 *  are divided into data cache and instruction cache functions. Data cache
    1514 *  functions are only declared if a data cache is supported. Instruction
     
    3130 */
    3231
    33 #include <rtems/system.h>
     32#include <rtems.h>
    3433#include <sys/types.h>
    35 #include <rtems/rtems/cache.h>
    36 
    37 
    38 /*
    39  * THESE FUNCTIONS ONLY EXIST IF WE HAVE A DATA CACHE
    40  */
    41 #if defined(_CPU_DATA_CACHE_ALIGNMENT)
     34#include <libcpu/cache.h>
     35#include "cache_.h"
     36
     37
     38/*
     39 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE A DATA CACHE
     40 */
    4241
    4342/*
     
    4948rtems_flush_multiple_data_cache_lines( const void * d_addr, size_t n_bytes )
    5049{
    51     const void * final_address;
    52    /*
    53     * Set d_addr to the beginning of the cache line; final_address indicates
    54     * the last address_t which needs to be pushed. Increment d_addr and push
    55     * the resulting line until final_address is passed.
    56     */
    57     final_address = (void *)((size_t)d_addr + n_bytes - 1);
    58     d_addr = (void *)((size_t)d_addr & ~(_CPU_DATA_CACHE_ALIGNMENT - 1));
    59     while( d_addr <= final_address )  {
    60         _CPU_flush_1_data_cache_line( d_addr );
    61         d_addr = (void *)((size_t)d_addr + _CPU_DATA_CACHE_ALIGNMENT);
    62     }
     50#if defined(_CPU_DATA_CACHE_ALIGNMENT)
     51  const void * final_address;
     52
     53 /*
     54  * Set d_addr to the beginning of the cache line; final_address indicates
     55  * the last address_t which needs to be pushed. Increment d_addr and push
     56  * the resulting line until final_address is passed.
     57  */
     58
     59  final_address = (void *)((size_t)d_addr + n_bytes - 1);
     60  d_addr = (void *)((size_t)d_addr & ~(_CPU_DATA_CACHE_ALIGNMENT - 1));
     61  while( d_addr <= final_address )  {
     62    _CPU_flush_1_data_cache_line( d_addr );
     63    d_addr = (void *)((size_t)d_addr + _CPU_DATA_CACHE_ALIGNMENT);
     64  }
     65#endif
    6366}
    6467
     
    6972 * perform the invalidations.
    7073 */
     74
    7175void
    7276rtems_invalidate_multiple_data_cache_lines( const void * d_addr, size_t n_bytes )
    7377{
    74     const void * final_address;
    75    /*
    76     * Set d_addr to the beginning of the cache line; final_address indicates
    77     * the last address_t which needs to be invalidated. Increment d_addr and
    78     * invalidate the resulting line until final_address is passed.
    79     */
    80     final_address = (void *)((size_t)d_addr + n_bytes - 1);
    81     d_addr = (void *)((size_t)d_addr & ~(_CPU_DATA_CACHE_ALIGNMENT - 1));
    82     while( final_address > d_addr ) {
    83         _CPU_invalidate_1_data_cache_line( d_addr );
    84         d_addr = (void *)((size_t)d_addr + _CPU_DATA_CACHE_ALIGNMENT);
    85     }
     78#if defined(_CPU_DATA_CACHE_ALIGNMENT)
     79  const void * final_address;
     80
     81 /*
     82  * Set d_addr to the beginning of the cache line; final_address indicates
     83  * the last address_t which needs to be invalidated. Increment d_addr and
     84  * invalidate the resulting line until final_address is passed.
     85  */
     86
     87  final_address = (void *)((size_t)d_addr + n_bytes - 1);
     88  d_addr = (void *)((size_t)d_addr & ~(_CPU_DATA_CACHE_ALIGNMENT - 1));
     89  while( final_address > d_addr ) {
     90    _CPU_invalidate_1_data_cache_line( d_addr );
     91    d_addr = (void *)((size_t)d_addr + _CPU_DATA_CACHE_ALIGNMENT);
     92  }
     93#endif
    8694}
    8795
     
    94102rtems_flush_entire_data_cache( void )
    95103{
     104#if defined(_CPU_DATA_CACHE_ALIGNMENT)
    96105   /*
    97106    * Call the CPU-specific routine
    98107    */
    99108   _CPU_flush_entire_data_cache();
    100      
     109#endif
    101110}
    102111
     
    109118rtems_invalidate_entire_data_cache( void )
    110119{
    111    /*
    112     * Call the CPU-specific routine
    113     */
    114    _CPU_invalidate_entire_data_cache();
     120#if defined(_CPU_DATA_CACHE_ALIGNMENT)
     121 /*
     122  * Call the CPU-specific routine
     123  */
     124
     125 _CPU_invalidate_entire_data_cache();
     126#endif
    115127}
    116128
     
    122134rtems_get_data_cache_line_size( void )
    123135{
    124         return _CPU_DATA_CACHE_ALIGNMENT;
     136#if defined(_CPU_DATA_CACHE_ALIGNMENT)
     137  return _CPU_DATA_CACHE_ALIGNMENT;
     138#else
     139  return 0;
     140#endif
    125141}
    126142
     
    133149rtems_freeze_data_cache( void )
    134150{
    135         _CPU_freeze_data_cache();
     151#if defined(_CPU_DATA_CACHE_ALIGNMENT)
     152  _CPU_freeze_data_cache();
     153#endif
    136154}
    137155
     
    142160void rtems_unfreeze_data_cache( void )
    143161{
    144         _CPU_unfreeze_data_cache();
     162#if defined(_CPU_DATA_CACHE_ALIGNMENT)
     163  _CPU_unfreeze_data_cache();
     164#endif
    145165}
    146166
     
    150170rtems_enable_data_cache( void )
    151171{
    152         _CPU_enable_data_cache();
     172#if defined(_CPU_DATA_CACHE_ALIGNMENT)
     173  _CPU_enable_data_cache();
     174#endif
    153175}
    154176
     
    158180rtems_disable_data_cache( void )
    159181{
    160         _CPU_disable_data_cache();
    161 }
    162 #endif
    163 
    164 
    165 
    166 /*
    167  * THESE FUNCTIONS ONLY EXIST IF WE HAVE AN INSTRUCTION CACHE
    168  */
    169 #if defined(_CPU_INST_CACHE_ALIGNMENT)
     182#if defined(_CPU_DATA_CACHE_ALIGNMENT)
     183  _CPU_disable_data_cache();
     184#endif
     185}
     186
     187
     188
     189/*
     190 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE AN INSTRUCTION CACHE
     191 */
    170192
    171193/*
     
    177199rtems_invalidate_multiple_inst_cache_lines( const void * i_addr, size_t n_bytes )
    178200{
    179     const void * final_address;
    180    /*
    181     * Set i_addr to the beginning of the cache line; final_address indicates
    182     * the last address_t which needs to be invalidated. Increment i_addr and
    183     * invalidate the resulting line until final_address is passed.
    184     */
    185     final_address = (void *)((size_t)i_addr + n_bytes - 1);
    186     i_addr = (void *)((size_t)i_addr & ~(_CPU_INST_CACHE_ALIGNMENT - 1));
    187     while( final_address > i_addr ) {
    188         _CPU_invalidate_1_inst_cache_line( i_addr );
    189         i_addr = (void *)((size_t)i_addr + _CPU_INST_CACHE_ALIGNMENT);
    190     }
     201#if defined(_CPU_INST_CACHE_ALIGNMENT)
     202  const void * final_address;
     203
     204 /*
     205  * Set i_addr to the beginning of the cache line; final_address indicates
     206  * the last address_t which needs to be invalidated. Increment i_addr and
     207  * invalidate the resulting line until final_address is passed.
     208  */
     209
     210  final_address = (void *)((size_t)i_addr + n_bytes - 1);
     211  i_addr = (void *)((size_t)i_addr & ~(_CPU_INST_CACHE_ALIGNMENT - 1));
     212  while( final_address > i_addr ) {
     213    _CPU_invalidate_1_inst_cache_line( i_addr );
     214    i_addr = (void *)((size_t)i_addr + _CPU_INST_CACHE_ALIGNMENT);
     215  }
     216#endif
    191217}
    192218
     
    199225rtems_invalidate_entire_inst_cache( void )
    200226{
    201    /*
    202     * Call the CPU-specific routine
    203     */
    204    _CPU_invalidate_entire_inst_cache();
     227#if defined(_CPU_INST_CACHE_ALIGNMENT)
     228 /*
     229  * Call the CPU-specific routine
     230  */
     231
     232 _CPU_invalidate_entire_inst_cache();
     233#endif
    205234}
    206235
     
    212241rtems_get_inst_cache_line_size( void )
    213242{
    214         return _CPU_INST_CACHE_ALIGNMENT;
     243#if defined(_CPU_INST_CACHE_ALIGNMENT)
     244  return _CPU_INST_CACHE_ALIGNMENT;
     245#else
     246  return 0;
     247#endif
    215248}
    216249
     
    223256rtems_freeze_inst_cache( void )
    224257{
    225         _CPU_freeze_inst_cache();
     258#if defined(_CPU_INST_CACHE_ALIGNMENT)
     259  _CPU_freeze_inst_cache();
     260#endif
    226261}
    227262
     
    232267void rtems_unfreeze_inst_cache( void )
    233268{
    234         _CPU_unfreeze_inst_cache();
     269#if defined(_CPU_INST_CACHE_ALIGNMENT)
     270  _CPU_unfreeze_inst_cache();
     271#endif
    235272}
    236273
     
    240277rtems_enable_inst_cache( void )
    241278{
    242         _CPU_enable_inst_cache();
     279#if defined(_CPU_INST_CACHE_ALIGNMENT)
     280  _CPU_enable_inst_cache();
     281#endif
    243282}
    244283
     
    248287rtems_disable_inst_cache( void )
    249288{
    250         _CPU_disable_inst_cache();
    251 }
    252 #endif
     289#if defined(_CPU_INST_CACHE_ALIGNMENT)
     290  _CPU_disable_inst_cache();
     291#endif
     292}
  • c/src/tests/configure.in

    rf0b11d63 rcf1f72e  
    4545if test "$tests_enabled" = "yes"; then
    4646  # do functionality tests first, then performance tests
    47   cfg_subdirs="libtests sptests"
     47  cfg_subdirs="libtests sptests libffi"
    4848  if test "$HAS_MP" = "yes"; then
    4949    cfg_subdirs="$cfg_subdirs mptests"
     
    6868AC_CONFIG_SUBDIRS(support)
    6969AC_CONFIG_SUBDIRS(samples)
    70 AC_CONFIG_SUBDIRS(libffi)
    7170AC_CONFIG_SUBDIRS($cfg_subdirs)
    7271
  • cpukit/libcsupport/src/malloc.c

    rf0b11d63 rcf1f72e  
    420420}
    421421
    422 
    423 /*
    424  *  rtems_cache_aligned_malloc
    425  *
    426  *  DESCRIPTION:
    427  *
    428  *  This function is used to allocate storage that spans an
    429  *  integral number of cache blocks.
    430  */
    431 RTEMS_INLINE_ROUTINE void * rtems_cache_aligned_malloc (
    432   size_t nbytes
    433 )
    434 {
    435   /*
    436    * Arrange to have the user storage start on the first cache
    437    * block beyond the header.
    438    */
    439   return (void *) ((((unsigned long) malloc( nbytes + _CPU_DATA_CACHE_ALIGNMENT - 1 ))
    440                                                                         + _CPU_DATA_CACHE_ALIGNMENT - 1 ) &(~(_CPU_DATA_CACHE_ALIGNMENT - 1)) );
    441 }
    442 
    443 #endif
    444 
     422#endif
  • cpukit/rtems/include/rtems.h

    rf0b11d63 rcf1f72e  
    4343#include <rtems/rtems/tasks.h>
    4444#include <rtems/rtems/intr.h>
     45#include <rtems/rtems/cache.h>
    4546#include <rtems/rtems/clock.h>
    4647#include <rtems/extension.h>
  • cpukit/rtems/src/Makefile.am

    rf0b11d63 rcf1f72e  
    5151    dpmemident.c dpmeminternal2external.c
    5252
    53 STD_C_FILES = attr.c cache.c $(TASK_C_FILES) $(RATEMON_C_FILES) $(INTR_C_FILES) \
     53STD_C_FILES = attr.c $(TASK_C_FILES) $(RATEMON_C_FILES) $(INTR_C_FILES) \
    5454    $(CLOCK_C_FILES) $(TIMER_C_FILES) $(SEMAPHORE_C_FILES) \
    5555    $(MESSAGE_QUEUE_C_FILES) $(EVENT_C_FILES) $(SIGNAL_C_FILES) \
  • cpukit/score/cpu/i386/rtems/score/i386.h

    rf0b11d63 rcf1f72e  
    186186}
    187187
    188 /*
    189  * Disable the entire cache
    190  */
    191 void _CPU_disable_cache() {
    192   cr0 regCr0;
    193 
    194   regCr0.i = i386_get_cr0();
    195   regCr0.cr0.page_level_cache_disable = 1;
    196   regCr0.cr0.no_write_through = 1;
    197   i386_set_cr0( regCr0.i );
    198   rtems_flush_entire_data_cache();
    199 }
    200 
    201 /*
    202  * Enable the entire cache
    203  */
    204 static inline void _CPU_enable_cache() {
    205   cr0 regCr0;
    206 
    207   regCr0.i = i386_get_cr0();
    208   regCr0.cr0.page_level_cache_disable = 0;
    209   regCr0.cr0.no_write_through = 0;
    210   i386_set_cr0( regCr0.i );
    211   /*rtems_flush_entire_data_cache();*/
    212 }
    213 
    214 /*
    215  * CACHE MANAGER: The following functions are CPU-specific.
    216  * They provide the basic implementation for the rtems_* cache
    217  * management routines. If a given function has no meaning for the CPU,
    218  * it does nothing by default.
    219  *
    220  * FIXME: Definitions for I386_CACHE_ALIGNMENT are missing above for
    221  *        each CPU. The routines below should be implemented per CPU,
    222  *        to accomodate the capabilities of each.
    223  */
    224 
    225 /* FIXME: I don't belong here. */
    226 #define I386_CACHE_ALIGNMENT 16
    227 
    228 #if defined(I386_CACHE_ALIGNMENT)
    229 #define _CPU_DATA_CACHE_ALIGNMENT I386_CACHE_ALIGNMENT
    230 #define _CPU_INST_CACHE_ALIGNEMNT I386_CACHE_ALIGNMENT
    231 
    232 static inline void _CPU_flush_1_data_cache_line (const void * d_addr) {}
    233 static inline void _CPU_invalidate_1_data_cache_line (const void * d_addr) {}
    234 static inline void _CPU_freeze_data_cache (void) {}
    235 static inline void _CPU_unfreeze_data_cache (void) {}
    236 static inline void _CPU_invalidate_1_inst_cache_line const void * d_addr() {}
    237 static inline void _CPU_freeze_inst_cache (void) {}
    238 static inline void _CPU_unfreeze_inst_cache (void) {}
    239 
    240 static inline void _CPU_flush_entire_data_cache (
    241   const void * d_addr )
    242 {
    243   asm ("wbinvd");
    244 }
    245 static inline void _CPU_invalidate_entire_data_cache (
    246   const void * d_addr )
    247 {
    248   asm ("invd");
    249 }
    250 
    251 static inline void _CPU_enable_data_cache (
    252         void )
    253 {
    254         _CPU_enable_cache();
    255 }
    256 
    257 static inline void _CPU_disable_data_cache (
    258         void )
    259 {
    260         _CPU_disable_cache();
    261 }
    262 
    263 static inline void _CPU_invalidate_entire_inst_cache (
    264   const void * i_addr )
    265 {
    266   asm ("invd");
    267 }
    268 
    269 static inline void _CPU_enable_inst_cache (
    270         void )
    271 {
    272         _CPU_enable_cache();
    273 }
    274 
    275 static inline void _CPU_disable_inst_cache (
    276         void )
    277 {
    278         _CPU_disable_cache();
    279 }
    280 #endif
    281 
    282 
    283188/* routines */
    284189
  • cpukit/score/cpu/m68k/rtems/score/m68k.h

    rf0b11d63 rcf1f72e  
    374374
    375375
    376 /* 
    377  *  Since the cacr is common to all mc680x0, provide macros
    378  *  for masking values in that register.
    379  */
    380 
    381 /*
    382  *  Used to clear bits in the cacr.
    383  */
    384 #define _CPU_CACR_AND(mask)                                        \
    385   {                                                                \
    386   register unsigned long _value = mask;                            \
    387   register unsigned long _ctl = 0;                                 \
    388   asm volatile ( "movec %%cacr, %0;           /* read the cacr */  \
    389                   andl %2, %0;                /* and with _val */  \
    390                   movec %1, %%cacr"           /* write the cacr */ \
    391    : "=d" (_ctl) : "0" (_ctl), "d" (_value) : "%%cc" );            \
    392   }
    393 
    394 
    395 /* 
    396  *  Used to set bits in the cacr.
    397  */
    398 #define _CPU_CACR_OR(mask)                                         \
    399         {                                                                \
    400   register unsigned long _value = mask;                            \
    401   register unsigned long _ctl = 0;                                 \
    402   asm volatile ( "movec %%cacr, %0;           /* read the cacr */  \
    403                   orl %2, %0;                 /* or with _val */   \
    404                   movec %1, %%cacr"           /* write the cacr */ \
    405    : "=d" (_ctl) : "0" (_ctl), "d" (_value) : "%%cc" );            \
    406   }
    407 
    408    
    409 /*
    410  * CACHE MANAGER: The following functions are CPU-specific.
    411  * They provide the basic implementation for the rtems_* cache
    412  * management routines. If a given function has no meaning for the CPU,
    413  * it does nothing by default.
    414  */
#if ( defined(__mc68020__) || defined(__mc68030__) )
#define M68K_INST_CACHE_ALIGNMENT 16

#if defined(__mc68030__)
#define M68K_DATA_CACHE_ALIGNMENT 16

/* Only the mc68030 has a data cache; it is writethrough only. */

/*
 * Because the '030 data cache is write-through, a flush never has
 * dirty data to push back to memory, so both flush operations are
 * intentionally empty.
 */
static inline void _CPU_flush_1_data_cache_line ( const void * d_addr ) {}
static inline void _CPU_flush_entire_data_cache ( const void * d_addr ) {}

/*
 * Invalidate one data cache line: load the line's physical address
 * into the CAAR (cache address register), then set the CACR "clear
 * entry in data cache" bit (CED, 0x400).  The translation step matters:
 * the caches are physically indexed, so the virtual address must be
 * mapped first.
 */
static inline void _CPU_invalidate_1_data_cache_line (
  const void * d_addr )
{
  void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
  asm volatile ( "movec %0, %%caar" :: "a" (p_address) );      /* write caar */
  _CPU_CACR_OR(0x00000400);
}

/* CACR bit CD (0x800): clear (invalidate) the entire data cache. */
static inline void _CPU_invalidate_entire_data_cache (
        void )
{
  _CPU_CACR_OR( 0x00000800 );
}

/* CACR bit FD (0x200): freeze the data cache -- no new lines loaded. */
static inline void _CPU_freeze_data_cache (
        void )
{
  _CPU_CACR_OR( 0x00000200 );
}

/* Clear CACR bit FD (0x200) to resume normal data cache line fills. */
static inline void _CPU_unfreeze_data_cache (
        void )
{
  _CPU_CACR_AND( 0xFFFFFDFF );
}

/* CACR bit ED (0x100): enable/disable the data cache. */
static inline void _CPU_enable_data_cache (     void )
{
  _CPU_CACR_OR( 0x00000100 );
}
static inline void _CPU_disable_data_cache (    void )
{
  _CPU_CACR_AND( 0xFFFFFEFF );
}
#endif


/* Both the 68020 and 68030 have instruction caches */

/*
 * Invalidate one instruction cache line: physical address into CAAR,
 * then CACR bit CEI (0x004, "clear entry in instruction cache").
 */
static inline void _CPU_invalidate_1_inst_cache_line (
  const void * d_addr )
{
  void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
  asm volatile ( "movec %0, %%caar" :: "a" (p_address) );      /* write caar */
  _CPU_CACR_OR( 0x00000004 );
}

/* CACR bit CI (0x008): clear the entire instruction cache. */
static inline void _CPU_invalidate_entire_inst_cache (
        void )
{
  _CPU_CACR_OR( 0x00000008 );
}

/* CACR bit FI (0x002): freeze the instruction cache. */
static inline void _CPU_freeze_inst_cache (
        void )
{
  _CPU_CACR_OR( 0x00000002);
}

/* Clear CACR bit FI (0x002) to unfreeze the instruction cache. */
static inline void _CPU_unfreeze_inst_cache (
        void )
{
  _CPU_CACR_AND( 0xFFFFFFFD );
}

/* CACR bit EI (0x001): enable/disable the instruction cache. */
static inline void _CPU_enable_inst_cache (     void )
{
  _CPU_CACR_OR( 0x00000001 );
}

static inline void _CPU_disable_inst_cache (    void )
{
  _CPU_CACR_AND( 0xFFFFFFFE );
}

#elif ( defined(__mc68040__) || defined (__mc68060__) )

#define M68K_INST_CACHE_ALIGNMENT 16
#define M68K_DATA_CACHE_ALIGNMENT 16

/* Cannot be frozen */
static inline void _CPU_freeze_data_cache ( void ) {}
static inline void _CPU_unfreeze_data_cache ( void ) {}
static inline void _CPU_freeze_inst_cache ( void ) {}
static inline void _CPU_unfreeze_inst_cache ( void ) {}

/*
 * CPUSHL pushes (writes back) the matching data cache line to memory
 * and invalidates it.  The caches are physically tagged, hence the
 * virtual-to-physical translation before the operation.
 */
static inline void _CPU_flush_1_data_cache_line (
  const void * d_addr )
{
  void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
  asm volatile ( "cpushl %%dc,(%0)" :: "a" (p_address) );
}

/*
 * CINVL invalidates the matching data cache line WITHOUT writing it
 * back -- any dirty data in the line is discarded.
 */
static inline void _CPU_invalidate_1_data_cache_line (
  const void * d_addr )
{
  void * p_address = (void *) _CPU_virtual_to_physical( d_addr );
  asm volatile ( "cinvl %%dc,(%0)" :: "a" (p_address) );
}

/* CPUSHA: push and invalidate every line of the data cache. */
static inline void _CPU_flush_entire_data_cache (
        void )
{
        asm volatile ( "cpusha %%dc" :: );
}

/* CINVA: invalidate the whole data cache; dirty lines are lost. */
static inline void _CPU_invalidate_entire_data_cache (
        void )
{
        asm volatile ( "cinva %%dc" :: );
}

/* CACR bit 31 (0x80000000) enables the data cache on the '040/'060. */
static inline void _CPU_enable_data_cache (
        void )
{
  _CPU_CACR_OR( 0x80000000 );
}

static inline void _CPU_disable_data_cache (
        void )
{
  _CPU_CACR_AND( 0x7FFFFFFF );
}

/* CINVL on the instruction cache: drop the matching line. */
static inline void _CPU_invalidate_1_inst_cache_line (
  const void * i_addr )
{
  void * p_address = (void *)  _CPU_virtual_to_physical( i_addr );
  asm volatile ( "cinvl %%ic,(%0)" :: "a" (p_address) );
}

/* CINVA on the instruction cache: drop every line. */
static inline void _CPU_invalidate_entire_inst_cache (
        void )
{
                asm volatile ( "cinva %%ic" :: );
}

/* CACR bit 15 (0x8000) enables the instruction cache on the '040/'060. */
static inline void _CPU_enable_inst_cache (
        void )
{
  _CPU_CACR_OR( 0x00008000 );
}

static inline void _CPU_disable_inst_cache (
        void )
{
        _CPU_CACR_AND( 0xFFFF7FFF );
}
    575 #endif
    576 
    577 
/*
 * Re-export the model-specific alignment values under the generic
 * _CPU_* names that the cache manager implementation expects.  A name
 * is only defined when the selected CPU model actually has that cache.
 */
#if defined(M68K_DATA_CACHE_ALIGNMENT)
#define _CPU_DATA_CACHE_ALIGNMENT M68K_DATA_CACHE_ALIGNMENT
#endif

#if defined(M68K_INST_CACHE_ALIGNMENT)
#define _CPU_INST_CACHE_ALIGNMENT M68K_INST_CACHE_ALIGNMENT
#endif
    585 
    586 
    587376#endif  /* !ASM */
    588377
Note: See TracChangeset for help on using the changeset viewer.