Changeset ddbc3f8d in rtems
- Timestamp: 07/11/14 14:37:56
- Branches: 4.11, 5, master
- Children: 9a9ab85
- Parents: 62f373fb
- git-author: Daniel Cederman <cederman@…> (07/11/14 14:37:56)
- git-committer: Daniel Hellstrom <daniel@…> (08/22/14 11:10:59)
- Files: 4 edited
- c/src/lib/libbsp/sparc/leon3/include/cache_.h

 #define CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS

+#define CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING
+
 #define CPU_INSTRUCTION_CACHE_ALIGNMENT 64
- c/src/lib/libcpu/shared/src/cache_manager.c

 #include <rtems.h>
 #include "cache_.h"
+#include <rtems/score/smpimpl.h>
+#include <rtems/score/smplock.h>
+#include <rtems/score/chainimpl.h>
+#include <rtems/score/sysstate.h>
+
+#if defined( RTEMS_SMP )
+
+typedef void (*Cache_manager_Function_ptr)(const void *d_addr, size_t n_bytes);
+
+typedef struct {
+  Chain_Node Node;
+  Cache_manager_Function_ptr func;
+  const void *addr;
+  size_t size;
+  cpu_set_t *recipients;
+  size_t setsize;
+  Atomic_Ulong done;
+} Cache_manager_SMP_node;
+
+typedef struct {
+  SMP_lock_Control Lock;
+  Chain_Control List;
+} Cache_manager_SMP_control;
+
+static Cache_manager_SMP_control _Cache_manager_SMP_control = {
+  .Lock = SMP_LOCK_INITIALIZER("cachemgr"),
+  .List = CHAIN_INITIALIZER_EMPTY(_Cache_manager_SMP_control.List)
+};
+
+void
+_SMP_Cache_manager_message_handler(void)
+{
+  SMP_lock_Context lock_context;
+  Cache_manager_SMP_node *node;
+  Cache_manager_SMP_node *next;
+  uint32_t cpu_self_idx;
+
+  _SMP_lock_ISR_disable_and_acquire( &_Cache_manager_SMP_control.Lock,
+      &lock_context );
+  cpu_self_idx = _SMP_Get_current_processor();
+
+  node = (Cache_manager_SMP_node*)_Chain_First(
+      &_Cache_manager_SMP_control.List );
+  while ( !_Chain_Is_tail( &_Cache_manager_SMP_control.List, &node->Node ) ) {
+    next = (Cache_manager_SMP_node*)_Chain_Next( &node->Node );
+    if ( CPU_ISSET_S( cpu_self_idx, node->setsize, node->recipients ) ) {
+      CPU_CLR_S( cpu_self_idx, node->setsize, node->recipients );
+
+      node->func( node->addr, node->size );
+
+      if ( CPU_COUNT_S( node->setsize, node->recipients ) == 0 ) {
+        _Chain_Extract_unprotected( &node->Node );
+        _Atomic_Store_ulong( &node->done, 1, ATOMIC_ORDER_RELEASE );
+      }
+    }
+    node = next;
+  }
+
+  _SMP_lock_Release_and_ISR_enable( &_Cache_manager_SMP_control.Lock,
+      &lock_context );
+}
+
+#if defined(CPU_DATA_CACHE_ALIGNMENT) || \
+    (defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) && \
+    defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING))
+
+static void
+_Cache_manager_Process_cache_messages( void )
+{
+  unsigned long message;
+  Per_CPU_Control *cpu_self;
+  ISR_Level isr_level;
+
+  _ISR_Disable_without_giant( isr_level );
+
+  cpu_self = _Per_CPU_Get();
+
+  message = _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED );
+
+  if ( message & SMP_MESSAGE_CACHE_MANAGER ) {
+    if ( _Atomic_Compare_exchange_ulong( &cpu_self->message, &message,
+        message & ~SMP_MESSAGE_CACHE_MANAGER, ATOMIC_ORDER_RELAXED,
+        ATOMIC_ORDER_RELAXED ) ) {
+      _SMP_Cache_manager_message_handler();
+    }
+  }
+
+  _ISR_Enable_without_giant( isr_level );
+}
+
+/*
+ * We can not make this function static as we need to access it
+ * from the test program.
+ */
+void
+_Cache_manager_Send_smp_msg(
+  const size_t setsize,
+  const cpu_set_t *set,
+  Cache_manager_Function_ptr func,
+  const void * addr,
+  size_t size
+);
+
+void
+_Cache_manager_Send_smp_msg(
+  const size_t setsize,
+  const cpu_set_t *set,
+  Cache_manager_Function_ptr func,
+  const void * addr,
+  size_t size
+)
+{
+  uint32_t i;
+  Cache_manager_SMP_node node;
+  size_t set_size = CPU_ALLOC_SIZE( _SMP_Get_processor_count() );
+  char cpu_set_copy[set_size];
+  SMP_lock_Context lock_context;
+
+  if ( !_System_state_Is_up( _System_state_Get() ) ) {
+    func( addr, size );
+    return;
+  }
+
+  memset( cpu_set_copy, 0, set_size );
+  if( set == NULL ) {
+    for( i=0; i<_SMP_Get_processor_count(); ++i )
+      CPU_SET_S( i, set_size, (cpu_set_t *)cpu_set_copy );
+  } else {
+    for( i=0; i<_SMP_Get_processor_count(); ++i )
+      if( CPU_ISSET_S( i, set_size, set ) )
+        CPU_SET_S( i, set_size, (cpu_set_t *)cpu_set_copy );
+  }
+
+  node.func = func;
+  node.addr = addr;
+  node.size = size;
+  node.setsize = set_size;
+  node.recipients = (cpu_set_t *)cpu_set_copy;
+  _Atomic_Store_ulong( &node.done, 0, ATOMIC_ORDER_RELAXED );
+
+  _SMP_lock_ISR_disable_and_acquire( &_Cache_manager_SMP_control.Lock,
+      &lock_context );
+  _Chain_Prepend_unprotected( &_Cache_manager_SMP_control.List, &node.Node );
+  _SMP_lock_Release_and_ISR_enable( &_Cache_manager_SMP_control.Lock,
+      &lock_context );
+
+  _SMP_Send_message_multicast( set_size, node.recipients,
+      SMP_MESSAGE_CACHE_MANAGER );
+
+  _Cache_manager_Process_cache_messages();
+
+  while ( !_Atomic_Load_uint( &node.done, ATOMIC_ORDER_ACQUIRE ) );
+}
+#endif
+
+void
+rtems_cache_flush_multiple_data_lines_processor_set(
+  const void *addr,
+  size_t size,
+  const size_t setsize,
+  const cpu_set_t *set
+)
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+  _Cache_manager_Send_smp_msg( setsize, set,
+      rtems_cache_flush_multiple_data_lines, addr, size );
+#endif
+}
+
+void
+rtems_cache_invalidate_multiple_data_lines_processor_set(
+  const void *addr,
+  size_t size,
+  const size_t setsize,
+  const cpu_set_t *set
+)
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+  _Cache_manager_Send_smp_msg( setsize, set,
+      rtems_cache_invalidate_multiple_data_lines, addr, size );
+#endif
+}
+
+void
+rtems_cache_flush_entire_data_processor_set(
+  const size_t setsize,
+  const cpu_set_t *set
+)
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+  _Cache_manager_Send_smp_msg( setsize, set,
+      (Cache_manager_Function_ptr)rtems_cache_flush_entire_data, 0, 0 );
+#endif
+}
+
+void
+rtems_cache_invalidate_entire_data_processor_set(
+  const size_t setsize,
+  const cpu_set_t *set
+)
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+  _Cache_manager_Send_smp_msg( setsize, set,
+      (Cache_manager_Function_ptr)rtems_cache_invalidate_entire_data, 0, 0 );
+#endif
+}
+#endif

 /*
…
  */

+
+
 /*
  * This function is responsible for performing an instruction cache
…
  * and then perform the invalidations.
  */
-void
-rtems_cache_invalidate_multiple_instruction_lines( const void * i_addr, size_t n_bytes )
-{
-#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
-#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
-  _CPU_cache_invalidate_instruction_range( i_addr, n_bytes );
-#else
+
+#if !defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
+static void
+_invalidate_multiple_instruction_lines_no_range_functions(
+  const void * i_addr,
+  size_t n_bytes
+)
+{
   const void * final_address;

…
     i_addr = (void *)((size_t)i_addr + CPU_INSTRUCTION_CACHE_ALIGNMENT);
   }
+}
+#endif
+
+void
+rtems_cache_invalidate_multiple_instruction_lines(
+  const void * i_addr,
+  size_t n_bytes
+)
+{
+#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
+#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
+
+#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
+  _Cache_manager_Send_smp_msg( 0, 0, _CPU_cache_invalidate_instruction_range,
+      i_addr, n_bytes );
+#else
+  _CPU_cache_invalidate_instruction_range( i_addr, n_bytes );
+#endif
+
+#else
+
+#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
+  _Cache_manager_Send_smp_msg( 0, 0,
+      _invalidate_multiple_instruction_lines_no_range_functions, i_addr,
+      n_bytes );
+#else
+  _invalidate_multiple_instruction_lines_no_range_functions( i_addr, n_bytes );
+#endif
+
+#endif
 #endif
…
  */

+#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
+  _Cache_manager_Send_smp_msg( 0, 0,
+      (Cache_manager_Function_ptr)_CPU_cache_invalidate_entire_instruction,
+      0, 0 );
+#else
   _CPU_cache_invalidate_entire_instruction();
+#endif
 #endif
 }
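With these changes, an instruction cache invalidate on an SMP target without snooping is broadcast to every processor via SMP_MESSAGE_CACHE_MANAGER, and the calling processor busy-waits until all recipients have acknowledged. A minimal caller-side sketch of the typical use case (the helper name install_code and its parameters are hypothetical, not part of this changeset):

#include <string.h>
#include <rtems.h>

/* Hypothetical helper: copy freshly generated machine code into place and
 * make it visible to instruction fetch on every processor. */
static void install_code( void *dst, const void *src, size_t size )
{
  memcpy( dst, src, size );

  /* Write the new bytes back to memory so instruction fetches see them. */
  rtems_cache_flush_multiple_data_lines( dst, size );

  /* Drop stale instruction cache lines. On SMP targets that define
   * CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING this now runs on all
   * processors via _Cache_manager_Send_smp_msg(), so it must not be
   * called from interrupt context there. */
  rtems_cache_invalidate_multiple_instruction_lines( dst, size );
}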
- cpukit/rtems/include/rtems/rtems/cache.h

  * The cache lines covering the area are marked as invalid. A later
  * instruction fetch from the area will result in a load from memory.
+ * In SMP mode, on processors without instruction cache snooping, this
+ * operation will invalidate the instruction cache lines on all processors.
+ * It should not be called from interrupt context in such case.
  *
  * @param[in] addr The start address of the area to invalidate.
…
 void *rtems_cache_aligned_malloc ( size_t nbytes );

+#if defined( RTEMS_SMP )
+
+/**
+ * @brief Flushes multiple data cache lines for a set of processors
+ *
+ * Dirty cache lines covering the area are transferred to memory.
+ * Depending on the cache implementation this may mark the lines as invalid.
+ *
+ * This operation should not be called from interrupt context.
+ *
+ * @param[in] addr The start address of the area to flush.
+ * @param[in] size The size in bytes of the area to flush.
+ * @param[in] setsize The size of the processor set.
+ * @param[in] set The target processor set.
+ */
+void rtems_cache_flush_multiple_data_lines_processor_set(
+  const void *addr,
+  size_t size,
+  const size_t setsize,
+  const cpu_set_t *set
+);
+
+/**
+ * @brief Invalidates multiple data cache lines for a set of processors
+ *
+ * The cache lines covering the area are marked as invalid. A later read
+ * access in the area will load the data from memory.
+ *
+ * In case the area is not aligned on cache line boundaries, then this
+ * operation may destroy unrelated data.
+ *
+ * This operation should not be called from interrupt context.
+ *
+ * @param[in] addr The start address of the area to invalidate.
+ * @param[in] size The size in bytes of the area to invalidate.
+ * @param[in] setsize The size of the processor set.
+ * @param[in] set The target processor set.
+ */
+void rtems_cache_invalidate_multiple_data_lines_processor_set(
+  const void *addr,
+  size_t size,
+  const size_t setsize,
+  const cpu_set_t *set
+);
+
+/**
+ * @brief Flushes the entire data cache for a set of processors
+ *
+ * This operation should not be called from interrupt context.
+ *
+ * @see rtems_cache_flush_multiple_data_lines().
+ *
+ * @param[in] setsize The size of the processor set.
+ * @param[in] set The target processor set.
+ */
+void rtems_cache_flush_entire_data_processor_set(
+  const size_t setsize,
+  const cpu_set_t *set
+);
+
+/**
+ * @brief Invalidates the entire data cache for a set of processors
+ *
+ * This function is responsible for performing a data cache
+ * invalidate. It invalidates the entire cache for a set of
+ * processors.
+ *
+ * This operation should not be called from interrupt context.
+ *
+ * @param[in] setsize The size of the processor set.
+ * @param[in] set The target processor set.
+ */
+void rtems_cache_invalidate_entire_data_processor_set(
+  const size_t setsize,
+  const cpu_set_t *set
+);
+
+#endif
+
 /**@}*/
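The new *_processor_set() variants take a cpu_set_t in the same style as the task affinity API. A minimal usage sketch (the function name and buffer parameters are hypothetical, and a statically sized cpu_set_t is assumed to cover the target processors):

#include <rtems.h>

/* Hypothetical example: flush a shared buffer's data cache lines on
 * processors 0 and 2 only; passing set == NULL instead would target all
 * processors. Not safe from interrupt context: the calling processor
 * spins until every recipient has performed the flush. */
static void flush_shared_buffer( const void *buf, size_t size )
{
  cpu_set_t set;

  CPU_ZERO( &set );
  CPU_SET( 0, &set );
  CPU_SET( 2, &set );

  rtems_cache_flush_multiple_data_lines_processor_set(
    buf, size, sizeof( set ), &set );
}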
- cpukit/score/include/rtems/score/smpimpl.h

 #include <rtems/score/percpu.h>
 #include <rtems/fatal.h>
+#include <rtems/rtems/cache.h>

 #ifdef __cplusplus
…
  */
 #define SMP_MESSAGE_TEST 0x2UL
+
+/**
+ * @brief SMP message to request a cache manager invocation.
+ *
+ * @see _SMP_Send_message().
+ */
+#define SMP_MESSAGE_CACHE_MANAGER 0x4UL

 /**
…

+/**
+ * @brief Handles cache invalidation/flush requests from a remote processor.
+ */
+void _SMP_Cache_manager_message_handler( void );
+
 /**
  * @brief Interrupt handler for inter-processor interrupts.
  */
…
     ( *_SMP_Test_message_handler )( cpu_self );
   }
+
+  if ( ( message & SMP_MESSAGE_CACHE_MANAGER ) != 0 ) {
+    _SMP_Cache_manager_message_handler();
+  }
+
 }