Changeset 702c5f5 in rtems
- Timestamp: 10/27/99 15:29:18
- Branches: 4.8, 4.9, 4.10, 4.11, 5, master
- Children: eb7f0f22
- Parents: 090b1c37
- Files: 73 added, 8 edited
Legend:
- Unmodified lines are shown with no prefix
- Added lines are prefixed with "+"
- Removed lines are prefixed with "-"
- Unchanged code between hunks is elided with "..."
c/ACKNOWLEDGEMENTS
r090b1c37 → r702c5f5

   numerous improvements to the i386 and PowerPC ports of RTEMS.

+  Mark Bronson <mark@ramix.com> of RAMIX for submitting i960RP
+     support and the rxgen960 board support package.
+
   Finally, the RTEMS project would like to thank those who have contributed
   to the other free software efforts which RTEMS utilizes.  The primary RTEMS
c/src/exec/score/cpu/i960/cpu.c
r090b1c37 → r702c5f5

  *  $Id$
  */
+/*
+ * 1999/04/26: added support for Intel i960RP
+ */

 #if defined(__i960CA__) || defined(__i960_CA__) || defined(__i960CA)
+#elif defined(__i960RP__)
 #else
-#warning "*** ENTIRE FILE IMPLEMENTED & TESTED FOR CA ONLY ***"
+#warning "*** ENTIRE FILE IMPLEMENTED & TESTED FOR CA & RP ONLY ***"
 #warning "*** THIS FILE WILL NOT COMPILE ON ANOTHER FAMILY MEMBER ***"
 #endif
...
  *  _CPU_ISR_install_raw_handler
  */

+#if defined(__i960CA__) || defined(__i960_CA__) || defined(__i960CA)
 #define _Is_vector_caching_enabled( _prcb ) \
   ((_prcb)->control_tbl->icon & 0x2000)
+#elif defined(__i960RP__)
+#define _Is_vector_caching_enabled( _prcb ) \
+  ((*((unsigned int *) ICON_ADDR)) & 0x2000)
+#endif

 void _CPU_ISR_install_raw_handler(
...
 )
 {
-  i960ca_PRCB *prcb = _CPU_Table.Prcb;
+  i960_PRCB   *prcb = _CPU_Table.Prcb;
   proc_ptr    *cached_intr_tbl = NULL;
...
  */

+#if defined(__i960CA__) || defined(__i960_CA__) || defined(__i960CA)
 #define soft_reset( prcb ) \
-{ register i960ca_PRCB *_prcb = (prcb); \
+{ register i960_PRCB   *_prcb = (prcb); \
   register unsigned32  *_next=0; \
   register unsigned32   _cmd  = 0x30000; \
...
                 : "0" (_cmd), "1" (_next), "2" (_prcb) ); \
 }
+#else
+#if defined(__i960RP__) || defined(__i960_RP__) || defined(__i960RP)
+#define soft_reset( prcb ) \
+{ register i960_PRCB   *_prcb = (prcb); \
+  register unsigned32  *_next=0; \
+  register unsigned32   _cmd  = 0x300; \
+  asm volatile( "lda    next,%1; \
+                 sysctl %0,%1,%2; \
+           next: mov    g0,g0" \
+                 : "=d" (_cmd), "=d" (_next), "=d" (_prcb) \
+                 : "0" (_cmd), "1" (_next), "2" (_prcb) ); \
+}
+#endif
+#endif

 void _CPU_Install_interrupt_stack( void )
 {
-  i960ca_PRCB *prcb = _CPU_Table.Prcb;
+  i960_PRCB   *prcb = _CPU_Table.Prcb;
   unsigned32   level;
+#if defined(__i960RP__) || defined(__i960_RP__)
+  int *isp = (int *) ISP_ADDR;
+#endif
...
   prcb->intr_stack = _CPU_Interrupt_stack_low;

+#if defined(__i960CA__) || defined(__i960_CA__) || defined(__i960CA)
   soft_reset( prcb );
+#elif defined(__i960RP__) || defined(__i960_RP__) || defined(__i960RP)
+  *isp = prcb->intr_stack;
+#endif

   _CPU_ISR_Enable( level );
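The diff above makes interrupt stack installation model-dependent. The sketch below is illustrative only and not part of the changeset; it assumes the i960_PRCB type and the i960_soft_reset() macro from rtems/score/i960.h are in scope, and that ISP_ADDR (the memory-mapped Interrupt Stack Pointer register) is provided by the new i960RP.h header added by this changeset but not shown here.

#include <rtems/score/i960.h>  /* i960_PRCB, i960_soft_reset(); on the RP, i960RP.h supplies ISP_ADDR */

/* Hypothetical helper mirroring the patched _CPU_Install_interrupt_stack():
 * the CA reloads its PRCB through a sysctl "reinitialize" message, while
 * the RP stores the new stack base into its memory-mapped ISP register.  */
static void install_interrupt_stack_sketch( i960_PRCB *prcb, unsigned int *stack_low )
{
  prcb->intr_stack = stack_low;          /* publish the new interrupt stack base */

#if defined(__i960CA__) || defined(__i960_CA__) || defined(__i960CA)
  i960_soft_reset( prcb );               /* CA: sysctl reinitialize (command 0x30000) */
#elif defined(__i960RP__) || defined(__i960_RP__)
  *(unsigned int *) ISP_ADDR = (unsigned int) prcb->intr_stack;  /* RP: write the ISP register */
#endif
}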
c/src/exec/score/cpu/i960/rtems/score/cpu.h
r090b1c37 → r702c5f5

 /*
- * i960 is pretty tolerant of alignment.  Just put things on 4 byte boundaries.
- */
-
-#define CPU_ALIGNMENT          4
+ * i960 is pretty tolerant of alignment but some CPU models do
+ * better with different default aligments so we use what the
+ * CPU model selected in rtems/score/i960.h.
+ */
+
+#define CPU_ALIGNMENT           I960_CPU_ALIGNMENT
 #define CPU_HEAP_ALIGNMENT      CPU_ALIGNMENT
 #define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT
c/src/exec/score/cpu/i960/rtems/score/i960.h
r090b1c37 → r702c5f5

 #define CPU_MODEL_NAME  "i960ca"
 #define __RTEMS_I960CA__
-#define I960_HAS_FPU 0

 #elif defined(__i960HA__) || defined(__i960_HA__) || defined(__i960HA)
...
 #define CPU_MODEL_NAME  "i960ha"
 #define __RTEMS_I960HA__
+
+#elif defined(__i960RP__)
+
+#include <i960RP.h>
+#define CPU_MODEL_NAME  "i960rp"
+#define __RTEMS_I960RP__
+#define I960_CPU_ALIGNMENT 8
+#define I960_SOFT_RESET_COMMAND 0x300
+
+#else
+
+#error "Unsupported CPU Model"
+
+#endif
+
+/*
+ *  Now default some CPU model variation parameters
+ */
+
+#ifndef I960_HAS_FPU
 #define I960_HAS_FPU 0
-
-#else
-
-#error "Unsupported CPU Model"
-
 #endif
+
+#ifndef I960_CPU_ALIGNMENT
+#define I960_CPU_ALIGNMENT 4
+#endif
+
+#ifndef I960_SOFT_RESET_COMMAND
+#define I960_SOFT_RESET_COMMAND 0x30000
+#endif

...
 #if defined(__RTEMS_I960CA__)
+/*
+ *  Now default some CPU model variation parameters
+ */
+
+#ifndef I960_HAS_FPU
+#define I960_HAS_FPU 0
+#endif
+
+#ifndef I960_CPU_ALIGNMENT
+#define I960_CPU_ALIGNMENT 4
+#endif
+

 /* i960CA control structures */
...
 typedef i960ha_PRCB i960_PRCB;

-#endif
+#elif defined(__RTEMS_I960RP__)
+
+/* i960RP control structures */
+
+/* Intel i960RP Control Table */
+
+typedef struct {
+                            /* Control Group 0 */
+  unsigned int rsvd00;
+  unsigned int rsvd01;
+  unsigned int rsvd02;
+  unsigned int rsvd03;
+                            /* Control Group 1 */
+  unsigned int imap0;       /* interrupt map 0 */
+  unsigned int imap1;       /* interrupt map 1 */
+  unsigned int imap2;       /* interrupt map 2 */
+  unsigned int icon;        /* interrupt control */
+                            /* Control Group 2 */
+  unsigned int pmcon0;      /* memory region 0 configuration */
+  unsigned int rsvd1;
+  unsigned int pmcon2;      /* memory region 2 configuration */
+  unsigned int rsvd2;
+                            /* Control Group 3 */
+  unsigned int pmcon4;      /* memory region 4 configuration */
+  unsigned int rsvd3;
+  unsigned int pmcon6;      /* memory region 6 configuration */
+  unsigned int rsvd4;
+                            /* Control Group 4 */
+  unsigned int pmcon8;      /* memory region 8 configuration */
+  unsigned int rsvd5;
+  unsigned int pmcon10;     /* memory region 10 configuration */
+  unsigned int rsvd6;
+                            /* Control Group 5 */
+  unsigned int pmcon12;     /* memory region 12 configuration */
+  unsigned int rsvd7;
+  unsigned int pmcon14;     /* memory region 14 configuration */
+  unsigned int rsvd8;
+                            /* Control Group 6 */
+  unsigned int rsvd9;
+  unsigned int rsvd10;
+  unsigned int tc;          /* trace control */
+  unsigned int bcon;        /* bus configuration control */
+} i960rp_control_table;
+
+/* Intel i960RP Processor Control Block */
+
+typedef struct {
+  unsigned int         *fault_tbl;     /* fault table base address */
+  i960rp_control_table *control_tbl;   /* control table base address */
+  unsigned int          initial_ac;    /* AC register initial value */
+  unsigned int          fault_config;  /* fault configuration word */
+  void                **intr_tbl;      /* interrupt table base address */
+  void                 *sys_proc_tbl;  /* system procedure table base address */
+  unsigned int          reserved;      /* reserved */
+  unsigned int         *intr_stack;    /* interrupt stack pointer */
+  unsigned int          ins_cache_cfg; /* instruction cache configuration word */
+  unsigned int          reg_cache_cfg; /* register cache configuration word */
+} i960rp_PRCB;
+
+typedef i960rp_control_table i960_control_table;
+typedef i960rp_PRCB          i960_PRCB;
+
+#else
+#error "invalid processor selection!"
+#endif
+
+/*
+ *  Miscellaneous Support Routines
+ */
+
+#define i960_reload_ctl_group( group ) \
+ { register int _cmd = ((group)|0x400) ; \
+   asm volatile( "sysctl %0,%0,%0" : "=d" (_cmd) : "0" (_cmd) ); \
+ }
+
+#define i960_atomic_modify( mask, addr, prev ) \
+ { register unsigned int  _mask = (mask); \
+   register unsigned int *_addr = (unsigned int *)(addr); \
+   asm volatile( "atmod  %0,%1,%1" \
+                  : "=d" (_addr), "=d" (_mask) \
+                  : "0" (_addr), "1" (_mask) ); \
+   (prev) = _mask; \
+ }
+
+#define atomic_modify( _mask, _address, _previous ) \
+  i960_atomic_modify( _mask, _address, _previous )
+
+#define i960_enable_tracing() \
+ { register unsigned int _pc = 0x1; \
+   asm volatile( "modpc  0,%0,%0" : "=d" (_pc) : "0" (_pc) ); \
+ }

...
 } while ( 0 )

-#define i960_atomic_modify( mask, addr, prev ) \
- { register unsigned int  _mask = (mask); \
-   register unsigned int *_addr = (unsigned int *)(addr); \
-   asm volatile( "atmod  %0,%1,%1" \
-                  : "=d" (_addr), "=d" (_mask) \
-                  : "0" (_addr), "1" (_mask) ); \
-   (prev) = _mask; \
- }
-
-#define atomic_modify( _mask, _address, _previous ) \
-  i960_atomic_modify( _mask, _address, _previous )
-
-#define i960_enable_tracing() \
- { register unsigned int _pc = 0x1; \
-   asm volatile( "modpc  0,%0,%0" : "=d" (_pc) : "0" (_pc) ); \
- }
+#define i960_cause_intr( intr ) \
+ { register int _intr = (intr); \
+   asm volatile( "sysctl %0,%0,%0" : "=d" (_intr) : "0" (_intr) ); \
+ }
+
+/*
+ *  Interrupt Masking Routines
+ */
+
+#if defined(__RTEMS_I960CA__) || defined(__RTEMS_I960HA__)

 #define i960_unmask_intr( xint ) \
...
 }

-#define i960_reload_ctl_group( group ) \
- { register int _cmd = ((group)|0x400) ; \
-   asm volatile( "sysctl %0,%0,%0" : "=d" (_cmd) : "0" (_cmd) ); \
- }
-
-#define i960_cause_intr( intr ) \
- { register int _intr = (intr); \
-   asm volatile( "sysctl %0,%0,%0" : "=d" (_intr) : "0" (_intr) ); \
- }
+static inline unsigned int i960_pend_intrs()
+{ register unsigned int _intr=0;
+  asm volatile( "mov    sf0,%0" : "=d" (_intr) : "0" (_intr) );
+  return ( _intr );
+}
+
+static inline unsigned int i960_mask_intrs()
+{ register unsigned int _intr=0;
+  asm volatile( "mov    sf1,%0" : "=d" (_intr) : "0" (_intr) );
+  return( _intr );
+}
+
+#elif defined(__RTEMS_I960RP__)
+
+#define i960_unmask_intr( xint ) \
+ { register unsigned int _mask= (1<<(xint)); \
+   register unsigned int *_imsk = (int * ) IMSK_ADDR; \
+   register unsigned int _val= *_imsk; \
+   asm volatile( "or %0,%2,%0; \
+                  st %0,(%1)" \
+                    : "=d" (_val), "=d" (_imsk), "=d" (_mask) \
+                    : "0" (_val), "1" (_imsk), "2" (_mask) ); \
+ }
+
+#define i960_mask_intr( xint ) \
+ { register unsigned int _mask= (1<<(xint)); \
+   register unsigned int *_imsk = (int * ) IMSK_ADDR; \
+   register unsigned int _val = *_imsk; \
+   asm volatile( "andnot %2,%0,%0; \
+                  st %0,(%1)" \
+                    : "=d" (_val), "=d" (_imsk), "=d" (_mask) \
+                    : "0" (_val), "1" (_imsk), "2" (_mask) ); \
+ }
+
+#define i960_clear_intr( xint ) \
+ { register unsigned int _xint=xint; \
+   register unsigned int _mask=(1<<(xint)); \
+   register unsigned int *_ipnd = (int * ) IPND_ADDR; \
+   register unsigned int _rslt = 0; \
+   asm volatile( "loop_til_cleared: mov 0, %0; \
+                  atmod %1, %2, %0; \
+                  bbs    %3,%0, loop_til_cleared" \
+                    : "=d" (_rslt), "=d" (_ipnd), "=d" (_mask), "=d" (_xint) \
+                    : "0" (_rslt), "1" (_ipnd), "2" (_mask), "3" (_xint) ); \
+ }
+
+static inline unsigned int i960_pend_intrs()
+{ register unsigned int _intr= *(unsigned int *) IPND_ADDR;
+  /*register unsigned int *_ipnd = (int * ) IPND_ADDR; \
+  asm volatile( "mov (%0),%1" \
+                  : "=d" (_ipnd), "=d" (_mask) \
+                  : "0" (_ipnd), "1" (_mask) ); \ */
+  return ( _intr );
+}
+
+static inline unsigned int i960_mask_intrs()
+{ register unsigned int _intr= *(unsigned int *) IMSK_ADDR;
+  /*asm volatile( "mov sf1,%0" : "=d" (_intr) : "0" (_intr) );*/
+  return( _intr );
+}
+#endif
+
+static inline unsigned int i960_get_fp()
+{ register unsigned int _fp=0;
+  asm volatile( "mov    fp,%0" : "=d" (_fp) : "0" (_fp) );
+  return ( _fp );
+}
+
+/*
+ *  Soft Reset
+ */
+
+#if defined(I960_SOFT_RESET_COMMAND)

 #define i960_soft_reset( prcb ) \
-{ register i960ca_PRCB  *_prcb = (prcb); \
-  register unsigned int *_next=0; \
-  register unsigned int  _cmd  = 0x30000; \
+{ register i960_PRCB    *_prcb = (prcb); \
+  register unsigned int *_next=0; \
+  register unsigned int  _cmd  = I960_SOFT_RESET_COMMAND; \
   asm volatile( "lda    next,%1; \
                  sysctl %0,%1,%2; \
...
 }

-static inline unsigned int i960_pend_intrs()
-{ register unsigned int _intr=0;
-  asm volatile( "mov    sf0,%0" : "=d" (_intr) : "0" (_intr) );
-  return ( _intr );
-}
-
-static inline unsigned int i960_mask_intrs()
-{ register unsigned int _intr=0;
-  asm volatile( "mov    sf1,%0" : "=d" (_intr) : "0" (_intr) );
-  return( _intr );
-}
-
-static inline unsigned int i960_get_fp()
-{ register unsigned int _fp=0;
-  asm volatile( "mov    fp,%0" : "=d" (_fp) : "0" (_fp) );
-  return ( _fp );
-}
+#else
+#warning "I960_SOFT_RESET_COMMAND is not defined"
+#endif

 /*
...
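For context, the interrupt masking helpers grouped above are intended to be used the same way on every supported model: on the CA/HA they expand to sf0/sf1 register accesses, on the RP to loads and stores of the memory-mapped IMSK/IPND registers declared in i960RP.h. The fragment below is an illustrative sketch only, not part of the changeset; the function name and the vector number 5 are arbitrary, and it assumes rtems/score/i960.h is included with a supported CPU model selected.

#include <rtems/score/i960.h>  /* i960_mask_intr(), i960_unmask_intr(), i960_pend_intrs() */

/* Hypothetical example: briefly mask external interrupt 5, sample the
 * pending-interrupt state while it is masked, then re-enable it.       */
static int xint5_pending_while_masked( void )
{
  unsigned int pending;

  i960_mask_intr( 5 );          /* disable external interrupt source 5  */
  pending = i960_pend_intrs();  /* snapshot of the pending interrupts   */
  i960_unmask_intr( 5 );        /* restore external interrupt source 5  */

  return ( pending >> 5 ) & 1;  /* nonzero if XINT5 was latched pending */
}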
c/src/exec/score/cpu/i960/wrap/Makefile.in
r090b1c37 → r702c5f5

 C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)

-H_PIECES = asm.h
+H_PIECES = asm.h i960RP.h
 H_FILES = $(H_PIECES:%=$(srcdir)/../%)
cpukit/score/cpu/i960/cpu.c
r090b1c37 → r702c5f5 (identical changes to those shown above for c/src/exec/score/cpu/i960/cpu.c)
cpukit/score/cpu/i960/rtems/score/cpu.h
r090b1c37 → r702c5f5 (identical changes to those shown above for c/src/exec/score/cpu/i960/rtems/score/cpu.h)
cpukit/score/cpu/i960/rtems/score/i960.h
r090b1c37 → r702c5f5 (identical changes to those shown above for c/src/exec/score/cpu/i960/rtems/score/i960.h)