source: rtems/cpukit/score/cpu/sparc64/interrupt.S @ 7633f5b

5
Last change on this file since 7633f5b was 7633f5b, checked in by Sebastian Huber <sebastian.huber@…>, on Mar 12, 2018 at 5:59:15 AM

sparc64: Move libcpu content to cpukit

This patch is a part of the BSP source reorganization.

Update #3285.

  • Property mode set to 100644
File size: 17.7 KB
Line 
1/*  cpu_asm.s
2 *
3 *  This file contains the basic algorithms for all assembly code used
4 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
5 *  in assembly language.
6 *
7 *  COPYRIGHT (c) 1989-2007. On-Line Applications Research Corporation (OAR).
8 *  COPYRIGHT (c) 2010. Gedare Bloom.
9 *
10 *  The license and distribution terms for this file may be
11 *  found in the file LICENSE in this distribution or at
12 *  http://www.rtems.org/license/LICENSE.
13 */
14
15#include <rtems/asm.h>
16#include <rtems/score/percpu.h>
17
18
19/*
20 *  The assembler needs to be told that we know what to do with
21 *  the global registers.
22 */
23.register %g2, #scratch
24.register %g3, #scratch
25.register %g6, #scratch
26.register %g7, #scratch
27
28
    /*
     *  void _ISR_Handler()
     *
     *  This routine provides the RTEMS interrupt management.
     *
     *  We enter this handler from the 8 instructions in the trap table with
     *  the following registers assumed to be set as shown:
     *
     *    g4 = tstate (old l0)
     *    g2 = trap type (vector) (old l3)
     *
     *  NOTE: By an executive defined convention:
     *    if trap type is between 0 and 511 it is an asynchronous trap
     *    if trap type is between 512 and 1023 it is a synchronous trap
     *    (bit 0x200, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, marks synchronous
     *    traps; it is tested at the end of this handler to choose between
     *    retry and done)
     */

  .align 4
PUBLIC(_ISR_Handler)
  SYM(_ISR_Handler):

    /*
     * The ISR is called at TL = 1.
     * On sun4u we use the alternate globals set.
     *
     * On entry:
     *   g4 = tstate (from trap table)
     *   g2 = trap vector #
     *
     * In either case, note that trap handlers share a register window with
     * the interrupted context, unless we explicitly enter a new window. This
     * differs from Sparc v8, in which a dedicated register window is saved
     * for trap handling.  This means we have to avoid overwriting any registers
     * that we don't save.
     *
     */


    /*
     *  save some or all context on stack
     */

    /*
     *  Save the state of the interrupted task -- especially the global
     *  registers -- in the Interrupt Stack Frame.  Note that the ISF
     *  includes a regular minimum stack frame which will be used if
     *  needed by register window overflow and underflow handlers.
     *
     *  This is slightly wasteful, since the stack already has the window
     *  overflow space reserved, but there is no obvious way to ensure
     *  we can store the interrupted state and still handle window
     *  spill/fill correctly, since there is no room for the ISF.
     *
     */

    /* this is for debugging purposes, make sure that TL = 1, otherwise
     * things might get dicey */
    rdpr %tl, %g1
    cmp %g1, 1
    be 1f
    nop

    ! TL != 1: hang in this tight loop so the anomaly is visible in a
    ! debugger rather than silently corrupting trap state.
    0: ba 0b
    nop

    1:
    /* first store the sp of the interrupted task temporarily in g1 */
    mov   %sp, %g1

    sub     %sp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
    ! make space for Stack_Frame||ISF

    /* save tstate, tpc, tnpc, pil */
    stx   %g4, [%sp + STACK_BIAS + ISF_TSTATE_OFFSET]
    rdpr  %pil, %g3
    rdpr  %tpc, %g4
    rdpr  %tnpc, %g5
    stx   %g3, [%sp + STACK_BIAS + ISF_PIL_OFFSET]
    stx   %g4, [%sp + STACK_BIAS + ISF_TPC_OFFSET]
    stx   %g5, [%sp + STACK_BIAS + ISF_TNPC_OFFSET]
    stx   %g2, [%sp + STACK_BIAS + ISF_TVEC_OFFSET]

    rd  %y, %g4        ! save y
    stx   %g4, [%sp + STACK_BIAS + ISF_Y_OFFSET]

    ! save interrupted frame's output regs
    stx     %o0, [%sp + STACK_BIAS + ISF_O0_OFFSET]     ! save o0
    stx     %o1, [%sp + STACK_BIAS + ISF_O1_OFFSET]     ! save o1
    stx     %o2, [%sp + STACK_BIAS + ISF_O2_OFFSET]     ! save o2
    stx     %o3, [%sp + STACK_BIAS + ISF_O3_OFFSET]     ! save o3
    stx     %o4, [%sp + STACK_BIAS + ISF_O4_OFFSET]     ! save o4
    stx     %o5, [%sp + STACK_BIAS + ISF_O5_OFFSET]     ! save o5
    stx     %g1, [%sp + STACK_BIAS + ISF_O6_SP_OFFSET]  ! save o6/sp
    stx     %o7, [%sp + STACK_BIAS + ISF_O7_OFFSET]     ! save o7

    ! The %o registers survive the switch from the trap globals to the
    ! normal globals below, so park the two values we still need there.
    mov  %g1, %o5    ! hold the old sp here for now
    mov  %g2, %o1    ! we'll need trap # later

    /* switch to TL[0] */
    wrpr  %g0, 0, %tl

    /* switch to normal globals */
#if defined (SUN4U)
    /* the assignment to pstate below will mask out the AG bit */
#elif defined (SUN4V)
    wrpr  %g0, 0, %gl
#endif
    /* get pstate to known state */
    wrpr  %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK, %pstate

    ! save globals (these are now the interrupted context's normal globals)
    stx     %g1, [%sp + STACK_BIAS + ISF_G1_OFFSET]     ! save g1
    stx     %g2, [%sp + STACK_BIAS + ISF_G2_OFFSET]     ! save g2
    stx     %g3, [%sp + STACK_BIAS + ISF_G3_OFFSET]     ! save g3
    stx     %g4, [%sp + STACK_BIAS + ISF_G4_OFFSET]     ! save g4
    stx     %g5, [%sp + STACK_BIAS + ISF_G5_OFFSET]     ! save g5
    stx     %g6, [%sp + STACK_BIAS + ISF_G6_OFFSET]     ! save g6
    stx     %g7, [%sp + STACK_BIAS + ISF_G7_OFFSET]     ! save g7


  mov  %o1, %g2  ! get the trap #
  mov  %o5, %g7  ! store the interrupted %sp (preserve)
  mov  %sp, %o1  ! 2nd arg to ISR Handler = address of ISF
  add  %o1, STACK_BIAS, %o1 ! need to adjust for stack bias, 2nd arg = ISF

  /*
   *  Increment ISR nest level and Thread dispatch disable level.
   *
   *  Register usage for this section: (note, these are used later)
   *
   *    g3 = _Thread_Dispatch_disable_level pointer
   *    g5 = _Thread_Dispatch_disable_level value (uint32_t)
   *    g6 = _ISR_Nest_level pointer
   *    g4 = _ISR_Nest_level value (uint32_t)
   *    o5 = temp
   *
   *  NOTE: It is assumed that g6 - g7 will be preserved until the ISR
   *        nest and thread dispatch disable levels are unnested.
   */

  setx  THREAD_DISPATCH_DISABLE_LEVEL, %o5, %g3
  lduw  [%g3], %g5
  setx  ISR_NEST_LEVEL, %o5, %g6
  lduw  [%g6], %g4

  add      %g5, 1, %g5
  stuw     %g5, [%g3]

  add      %g4, 1, %g4
  stuw     %g4, [%g6]

  /*
   *  If ISR nest level was zero (now 1), then switch stack.
   */

  subcc    %g4, 1, %g4             ! outermost interrupt handler?
  bnz      dont_switch_stacks      ! No, then do not switch stacks
  ! NOTE(review): setx is a synthetic macro expanding to several
  ! instructions; only its first instruction sits in the branch delay
  ! slot and executes on both paths.  That appears harmless here since
  ! it merely starts building a constant -- confirm against the
  ! assembler's setx expansion.

  setx  SYM(INTERRUPT_STACK_HIGH), %o5, %g1
  ldx  [%g1], %sp

  /*
   * Adjust the stack for the stack bias
   */
  sub     %sp, STACK_BIAS, %sp

  /*
   *  Make sure we have a place on the stack for the window overflow
   *  trap handler to write into.  At this point it is safe to
   *  enable traps again.
   */

  sub      %sp, SPARC64_MINIMUM_STACK_FRAME_SIZE, %sp

  dont_switch_stacks:
  /*
   *  Check if we have an external interrupt (trap 0x41 - 0x4f). If so,
   *  set the PIL to mask off interrupts with lower priority.
   *
   *  The original PIL is not modified since it will be restored
   *  when the interrupt handler returns.
   */

  and      %g2, 0x0ff, %g1 ! is bottom byte of vector number [0x41,0x4f]?

  subcc    %g1, 0x41, %g0
  bl       dont_fix_pil
  subcc    %g1, 0x4f, %g0          ! delay slot: executes on both paths
  bg       dont_fix_pil
  nop
  ! NOTE(review): %g1 is in [0x41,0x4f] here; presumably only the low
  ! 4 bits (interrupt level 1-15) take effect in the 4-bit %pil
  ! register -- confirm against the SPARC V9 specification.
  wrpr     %g0, %g1, %pil

  dont_fix_pil:
  /* We need to be careful about enabling traps here.
   *
   * We already stored off the tstate, tpc, and tnpc, and switched to
   * TL = 0, so it should be safe.
   */

  /* zero out g4 so that ofw calls work */
  mov  %g0, %g4

  ! **** ENABLE TRAPS ****
  wrpr  %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK | \
    SPARC_PSTATE_IE_MASK, %pstate

    /*
     *  Vector to user's handler.
     *
     *  NOTE: TBR may no longer have vector number in it since
     *        we just enabled traps.  It is definitely in g2.
     */
    setx  SYM(_ISR_Vector_table), %o5, %g1
    and      %g2, 0x1FF, %o5        ! remove synchronous trap indicator
    sll      %o5, 3, %o5            ! o5 = offset into table (8-byte entries)
    ldx      [%g1 + %o5], %g1       ! g1 = _ISR_Vector_table[ vector ]


    ! o1 = 2nd arg = address of the ISF
    !   WAS LOADED WHEN ISF WAS SAVED!!!
    mov      %g2, %o0               ! o0 = 1st arg = vector number
    call     %g1, 0
    nop                             ! delay slot

    /*
     *  Redisable traps so we can finish up the interrupt processing.
     *  This is a conservative place to do this.
     */
    ! **** DISABLE TRAPS ****
    wrpr  %g0, SPARC_PSTATE_PRIV_MASK, %pstate

    /*
     * We may safely use any of the %o and %g registers, because
     * we saved them earlier (and any other interrupt that uses
     * them will also save them).  Right now, the state of those
     * registers are as follows:
     *  %o registers: unknown (user's handler may have destroyed)
     *  %g1,g4,g5: scratch
     *  %g2: unknown: was trap vector
     *  %g3: unknown: was _Thread_Dispatch_disable_level pointer
     *  %g6: _ISR_Nest_level
     *  %g7: interrupted task's sp
     */

    /*
     *  Decrement ISR nest level and Thread dispatch disable level.
     *
     *  Register usage for this section: (note: as used above)
     *
     *    g3 = _Thread_Dispatch_disable_level pointer
     *    g5 = _Thread_Dispatch_disable_level value
     *    g6 = _ISR_Nest_level pointer
     *    g4 = _ISR_Nest_level value
     *    o5 = temp
     */

    /* We have to re-load the values from memory, because there are
     * not enough registers that we know will be preserved across the
     * user's handler. If this is a problem, we can create a register
     * window for _ISR_Handler.
     */

    setx  THREAD_DISPATCH_DISABLE_LEVEL, %o5, %g3
    lduw  [%g3],%g5
    lduw  [%g6],%g4
    sub   %g5, 1, %g5
    stuw  %g5, [%g3]
    sub   %g4, 1, %g4
    stuw  %g4, [%g6]

    orcc  %g4, %g0, %g0           ! ISRs still nested?
    bnz   dont_restore_stack      ! Yes then don't restore stack yet
    nop

    /*
     *  This is the outermost interrupt handler. Need to get off the
     *  CPU Interrupt Stack and back to the tasks stack.
     *
     *  The following subtract should get us back on the interrupted
     *  tasks stack and add enough room to invoke the dispatcher.
     *  When we enable traps, we are mostly back in the context
     *  of the task and subsequent interrupts can operate normally.
     *
     *  Now %sp points to the bottom of the ISF.
     *
     */

    sub      %g7,   CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp

    dont_restore_stack:

    /*
     *  If dispatching is disabled (includes nested interrupt case),
     *  then do a "simple" exit.
     */

    orcc     %g5, %g0, %g0   ! Is dispatching disabled?
    bnz      simple_return   ! Yes, then do a "simple" exit
    ! NOTE: Use the delay slot
    mov      %g0, %g4  ! clear g4 for ofw

    ! Are we dispatching from a previous ISR in the interrupted thread?
    setx  SYM(_CPU_ISR_Dispatch_disable), %o5, %g5
    lduw     [%g5], %o5
    orcc     %o5, %g0, %g0   ! Is this thread already doing an ISR?
    bnz      simple_return   ! Yes, then do a "simple" exit
    nop

    setx    DISPATCH_NEEDED, %o5, %g7


    /*
     *  If a context switch is necessary, then do fudge stack to
     *  return to the interrupt dispatcher.
     */

    ldub     [%g7], %o5

    orcc     %o5, %g0, %g0   ! Is thread switch necessary?
    bz       simple_return   ! no, then do a simple return. otherwise fallthru
    nop

    /*
     *  Invoke interrupt dispatcher.
     */
PUBLIC(_ISR_Dispatch)
  SYM(_ISR_Dispatch):
    ! Set ISR dispatch nesting prevention flag
      mov      1, %o1
      setx     SYM(_CPU_ISR_Dispatch_disable), %o5, %o2
      stuw     %o1, [%o2]


      !  **** ENABLE TRAPS ****
      wrpr  %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK | \
        SPARC_PSTATE_IE_MASK, %pstate
        isr_dispatch:
        call    SYM(_Thread_Dispatch), 0
        nop

        /*
         *  We invoked _Thread_Dispatch in a state similar to the interrupted
         *  task.  In order to safely be able to tinker with the register
         *  windows and get the task back to its pre-interrupt state,
         *  we need to disable interrupts.
         */
      mov   2, %g4        ! syscall (disable interrupts)
      ta    0             ! syscall (disable interrupts)
      mov   0, %g4        ! restore g4 to zero for ofw

  /*
   *  While we had ISR dispatching disabled in this thread,
   *  did we miss anything?  If so, then we need to do another
   *  _Thread_Dispatch before leaving this ISR Dispatch context.
   */

  setx     DISPATCH_NEEDED, %o5, %o1
  ldub     [%o1], %o2

  orcc     %o2, %g0, %g0   ! Is thread switch necessary?
  bz       allow_nest_again ! No, then clear out and return
  nop

  ! Yes, then invoke the dispatcher
dispatchAgain:
  mov      3, %g4        ! syscall (enable interrupts)
  ta       0             ! syscall (enable interrupts)
  ba       isr_dispatch
  mov      0, %g4        ! delay slot: restore g4 to zero for ofw

  allow_nest_again:

  ! Zero out ISR stack nesting prevention flag
  setx    SYM(_CPU_ISR_Dispatch_disable), %o5, %o1
  stuw    %g0,[%o1]

  /*
   *  The CWP in place at this point may be different from
   *  that which was in effect at the beginning of the ISR if we
   *  have been context switched between the beginning of this invocation
   *  of _ISR_Handler and this point.  Thus the CWP and WIM should
   *  not be changed back to their values at ISR entry time.  Any
   *  changes to the PSR must preserve the CWP.
   */

  simple_return:
  flushw          ! get register windows to a 'clean' state

  ! **** DISABLE TRAPS ****
  wrpr    %g0, SPARC_PSTATE_PRIV_MASK, %pstate

  ldx     [%sp + STACK_BIAS + ISF_Y_OFFSET], %o1      ! restore y
  wr      %o1, 0, %y

  ldx  [%sp + STACK_BIAS + ISF_TSTATE_OFFSET], %g1

! see if cwp is proper (tstate.cwp == cwp)
  and  %g1, 0x1F, %g6
  rdpr  %cwp, %g7
  cmp  %g6, %g7
  bz  good_window
  nop

  /*
   * Fix the CWP. Need the cwp to be the proper cwp that
   * gets restored when returning from the trap via retry/done. Do
   * this before reloading the task's output regs. Basically fake a
   * window spill/fill.
   *
   * Is this necessary on sun4v? Why not just re-write
   * tstate.cwp to be equal to the current cwp?
   */
  mov  %sp, %g1          ! carry %sp across the window switch in a global
  stx  %l0, [%sp + STACK_BIAS + CPU_STACK_FRAME_L0_OFFSET]
  stx  %l1, [%sp + STACK_BIAS + CPU_STACK_FRAME_L1_OFFSET]
  stx  %l2, [%sp + STACK_BIAS + CPU_STACK_FRAME_L2_OFFSET]
  stx  %l3, [%sp + STACK_BIAS + CPU_STACK_FRAME_L3_OFFSET]
  stx  %l4, [%sp + STACK_BIAS + CPU_STACK_FRAME_L4_OFFSET]
  stx  %l5, [%sp + STACK_BIAS + CPU_STACK_FRAME_L5_OFFSET]
  stx  %l6, [%sp + STACK_BIAS + CPU_STACK_FRAME_L6_OFFSET]
  stx  %l7, [%sp + STACK_BIAS + CPU_STACK_FRAME_L7_OFFSET]
  stx  %i0, [%sp + STACK_BIAS + CPU_STACK_FRAME_I0_OFFSET]
  stx  %i1, [%sp + STACK_BIAS + CPU_STACK_FRAME_I1_OFFSET]
  stx  %i2, [%sp + STACK_BIAS + CPU_STACK_FRAME_I2_OFFSET]
  stx  %i3, [%sp + STACK_BIAS + CPU_STACK_FRAME_I3_OFFSET]
  stx  %i4, [%sp + STACK_BIAS + CPU_STACK_FRAME_I4_OFFSET]
  stx  %i5, [%sp + STACK_BIAS + CPU_STACK_FRAME_I5_OFFSET]
  stx  %i6, [%sp + STACK_BIAS + CPU_STACK_FRAME_I6_FP_OFFSET]
  stx  %i7, [%sp + STACK_BIAS + CPU_STACK_FRAME_I7_OFFSET]
  wrpr  %g0, %g6, %cwp   ! switch to the window tstate.cwp will restore
  mov  %g1, %sp          ! same stack frame, new window
  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L0_OFFSET], %l0
  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L1_OFFSET], %l1
  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L2_OFFSET], %l2
  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L3_OFFSET], %l3
  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L4_OFFSET], %l4
  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L5_OFFSET], %l5
  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L6_OFFSET], %l6
  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L7_OFFSET], %l7
  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I0_OFFSET], %i0
  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I1_OFFSET], %i1
  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I2_OFFSET], %i2
  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I3_OFFSET], %i3
  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I4_OFFSET], %i4
  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I5_OFFSET], %i5
  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I7_OFFSET], %i7


  good_window:


  /*
   *  Restore tasks global and out registers
   */

  ldx     [%sp + STACK_BIAS + ISF_G1_OFFSET], %g1    ! restore g1
  ldx     [%sp + STACK_BIAS + ISF_G2_OFFSET], %g2    ! restore g2
  ldx     [%sp + STACK_BIAS + ISF_G3_OFFSET], %g3    ! restore g3
  ldx     [%sp + STACK_BIAS + ISF_G4_OFFSET], %g4    ! restore g4
  ldx     [%sp + STACK_BIAS + ISF_G5_OFFSET], %g5    ! restore g5
  ldx     [%sp + STACK_BIAS + ISF_G6_OFFSET], %g6    ! restore g6
  ldx     [%sp + STACK_BIAS + ISF_G7_OFFSET], %g7    ! restore g7

  ! Assume the interrupted context is in TL 0 with GL 0 / normal globals.
  ! When tstate is restored at done/retry, the interrupted context is restored.
  ! return to TL[1], GL[1], and restore TSTATE, TPC, and TNPC
  wrpr  %g0, 1, %tl

  ! return to GL=1 or AG
#if defined(SUN4U)
    rdpr  %pstate, %o1
    or  %o1, SPARC_PSTATE_AG_MASK, %o1
    wrpr  %o1, %g0, %pstate                 ! go to AG.
#elif defined(SUN4V)
  wrpr  %g0, 1, %gl
#endif

! now we can use global registers (at gl=1 or AG)
  ldx   [%sp + STACK_BIAS + ISF_PIL_OFFSET], %g3
  ldx   [%sp + STACK_BIAS + ISF_TPC_OFFSET], %g4
  ldx   [%sp + STACK_BIAS + ISF_TNPC_OFFSET], %g5
  ldx   [%sp + STACK_BIAS + ISF_TSTATE_OFFSET], %g1
  ldx   [%sp + STACK_BIAS + ISF_TVEC_OFFSET], %g2
  wrpr  %g0, %g3, %pil
  wrpr  %g0, %g4, %tpc
  wrpr  %g0, %g5, %tnpc

  wrpr    %g0, %g1, %tstate

  ldx     [%sp + STACK_BIAS + ISF_O0_OFFSET], %o0    ! restore o0
  ldx     [%sp + STACK_BIAS + ISF_O1_OFFSET], %o1    ! restore o1
  ldx     [%sp + STACK_BIAS + ISF_O2_OFFSET], %o2    ! restore o2
  ldx     [%sp + STACK_BIAS + ISF_O3_OFFSET], %o3    ! restore o3
  ldx     [%sp + STACK_BIAS + ISF_O4_OFFSET], %o4    ! restore o4
  ldx     [%sp + STACK_BIAS + ISF_O5_OFFSET], %o5    ! restore o5
  ! sp is restored later
  ldx     [%sp + STACK_BIAS + ISF_O7_OFFSET], %o7    ! restore o7

  ldx     [%sp + STACK_BIAS + ISF_O6_SP_OFFSET], %o6 ! restore o6/sp

  /*
   *  Determine whether to re-execute the trapping instruction
   *  (asynchronous trap) or to skip the trapping instruction
   *  (synchronous trap).
   */

  andcc   %g2, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
  ! Is this a synchronous trap?
  be  not_synch             ! No, then skip trapping instruction
  mov  0, %g4               ! delay slot: executes on both paths
  retry        ! re-execute trapping instruction
  not_synch:
  done        ! skip trapping instruction

/* end of file */
Note: See TracBrowser for help on using the repository browser.