source: rtems/c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.c @ 05e2e4c

4.104.114.84.95
Last change on this file since 05e2e4c was 05e2e4c, checked in by Till Straumann <strauman@…>, on 07/13/06 at 00:29:06
  • mpc6xx/mmu/bat.c, mpc6xx/mmu/pte121.c, shared/src/cache.c: Checked inline assembly code; added 'm' operands and paranoia 'memory' clobbers. Also, made sure that no pure input operands are modified by the asm.
  • Property mode set to 100644
File size: 29.2 KB
Line 
1/* $Id$ */
2
3/* Trivial page table setup for RTEMS
4 * Purpose: allow write protection of text/ro-data
5 *
6 * Author: Till Straumann <strauman@slac.stanford.edu>, 4/2002
7 */
8
9/* Choose debugging options */
10#undef  DEBUG_MAIN              /* create a standalone (host) program for basic testing */
11#undef  DEBUG                   /* target debugging and consistency checking */
12#undef  DEBUG_EXC               /* add exception handler which reenables BAT0 and recovers from a page fault */
13
14#ifdef  DEBUG_MAIN
15#undef  DEBUG                   /* must not use these together with DEBUG_MAIN */
16#undef  DEBUG_EXC
17#endif
18
19/***************************** INCLUDE HEADERS ****************************/
20
21#ifndef DEBUG_MAIN
22#include <rtems.h>
23#include <rtems/bspIo.h>
24#include <libcpu/cpuIdent.h>
25#include <libcpu/spr.h>
26#ifdef  DEBUG_EXC
27#include <bsp.h>
28#include <bsp/vectors.h>
29#include <libcpu/raw_exception.h>
30#endif
31#endif
32
33#include <stdio.h>
34#include <assert.h>
35#include <string.h>
36
37#include "pte121.h"
38
39/************************** CONSTANT DEFINITIONS **************************/
40
41/* Base 2 logs of some sizes */
42
43#ifndef DEBUG_MAIN
44
45#define LD_PHYS_SIZE    32      /* physical address space */
46#define LD_PG_SIZE              12      /* page size */
47#define LD_PTEG_SIZE    6       /* PTEG size */
48#define LD_PTE_SIZE             3       /* PTE size  */
49#define LD_SEG_SIZE             28      /* segment size */
50#define LD_MIN_PT_SIZE  16      /* minimal size of a page table */
51#define LD_HASH_SIZE    19      /* length of a hash */
52#define LD_VSID_SIZE    24      /* vsid bits in seg. register */
53
54#else /* DEBUG_MAIN */
55
56/* Reduced 'fantasy' sizes for testing */
57#define LD_PHYS_SIZE    32      /* physical address space */
58#define LD_PG_SIZE              6       /* page size */
59#define LD_PTEG_SIZE    5       /* PTEG size */
60#define LD_PTE_SIZE             3       /* PTE size  */
61#define LD_SEG_SIZE             28      /* segment size */
62#define LD_MIN_PT_SIZE  7       /* minimal size of a page table */
63#define LD_HASH_SIZE    19      /* length of a hash */
64
65#endif /* DEBUG_MAIN */
66
67/* Derived sizes */
68
69/* Size of a page index */
70#define LD_PI_SIZE              ((LD_SEG_SIZE) - (LD_PG_SIZE))
71
72/* Number of PTEs in a PTEG */
73#define PTE_PER_PTEG    (1<<((LD_PTEG_SIZE)-(LD_PTE_SIZE)))
74
75/* Segment register bits */
76#define KEY_SUP                 (1<<30) /* supervisor mode key */
77#define KEY_USR                 (1<<29) /* user mode key */
78
79/* The range of effective addresses to scan with 'tlbie'
80 * instructions in order to flush all TLBs.
81 * On the 750 and 7400, there are 128 two way I and D TLBs,
82 * indexed by EA[14:19]. Hence calling
83 *   tlbie rx
84 * where rx scans 0x00000, 0x01000, 0x02000, ... 0x3f000
85 * is sufficient to do the job
86 */
87#define NUM_TLB_PER_WAY 64      /* 750 and 7400 have 128 two way TLBs */
88#define FLUSH_EA_RANGE  (NUM_TLB_PER_WAY<<LD_PG_SIZE)
89
90/*************************** MACRO DEFINITIONS ****************************/
91
92/* Macros to split a (32bit) 'effective' address into
93 * VSID (virtual segment id) and PI (page index)
94 * using a 1:1 mapping of 'effective' to 'virtual'
95 * addresses.
96 *
97 * For 32bit addresses this looks like follows
98 * (each 'x' or '0' stands for a 'nibble' [4bits]):
99 *
100 *         32bit effective address (EA)
101 *
102 *              x x x x x x x x
103 *               |       |
104 *    0 0 0 0 0 x|x x x x|x x x
105 *       VSID    |  PI   |  PO (page offset)
106 *               |       |
107 */
108/* 1:1 VSID of an EA  */
109#define VSID121(ea) (((ea)>>LD_SEG_SIZE) & ((1<<(LD_PHYS_SIZE-LD_SEG_SIZE))-1))
110/* page index of an EA */
111#define PI121(ea)       (((ea)>>LD_PG_SIZE) & ((1<<LD_PI_SIZE)-1))
112
113/* read VSID from segment register */
114#ifndef DEBUG_MAIN
static uint32_t
seg2vsid (uint32_t ea)
{
  /* 'mfsrin' reads the segment register selected by the
   * upper 4 bits of 'ea'; the SR's low LD_VSID_SIZE bits
   * hold the VSID which we extract and return.
   */
  asm volatile ("mfsrin %0, %0":"=r" (ea):"0" (ea));
  return ea & ((1 << LD_VSID_SIZE) - 1);
}
121#else
122#define seg2vsid(ea) VSID121(ea)
123#endif
124
125/* Primary and secondary PTE hash functions */
126
127/* Compute the primary hash from a VSID and a PI */
128#define PTE_HASH1(vsid, pi) (((vsid)^(pi))&((1<<LD_HASH_SIZE)-1))
129
130/* Compute the secondary hash from a primary hash */
131#define PTE_HASH2(hash1) ((~(hash1))&((1<<LD_HASH_SIZE)-1))
132
133/* Extract the abbreviated page index (which is the
134 * part of the PI which does not go into the hash
135 * under all circumstances [10 bits to -> 6bit API])
136 */
137#define API(pi) ((pi)>>((LD_MIN_PT_SIZE)-(LD_PTEG_SIZE)))
138
139
140/* Horrible Macros */
141#ifdef __rtems__
142/* must not use printf until multitasking is up */
143typedef void (*PrintF) (char *, ...);
144static PrintF
145whatPrintf (void)
146{
147  return _Thread_Executing ? (PrintF) printf : printk;
148}
149
150#define PRINTF(args...) ((void)(whatPrintf())(args))
151#else
152#define PRINTF(args...) printf(args)
153#endif
154
155#ifdef DEBUG
156unsigned long triv121PgTblConsistency (Triv121PgTbl pt, int pass, int expect);
157
158static int consistencyPass = 0;
159#define CONSCHECK(expect) triv121PgTblConsistency(&pgTbl,consistencyPass++,(expect))
160#else
161#define CONSCHECK(expect) do {} while (0)
162#endif
163
164/**************************** TYPE DEFINITIONS ****************************/
165
166/* internal description of a trivial page table */
/* internal description of a trivial page table */
typedef struct Triv121PgTblRec_
{
  APte base;            /* start address of the hash table */
  unsigned long size;   /* table size in bytes (a power of two) */
  int active;           /* nonzero once triv121PgTblActivate() ran;
                         * changes then need TLB flushing */
} Triv121PgTblRec;
173
174
175/************************** FORWARD DECLARATIONS *************************/
176
177#ifdef DEBUG_EXC
178static void myhdl (BSP_Exception_frame * excPtr);
179#endif
180
181static void dumpPte (APte pte);
182
183#ifdef DEBUG
184static void
185dumpPteg (unsigned long vsid, unsigned long pi, unsigned long hash);
186#endif
187
188unsigned long
189triv121IsRangeMapped (long vsid, unsigned long start, unsigned long end);
190
191static void do_dssall ();
192
193/**************************** STATIC VARIABLES ****************************/
194
195/* dont malloc - we might have to use this before
196 * we have malloc or even RTEMS workspace available
197 */
198static Triv121PgTblRec pgTbl = { 0 };
199
200#ifdef DEBUG_EXC
201static void *ohdl;              /* keep a pointer to the original handler */
202#endif
203
204/*********************** INLINES & PRIVATE ROUTINES ***********************/
205
206/* compute the page table entry group (PTEG) of a hash */
207static inline APte
208ptegOf (Triv121PgTbl pt, unsigned long hash)
209{
210  hash &= ((1 << LD_HASH_SIZE) - 1);
211  return (APte) (((unsigned long) pt->
212                  base) | ((hash << LD_PTEG_SIZE) & (pt->size - 1)));
213}
214
215/* see if a vsid/pi combination is already mapped
216 *
217 * RETURNS: PTE of mapping / NULL if none exists
218 *
219 * NOTE: a vsid<0 is legal and will tell this
220 *       routine that 'pi' is actually an EA to
221 *       be split into vsid and pi...
222 */
223static APte
224alreadyMapped (Triv121PgTbl pt, long vsid, unsigned long pi)
225{
226  int i;
227  unsigned long hash, api;
228  APte pte;
229
230  if (!pt->size)
231    return 0;
232
233  if (TRIV121_121_VSID == vsid) {
234    vsid = VSID121 (pi);
235    pi = PI121 (pi);
236  } else if (TRIV121_SEG_VSID == vsid) {
237    vsid = seg2vsid (pi);
238    pi = PI121 (pi);
239  }
240
241  hash = PTE_HASH1 (vsid, pi);
242  api = API (pi);
243  for (i = 0, pte = ptegOf (pt, hash); i < PTE_PER_PTEG; i++, pte++)
244    if (pte->v && pte->vsid == vsid && pte->api == api && 0 == pte->h)
245      return pte;
246  /* try the secondary hash table */
247  hash = PTE_HASH2 (hash);
248  for (i = 0, pte = ptegOf (pt, hash); i < PTE_PER_PTEG; i++, pte++)
249    if (pte->v && pte->vsid == vsid && pte->api == api && 1 == pte->h)
250      return pte;
251  return 0;
252}
253
254/* find the first available slot for  vsid/pi
255 *
256 * NOTE: it is NOT legal to pass a vsid<0 / EA combination.
257 *
258 * RETURNS free slot with the 'marked' field set. The 'h'
259 *         field is set to 0 or one, depending on whether
260 *         the slot was allocated by using the primary or
261 *         the secondary hash, respectively.
262 */
/* Find and claim the first available slot for vsid/pi.
 *
 * NOTE: it is NOT legal to pass a vsid<0 / EA combination here
 *       (unlike alreadyMapped()).
 *
 * RETURNS: free slot with the 'marked' field set (claimed but not
 *          yet valid), or NULL if both PTEGs are full. The 'h'
 *          field of the returned slot records whether it was found
 *          via the primary (0) or secondary (1) hash.
 */
static APte
slotFor (Triv121PgTbl pt, unsigned long vsid, unsigned long pi)
{
  int i;
  unsigned long hash, api;
  APte pte;

  /* primary hash */
  hash = PTE_HASH1 (vsid, pi);
  api = API (pi);
  /* linear search thru all buckets for this hash */
  for (i = 0, pte = ptegOf (pt, hash); i < PTE_PER_PTEG; i++, pte++) {
    /* a slot is free only if it is neither valid nor claimed */
    if (!pte->v && !pte->marked) {
      /* found a free PTE; mark it as potentially used and return */
      pte->h = 0;               /* found by the primary hash fn */
      pte->marked = 1;
      return pte;
    }
  }

#ifdef DEBUG
  /* Strange: if the hash table was allocated big enough,
   *          this should not happen (when using a 1:1 mapping)
   *          Give them some information...
   */
  PRINTF ("## First hash bucket full - ");
  dumpPteg (vsid, pi, hash);
#endif

  /* primary PTEG full; retry with the secondary hash */
  hash = PTE_HASH2 (hash);
#ifdef DEBUG
  PRINTF ("   Secondary pteg is 0x%08x\n", (unsigned) ptegOf (pt, hash));
#endif
  for (i = 0, pte = ptegOf (pt, hash); i < PTE_PER_PTEG; i++, pte++) {
    if (!pte->v && !pte->marked) {
      /* mark this pte as potentially used */
      pte->marked = 1;
      pte->h = 1;
      return pte;
    }
  }
#ifdef DEBUG
  /* Even more strange - most likely, something is REALLY messed up */
  PRINTF ("## Second hash bucket full - ");
  dumpPteg (vsid, pi, hash);
#endif
  return 0;
}
311
312/* unmark all entries */
313static void
314unmarkAll (Triv121PgTbl pt)
315{
316  unsigned long n = pt->size / sizeof (PTERec);
317  unsigned long i;
318  APte pte;
319  for (i = 0, pte = pt->base; i < n; i++, pte++)
320    pte->marked = 0;
321
322}
323
324/* calculate the minimal size of a page/hash table
325 * to map a range of 'size' bytes in EA space.
326 *
327 * RETURNS: size in 'number of bits', i.e. the
328 *          integer part of LOGbase2(minsize)
329 *          is returned.
330 * NOTE:        G3/G4 machines need at least 16 bits
331 *          (64k).
332 */
333unsigned long
334triv121PgTblLdMinSize (unsigned long size)
335{
336  unsigned long i;
337  /* round 'size' up to the next page boundary */
338  size += (1 << LD_PG_SIZE) - 1;
339  size &= ~((1 << LD_PG_SIZE) - 1);
340  /* divide by number of PTEs  and multiply
341   * by the size of a PTE.
342   */
343  size >>= LD_PG_SIZE - LD_PTE_SIZE;
344  /* find the next power of 2 >= size */
345  for (i = 0; i < LD_PHYS_SIZE; i++) {
346    if ((1 << i) >= size)
347      break;
348  }
349  /* pop up to the allowed minimum, if necessary */
350  if (i < LD_MIN_PT_SIZE)
351    i = LD_MIN_PT_SIZE;
352  return i;
353}
354
355/* initialize a trivial page table of 2^ldSize bytes
356 * at 'base' in memory.
357 *
358 * RETURNS:     OPAQUE HANDLE (not the hash table address)
359 *          or NULL on failure.
360 */
Triv121PgTbl
triv121PgTblInit (unsigned long base, unsigned ldSize)
{
  /* only one (static) page table instance is supported */
  if (pgTbl.size) {
    /* already initialized */
    return 0;
  }

  if (ldSize < LD_MIN_PT_SIZE)
    return 0;                   /* too small */

  /* hardware requires the table to be aligned on its own size */
  if (base & ((1 << ldSize) - 1))
    return 0;                   /* misaligned */

  /* This was tested on 604r, 750 and 7400.
   * On other CPUs, verify that the TLB invalidation works
   * for a new CPU variant and that it has hardware PTE lookup/
   * TLB replacement before adding it to this list.
   *
   * NOTE: The 603 features no hardware PTE lookup - and
   *       hence the page tables should NOT be used.
   *               Although lookup could be implemented in
   *               software this is probably not desirable
   *               as it could have an impact on hard realtime
   *               performance, screwing deterministic latency!
   *               (Could still be useful for debugging, though)
   */
  if (   PPC_604 != current_ppc_cpu
      && PPC_604e != current_ppc_cpu
      && PPC_604r != current_ppc_cpu
      && PPC_750  != current_ppc_cpu
      && PPC_7400 != current_ppc_cpu
      && PPC_7455 != current_ppc_cpu
      && PPC_7457 != current_ppc_cpu
     )
    return 0;                   /* unsupported by this CPU */

  pgTbl.base = (APte) base;
  pgTbl.size = 1 << ldSize;
  /* clear all page table entries */
  memset (pgTbl.base, 0, pgTbl.size);

  CONSCHECK (0);

  /* map the page table itself 'm' and 'readonly';
   * triv121PgTblMap() returns -1 (TRIV121_MAP_SUCCESS) on success,
   * hence >= 0 indicates failure here.
   */
  if (triv121PgTblMap (&pgTbl,
                       TRIV121_121_VSID,
                       base,
                       (pgTbl.size >> LD_PG_SIZE),
                       TRIV121_ATTR_M, TRIV121_PP_RO_PAGE) >= 0)
    return 0;

  CONSCHECK ((pgTbl.size >> LD_PG_SIZE));

  return &pgTbl;
}
417
418/* return the handle of the (one and only) page table
419 * or NULL if none has been initialized yet.
420 */
421Triv121PgTbl
422triv121PgTblGet (void)
423{
424  return pgTbl.size ? &pgTbl : 0;
425}
426
427/* NOTE: this routine returns -1 on success;
428 *       on failure, the page table index for
429 *       which no PTE could be allocated is returned
430 *
431 * (Consult header about argument/return value
432 * description)
433 */
long
triv121PgTblMap (Triv121PgTbl pt,
                 long ovsid,
                 unsigned long start,
                 unsigned long numPages,
                 unsigned attributes, unsigned protection)
{
  int i, pass;
  unsigned long pi;
  APte pte;
  long vsid;
#ifdef DEBUG
  long saved_vsid = ovsid;
#endif

  /* resolve the special 'vsid' values into a real VSID */
  if (TRIV121_121_VSID == ovsid) {
    /* use 1:1 mapping */
    ovsid = VSID121 (start);
  } else if (TRIV121_SEG_VSID == ovsid) {
    ovsid = seg2vsid (start);
  }

#ifdef DEBUG
  PRINTF ("Mapping %i (0x%x) pages at 0x%08x for VSID 0x%08x\n",
          (unsigned) numPages, (unsigned) numPages,
          (unsigned) start, (unsigned) ovsid);
#endif

  /* map in two passes. During the first pass, we try
   * to claim entries as needed. The 'slotFor()' routine
   * will 'mark' the claimed entries without 'valid'ating
   * them.
   * If the mapping fails, all claimed entries are unmarked
   * and we return the PI for which allocation failed.
   *
   * Once we know that the allocation would succeed, we
   * do a second pass; during the second pass, the PTE
   * is actually written.
   *
   */
  for (pass = 0; pass < 2; pass++) {
    /* check if we would succeed during the first pass */
    for (i = 0, pi = PI121 (start), vsid = ovsid; i < numPages; i++, pi++) {
      /* crossed a segment boundary: advance to the next VSID */
      if (pi >= 1 << LD_PI_SIZE) {
        vsid++;
        pi = 0;
      }
      /* leave alone existing mappings for this EA */
      if (!alreadyMapped (pt, vsid, pi)) {
        if (!(pte = slotFor (pt, vsid, pi))) {
          /* no free slot found for page index 'pi' */
          unmarkAll (pt);
          return pi;
        } else {
          /* have a free slot; marked by slotFor() */
          if (pass) {
            /* second pass; do the real work */
            pte->vsid = vsid;
            /* H was set by slotFor() */
            pte->api = API (pi);
            /* set up 1:1 mapping */
            pte->rpn =
              ((((unsigned long) vsid) &
                ((1 << (LD_PHYS_SIZE - LD_SEG_SIZE)) -
                 1)) << LD_PI_SIZE) | pi;
            pte->wimg = attributes & 0xf;
            pte->pp = protection & 0x3;
            /* mark it valid */
            pte->marked = 0;
            if (pt->active) {
              /* table is live: the MMU may look at this PTE at any
               * time, so force ordering of the 'v' bit update.
               */
              uint32_t flags;
              rtems_interrupt_disable (flags);
              /* order setting 'v' after writing everything else */
              asm volatile ("eieio"::"m"(*pte));
              pte->v = 1;
              asm volatile ("sync"::"m"(*pte));
              rtems_interrupt_enable (flags);
            } else {
              pte->v = 1;
            }

#ifdef DEBUG
            /* add paranoia */
            assert (alreadyMapped (pt, vsid, pi) == pte);
#endif
          }
        }
      }
    }
    /* drop all 'marked' claims; after the second pass the entries
     * are valid and no longer need the claim bit.
     */
    unmarkAll (pt);
  }
#ifdef DEBUG
  {
    unsigned long failedat;
    CONSCHECK (-1);
    /* double check that the requested range is mapped */
    failedat =
      triv121IsRangeMapped (saved_vsid, start,
                            start + (1 << LD_PG_SIZE) * numPages);
    if (0x0C0C != failedat) {
      PRINTF ("triv121 mapping failed at 0x%08x\n", (unsigned) failedat);
      return PI121 (failedat);
    }
  }
#endif
  return TRIV121_MAP_SUCCESS;   /* -1 !! */
}
541
542unsigned long
543triv121PgTblSDR1 (Triv121PgTbl pt)
544{
545  return (((unsigned long) pt->base) & ~((1 << LD_MIN_PT_SIZE) - 1)) |
546    (((pt->size - 1) >> LD_MIN_PT_SIZE) &
547     ((1 << (LD_HASH_SIZE - (LD_MIN_PT_SIZE - LD_PTEG_SIZE))) - 1)
548    );
549}
550
void
triv121PgTblActivate (Triv121PgTbl pt)
{
#ifndef DEBUG_MAIN
  unsigned long          sdr1 = triv121PgTblSDR1 (pt);
  register unsigned long tmp0 = 16;     /* initial counter value (#segment regs) */
  register unsigned long tmp1 = (KEY_USR | KEY_SUP);
  register unsigned long tmp2 = (MSR_EE | MSR_IR | MSR_DR);
#endif
  /* flag the table as live so subsequent PTE updates use the
   * proper synchronization sequences
   */
  pt->active = 1;

#ifndef DEBUG_MAIN
#ifdef DEBUG_EXC
  /* install our exception handler */
  ohdl = globalExceptHdl;
  globalExceptHdl = myhdl;
  __asm__ __volatile__ ("sync"::"memory");
#endif

  /* This section of assembly code takes care of the
   * following:
   * - get MSR and switch interrupts + MMU off
   *
   * - load up the segment registers with a
   *   1:1 effective <-> virtual mapping;
   *   give user & supervisor keys
   *
   * - flush all TLBs;
   *   NOTE: the TLB flushing code is probably
   *         CPU dependent!
   *
   * - setup SDR1
   *
   * - restore original MSR
   */
  __asm__ __volatile (
    "   mtctr   %[tmp0]\n"
    /* Get MSR and switch interrupts off - just in case.
     * Also switch the MMU off; the book
     * says that SDR1 must not be changed with either
     * MSR_IR or MSR_DR set. I would guess that it could
     * be safe as long as the IBAT & DBAT mappings override
     * the page table...
     */
    "   mfmsr   %[tmp0]\n"
    "   andc    %[tmp2], %[tmp0], %[tmp2]\n"
    "   mtmsr   %[tmp2]\n"
    "   isync   \n"
    /* set up the segment registers */
    "   li              %[tmp2], 0\n"
    "1: mtsrin  %[tmp1], %[tmp2]\n"
    "   addis   %[tmp2], %[tmp2], 0x1000\n" /* address next SR */
    "   addi    %[tmp1], %[tmp1], 1\n"      /* increment VSID  */
    "   bdnz    1b\n"
    /* Now flush all TLBs, starting with the topmost index */
    "   lis             %[tmp2], %[ea_range]@h\n"
    "2: addic.  %[tmp2], %[tmp2], -%[pg_sz]\n"    /* address the next one (decrementing) */
    "   tlbie   %[tmp2]\n"             /* invalidate & repeat */
    "   bgt             2b\n"
    "   eieio   \n"
    "   tlbsync \n"
    "   sync    \n"
    /* set up SDR1 */
    "   mtspr   %[sdr1], %[sdr1val]\n"
    /* restore original MSR  */
    "   mtmsr   %[tmp0]\n"
    "   isync   \n"
      :[tmp0]"+r&"(tmp0), [tmp1]"+b&"(tmp1), [tmp2]"+b&"(tmp2)
      :[ea_range]"i"(FLUSH_EA_RANGE), [pg_sz]"i" (1 << LD_PG_SIZE),
       [sdr1]"i"(SDR1), [sdr1val]"r" (sdr1)
      :"ctr", "cc", "memory"
  );

  /* At this point, BAT0 is probably still active; it's the
   * caller's job to deactivate it...
   */
#endif
}
629
630/**************************  DEBUGGING ROUTINES  *************************/
631
632/* Exception handler to catch page faults */
633#ifdef DEBUG_EXC
634
635#define BAT_VALID_BOTH  3       /* allow user + super access */
636
/* Debug exception handler: catches DSI (data access) exceptions,
 * re-enables DBAT0 so RAM is reachable again, prints diagnostics
 * and then forwards a 'recoverable' frame to the original handler.
 */
static void
myhdl (BSP_Exception_frame * excPtr)
{
  if (3 == excPtr->_EXC_number) {
    unsigned long dsisr;

    /* reactivate DBAT0 and read DSISR */
    /* NOTE(review): the 'ori ..., 3' immediate hardcodes
     * BAT_VALID_BOTH even though %2/%3 operands are passed -
     * presumably intentional; confirm before touching.
     */
    __asm__ __volatile__ (
      "mfspr %0, %1   \n"
      "ori   %0, %0, 3\n"
      "mtspr %1, %0   \n"
      "sync\n"
      "mfspr %0, %2\n"
        :"=&r" (dsisr)
        :"i" (DBAT0U), "i" (DSISR), "i" (BAT_VALID_BOTH)
    );

    printk ("Data Access Exception (DSI) # 3\n");
    printk ("Reactivated DBAT0 mapping\n");


    printk ("DSISR 0x%08x\n", dsisr);

    printk ("revectoring to prevent default handler panic().\n");
    printk ("NOTE: exception number %i below is BOGUS\n", ASM_DEC_VECTOR);
    /* make this exception 'recoverable' for
     * the default handler by faking a decrementer
     * exception.
     * Note that the default handler's message will be
     * wrong about the exception number.
     */
    excPtr->_EXC_number = ASM_DEC_VECTOR;
  }
/* now call the original handler */
  ((void (*)()) ohdl) (excPtr);
}
674
675
676
677/* test the consistency of the page table
678 *
679 * 'pass' is merely a number which will be printed
680 * by this routine, so the caller may give some
681 * context information.
682 *
683 * 'expected' is the number of valid (plus 'marked')
684 * entries the caller believes the page table should
685 * have. This routine complains if its count differs.
686 *
687 * It basically verifies that the topmost 20bits
688 * of all VSIDs as well as the unused bits are all
689 * zero. Then it counts all valid and all 'marked'
690 * entries, adding them up and comparing them to the
691 * 'expected' number of occupied slots.
692 *
693 * RETURNS: total number of valid plus 'marked' slots.
694 */
unsigned long
triv121PgTblConsistency (Triv121PgTbl pt, int pass, int expected)
{
  APte pte;
  int i;
  unsigned v, m;                /* counts of valid / marked entries */
  int warn = 0;
  int errs = 0;
  static int maxw = 20;         /* mute after detecting this many errors */

  PRINTF ("Checking page table at 0x%08x (size %i==0x%x)\n",
          (unsigned) pt->base, (unsigned) pt->size, (unsigned) pt->size);

  if (!pt->base || !pt->size) {
    PRINTF ("Uninitialized Page Table!\n");
    return 0;
  }

  v = m = 0;
#if 1
  /* 10/9/2002: I had machine checks crashing after this loop
   *            terminated. Maybe caused by speculative loads
   *            from beyond the valid memory area (since the
   *            page hash table sits at the top of physical
   *            memory).
   *            Very bizarre - the other loops in this file
   *            seem to be fine. Maybe there is a compiler bug??
   *            For the moment, I let the loop run backwards...
   *
   *                        Also see the comment a couple of lines down.
   */
  for (i = pt->size / sizeof (PTERec) - 1, pte = pt->base + i; i >= 0;
       i--, pte--)
#else
  for (i = 0, pte = pt->base; i < pt->size / sizeof (PTERec); i++, pte++)
#endif
  {
    int err = 0;                /* 1 == hard error, 2 == warning */
    char buf[500];
    unsigned long *lp = (unsigned long *) pte;
#if 0
    /* If I put this bogus while statement here (the body is
     * never reached), the original loop works OK
     */
    while (pte >= pt->base + pt->size / sizeof (PTERec))
      /* never reached */ ;
#endif

    if ( /* T.S: allow any VSID... (*lp & (0xfffff0 << 7)) || */ (*(lp + 1) & 0xe00)
        || (pte->v && pte->marked)) {
      /* check for vsid (without segment bits) == 0, unused bits == 0, valid && marked */
      sprintf (buf, "unused bits or v && m");
      err = 1;
    } else {
      /* entry is structurally OK; a non-1:1 VSID only rates a warning */
      if ( (*lp & (0xfffff0 << 7)) ) {
        sprintf(buf,"(warning) non-1:1 VSID found");
        err = 2;
      }
      if (pte->v)
        v++;
      if (pte->marked)
        m++;
    }
    if (err && maxw) {
      PRINTF
        ("Pass %i -- strange PTE at 0x%08x found for page index %i == 0x%08x:\n",
         pass, (unsigned) pte, i, i);
      PRINTF ("Reason: %s\n", buf);
      dumpPte (pte);
      if ( err & 2 ) {
         warn++;
      } else {
         errs++;
      }
      maxw--;
    }
  }
  if (errs) {
    PRINTF ("%i errors %s", errs, warn ? "and ":"");
  }
  if (warn) {
    PRINTF ("%i warnings ",warn);
  }
  if (errs || warn) {
    PRINTF ("found; currently %i entries marked, %i are valid\n",
            m, v);
  }
  /* total occupied slots = valid + marked */
  v += m;
  if (maxw && expected >= 0 && expected != v) {
    /* number of occupied slots not what they expected */
    PRINTF ("Wrong # of occupied slots detected during pass");
    PRINTF ("%i; should be %i (0x%x) is %i (0x%x)\n",
            pass, expected, (unsigned) expected, v, (unsigned) v);
    maxw--;
  }
  return v;
}
792
793/* Find the PTE for a EA and print its contents
794 * RETURNS: pte for EA or NULL if no entry was found.
795 */
796APte
797triv121DumpEa (unsigned long ea)
798{
799  APte pte;
800
801  pte =
802    alreadyMapped (&pgTbl, pgTbl.active ? TRIV121_SEG_VSID : TRIV121_121_VSID,
803                   ea);
804
805  if (pte)
806    dumpPte (pte);
807  return pte;
808}
809
810APte
811triv121FindPte (unsigned long vsid, unsigned long pi)
812{
813  return alreadyMapped (&pgTbl, vsid, pi);
814}
815
/* Invalidate the PTE mapping an EA; if the table is already
 * active, the corresponding TLB entry is flushed as well.
 * RETURNS: the (now invalid) PTE, or NULL if 'ea' was not mapped.
 */
APte
triv121UnmapEa (unsigned long ea)
{
  uint32_t flags;
  APte pte;

  if (!pgTbl.active) {
    /* MMU not using this table yet: a plain bit-clear suffices */
    pte = alreadyMapped (&pgTbl, TRIV121_121_VSID, ea);
    if (pte)                    /* alreadyMapped checks for pte->v */
      pte->v = 0;
    return pte;
  }

  pte = alreadyMapped (&pgTbl, TRIV121_SEG_VSID, ea);

  if (!pte)
    return 0;

  /* invalidate the PTE first, then flush the TLB entry; the
   * sync/tlbie/eieio/tlbsync/sync sequence orders the updates
   * against other CPUs' table walks
   */
  rtems_interrupt_disable (flags);
  pte->v = 0;
  do_dssall ();
  asm volatile ("       sync            \n\t"
                "       tlbie %0        \n\t"
                "       eieio           \n\t"
                "       tlbsync         \n\t"
                "       sync            \n\t"::"r" (ea):"memory");
  rtems_interrupt_enable (flags);
  return pte;
}
845
846/* A context synchronizing jump */
847#define SYNC_LONGJMP(msr)                               \
848        asm volatile(                                           \
849                "       mtsrr1  %0                      \n\t"   \
850                "       bl              1f                      \n\t"   \
851                "1:     mflr    3                       \n\t"   \
852                "       addi    3,3,1f-1b       \n\t"   \
853                "       mtsrr0  3                       \n\t"   \
854                "       rfi                                     \n\t"   \
855                "1:                                             \n\t"   \
856                :                                                               \
857                :"r"(msr)                                               \
858                :"3","lr","memory")
859
860/* The book doesn't mention dssall when changing PTEs
861 * but they require it for BAT changes and I guess
862 * it makes sense in the case of PTEs as well.
863 * Just do it to be on the safe side...
864 */
865static void
866do_dssall ()
867{
868  /* Before changing BATs, 'dssall' must be issued.
869   * We check MSR for MSR_VE and issue a 'dssall' if
870   * MSR_VE is set hoping that
871   *  a) on non-altivec CPUs MSR_VE reads as zero
872   *  b) all altivec CPUs use the same bit
873   */
874  if (_read_MSR () & MSR_VE) {
875    /* this construct is needed because we don't know
876     * if this file is compiled with -maltivec.
877     * (I plan to add altivec support outside of
878     * RTEMS core and hence I'd rather not
879     * rely on consistent compiler flags).
880     */
881#define DSSALL  0x7e00066c      /* dssall opcode */
882    asm volatile ("     .long %0"::"i" (DSSALL));
883#undef  DSSALL
884  }
885}
886
/* Change the WIMG attributes and/or PP protection of the page
 * mapping 'ea'. Negative 'wimg'/'pp' values leave the respective
 * field unchanged. If the table is active this requires briefly
 * switching the MMU and interrupts off.
 * RETURNS: the affected PTE, or NULL if 'ea' is not mapped.
 */
APte
triv121ChangeEaAttributes (unsigned long ea, int wimg, int pp)
{
  APte pte;
  unsigned long msr;

  if (!pgTbl.active) {
    /* table not live yet: fields can be poked directly */
    pte = alreadyMapped (&pgTbl, TRIV121_121_VSID, ea);
    if (!pte)
      return 0;
    if (wimg > 0)
      pte->wimg = wimg;
    if (pp > 0)
      pte->pp = pp;
    return pte;
  }

  pte = alreadyMapped (&pgTbl, TRIV121_SEG_VSID, ea);

  if (!pte)
    return 0;

  /* nothing to change */
  if (wimg < 0 && pp < 0)
    return pte;

  asm volatile ("mfmsr %0":"=r" (msr));

  /* switch MMU and IRQs off */
  SYNC_LONGJMP (msr & ~(MSR_EE | MSR_DR | MSR_IR));

  /* invalidate the PTE while updating it, then flush the stale
   * TLB entry before re-validating
   */
  pte->v = 0;
  do_dssall ();
  asm volatile ("sync":::"memory");
  if (wimg >= 0)
    pte->wimg = wimg;
  if (pp >= 0)
    pte->pp = pp;
  asm volatile ("tlbie %0; eieio"::"r" (ea):"memory");
  pte->v = 1;
  asm volatile ("tlbsync; sync":::"memory");

  /* restore, i.e., switch MMU and IRQs back on */
  SYNC_LONGJMP (msr);

  return pte;
}
933
934static void
935pgtblChangePP (Triv121PgTbl pt, int pp)
936{
937  unsigned long n = pt->size >> LD_PG_SIZE;
938  unsigned long b, i;
939
940  for (i = 0, b = (unsigned long) pt->base; i < n;
941       i++, b += (1 << LD_PG_SIZE)) {
942    triv121ChangeEaAttributes (b, -1, pp);
943  }
944}
945
946void
947triv121MakePgTblRW ()
948{
949  pgtblChangePP (&pgTbl, TRIV121_PP_RW_PAGE);
950}
951
952void
953triv121MakePgTblRO ()
954{
955  pgtblChangePP (&pgTbl, TRIV121_PP_RO_PAGE);
956}
957
958long
959triv121DumpPte (APte pte)
960{
961  if (pte)
962    dumpPte (pte);
963  return 0;
964}
965
966
967#ifdef DEBUG
968/* Dump an entire PTEG */
969
970static void
971dumpPteg (unsigned long vsid, unsigned long pi, unsigned long hash)
972{
973  APte pte = ptegOf (&pgTbl, hash);
974  int i;
975  PRINTF ("hash 0x%08x, pteg 0x%08x (vsid 0x%08x, pi 0x%08x)\n",
976          (unsigned) hash, (unsigned) pte, (unsigned) vsid, (unsigned) pi);
977  for (i = 0; i < PTE_PER_PTEG; i++, pte++) {
978    PRINTF ("pte 0x%08x is 0x%08x : 0x%08x\n",
979            (unsigned) pte,
980            (unsigned) *(unsigned long *) pte,
981            (unsigned) *(((unsigned long *) pte) + 1));
982  }
983}
984#endif
985
986/* Verify that a range of addresses is mapped the page table.
987 * start/end are segment offsets or EAs (if vsid has one of
988 * the special values), respectively.
989 *
990 * RETURNS: address of the first page for which no
991 *          PTE was found (i.e. page index * page size)
992 *         
993 *          ON SUCCESS, the special value 0x0C0C ("OKOK")
994 *          [which is not page aligned and hence is not
995 *          a valid page address].
996 */
997
998unsigned long
999triv121IsRangeMapped (long vsid, unsigned long start, unsigned long end)
1000{
1001unsigned pi;
1002
1003  start &= ~((1 << LD_PG_SIZE) - 1);
1004  while (start < end) {
1005    if ( TRIV121_SEG_VSID != vsid && TRIV121_121_VSID != vsid )
1006      pi = PI121(start);
1007    else
1008      pi = start;
1009    if (!alreadyMapped (&pgTbl, vsid, pi))
1010      return start;
1011    start += 1 << LD_PG_SIZE;
1012  }
1013  return 0x0C0C;                /* OKOK - not on a page boundary */
1014}
1015
1016
1017#include <stdlib.h>
1018
1019/* print a PTE */
1020static void
1021dumpPte (APte pte)
1022{
1023  if (0 == ((unsigned long) pte & ((1 << LD_PTEG_SIZE) - 1)))
1024    PRINTF ("PTEG--");
1025  else
1026    PRINTF ("......");
1027  if (pte->v) {
1028    PRINTF ("VSID: 0x%08x H:%1i API: 0x%02x\n", pte->vsid, pte->h, pte->api);
1029    PRINTF ("      ");
1030    PRINTF ("RPN:  0x%08x WIMG: 0x%1x, (m %1i), pp: 0x%1x\n",
1031            pte->rpn, pte->wimg, pte->marked, pte->pp);
1032  } else {
1033    PRINTF ("xxxxxx\n");
1034    PRINTF ("      ");
1035    PRINTF ("xxxxxx\n");
1036  }
1037}
1038
1039
1040/* dump page table entries from index 'from' to 'to'
1041 * The special values (unsigned)-1 are allowed which
1042 * cause the routine to dump the entire table.
1043 *
1044 * RETURNS 0
1045 */
1046int
1047triv121PgTblDump (Triv121PgTbl pt, unsigned from, unsigned to)
1048{
1049  int i;
1050  APte pte;
1051  PRINTF ("Dumping PT [size 0x%08x == %i] at 0x%08x\n",
1052          (unsigned) pt->size, (unsigned) pt->size, (unsigned) pt->base);
1053  if (from > pt->size >> LD_PTE_SIZE)
1054    from = 0;
1055  if (to > pt->size >> LD_PTE_SIZE)
1056    to = (pt->size >> LD_PTE_SIZE);
1057  for (i = from, pte = pt->base + from; i < (long) to; i++, pte++) {
1058    dumpPte (pte);
1059  }
1060  return 0;
1061}
1062
1063
1064#if defined(DEBUG_MAIN)
1065
1066#define LD_DBG_PT_SIZE  LD_MIN_PT_SIZE
1067
1068int
1069main (int argc, char **argv)
1070{
1071  unsigned long base, start, numPages;
1072  unsigned long size = 1 << LD_DBG_PT_SIZE;
1073  Triv121PgTbl pt;
1074
1075  base = (unsigned long) malloc (size << 1);
1076
1077  assert (base);
1078
1079  /* align pt */
1080  base += size - 1;
1081  base &= ~(size - 1);
1082
1083  assert (pt = triv121PgTblInit (base, LD_DBG_PT_SIZE));
1084
1085  triv121PgTblDump (pt, (unsigned) -1, (unsigned) -1);
1086  do {
1087    do {
1088      PRINTF ("Start Address:");
1089      fflush (stdout);
1090    } while (1 != scanf ("%i", &start));
1091    do {
1092      PRINTF ("# pages:");
1093      fflush (stdout);
1094    } while (1 != scanf ("%i", &numPages));
1095  } while (TRIV121_MAP_SUCCESS ==
1096           triv121PgTblMap (pt, TRIV121_121_VSID, start, numPages,
1097                            TRIV121_ATTR_IO_PAGE, 2)
1098           && 0 == triv121PgTblDump (pt, (unsigned) -1, (unsigned) -1));
1099}
1100#endif
Note: See TracBrowser for help on using the repository browser.