source: rtems/c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.c @ 359e537

1/* $Id$ */
2
3/* Trivial page table setup for RTEMS
4 * Purpose: allow write protection of text/ro-data
5 */
6
7/*
8 * Authorship
9 * ----------
10 * This software was created by
11 *     Till Straumann <strauman@slac.stanford.edu>, 4/2002, 2003, 2004,
12 *         Stanford Linear Accelerator Center, Stanford University.
13 *
14 * Acknowledgement of sponsorship
15 * ------------------------------
16 * This software was produced by
17 *     the Stanford Linear Accelerator Center, Stanford University,
18 *         under Contract DE-AC03-76SFO0515 with the Department of Energy.
19 *
20 * Government disclaimer of liability
21 * ----------------------------------
22 * Neither the United States nor the United States Department of Energy,
23 * nor any of their employees, makes any warranty, express or implied, or
24 * assumes any legal liability or responsibility for the accuracy,
25 * completeness, or usefulness of any data, apparatus, product, or process
26 * disclosed, or represents that its use would not infringe privately owned
27 * rights.
28 *
29 * Stanford disclaimer of liability
30 * --------------------------------
31 * Stanford University makes no representations or warranties, express or
32 * implied, nor assumes any liability for the use of this software.
33 *
34 * Stanford disclaimer of copyright
35 * --------------------------------
36 * Stanford University, owner of the copyright, hereby disclaims its
37 * copyright and all other rights in this software.  Hence, anyone may
38 * freely use it for any purpose without restriction.
39 *
40 * Maintenance of notices
41 * ----------------------
42 * In the interest of clarity regarding the origin and status of this
43 * SLAC software, this and all the preceding Stanford University notices
44 * are to remain affixed to any copy or derivative of this software made
45 * or distributed by the recipient and are to be affixed to any copy of
46 * software made or distributed by the recipient that contains a copy or
47 * derivative of this software.
48 *
49 * ------------------ SLAC Software Notices, Set 4 OTT.002a, 2004 FEB 03
50 */
51
52/* Choose debugging options */
53#undef  DEBUG_MAIN              /* create a standalone (host) program for basic testing */
54#undef  DEBUG                   /* target debugging and consistency checking */
55#undef  DEBUG_EXC               /* add exception handler which reenables BAT0 and recovers from a page fault */
56
57#ifdef  DEBUG_MAIN
58#undef  DEBUG                   /* must not use these together with DEBUG_MAIN */
59#undef  DEBUG_EXC
60#endif
61
62/***************************** INCLUDE HEADERS ****************************/
63
64#ifndef DEBUG_MAIN
65#include <rtems.h>
66#include <rtems/bspIo.h>
67#include <libcpu/cpuIdent.h>
68#include <libcpu/spr.h>
69#ifdef  DEBUG_EXC
70#include <bsp.h>
71#include <bsp/vectors.h>
72#endif
73#endif
74
75#include <stdio.h>
76#include <assert.h>
77#include <string.h>
78
79#include "pte121.h"
80
81/************************** CONSTANT DEFINITIONS **************************/
82
83/* Base 2 logs of some sizes */
84
85#ifndef DEBUG_MAIN
86
87#define LD_PHYS_SIZE    32      /* physical address space */
88#define LD_PG_SIZE              12      /* page size */
89#define LD_PTEG_SIZE    6       /* PTEG size */
90#define LD_PTE_SIZE             3       /* PTE size  */
91#define LD_SEG_SIZE             28      /* segment size */
92#define LD_MIN_PT_SIZE  16      /* minimal size of a page table */
93#define LD_HASH_SIZE    19      /* length of a hash */
94#define LD_VSID_SIZE    24      /* vsid bits in seg. register */
95
96#else /* DEBUG_MAIN */
97
98/* Reduced 'fantasy' sizes for testing */
99#define LD_PHYS_SIZE    32      /* physical address space */
100#define LD_PG_SIZE              6       /* page size */
101#define LD_PTEG_SIZE    5       /* PTEG size */
102#define LD_PTE_SIZE             3       /* PTE size  */
103#define LD_SEG_SIZE             28      /* segment size */
104#define LD_MIN_PT_SIZE  7       /* minimal size of a page table */
105#define LD_HASH_SIZE    19      /* length of a hash */
106
107#endif /* DEBUG_MAIN */
108
109/* Derived sizes */
110
111/* Size of a page index */
112#define LD_PI_SIZE              ((LD_SEG_SIZE) - (LD_PG_SIZE))
113
114/* Number of PTEs in a PTEG */
115#define PTE_PER_PTEG    (1<<((LD_PTEG_SIZE)-(LD_PTE_SIZE)))
116
117/* Segment register bits */
118#define KEY_SUP                 (1<<30) /* supervisor mode key */
119#define KEY_USR                 (1<<29) /* user mode key */
120
121/* The range of effective addresses to scan with 'tlbie'
122 * instructions in order to flush all TLBs.
123 * On the 750 and 7400, there are 128 two way I and D TLBs,
124 * indexed by EA[14:19]. Hence calling
125 *   tlbie rx
126 * where rx scans 0x00000, 0x01000, 0x02000, ... 0x3f000
127 * is sufficient to do the job
128 */
129#define NUM_TLB_PER_WAY 64      /* 750 and 7400 have 128 two way TLBs */
130#define FLUSH_EA_RANGE  (NUM_TLB_PER_WAY<<LD_PG_SIZE)
131
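/* Illustrative sketch (not part of the original code) of what the 'tlbie'
 * scan described above amounts to; the real flush is done in inline
 * assembly in triv121PgTblActivate() below:
 *
 *   unsigned long ea;
 *   for (ea = 0; ea < FLUSH_EA_RANGE; ea += (1 << LD_PG_SIZE))
 *     asm volatile ("tlbie %0"::"r" (ea):"memory");
 *   asm volatile ("eieio; tlbsync; sync":::"memory");
 */
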
132/*************************** MACRO DEFINITIONS ****************************/
133
134/* Macros to split a (32bit) 'effective' address into
135 * VSID (virtual segment id) and PI (page index)
136 * using a 1:1 mapping of 'effective' to 'virtual'
137 * addresses.
138 *
139 * For 32-bit addresses this looks as follows
140 * (each 'x' or '0' stands for a nibble [4 bits]):
141 *
142 *         32bit effective address (EA)
143 *
144 *              x x x x x x x x
145 *               |       |
146 *    0 0 0 0 0 x|x x x x|x x x
147 *       VSID    |  PI   |  PO (page offset)
148 *               |       |
149 */
150/* 1:1 VSID of an EA  */
151#define VSID121(ea) (((ea)>>LD_SEG_SIZE) & ((1<<(LD_PHYS_SIZE-LD_SEG_SIZE))-1))
152/* page index of an EA */
153#define PI121(ea)       (((ea)>>LD_PG_SIZE) & ((1<<LD_PI_SIZE)-1))
154
155/* read VSID from segment register */
156#ifndef DEBUG_MAIN
157static uint32_t
158seg2vsid (uint32_t ea)
159{
160  asm volatile ("mfsrin %0, %0":"=r" (ea):"0" (ea));
161  return ea & ((1 << LD_VSID_SIZE) - 1);
162}
163#else
164#define seg2vsid(ea) VSID121(ea)
165#endif
166
167/* Primary and secondary PTE hash functions */
168
169/* Compute the primary hash from a VSID and a PI */
170#define PTE_HASH1(vsid, pi) (((vsid)^(pi))&((1<<LD_HASH_SIZE)-1))
171
172/* Compute the secondary hash from a primary hash */
173#define PTE_HASH2(hash1) ((~(hash1))&((1<<LD_HASH_SIZE)-1))
174
175/* Extract the abbreviated page index (API), i.e. the
176 * part of the PI which does not go into the hash
177 * under all circumstances [16-bit PI -> 6-bit API]
178 */
179#define API(pi) ((pi)>>((LD_MIN_PT_SIZE)-(LD_PTEG_SIZE)))
180
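/* Worked example (illustrative only, using the standard sizes above):
 * for the effective address ea = 0x1234ABCD with the 1:1 mapping,
 *
 *   VSID121(ea) = 0x00000001    (top 4 bits of the EA)
 *   PI121(ea)   = 0x234A        (16-bit page index; page offset 0xBCD)
 *   API(0x234A) = 0x08          (upper 6 bits of the PI)
 *
 * and the two candidate PTEGs are selected by
 *
 *   PTE_HASH1(0x1, 0x234A) = 0x0234B
 *   PTE_HASH2(0x0234B)     = 0x7DCB4
 */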
181
182/* Horrible Macros */
183#ifdef __rtems__
184/* must not use printf until multitasking is up */
185typedef void (*PrintF) (const char *, ...);
186static PrintF
187whatPrintf (void)
188{
189  return _Thread_Executing ? (PrintF) printf : printk;
190}
191
192#define PRINTF(args...) ((void)(whatPrintf())(args))
193#else
194#define PRINTF(args...) printf(args)
195#endif
196
197#ifdef DEBUG
198unsigned long triv121PgTblConsistency (Triv121PgTbl pt, int pass, int expect);
199
200static int consistencyPass = 0;
201#define CONSCHECK(expect) triv121PgTblConsistency(&pgTbl,consistencyPass++,(expect))
202#else
203#define CONSCHECK(expect) do {} while (0)
204#endif
205
206/**************************** TYPE DEFINITIONS ****************************/
207
208/* internal description of a trivial page table */
209typedef struct Triv121PgTblRec_
210{
211  APte base;
212  unsigned long size;
213  int active;
214} Triv121PgTblRec;
215
216
217/************************** FORWARD DECLARATIONS *************************/
218
219#ifdef DEBUG_EXC
220static void myhdl (BSP_Exception_frame * excPtr);
221#endif
222
223static void dumpPte (APte pte);
224
225#ifdef DEBUG
226static void
227dumpPteg (unsigned long vsid, unsigned long pi, unsigned long hash);
228#endif
229
230unsigned long
231triv121IsRangeMapped (long vsid, unsigned long start, unsigned long end);
232
233static void do_dssall (void);
234
235/**************************** STATIC VARIABLES ****************************/
236
237/* don't malloc - we might have to use this before
238 * malloc or even the RTEMS workspace is available
239 */
240static Triv121PgTblRec pgTbl = { 0 };
241
242#ifdef DEBUG_EXC
243static void *ohdl;              /* keep a pointer to the original handler */
244#endif
245
246/*********************** INLINES & PRIVATE ROUTINES ***********************/
247
248/* compute the page table entry group (PTEG) of a hash */
249static inline APte
250ptegOf (Triv121PgTbl pt, unsigned long hash)
251{
252  hash &= ((1 << LD_HASH_SIZE) - 1);
253  return (APte) (((unsigned long) pt->
254                  base) | ((hash << LD_PTEG_SIZE) & (pt->size - 1)));
255}
256
257/* see if a vsid/pi combination is already mapped
258 *
259 * RETURNS: PTE of mapping / NULL if none exists
260 *
261 * NOTE: a vsid<0 is legal and will tell this
262 *       routine that 'pi' is actually an EA to
263 *       be split into vsid and pi...
264 */
265static APte
266alreadyMapped (Triv121PgTbl pt, long vsid, unsigned long pi)
267{
268  int i;
269  unsigned long hash, api;
270  APte pte;
271
272  if (!pt->size)
273    return 0;
274
275  if (TRIV121_121_VSID == vsid) {
276    vsid = VSID121 (pi);
277    pi = PI121 (pi);
278  } else if (TRIV121_SEG_VSID == vsid) {
279    vsid = seg2vsid (pi);
280    pi = PI121 (pi);
281  }
282
283  hash = PTE_HASH1 (vsid, pi);
284  api = API (pi);
285  for (i = 0, pte = ptegOf (pt, hash); i < PTE_PER_PTEG; i++, pte++)
286    if (pte->v && pte->vsid == vsid && pte->api == api && 0 == pte->h)
287      return pte;
288  /* try the secondary hash table */
289  hash = PTE_HASH2 (hash);
290  for (i = 0, pte = ptegOf (pt, hash); i < PTE_PER_PTEG; i++, pte++)
291    if (pte->v && pte->vsid == vsid && pte->api == api && 1 == pte->h)
292      return pte;
293  return 0;
294}
295
296/* find the first available slot for  vsid/pi
297 *
298 * NOTE: it is NOT legal to pass a vsid<0 / EA combination.
299 *
300 * RETURNS: a free slot with the 'marked' field set. The 'h'
301 *         field is set to 0 or 1, depending on whether
302 *         the slot was allocated by using the primary or
303 *         the secondary hash, respectively.
304 */
305static APte
306slotFor (Triv121PgTbl pt, unsigned long vsid, unsigned long pi)
307{
308  int i;
309  unsigned long hash, api;
310  APte pte;
311
312  /* primary hash */
313  hash = PTE_HASH1 (vsid, pi);
314  api = API (pi);
315  /* linear search through all buckets for this hash */
316  for (i = 0, pte = ptegOf (pt, hash); i < PTE_PER_PTEG; i++, pte++) {
317    if (!pte->v && !pte->marked) {
318      /* found a free PTE; mark it as potentially used and return */
319      pte->h = 0;               /* found by the primary hash fn */
320      pte->marked = 1;
321      return pte;
322    }
323  }
324
325#ifdef DEBUG
326  /* Strange: if the hash table was allocated big enough,
327   *          this should not happen (when using a 1:1 mapping)
328   *          Give them some information...
329   */
330  PRINTF ("## First hash bucket full - ");
331  dumpPteg (vsid, pi, hash);
332#endif
333
334  hash = PTE_HASH2 (hash);
335#ifdef DEBUG
336  PRINTF ("   Secondary pteg is 0x%08x\n", (unsigned) ptegOf (pt, hash));
337#endif
338  for (i = 0, pte = ptegOf (pt, hash); i < PTE_PER_PTEG; i++, pte++) {
339    if (!pte->v && !pte->marked) {
340      /* mark this pte as potentially used */
341      pte->marked = 1;
342      pte->h = 1;
343      return pte;
344    }
345  }
346#ifdef DEBUG
347  /* Even more strange - most likely, something is REALLY messed up */
348  PRINTF ("## Second hash bucket full - ");
349  dumpPteg (vsid, pi, hash);
350#endif
351  return 0;
352}
353
354/* unmark all entries */
355static void
356unmarkAll (Triv121PgTbl pt)
357{
358  unsigned long n = pt->size / sizeof (PTERec);
359  unsigned long i;
360  APte pte;
361  for (i = 0, pte = pt->base; i < n; i++, pte++)
362    pte->marked = 0;
363
364}
365
366/* calculate the minimal size of a page/hash table
367 * to map a range of 'size' bytes in EA space.
368 *
369 * RETURNS: size in 'number of bits', i.e. the
370 *          integer part of LOGbase2(minsize)
371 *          is returned.
372 * NOTE:        G3/G4 machines need at least 16 bits
373 *          (64k).
374 */
375unsigned long
376triv121PgTblLdMinSize (unsigned long size)
377{
378  unsigned long i;
379  /* round 'size' up to the next page boundary */
380  size += (1 << LD_PG_SIZE) - 1;
381  size &= ~((1 << LD_PG_SIZE) - 1);
382  /* divide by number of PTEs  and multiply
383   * by the size of a PTE.
384   */
385  size >>= LD_PG_SIZE - LD_PTE_SIZE;
386  /* find the next power of 2 >= size */
387  for (i = 0; i < LD_PHYS_SIZE; i++) {
388    if ((1 << i) >= size)
389      break;
390  }
391  /* pop up to the allowed minimum, if necessary */
392  if (i < LD_MIN_PT_SIZE)
393    i = LD_MIN_PT_SIZE;
394  return i;
395}
396
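/* Example (illustrative, not part of the original code): to map a
 * 256MB (2^28 byte) range the loop above yields 2^28 >> 9 == 2^19,
 * i.e. triv121PgTblLdMinSize() returns 19 and a 512kB hash table is
 * required; any range of up to 32MB hits the 64kB (2^LD_MIN_PT_SIZE)
 * minimum and the routine returns 16.
 */
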
397/* initialize a trivial page table of 2^ldSize bytes
398 * at 'base' in memory.
399 *
400 * RETURNS:     OPAQUE HANDLE (not the hash table address)
401 *          or NULL on failure.
402 */
403Triv121PgTbl
404triv121PgTblInit (unsigned long base, unsigned ldSize)
405{
406  if (pgTbl.size) {
407    /* already initialized */
408    return 0;
409  }
410
411  if (ldSize < LD_MIN_PT_SIZE)
412    return 0;                   /* too small */
413
414  if (base & ((1 << ldSize) - 1))
415    return 0;                   /* misaligned */
416
417  /* This was tested on 604r, 750 and 7400.
418   * On other CPUs, verify that the TLB invalidation works
419   * for a new CPU variant and that it has hardware PTE lookup/
420   * TLB replacement before adding it to this list.
421   *
422   * NOTE: The 603 features no hardware PTE lookup - and
423   *       hence the page tables should NOT be used.
424   *               Although lookup could be implemented in
425   *               software this is probably not desirable
426   *               as it could have an impact on hard realtime
427   *               performance, screwing deterministic latency!
428   *               (Could still be useful for debugging, though)
429   */
430  if ( ! ppc_cpu_has_hw_ptbl_lkup() )
431    return 0;                   /* unsupported by this CPU */
432
433  pgTbl.base = (APte) base;
434  pgTbl.size = 1 << ldSize;
435  /* clear all page table entries */
436  memset (pgTbl.base, 0, pgTbl.size);
437
438  CONSCHECK (0);
439
440  /* map the page table itself 'm' and 'readonly' */
441  if (triv121PgTblMap (&pgTbl,
442                       TRIV121_121_VSID,
443                       base,
444                       (pgTbl.size >> LD_PG_SIZE),
445                       TRIV121_ATTR_M, TRIV121_PP_RO_PAGE) >= 0)
446    return 0;
447
448  CONSCHECK ((pgTbl.size >> LD_PG_SIZE));
449
450  return &pgTbl;
451}
452
453/* return the handle of the (one and only) page table
454 * or NULL if none has been initialized yet.
455 */
456Triv121PgTbl
457triv121PgTblGet (void)
458{
459  return pgTbl.size ? &pgTbl : 0;
460}
461
462/* NOTE: this routine returns -1 on success;
463 *       on failure, the page index for which
464 *       no PTE could be allocated is returned.
465 *
466 * (Consult the header for a description of the
467 * arguments and return value.)
468 */
469long
470triv121PgTblMap (Triv121PgTbl pt,
471                 long ovsid,
472                 unsigned long start,
473                 unsigned long numPages,
474                 unsigned attributes, unsigned protection)
475{
476  int i, pass;
477  unsigned long pi;
478  APte pte;
479  long vsid;
480#ifdef DEBUG
481  long saved_vsid = ovsid;
482#endif
483
484  if (TRIV121_121_VSID == ovsid) {
485    /* use 1:1 mapping */
486    ovsid = VSID121 (start);
487  } else if (TRIV121_SEG_VSID == ovsid) {
488    ovsid = seg2vsid (start);
489  }
490
491#ifdef DEBUG
492  PRINTF ("Mapping %i (0x%x) pages at 0x%08x for VSID 0x%08x\n",
493          (unsigned) numPages, (unsigned) numPages,
494          (unsigned) start, (unsigned) ovsid);
495#endif
496
497  /* map in two passes. During the first pass, we try
498   * to claim entries as needed. The 'slotFor()' routine
499   * will 'mark' the claimed entries without 'valid'ating
500   * them.
501   * If the mapping fails, all claimed entries are unmarked
502   * and we return the PI for which allocation failed.
503   *
504   * Once we know that the allocation would succeed, we
505   * do a second pass; during the second pass, the PTE
506   * is actually written.
507   *
508   */
509  for (pass = 0; pass < 2; pass++) {
510    /* check if we would succeed during the first pass */
511    for (i = 0, pi = PI121 (start), vsid = ovsid; i < numPages; i++, pi++) {
512      if (pi >= 1 << LD_PI_SIZE) {
513        vsid++;
514        pi = 0;
515      }
516      /* leave alone existing mappings for this EA */
517      if (!alreadyMapped (pt, vsid, pi)) {
518        if (!(pte = slotFor (pt, vsid, pi))) {
519          /* no free slot found for page index 'pi' */
520          unmarkAll (pt);
521          return pi;
522        } else {
523          /* have a free slot; marked by slotFor() */
524          if (pass) {
525            /* second pass; do the real work */
526            pte->vsid = vsid;
527            /* H was set by slotFor() */
528            pte->api = API (pi);
529            /* set up 1:1 mapping */
530            pte->rpn =
531              ((((unsigned long) vsid) &
532                ((1 << (LD_PHYS_SIZE - LD_SEG_SIZE)) -
533                 1)) << LD_PI_SIZE) | pi;
534            pte->wimg = attributes & 0xf;
535            pte->pp = protection & 0x3;
536            /* mark it valid */
537            pte->marked = 0;
538            if (pt->active) {
539              uint32_t flags;
540              rtems_interrupt_disable (flags);
541              /* order setting 'v' after writing everything else */
542              asm volatile ("eieio":::"memory");
543              pte->v = 1;
544              asm volatile ("sync":::"memory");
545              rtems_interrupt_enable (flags);
546            } else {
547              pte->v = 1;
548            }
549
550#ifdef DEBUG
551            /* add paranoia */
552            assert (alreadyMapped (pt, vsid, pi) == pte);
553#endif
554          }
555        }
556      }
557    }
558    unmarkAll (pt);
559  }
560#ifdef DEBUG
561  {
562    unsigned long failedat;
563    CONSCHECK (-1);
564    /* double check that the requested range is mapped */
565    failedat =
566      triv121IsRangeMapped (saved_vsid, start,
567                            start + (1 << LD_PG_SIZE) * numPages);
568    if (0x0C0C != failedat) {
569      PRINTF ("triv121 mapping failed at 0x%08x\n", (unsigned) failedat);
570      return PI121 (failedat);
571    }
572  }
573#endif
574  return TRIV121_MAP_SUCCESS;   /* -1 !! */
575}
576
577unsigned long
578triv121PgTblSDR1 (Triv121PgTbl pt)
579{
580  return (((unsigned long) pt->base) & ~((1 << LD_MIN_PT_SIZE) - 1)) |
581    (((pt->size - 1) >> LD_MIN_PT_SIZE) &
582     ((1 << (LD_HASH_SIZE - (LD_MIN_PT_SIZE - LD_PTEG_SIZE))) - 1)
583    );
584}
585
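/* Example (illustrative, not part of the original code): for a 2^19 byte
 * (512kB) hash table located at physical address 0x00f80000 this yields
 *
 *   triv121PgTblSDR1(pt) == 0x00f80007
 *
 * i.e. HTABORG holds the upper 16 address bits and HTABMASK == 0x007
 * lets three additional hash bits select one of the eight 64kB blocks
 * of the table.
 */
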
586void
587triv121PgTblActivate (Triv121PgTbl pt)
588{
589#ifndef DEBUG_MAIN
590  unsigned long          sdr1 = triv121PgTblSDR1 (pt);
591  register unsigned long tmp0 = 16;     /* initial counter value (#segment regs) */
592  register unsigned long tmp1 = (KEY_USR | KEY_SUP);
593  register unsigned long tmp2 = (MSR_EE | MSR_IR | MSR_DR);
594#endif
595  pt->active = 1;
596
597#ifndef DEBUG_MAIN
598#ifdef DEBUG_EXC
599  /* install our exception handler */
600  ohdl = globalExceptHdl;
601  globalExceptHdl = myhdl;
602  __asm__ __volatile__ ("sync"::"memory");
603#endif
604
605  /* This section of assembly code takes care of the
606   * following:
607   * - get MSR and switch interrupts + MMU off
608   *
609   * - load up the segment registers with a
610   *   1:1 effective <-> virtual mapping;
611   *   give user & supervisor keys
612   *
613   * - flush all TLBs;
614   *   NOTE: the TLB flushing code is probably
615   *         CPU dependent!
616   *
617   * - setup SDR1
618   *
619   * - restore original MSR
620   */
621  __asm__ __volatile (
622    "   mtctr   %[tmp0]\n"
623    /* Get MSR and switch interrupts off - just in case.
624     * Also switch the MMU off; the book
625     * says that SDR1 must not be changed with either
626     * MSR_IR or MSR_DR set. I would guess that it could
627     * be safe as long as the IBAT & DBAT mappings override
628     * the page table...
629     */
630    "   mfmsr   %[tmp0]\n"
631    "   andc    %[tmp2], %[tmp0], %[tmp2]\n"
632    "   mtmsr   %[tmp2]\n"
633    "   isync   \n"
634    /* set up the segment registers */
635    "   li              %[tmp2], 0\n"
636    "1: mtsrin  %[tmp1], %[tmp2]\n"
637    "   addis   %[tmp2], %[tmp2], 0x1000\n" /* address next SR */
638    "   addi    %[tmp1], %[tmp1], 1\n"      /* increment VSID  */
639    "   bdnz    1b\n"
640    /* Now flush all TLBs, starting with the topmost index */
641    "   lis             %[tmp2], %[ea_range]@h\n"
642    "2: addic.  %[tmp2], %[tmp2], -%[pg_sz]\n"    /* address the next one (decrementing) */
643    "   tlbie   %[tmp2]\n"             /* invalidate & repeat */
644    "   bgt             2b\n"
645    "   eieio   \n"
646    "   tlbsync \n"
647    "   sync    \n"
648    /* set up SDR1 */
649    "   mtspr   %[sdr1], %[sdr1val]\n"
650    /* restore original MSR  */
651    "   mtmsr   %[tmp0]\n"
652    "   isync   \n"
653      :[tmp0]"+r&"(tmp0), [tmp1]"+b&"(tmp1), [tmp2]"+b&"(tmp2)
654      :[ea_range]"i"(FLUSH_EA_RANGE), [pg_sz]"i" (1 << LD_PG_SIZE),
655       [sdr1]"i"(SDR1), [sdr1val]"r" (sdr1)
656      :"ctr", "cc", "memory"
657  );
658
659  /* At this point, BAT0 is probably still active; it's the
660   * caller's job to deactivate it...
661   */
662#endif
663}
664
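#if 0
/* Illustrative usage sketch (not part of the original code; 'memStart'
 * and 'memSize' are hypothetical BSP-provided values): place the table
 * at the top of RAM aligned to its own size, install a 1:1 read/write
 * mapping of all RAM (the table itself was already mapped read-only by
 * triv121PgTblInit()) and switch the MMU over to it.
 */
static void
examplePgTblSetup (unsigned long memStart, unsigned long memSize)
{
  unsigned long ldSize = triv121PgTblLdMinSize (memSize);
  unsigned long base =
    (memStart + memSize - (1 << ldSize)) & ~((1 << ldSize) - 1);
  Triv121PgTbl pt = triv121PgTblInit (base, ldSize);

  if (pt && TRIV121_MAP_SUCCESS ==
      triv121PgTblMap (pt, TRIV121_121_VSID, memStart,
                       memSize >> LD_PG_SIZE,
                       TRIV121_ATTR_M, TRIV121_PP_RW_PAGE))
    triv121PgTblActivate (pt);
}
#endif
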
665/**************************  DEBUGGING ROUTINES  *************************/
666
667/* Exception handler to catch page faults */
668#ifdef DEBUG_EXC
669
670#define BAT_VALID_BOTH  3       /* allow user + super access */
671
672static void
673myhdl (BSP_Exception_frame * excPtr)
674{
675  if (3 == excPtr->_EXC_number) {
676    unsigned long dsisr;
677
678    /* reactivate DBAT0 and read DSISR */
679    __asm__ __volatile__ (
680      "mfspr %0, %1   \n"
681      "ori   %0, %0, 3\n"
682      "mtspr %1, %0   \n"
683      "sync\n"
684      "mfspr %0, %2\n"
685        :"=&r" (dsisr)
686        :"i" (DBAT0U), "i" (DSISR), "i" (BAT_VALID_BOTH)
687    );
688
689    printk ("Data Access Exception (DSI) # 3\n");
690    printk ("Reactivated DBAT0 mapping\n");
691
692
693    printk ("DSISR 0x%08x\n", dsisr);
694
695    printk ("revectoring to prevent default handler panic().\n");
696    printk ("NOTE: exception number %i below is BOGUS\n", ASM_DEC_VECTOR);
697    /* make this exception 'recoverable' for
698     * the default handler by faking a decrementer
699     * exception.
700     * Note that the default handler's message will be
701     * wrong about the exception number.
702     */
703    excPtr->_EXC_number = ASM_DEC_VECTOR;
704  }
705/* now call the original handler */
706  ((void (*)()) ohdl) (excPtr);
707}
708#endif
709
710
711
712/* test the consistency of the page table
713 *
714 * 'pass' is merely a number which will be printed
715 * by this routine, so the caller may give some
716 * context information.
717 *
718 * 'expected' is the number of valid (plus 'marked')
719 * entries the caller believes the page table should
720 * have. This routine complains if its count differs.
721 *
722 * It basically verifies that the topmost 20bits
723 * of all VSIDs as well as the unused bits are all
724 * zero. Then it counts all valid and all 'marked'
725 * entries, adding them up and comparing them to the
726 * 'expected' number of occupied slots.
727 *
728 * RETURNS: total number of valid plus 'marked' slots.
729 */
730unsigned long
731triv121PgTblConsistency (Triv121PgTbl pt, int pass, int expected)
732{
733  APte pte;
734  int i;
735  unsigned v, m;
736  int warn = 0;
737  int errs = 0;
738  static int maxw = 20;         /* mute after detecting this many errors */
739
740  PRINTF ("Checking page table at 0x%08x (size %i==0x%x)\n",
741          (unsigned) pt->base, (unsigned) pt->size, (unsigned) pt->size);
742
743  if (!pt->base || !pt->size) {
744    PRINTF ("Uninitialized Page Table!\n");
745    return 0;
746  }
747
748  v = m = 0;
749#if 1
750  /* 10/9/2002: I had machine checks crashing after this loop
751   *            terminated. Maybe caused by speculative loads
752   *            from beyond the valid memory area (since the
753   *            page hash table sits at the top of physical
754   *            memory).
755   *            Very bizarre - the other loops in this file
756   *            seem to be fine. Maybe there is a compiler bug??
757   *            For the moment, I let the loop run backwards...
758   *
759   *                        Also see the comment a couple of lines down.
760   */
761  for (i = pt->size / sizeof (PTERec) - 1, pte = pt->base + i; i >= 0;
762       i--, pte--)
763#else
764  for (i = 0, pte = pt->base; i < pt->size / sizeof (PTERec); i++, pte++)
765#endif
766  {
767    int err = 0;
768    char buf[500];
769    unsigned long *lp = (unsigned long *) pte;
770#if 0
771    /* If I put this bogus while statement here (the body is
772     * never reached), the original loop works OK
773     */
774    while (pte >= pt->base + pt->size / sizeof (PTERec))
775      /* never reached */ ;
776#endif
777
778    if ( /* T.S: allow any VSID... (*lp & (0xfffff0 << 7)) || */ (*(lp + 1) & 0xe00)
779        || (pte->v && pte->marked)) {
780      /* check for vsid (without segment bits) == 0, unused bits == 0, valid && marked */
781      sprintf (buf, "unused bits or v && m");
782      err = 1;
783    } else {
784      if ( (*lp & (0xfffff0 << 7)) ) {
785        sprintf(buf,"(warning) non-1:1 VSID found");
786        err = 2;
787      }
788      if (pte->v)
789        v++;
790      if (pte->marked)
791        m++;
792    }
793    if (err && maxw) {
794      PRINTF
795        ("Pass %i -- strange PTE at 0x%08x found for page index %i == 0x%08x:\n",
796         pass, (unsigned) pte, i, i);
797      PRINTF ("Reason: %s\n", buf);
798      dumpPte (pte);
799      if ( err & 2 ) {
800         warn++;
801      } else {
802         errs++;
803      }
804      maxw--;
805    }
806  }
807  if (errs) {
808    PRINTF ("%i errors %s", errs, warn ? "and ":"");
809  }
810  if (warn) {
811    PRINTF ("%i warnings ",warn);
812  }
813  if (errs || warn) {
814    PRINTF ("found; currently %i entries marked, %i are valid\n",
815            m, v);
816  }
817  v += m;
818  if (maxw && expected >= 0 && expected != v) {
819    /* number of occupied slots not what they expected */
820    PRINTF ("Wrong # of occupied slots detected during pass");
821    PRINTF ("%i; should be %i (0x%x) is %i (0x%x)\n",
822            pass, expected, (unsigned) expected, v, (unsigned) v);
823    maxw--;
824  }
825  return v;
826}
827
828/* Find the PTE for an EA and print its contents
829 * RETURNS: pte for EA or NULL if no entry was found.
830 */
831APte
832triv121DumpEa (unsigned long ea)
833{
834  APte pte;
835
836  pte =
837    alreadyMapped (&pgTbl, pgTbl.active ? TRIV121_SEG_VSID : TRIV121_121_VSID,
838                   ea);
839
840  if (pte)
841    dumpPte (pte);
842  return pte;
843}
844
845APte
846triv121FindPte (unsigned long vsid, unsigned long pi)
847{
848  return alreadyMapped (&pgTbl, vsid, pi);
849}
850
851APte
852triv121UnmapEa (unsigned long ea)
853{
854  uint32_t flags;
855  APte pte;
856
857  if (!pgTbl.active) {
858    pte = alreadyMapped (&pgTbl, TRIV121_121_VSID, ea);
859    if (pte)                    /* alreadyMapped checks for pte->v */
860      pte->v = 0;
861    return pte;
862  }
863
864  pte = alreadyMapped (&pgTbl, TRIV121_SEG_VSID, ea);
865
866  if (!pte)
867    return 0;
868
869  rtems_interrupt_disable (flags);
870  pte->v = 0;
871  do_dssall ();
872  asm volatile ("       sync            \n\t"
873                "       tlbie %0        \n\t"
874                "       eieio           \n\t"
875                "       tlbsync         \n\t"
876                "       sync            \n\t"::"r" (ea):"memory");
877  rtems_interrupt_enable (flags);
878  return pte;
879}
880
881/* A context synchronizing jump */
882#define SYNC_LONGJMP(msr)                               \
883        asm volatile(                                           \
884                "       mtsrr1  %0                      \n\t"   \
885                "       bl              1f                      \n\t"   \
886                "1:     mflr    3                       \n\t"   \
887                "       addi    3,3,1f-1b       \n\t"   \
888                "       mtsrr0  3                       \n\t"   \
889                "       rfi                                     \n\t"   \
890                "1:                                             \n\t"   \
891                :                                                               \
892                :"r"(msr)                                               \
893                :"3","lr","memory")
894
895/* The book doesn't mention dssall when changing PTEs
896 * but it requires it for BAT changes and I guess
897 * it makes sense in the case of PTEs as well.
898 * Just do it to be on the safe side...
899 */
900static void
901do_dssall (void)
902{
903  /* Before changing BATs, 'dssall' must be issued.
904   * We check MSR for MSR_VE and issue a 'dssall' if
905   * MSR_VE is set hoping that
906   *  a) on non-altivec CPUs MSR_VE reads as zero
907   *  b) all altivec CPUs use the same bit
908   */
909  if (_read_MSR () & MSR_VE) {
910    /* this construct is needed because we don't know
911     * if this file is compiled with -maltivec.
912     * (I plan to add altivec support outside of
913     * RTEMS core and hence I'd rather not
914     * rely on consistent compiler flags).
915     */
916#define DSSALL  0x7e00066c      /* dssall opcode */
917    asm volatile ("     .long %0"::"i" (DSSALL));
918#undef  DSSALL
919  }
920}
921
922APte
923triv121ChangeEaAttributes (unsigned long ea, int wimg, int pp)
924{
925  APte pte;
926  unsigned long msr;
927
928  if (!pgTbl.active) {
929    pte = alreadyMapped (&pgTbl, TRIV121_121_VSID, ea);
930    if (!pte)
931      return 0;
932    if (wimg > 0)
933      pte->wimg = wimg;
934    if (pp > 0)
935      pte->pp = pp;
936    return pte;
937  }
938
939  pte = alreadyMapped (&pgTbl, TRIV121_SEG_VSID, ea);
940
941  if (!pte)
942    return 0;
943
944  if (wimg < 0 && pp < 0)
945    return pte;
946
947  asm volatile ("mfmsr %0":"=r" (msr));
948
949  /* switch MMU and IRQs off */
950  SYNC_LONGJMP (msr & ~(MSR_EE | MSR_DR | MSR_IR));
951
952  pte->v = 0;
953  do_dssall ();
954  asm volatile ("sync":::"memory");
955  if (wimg >= 0)
956    pte->wimg = wimg;
957  if (pp >= 0)
958    pte->pp = pp;
959  asm volatile ("tlbie %0; eieio"::"r" (ea):"memory");
960  pte->v = 1;
961  asm volatile ("tlbsync; sync":::"memory");
962
963  /* restore, i.e., switch MMU and IRQs back on */
964  SYNC_LONGJMP (msr);
965
966  return pte;
967}
968
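/* Example (illustrative; the addresses are hypothetical and must already
 * be mapped): write-protect the page at 0x00003000 and apply the I/O
 * attributes to a device page at 0xff000000. A negative wimg/pp argument
 * leaves the respective PTE field unchanged:
 *
 *   triv121ChangeEaAttributes (0x00003000, -1, TRIV121_PP_RO_PAGE);
 *   triv121ChangeEaAttributes (0xff000000, TRIV121_ATTR_IO_PAGE, -1);
 */
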
969static void
970pgtblChangePP (Triv121PgTbl pt, int pp)
971{
972  unsigned long n = pt->size >> LD_PG_SIZE;
973  unsigned long b, i;
974
975  for (i = 0, b = (unsigned long) pt->base; i < n;
976       i++, b += (1 << LD_PG_SIZE)) {
977    triv121ChangeEaAttributes (b, -1, pp);
978  }
979}
980
981void
982triv121MakePgTblRW ()
983{
984  pgtblChangePP (&pgTbl, TRIV121_PP_RW_PAGE);
985}
986
987void
988triv121MakePgTblRO ()
989{
990  pgtblChangePP (&pgTbl, TRIV121_PP_RO_PAGE);
991}
992
993long
994triv121DumpPte (APte pte)
995{
996  if (pte)
997    dumpPte (pte);
998  return 0;
999}
1000
1001
1002#ifdef DEBUG
1003/* Dump an entire PTEG */
1004
1005static void
1006dumpPteg (unsigned long vsid, unsigned long pi, unsigned long hash)
1007{
1008  APte pte = ptegOf (&pgTbl, hash);
1009  int i;
1010  PRINTF ("hash 0x%08x, pteg 0x%08x (vsid 0x%08x, pi 0x%08x)\n",
1011          (unsigned) hash, (unsigned) pte, (unsigned) vsid, (unsigned) pi);
1012  for (i = 0; i < PTE_PER_PTEG; i++, pte++) {
1013    PRINTF ("pte 0x%08x is 0x%08x : 0x%08x\n",
1014            (unsigned) pte,
1015            (unsigned) *(unsigned long *) pte,
1016            (unsigned) *(((unsigned long *) pte) + 1));
1017  }
1018}
1019#endif
1020
1021/* Verify that a range of addresses is mapped in the page table.
1022 * start/end are segment offsets or EAs (if vsid has one of
1023 * the special values), respectively.
1024 *
1025 * RETURNS: address of the first page for which no
1026 *          PTE was found (i.e. page index * page size)
1027 *
1028 *          ON SUCCESS, the special value 0x0C0C ("OKOK")
1029 *          [which is not page aligned and hence is not
1030 *          a valid page address].
1031 */
1032
1033unsigned long
1034triv121IsRangeMapped (long vsid, unsigned long start, unsigned long end)
1035{
1036unsigned pi;
1037
1038  start &= ~((1 << LD_PG_SIZE) - 1);
1039  while (start < end) {
1040    if ( TRIV121_SEG_VSID != vsid && TRIV121_121_VSID != vsid )
1041      pi = PI121(start);
1042    else
1043      pi = start;
1044    if (!alreadyMapped (&pgTbl, vsid, pi))
1045      return start;
1046    start += 1 << LD_PG_SIZE;
1047  }
1048  return 0x0C0C;                /* OKOK - not on a page boundary */
1049}
1050
1051
1052#include <stdlib.h>
1053
1054/* print a PTE */
1055static void
1056dumpPte (APte pte)
1057{
1058  if (0 == ((unsigned long) pte & ((1 << LD_PTEG_SIZE) - 1)))
1059    PRINTF ("PTEG--");
1060  else
1061    PRINTF ("......");
1062  if (pte->v) {
1063    PRINTF ("VSID: 0x%08x H:%1i API: 0x%02x\n", pte->vsid, pte->h, pte->api);
1064    PRINTF ("      ");
1065    PRINTF ("RPN:  0x%08x WIMG: 0x%1x, (m %1i), pp: 0x%1x\n",
1066            pte->rpn, pte->wimg, pte->marked, pte->pp);
1067  } else {
1068    PRINTF ("xxxxxx\n");
1069    PRINTF ("      ");
1070    PRINTF ("xxxxxx\n");
1071  }
1072}
1073
1074
1075/* dump page table entries from index 'from' to 'to'.
1076 * The special value (unsigned)-1 is allowed for either
1077 * argument, causing the routine to dump the entire table.
1078 *
1079 * RETURNS 0
1080 */
1081int
1082triv121PgTblDump (Triv121PgTbl pt, unsigned from, unsigned to)
1083{
1084  int i;
1085  APte pte;
1086  PRINTF ("Dumping PT [size 0x%08x == %i] at 0x%08x\n",
1087          (unsigned) pt->size, (unsigned) pt->size, (unsigned) pt->base);
1088  if (from > pt->size >> LD_PTE_SIZE)
1089    from = 0;
1090  if (to > pt->size >> LD_PTE_SIZE)
1091    to = (pt->size >> LD_PTE_SIZE);
1092  for (i = from, pte = pt->base + from; i < (long) to; i++, pte++) {
1093    dumpPte (pte);
1094  }
1095  return 0;
1096}
1097
1098
1099#if defined(DEBUG_MAIN)
1100
1101#define LD_DBG_PT_SIZE  LD_MIN_PT_SIZE
1102
1103int
1104main (int argc, char **argv)
1105{
1106  unsigned long base, start, numPages;
1107  unsigned long size = 1 << LD_DBG_PT_SIZE;
1108  Triv121PgTbl pt;
1109
1110  base = (unsigned long) malloc (size << 1);
1111
1112  assert (base);
1113
1114  /* align pt */
1115  base += size - 1;
1116  base &= ~(size - 1);
1117
1118  assert (pt = triv121PgTblInit (base, LD_DBG_PT_SIZE));
1119
1120  triv121PgTblDump (pt, (unsigned) -1, (unsigned) -1);
1121  do {
1122    do {
1123      PRINTF ("Start Address:");
1124      fflush (stdout);
1125    } while (1 != scanf ("%i", &start));
1126    do {
1127      PRINTF ("# pages:");
1128      fflush (stdout);
1129    } while (1 != scanf ("%i", &numPages));
1130  } while (TRIV121_MAP_SUCCESS ==
1131           triv121PgTblMap (pt, TRIV121_121_VSID, start, numPages,
1132                            TRIV121_ATTR_IO_PAGE, 2)
1133           && 0 == triv121PgTblDump (pt, (unsigned) -1, (unsigned) -1));
1134}
1135#endif