source: rtems/c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.c @ 4712cdc

Last change on this file since 4712cdc was 4712cdc, checked in by Till Straumann <strauman@…>, on Jan 17, 2007 at 5:55:45 AM

2007-01-16 Till Straumann <strauman@…>

  • mpc6xx/mmu/pte121.h, mpc6xx/mmu/pte121.c: Added SLAC/Stanford Authorship Note / Copyright + Liability Disclaimer.
  • Property mode set to 100644
File size: 31.0 KB
Line 
1/* $Id$ */
2
3/* Trivial page table setup for RTEMS
4 * Purpose: allow write protection of text/ro-data
5 */
6
7/*
8 * Authorship
9 * ----------
10 * This software was created by
11 *     Till Straumann <strauman@slac.stanford.edu>, 4/2002, 2003, 2004,
12 *         Stanford Linear Accelerator Center, Stanford University.
13 *
14 * Acknowledgement of sponsorship
15 * ------------------------------
16 * This software was produced by
17 *     the Stanford Linear Accelerator Center, Stanford University,
18 *         under Contract DE-AC03-76SFO0515 with the Department of Energy.
19 *
20 * Government disclaimer of liability
21 * ----------------------------------
22 * Neither the United States nor the United States Department of Energy,
23 * nor any of their employees, makes any warranty, express or implied, or
24 * assumes any legal liability or responsibility for the accuracy,
25 * completeness, or usefulness of any data, apparatus, product, or process
26 * disclosed, or represents that its use would not infringe privately owned
27 * rights.
28 *
29 * Stanford disclaimer of liability
30 * --------------------------------
31 * Stanford University makes no representations or warranties, express or
32 * implied, nor assumes any liability for the use of this software.
33 *
34 * Stanford disclaimer of copyright
35 * --------------------------------
36 * Stanford University, owner of the copyright, hereby disclaims its
37 * copyright and all other rights in this software.  Hence, anyone may
38 * freely use it for any purpose without restriction. 
39 *
40 * Maintenance of notices
41 * ----------------------
42 * In the interest of clarity regarding the origin and status of this
43 * SLAC software, this and all the preceding Stanford University notices
44 * are to remain affixed to any copy or derivative of this software made
45 * or distributed by the recipient and are to be affixed to any copy of
46 * software made or distributed by the recipient that contains a copy or
47 * derivative of this software.
48 *
49 * ------------------ SLAC Software Notices, Set 4 OTT.002a, 2004 FEB 03
50 */ 
51
/* Choose debugging options */
53#undef  DEBUG_MAIN              /* create a standalone (host) program for basic testing */
54#undef  DEBUG                   /* target debugging and consistency checking */
55#undef  DEBUG_EXC               /* add exception handler which reenables BAT0 and recovers from a page fault */
56
57#ifdef  DEBUG_MAIN
58#undef  DEBUG                   /* must not use these together with DEBUG_MAIN */
59#undef  DEBUG_EXC
60#endif
61
62/***************************** INCLUDE HEADERS ****************************/
63
64#ifndef DEBUG_MAIN
65#include <rtems.h>
66#include <rtems/bspIo.h>
67#include <libcpu/cpuIdent.h>
68#include <libcpu/spr.h>
69#ifdef  DEBUG_EXC
70#include <bsp.h>
71#include <bsp/vectors.h>
72#include <libcpu/raw_exception.h>
73#endif
74#endif
75
76#include <stdio.h>
77#include <assert.h>
78#include <string.h>
79
80#include "pte121.h"
81
82/************************** CONSTANT DEFINITIONS **************************/
83
84/* Base 2 logs of some sizes */
85
86#ifndef DEBUG_MAIN
87
88#define LD_PHYS_SIZE    32      /* physical address space */
89#define LD_PG_SIZE              12      /* page size */
90#define LD_PTEG_SIZE    6       /* PTEG size */
91#define LD_PTE_SIZE             3       /* PTE size  */
92#define LD_SEG_SIZE             28      /* segment size */
93#define LD_MIN_PT_SIZE  16      /* minimal size of a page table */
#define LD_HASH_SIZE    19      /* length of a hash */
95#define LD_VSID_SIZE    24      /* vsid bits in seg. register */
96
97#else /* DEBUG_MAIN */
98
99/* Reduced 'fantasy' sizes for testing */
100#define LD_PHYS_SIZE    32      /* physical address space */
101#define LD_PG_SIZE              6       /* page size */
102#define LD_PTEG_SIZE    5       /* PTEG size */
103#define LD_PTE_SIZE             3       /* PTE size  */
104#define LD_SEG_SIZE             28      /* segment size */
105#define LD_MIN_PT_SIZE  7       /* minimal size of a page table */
#define LD_HASH_SIZE    19      /* length of a hash */
107
108#endif /* DEBUG_MAIN */
109
110/* Derived sizes */
111
112/* Size of a page index */
113#define LD_PI_SIZE              ((LD_SEG_SIZE) - (LD_PG_SIZE))
114
115/* Number of PTEs in a PTEG */
116#define PTE_PER_PTEG    (1<<((LD_PTEG_SIZE)-(LD_PTE_SIZE)))
117
118/* Segment register bits */
119#define KEY_SUP                 (1<<30) /* supervisor mode key */
120#define KEY_USR                 (1<<29) /* user mode key */
121
122/* The range of effective addresses to scan with 'tlbie'
123 * instructions in order to flush all TLBs.
124 * On the 750 and 7400, there are 128 two way I and D TLBs,
125 * indexed by EA[14:19]. Hence calling
126 *   tlbie rx
127 * where rx scans 0x00000, 0x01000, 0x02000, ... 0x3f000
128 * is sufficient to do the job
129 */
130#define NUM_TLB_PER_WAY 64      /* 750 and 7400 have 128 two way TLBs */
131#define FLUSH_EA_RANGE  (NUM_TLB_PER_WAY<<LD_PG_SIZE)
132
133/*************************** MACRO DEFINITIONS ****************************/
134
135/* Macros to split a (32bit) 'effective' address into
136 * VSID (virtual segment id) and PI (page index)
137 * using a 1:1 mapping of 'effective' to 'virtual'
138 * addresses.
139 *
140 * For 32bit addresses this looks like follows
141 * (each 'x' or '0' stands for a 'nibble' [4bits]):
142 *
143 *         32bit effective address (EA)
144 *
145 *              x x x x x x x x
146 *               |       |
147 *    0 0 0 0 0 x|x x x x|x x x
148 *       VSID    |  PI   |  PO (page offset)
149 *               |       |
150 */
151/* 1:1 VSID of an EA  */
152#define VSID121(ea) (((ea)>>LD_SEG_SIZE) & ((1<<(LD_PHYS_SIZE-LD_SEG_SIZE))-1))
153/* page index of an EA */
154#define PI121(ea)       (((ea)>>LD_PG_SIZE) & ((1<<LD_PI_SIZE)-1))
155
/* read VSID from segment register */
#ifndef DEBUG_MAIN
static uint32_t
seg2vsid (uint32_t ea)
{
  /* 'mfsrin' reads the segment register selected by the upper
   * bits of 'ea'; the result overwrites 'ea' itself
   */
  asm volatile ("mfsrin %0, %0":"=r" (ea):"0" (ea));
  /* mask off everything but the VSID bits of the segment register */
  return ea & ((1 << LD_VSID_SIZE) - 1);
}
#else
/* host test build (DEBUG_MAIN): fall back to the 1:1 EA->VSID mapping */
#define seg2vsid(ea) VSID121(ea)
#endif
167
168/* Primary and secondary PTE hash functions */
169
170/* Compute the primary hash from a VSID and a PI */
171#define PTE_HASH1(vsid, pi) (((vsid)^(pi))&((1<<LD_HASH_SIZE)-1))
172
173/* Compute the secondary hash from a primary hash */
174#define PTE_HASH2(hash1) ((~(hash1))&((1<<LD_HASH_SIZE)-1))
175
176/* Extract the abbreviated page index (which is the
177 * part of the PI which does not go into the hash
178 * under all circumstances [10 bits to -> 6bit API])
179 */
180#define API(pi) ((pi)>>((LD_MIN_PT_SIZE)-(LD_PTEG_SIZE)))
181
182
/* Horrible Macros */
#ifdef __rtems__
/* must not use printf until multitasking is up */
typedef void (*PrintF) (char *, ...);
/* Select an output routine: 'printf' once multitasking is up
 * (an executing thread exists), 'printk' before that.
 */
static PrintF
whatPrintf (void)
{
  return _Thread_Executing ? (PrintF) printf : printk;
}

#define PRINTF(args...) ((void)(whatPrintf())(args))
#else
/* non-RTEMS (host test) build: plain printf is always safe */
#define PRINTF(args...) printf(args)
#endif
197
#ifdef DEBUG
/* forward declaration of the consistency checker (defined below)
 * so the CONSCHECK() macro can be used before the definition
 */
unsigned long triv121PgTblConsistency (Triv121PgTbl pt, int pass, int expect);

/* running counter passed as 'pass' so checker output can be
 * correlated with the sequence of checks
 */
static int consistencyPass = 0;
#define CONSCHECK(expect) triv121PgTblConsistency(&pgTbl,consistencyPass++,(expect))
#else
/* consistency checks compile away entirely in non-DEBUG builds */
#define CONSCHECK(expect) do {} while (0)
#endif
206
/**************************** TYPE DEFINITIONS ****************************/

/* internal description of a trivial page table */
typedef struct Triv121PgTblRec_
{
  APte base;                    /* start address of the hash table          */
  unsigned long size;           /* table size in bytes (power of two);
                                 * 0 means 'not initialized yet'            */
  int active;                   /* nonzero once triv121PgTblActivate() has
                                 * loaded this table into SDR1              */
} Triv121PgTblRec;
216
217
/************************** FORWARD DECLARATIONS *************************/

#ifdef DEBUG_EXC
/* debug exception handler installed by triv121PgTblActivate() */
static void myhdl (BSP_Exception_frame * excPtr);
#endif

/* print the contents of a single page table entry */
static void dumpPte (APte pte);

#ifdef DEBUG
/* dump an entire PTE group addressed by one hash value */
static void
dumpPteg (unsigned long vsid, unsigned long pi, unsigned long hash);
#endif

/* verify that a range of addresses is mapped (defined below) */
unsigned long
triv121IsRangeMapped (long vsid, unsigned long start, unsigned long end);

/* issue a 'dssall' instruction if altivec appears to be enabled */
static void do_dssall ();

/**************************** STATIC VARIABLES ****************************/

/* dont malloc - we might have to use this before
 * we have malloc or even RTEMS workspace available
 */
static Triv121PgTblRec pgTbl = { 0 };

#ifdef DEBUG_EXC
static void *ohdl;              /* keep a pointer to the original handler */
#endif
246
247/*********************** INLINES & PRIVATE ROUTINES ***********************/
248
249/* compute the page table entry group (PTEG) of a hash */
250static inline APte
251ptegOf (Triv121PgTbl pt, unsigned long hash)
252{
253  hash &= ((1 << LD_HASH_SIZE) - 1);
254  return (APte) (((unsigned long) pt->
255                  base) | ((hash << LD_PTEG_SIZE) & (pt->size - 1)));
256}
257
258/* see if a vsid/pi combination is already mapped
259 *
260 * RETURNS: PTE of mapping / NULL if none exists
261 *
262 * NOTE: a vsid<0 is legal and will tell this
263 *       routine that 'pi' is actually an EA to
264 *       be split into vsid and pi...
265 */
266static APte
267alreadyMapped (Triv121PgTbl pt, long vsid, unsigned long pi)
268{
269  int i;
270  unsigned long hash, api;
271  APte pte;
272
273  if (!pt->size)
274    return 0;
275
276  if (TRIV121_121_VSID == vsid) {
277    vsid = VSID121 (pi);
278    pi = PI121 (pi);
279  } else if (TRIV121_SEG_VSID == vsid) {
280    vsid = seg2vsid (pi);
281    pi = PI121 (pi);
282  }
283
284  hash = PTE_HASH1 (vsid, pi);
285  api = API (pi);
286  for (i = 0, pte = ptegOf (pt, hash); i < PTE_PER_PTEG; i++, pte++)
287    if (pte->v && pte->vsid == vsid && pte->api == api && 0 == pte->h)
288      return pte;
289  /* try the secondary hash table */
290  hash = PTE_HASH2 (hash);
291  for (i = 0, pte = ptegOf (pt, hash); i < PTE_PER_PTEG; i++, pte++)
292    if (pte->v && pte->vsid == vsid && pte->api == api && 1 == pte->h)
293      return pte;
294  return 0;
295}
296
/* find the first available slot for  vsid/pi
 *
 * NOTE: it is NOT legal to pass a vsid<0 / EA combination.
 *
 * RETURNS free slot with the 'marked' field set (a reservation
 *         only -- the entry is not yet 'valid'ated). The 'h'
 *         field is set to 0 or one, depending on whether
 *         the slot was allocated by using the primary or
 *         the secondary hash, respectively.
 *         NULL if both PTEGs are completely occupied.
 */
static APte
slotFor (Triv121PgTbl pt, unsigned long vsid, unsigned long pi)
{
  int i;
  unsigned long hash, api;
  APte pte;

  /* primary hash */
  hash = PTE_HASH1 (vsid, pi);
  api = API (pi);
  /* linear search thru all buckets for this hash */
  for (i = 0, pte = ptegOf (pt, hash); i < PTE_PER_PTEG; i++, pte++) {
    if (!pte->v && !pte->marked) {
      /* found a free PTE; mark it as potentially used and return */
      pte->h = 0;               /* found by the primary hash fn */
      pte->marked = 1;
      return pte;
    }
  }

#ifdef DEBUG
  /* Strange: if the hash table was allocated big enough,
   *          this should not happen (when using a 1:1 mapping)
   *          Give them some information...
   */
  PRINTF ("## First hash bucket full - ");
  dumpPteg (vsid, pi, hash);
#endif

  /* primary PTEG is full; retry with the secondary hash/PTEG */
  hash = PTE_HASH2 (hash);
#ifdef DEBUG
  PRINTF ("   Secondary pteg is 0x%08x\n", (unsigned) ptegOf (pt, hash));
#endif
  for (i = 0, pte = ptegOf (pt, hash); i < PTE_PER_PTEG; i++, pte++) {
    if (!pte->v && !pte->marked) {
      /* mark this pte as potentially used */
      pte->marked = 1;
      pte->h = 1;
      return pte;
    }
  }
#ifdef DEBUG
  /* Even more strange - most likely, something is REALLY messed up */
  PRINTF ("## Second hash bucket full - ");
  dumpPteg (vsid, pi, hash);
#endif
  return 0;
}
354
355/* unmark all entries */
356static void
357unmarkAll (Triv121PgTbl pt)
358{
359  unsigned long n = pt->size / sizeof (PTERec);
360  unsigned long i;
361  APte pte;
362  for (i = 0, pte = pt->base; i < n; i++, pte++)
363    pte->marked = 0;
364
365}
366
367/* calculate the minimal size of a page/hash table
368 * to map a range of 'size' bytes in EA space.
369 *
370 * RETURNS: size in 'number of bits', i.e. the
371 *          integer part of LOGbase2(minsize)
372 *          is returned.
373 * NOTE:        G3/G4 machines need at least 16 bits
374 *          (64k).
375 */
376unsigned long
377triv121PgTblLdMinSize (unsigned long size)
378{
379  unsigned long i;
380  /* round 'size' up to the next page boundary */
381  size += (1 << LD_PG_SIZE) - 1;
382  size &= ~((1 << LD_PG_SIZE) - 1);
383  /* divide by number of PTEs  and multiply
384   * by the size of a PTE.
385   */
386  size >>= LD_PG_SIZE - LD_PTE_SIZE;
387  /* find the next power of 2 >= size */
388  for (i = 0; i < LD_PHYS_SIZE; i++) {
389    if ((1 << i) >= size)
390      break;
391  }
392  /* pop up to the allowed minimum, if necessary */
393  if (i < LD_MIN_PT_SIZE)
394    i = LD_MIN_PT_SIZE;
395  return i;
396}
397
/* initialize a trivial page table of 2^ldSize bytes
 * at 'base' in memory.
 *
 * 'base' must be aligned to the table size.
 *
 * RETURNS:     OPAQUE HANDLE (not the hash table address)
 *          or NULL on failure (already initialized, table too
 *          small, base misaligned, unsupported CPU, or failure
 *          to map the table itself).
 */
Triv121PgTbl
triv121PgTblInit (unsigned long base, unsigned ldSize)
{
  if (pgTbl.size) {
    /* already initialized */
    return 0;
  }

  if (ldSize < LD_MIN_PT_SIZE)
    return 0;                   /* too small */

  if (base & ((1 << ldSize) - 1))
    return 0;                   /* misaligned */

  /* This was tested on 604r, 750 and 7400.
   * On other CPUs, verify that the TLB invalidation works
   * for a new CPU variant and that it has hardware PTE lookup/
   * TLB replacement before adding it to this list.
   *
   * NOTE: The 603 features no hardware PTE lookup - and
   *       hence the page tables should NOT be used.
   *               Although lookup could be implemented in
   *               software this is probably not desirable
   *               as it could have an impact on hard realtime
   *               performance, screwing deterministic latency!
   *               (Could still be useful for debugging, though)
   */
  if (   PPC_604 != current_ppc_cpu
      && PPC_604e != current_ppc_cpu
      && PPC_604r != current_ppc_cpu
      && PPC_750  != current_ppc_cpu
      && PPC_7400 != current_ppc_cpu
      && PPC_7455 != current_ppc_cpu
      && PPC_7457 != current_ppc_cpu
     )
    return 0;                   /* unsupported by this CPU */

  pgTbl.base = (APte) base;
  pgTbl.size = 1 << ldSize;
  /* clear all page table entries */
  memset (pgTbl.base, 0, pgTbl.size);

  CONSCHECK (0);

  /* map the page table itself 'm' and 'readonly'.
   * NOTE: triv121PgTblMap() returns TRIV121_MAP_SUCCESS (-1) on
   *       success, so a result >= 0 here indicates failure.
   */
  if (triv121PgTblMap (&pgTbl,
                       TRIV121_121_VSID,
                       base,
                       (pgTbl.size >> LD_PG_SIZE),
                       TRIV121_ATTR_M, TRIV121_PP_RO_PAGE) >= 0)
    return 0;

  CONSCHECK ((pgTbl.size >> LD_PG_SIZE));

  return &pgTbl;
}
460
461/* return the handle of the (one and only) page table
462 * or NULL if none has been initialized yet.
463 */
464Triv121PgTbl
465triv121PgTblGet (void)
466{
467  return pgTbl.size ? &pgTbl : 0;
468}
469
/* Map 'numPages' consecutive pages starting at 'start' with the
 * given WIMG 'attributes' and PP 'protection' bits.
 *
 * NOTE: this routine returns -1 on success;
 *       on failure, the page table index for
 *       which no PTE could be allocated is returned
 *
 * (Consult header about argument/return value
 * description)
 */
long
triv121PgTblMap (Triv121PgTbl pt,
                 long ovsid,
                 unsigned long start,
                 unsigned long numPages,
                 unsigned attributes, unsigned protection)
{
  int i, pass;
  unsigned long pi;
  APte pte;
  long vsid;
#ifdef DEBUG
  long saved_vsid = ovsid;
#endif

  if (TRIV121_121_VSID == ovsid) {
    /* use 1:1 mapping */
    ovsid = VSID121 (start);
  } else if (TRIV121_SEG_VSID == ovsid) {
    ovsid = seg2vsid (start);
  }

#ifdef DEBUG
  PRINTF ("Mapping %i (0x%x) pages at 0x%08x for VSID 0x%08x\n",
          (unsigned) numPages, (unsigned) numPages,
          (unsigned) start, (unsigned) ovsid);
#endif

  /* map in two passes. During the first pass, we try
   * to claim entries as needed. The 'slotFor()' routine
   * will 'mark' the claimed entries without 'valid'ating
   * them.
   * If the mapping fails, all claimed entries are unmarked
   * and we return the PI for which allocation failed.
   *
   * Once we know that the allocation would succeed, we
   * do a second pass; during the second pass, the PTE
   * is actually written.
   *
   */
  for (pass = 0; pass < 2; pass++) {
    /* check if we would succeed during the first pass */
    for (i = 0, pi = PI121 (start), vsid = ovsid; i < numPages; i++, pi++) {
      /* page index overflowed into the next segment */
      if (pi >= 1 << LD_PI_SIZE) {
        vsid++;
        pi = 0;
      }
      /* leave alone existing mappings for this EA */
      if (!alreadyMapped (pt, vsid, pi)) {
        if (!(pte = slotFor (pt, vsid, pi))) {
          /* no free slot found for page index 'pi' */
          unmarkAll (pt);
          return pi;
        } else {
          /* have a free slot; marked by slotFor() */
          if (pass) {
            /* second pass; do the real work */
            pte->vsid = vsid;
            /* H was set by slotFor() */
            pte->api = API (pi);
            /* set up 1:1 mapping */
            pte->rpn =
              ((((unsigned long) vsid) &
                ((1 << (LD_PHYS_SIZE - LD_SEG_SIZE)) -
                 1)) << LD_PI_SIZE) | pi;
            pte->wimg = attributes & 0xf;
            pte->pp = protection & 0x3;
            /* mark it valid */
            pte->marked = 0;
            if (pt->active) {
              /* the MMU is already using this table: make the
               * entry contents globally visible before validating
               */
              uint32_t flags;
              rtems_interrupt_disable (flags);
              /* order setting 'v' after writing everything else */
              asm volatile ("eieio"::"m"(*pte));
              pte->v = 1;
              asm volatile ("sync"::"m"(*pte));
              rtems_interrupt_enable (flags);
            } else {
              pte->v = 1;
            }

#ifdef DEBUG
            /* add paranoia */
            assert (alreadyMapped (pt, vsid, pi) == pte);
#endif
          }
        }
      }
    }
    /* first pass: release the reservations (they served only to
     * prove the allocation succeeds); second pass: clear the
     * 'marked' leftovers
     */
    unmarkAll (pt);
  }
#ifdef DEBUG
  {
    unsigned long failedat;
    CONSCHECK (-1);
    /* double check that the requested range is mapped */
    failedat =
      triv121IsRangeMapped (saved_vsid, start,
                            start + (1 << LD_PG_SIZE) * numPages);
    if (0x0C0C != failedat) {
      PRINTF ("triv121 mapping failed at 0x%08x\n", (unsigned) failedat);
      return PI121 (failedat);
    }
  }
#endif
  return TRIV121_MAP_SUCCESS;   /* -1 !! */
}
584
585unsigned long
586triv121PgTblSDR1 (Triv121PgTbl pt)
587{
588  return (((unsigned long) pt->base) & ~((1 << LD_MIN_PT_SIZE) - 1)) |
589    (((pt->size - 1) >> LD_MIN_PT_SIZE) &
590     ((1 << (LD_HASH_SIZE - (LD_MIN_PT_SIZE - LD_PTEG_SIZE))) - 1)
591    );
592}
593
/* Activate the page table: load the segment registers with a 1:1
 * effective<->virtual mapping, flush all TLBs, and load SDR1 with
 * this table's address/size -- all with the MMU and interrupts off.
 */
void
triv121PgTblActivate (Triv121PgTbl pt)
{
#ifndef DEBUG_MAIN
  unsigned long          sdr1 = triv121PgTblSDR1 (pt);
  register unsigned long tmp0 = 16;     /* initial counter value (#segment regs) */
  register unsigned long tmp1 = (KEY_USR | KEY_SUP);
  register unsigned long tmp2 = (MSR_EE | MSR_IR | MSR_DR);
#endif
  pt->active = 1;

#ifndef DEBUG_MAIN
#ifdef DEBUG_EXC
  /* install our exception handler */
  ohdl = globalExceptHdl;
  globalExceptHdl = myhdl;
  __asm__ __volatile__ ("sync"::"memory");
#endif

  /* This section of assembly code takes care of the
   * following:
   * - get MSR and switch interrupts + MMU off
   *
   * - load up the segment registers with a
   *   1:1 effective <-> virtual mapping;
   *   give user & supervisor keys
   *
   * - flush all TLBs;
   *   NOTE: the TLB flushing code is probably
   *         CPU dependent!
   *
   * - setup SDR1
   *
   * - restore original MSR
   */
  __asm__ __volatile (
    "   mtctr   %[tmp0]\n"
    /* Get MSR and switch interrupts off - just in case.
     * Also switch the MMU off; the book
     * says that SDR1 must not be changed with either
     * MSR_IR or MSR_DR set. I would guess that it could
     * be safe as long as the IBAT & DBAT mappings override
     * the page table...
     */
    "   mfmsr   %[tmp0]\n"
    "   andc    %[tmp2], %[tmp0], %[tmp2]\n"
    "   mtmsr   %[tmp2]\n"
    "   isync   \n"
    /* set up the segment registers */
    "   li              %[tmp2], 0\n"
    "1: mtsrin  %[tmp1], %[tmp2]\n"
    "   addis   %[tmp2], %[tmp2], 0x1000\n" /* address next SR */
    "   addi    %[tmp1], %[tmp1], 1\n"      /* increment VSID  */
    "   bdnz    1b\n"
    /* Now flush all TLBs, starting with the topmost index */
    "   lis             %[tmp2], %[ea_range]@h\n"
    "2: addic.  %[tmp2], %[tmp2], -%[pg_sz]\n"    /* address the next one (decrementing) */
    "   tlbie   %[tmp2]\n"             /* invalidate & repeat */
    "   bgt             2b\n"
    "   eieio   \n"
    "   tlbsync \n"
    "   sync    \n"
    /* set up SDR1 */
    "   mtspr   %[sdr1], %[sdr1val]\n"
    /* restore original MSR  */
    "   mtmsr   %[tmp0]\n"
    "   isync   \n"
      :[tmp0]"+r&"(tmp0), [tmp1]"+b&"(tmp1), [tmp2]"+b&"(tmp2)
      :[ea_range]"i"(FLUSH_EA_RANGE), [pg_sz]"i" (1 << LD_PG_SIZE),
       [sdr1]"i"(SDR1), [sdr1val]"r" (sdr1)
      :"ctr", "cc", "memory"
  );

  /* At this point, BAT0 is probably still active; it's the
   * caller's job to deactivate it...
   */
#endif
}
672
673/**************************  DEBUGGING ROUTINES  *************************/
674
/* Exception handler to catch page faults */
#ifdef DEBUG_EXC

#define BAT_VALID_BOTH  3       /* allow user + super access */

/* Debug page-fault handler: re-enables the DBAT0 mapping so memory
 * is accessible again, prints DSISR, then re-vectors to the original
 * handler while faking a decrementer exception so the default
 * handler treats the fault as recoverable instead of panicking.
 */
static void
myhdl (BSP_Exception_frame * excPtr)
{
  if (3 == excPtr->_EXC_number) {
    unsigned long dsisr;

    /* reactivate DBAT0 and read DSISR */
    __asm__ __volatile__ (
      "mfspr %0, %1   \n"
      "ori   %0, %0, 3\n"
      "mtspr %1, %0   \n"
      "sync\n"
      "mfspr %0, %2\n"
        :"=&r" (dsisr)
        :"i" (DBAT0U), "i" (DSISR), "i" (BAT_VALID_BOTH)
    );

    printk ("Data Access Exception (DSI) # 3\n");
    printk ("Reactivated DBAT0 mapping\n");


    printk ("DSISR 0x%08x\n", dsisr);

    printk ("revectoring to prevent default handler panic().\n");
    printk ("NOTE: exception number %i below is BOGUS\n", ASM_DEC_VECTOR);
    /* make this exception 'recoverable' for
     * the default handler by faking a decrementer
     * exception.
     * Note that the default handler's message will be
     * wrong about the exception number.
     */
    excPtr->_EXC_number = ASM_DEC_VECTOR;
  }
/* now call the original handler */
  ((void (*)()) ohdl) (excPtr);
}
#endif
717
718
719
/* test the consistency of the page table
 *
 * 'pass' is merely a number which will be printed
 * by this routine, so the caller may give some
 * context information.
 *
 * 'expected' is the number of valid (plus 'marked')
 * entries the caller believes the page table should
 * have. This routine complains if its count differs.
 * Passing a negative 'expected' skips that comparison.
 *
 * It basically verifies that the topmost 20bits
 * of all VSIDs as well as the unused bits are all
 * zero. Then it counts all valid and all 'marked'
 * entries, adding them up and comparing them to the
 * 'expected' number of occupied slots.
 *
 * RETURNS: total number of valid plus 'marked' slots.
 */
unsigned long
triv121PgTblConsistency (Triv121PgTbl pt, int pass, int expected)
{
  APte pte;
  int i;
  unsigned v, m;
  int warn = 0;
  int errs = 0;
  static int maxw = 20;         /* mute after detecting this many errors */

  PRINTF ("Checking page table at 0x%08x (size %i==0x%x)\n",
          (unsigned) pt->base, (unsigned) pt->size, (unsigned) pt->size);

  if (!pt->base || !pt->size) {
    PRINTF ("Uninitialized Page Table!\n");
    return 0;
  }

  v = m = 0;
#if 1
  /* 10/9/2002: I had machine checks crashing after this loop
   *            terminated. Maybe caused by speculative loads
   *            from beyond the valid memory area (since the
   *            page hash table sits at the top of physical
   *            memory).
   *            Very bizarre - the other loops in this file
   *            seem to be fine. Maybe there is a compiler bug??
   *            For the moment, I let the loop run backwards...
   *
   *                        Also see the comment a couple of lines down.
   */
  for (i = pt->size / sizeof (PTERec) - 1, pte = pt->base + i; i >= 0;
       i--, pte--)
#else
  for (i = 0, pte = pt->base; i < pt->size / sizeof (PTERec); i++, pte++)
#endif
  {
    int err = 0;
    char buf[500];
    /* raw view of the PTE as two 32-bit words for bit-mask checks */
    unsigned long *lp = (unsigned long *) pte;
#if 0
    /* If I put this bogus while statement here (the body is
     * never reached), the original loop works OK
     */
    while (pte >= pt->base + pt->size / sizeof (PTERec))
      /* never reached */ ;
#endif

    if ( /* T.S: allow any VSID... (*lp & (0xfffff0 << 7)) || */ (*(lp + 1) & 0xe00)
        || (pte->v && pte->marked)) {
      /* check for vsid (without segment bits) == 0, unused bits == 0, valid && marked */
      sprintf (buf, "unused bits or v && m");
      err = 1;
    } else {
      /* entry passed the hard checks; a non-1:1 VSID is only a warning */
      if ( (*lp & (0xfffff0 << 7)) ) {
        sprintf(buf,"(warning) non-1:1 VSID found");
        err = 2;
      }
      if (pte->v)
        v++;
      if (pte->marked)
        m++;
    }
    if (err && maxw) {
      PRINTF
        ("Pass %i -- strange PTE at 0x%08x found for page index %i == 0x%08x:\n",
         pass, (unsigned) pte, i, i);
      PRINTF ("Reason: %s\n", buf);
      dumpPte (pte);
      if ( err & 2 ) {
         warn++;
      } else {
         errs++;
      }
      maxw--;
    }
  }
  if (errs) {
    PRINTF ("%i errors %s", errs, warn ? "and ":"");
  }
  if (warn) {
    PRINTF ("%i warnings ",warn);
  }
  if (errs || warn) {
    PRINTF ("found; currently %i entries marked, %i are valid\n",
            m, v);
  }
  v += m;
  if (maxw && expected >= 0 && expected != v) {
    /* number of occupied slots not what they expected */
    PRINTF ("Wrong # of occupied slots detected during pass");
    PRINTF ("%i; should be %i (0x%x) is %i (0x%x)\n",
            pass, expected, (unsigned) expected, v, (unsigned) v);
    maxw--;
  }
  return v;
}
835
836/* Find the PTE for a EA and print its contents
837 * RETURNS: pte for EA or NULL if no entry was found.
838 */
839APte
840triv121DumpEa (unsigned long ea)
841{
842  APte pte;
843
844  pte =
845    alreadyMapped (&pgTbl, pgTbl.active ? TRIV121_SEG_VSID : TRIV121_121_VSID,
846                   ea);
847
848  if (pte)
849    dumpPte (pte);
850  return pte;
851}
852
853APte
854triv121FindPte (unsigned long vsid, unsigned long pi)
855{
856  return alreadyMapped (&pgTbl, vsid, pi);
857}
858
/* Unmap an effective address: invalidate its PTE and, when the
 * table is already in use by the MMU, flush the corresponding TLB
 * entry.
 *
 * RETURNS: the (now invalidated) PTE, or NULL if 'ea' was not mapped.
 */
APte
triv121UnmapEa (unsigned long ea)
{
  uint32_t flags;
  APte pte;

  if (!pgTbl.active) {
    /* table not active yet -- no TLB flushing necessary */
    pte = alreadyMapped (&pgTbl, TRIV121_121_VSID, ea);
    if (pte)                    /* alreadyMapped checks for pte->v */
      pte->v = 0;
    return pte;
  }

  pte = alreadyMapped (&pgTbl, TRIV121_SEG_VSID, ea);

  if (!pte)
    return 0;

  rtems_interrupt_disable (flags);
  pte->v = 0;
  do_dssall ();
  /* 'sync' makes the invalidated PTE visible before 'tlbie';
   * 'tlbsync'/'sync' complete the TLB invalidation
   */
  asm volatile ("       sync            \n\t"
                "       tlbie %0        \n\t"
                "       eieio           \n\t"
                "       tlbsync         \n\t"
                "       sync            \n\t"::"r" (ea):"memory");
  rtems_interrupt_enable (flags);
  return pte;
}
888
/* A context synchronizing jump: load 'msr' into SRR1, point SRR0
 * at the instruction following the macro and execute 'rfi'. This
 * switches MSR bits (e.g. MMU/interrupt enables) in a
 * context-synchronizing way. Clobbers r3 and the link register.
 */
#define SYNC_LONGJMP(msr)                               \
        asm volatile(                                           \
                "       mtsrr1  %0                      \n\t"   \
                "       bl              1f                      \n\t"   \
                "1:     mflr    3                       \n\t"   \
                "       addi    3,3,1f-1b       \n\t"   \
                "       mtsrr0  3                       \n\t"   \
                "       rfi                                     \n\t"   \
                "1:                                             \n\t"   \
                :                                                               \
                :"r"(msr)                                               \
                :"3","lr","memory")
902
/* Stop all altivec data streams ('dssall') if altivec appears to
 * be enabled, as required before changing address translation.
 *
 * The book doesn't mention dssall when changing PTEs
 * but they require it for BAT changes and I guess
 * it makes sense in the case of PTEs as well.
 * Just do it to be on the safe side...
 */
static void
do_dssall ()
{
  /* Before changing BATs, 'dssall' must be issued.
   * We check MSR for MSR_VE and issue a 'dssall' if
   * MSR_VE is set hoping that
   *  a) on non-altivec CPUs MSR_VE reads as zero
   *  b) all altivec CPUs use the same bit
   */
  if (_read_MSR () & MSR_VE) {
    /* this construct is needed because we don't know
     * if this file is compiled with -maltivec.
     * (I plan to add altivec support outside of
     * RTEMS core and hence I'd rather not
     * rely on consistent compiler flags).
     */
#define DSSALL  0x7e00066c      /* dssall opcode */
    asm volatile ("     .long %0"::"i" (DSSALL));
#undef  DSSALL
  }
}
929
/* Change the WIMG attributes and/or PP protection of the page
 * mapping 'ea'. A negative 'wimg' / 'pp' leaves the respective
 * field unchanged.
 *
 * RETURNS: the affected PTE, or NULL if 'ea' is not mapped.
 *
 * NOTE(review): the inactive-table path below applies the new
 *       values only for 'wimg > 0' / 'pp > 0' whereas the active
 *       path uses '>= 0'; a zero value is therefore ignored before
 *       activation -- confirm this asymmetry is intended.
 */
APte
triv121ChangeEaAttributes (unsigned long ea, int wimg, int pp)
{
  APte pte;
  unsigned long msr;

  if (!pgTbl.active) {
    /* table not in use by the MMU; plain stores suffice */
    pte = alreadyMapped (&pgTbl, TRIV121_121_VSID, ea);
    if (!pte)
      return 0;
    if (wimg > 0)
      pte->wimg = wimg;
    if (pp > 0)
      pte->pp = pp;
    return pte;
  }

  pte = alreadyMapped (&pgTbl, TRIV121_SEG_VSID, ea);

  if (!pte)
    return 0;

  if (wimg < 0 && pp < 0)
    return pte;

  asm volatile ("mfmsr %0":"=r" (msr));

  /* switch MMU and IRQs off */
  SYNC_LONGJMP (msr & ~(MSR_EE | MSR_DR | MSR_IR));

  /* invalidate the entry while it is being modified, then flush
   * the TLB entry and re-validate
   */
  pte->v = 0;
  do_dssall ();
  asm volatile ("sync":::"memory");
  if (wimg >= 0)
    pte->wimg = wimg;
  if (pp >= 0)
    pte->pp = pp;
  asm volatile ("tlbie %0; eieio"::"r" (ea):"memory");
  pte->v = 1;
  asm volatile ("tlbsync; sync":::"memory");

  /* restore, i.e., switch MMU and IRQs back on */
  SYNC_LONGJMP (msr);

  return pte;
}
976
977static void
978pgtblChangePP (Triv121PgTbl pt, int pp)
979{
980  unsigned long n = pt->size >> LD_PG_SIZE;
981  unsigned long b, i;
982
983  for (i = 0, b = (unsigned long) pt->base; i < n;
984       i++, b += (1 << LD_PG_SIZE)) {
985    triv121ChangeEaAttributes (b, -1, pp);
986  }
987}
988
989void
990triv121MakePgTblRW ()
991{
992  pgtblChangePP (&pgTbl, TRIV121_PP_RW_PAGE);
993}
994
995void
996triv121MakePgTblRO ()
997{
998  pgtblChangePP (&pgTbl, TRIV121_PP_RO_PAGE);
999}
1000
1001long
1002triv121DumpPte (APte pte)
1003{
1004  if (pte)
1005    dumpPte (pte);
1006  return 0;
1007}
1008
1009
1010#ifdef DEBUG
1011/* Dump an entire PTEG */
1012
1013static void
1014dumpPteg (unsigned long vsid, unsigned long pi, unsigned long hash)
1015{
1016  APte pte = ptegOf (&pgTbl, hash);
1017  int i;
1018  PRINTF ("hash 0x%08x, pteg 0x%08x (vsid 0x%08x, pi 0x%08x)\n",
1019          (unsigned) hash, (unsigned) pte, (unsigned) vsid, (unsigned) pi);
1020  for (i = 0; i < PTE_PER_PTEG; i++, pte++) {
1021    PRINTF ("pte 0x%08x is 0x%08x : 0x%08x\n",
1022            (unsigned) pte,
1023            (unsigned) *(unsigned long *) pte,
1024            (unsigned) *(((unsigned long *) pte) + 1));
1025  }
1026}
1027#endif
1028
/* Verify that a range of addresses is mapped in the page table.
1030 * start/end are segment offsets or EAs (if vsid has one of
1031 * the special values), respectively.
1032 *
1033 * RETURNS: address of the first page for which no
1034 *          PTE was found (i.e. page index * page size)
1035 *         
1036 *          ON SUCCESS, the special value 0x0C0C ("OKOK")
1037 *          [which is not page aligned and hence is not
1038 *          a valid page address].
1039 */
1040
1041unsigned long
1042triv121IsRangeMapped (long vsid, unsigned long start, unsigned long end)
1043{
1044unsigned pi;
1045
1046  start &= ~((1 << LD_PG_SIZE) - 1);
1047  while (start < end) {
1048    if ( TRIV121_SEG_VSID != vsid && TRIV121_121_VSID != vsid )
1049      pi = PI121(start);
1050    else
1051      pi = start;
1052    if (!alreadyMapped (&pgTbl, vsid, pi))
1053      return start;
1054    start += 1 << LD_PG_SIZE;
1055  }
1056  return 0x0C0C;                /* OKOK - not on a page boundary */
1057}
1058
1059
1060#include <stdlib.h>
1061
1062/* print a PTE */
1063static void
1064dumpPte (APte pte)
1065{
1066  if (0 == ((unsigned long) pte & ((1 << LD_PTEG_SIZE) - 1)))
1067    PRINTF ("PTEG--");
1068  else
1069    PRINTF ("......");
1070  if (pte->v) {
1071    PRINTF ("VSID: 0x%08x H:%1i API: 0x%02x\n", pte->vsid, pte->h, pte->api);
1072    PRINTF ("      ");
1073    PRINTF ("RPN:  0x%08x WIMG: 0x%1x, (m %1i), pp: 0x%1x\n",
1074            pte->rpn, pte->wimg, pte->marked, pte->pp);
1075  } else {
1076    PRINTF ("xxxxxx\n");
1077    PRINTF ("      ");
1078    PRINTF ("xxxxxx\n");
1079  }
1080}
1081
1082
1083/* dump page table entries from index 'from' to 'to'
1084 * The special values (unsigned)-1 are allowed which
1085 * cause the routine to dump the entire table.
1086 *
1087 * RETURNS 0
1088 */
1089int
1090triv121PgTblDump (Triv121PgTbl pt, unsigned from, unsigned to)
1091{
1092  int i;
1093  APte pte;
1094  PRINTF ("Dumping PT [size 0x%08x == %i] at 0x%08x\n",
1095          (unsigned) pt->size, (unsigned) pt->size, (unsigned) pt->base);
1096  if (from > pt->size >> LD_PTE_SIZE)
1097    from = 0;
1098  if (to > pt->size >> LD_PTE_SIZE)
1099    to = (pt->size >> LD_PTE_SIZE);
1100  for (i = from, pte = pt->base + from; i < (long) to; i++, pte++) {
1101    dumpPte (pte);
1102  }
1103  return 0;
1104}
1105
1106
1107#if defined(DEBUG_MAIN)
1108
1109#define LD_DBG_PT_SIZE  LD_MIN_PT_SIZE
1110
1111int
1112main (int argc, char **argv)
1113{
1114  unsigned long base, start, numPages;
1115  unsigned long size = 1 << LD_DBG_PT_SIZE;
1116  Triv121PgTbl pt;
1117
1118  base = (unsigned long) malloc (size << 1);
1119
1120  assert (base);
1121
1122  /* align pt */
1123  base += size - 1;
1124  base &= ~(size - 1);
1125
1126  assert (pt = triv121PgTblInit (base, LD_DBG_PT_SIZE));
1127
1128  triv121PgTblDump (pt, (unsigned) -1, (unsigned) -1);
1129  do {
1130    do {
1131      PRINTF ("Start Address:");
1132      fflush (stdout);
1133    } while (1 != scanf ("%i", &start));
1134    do {
1135      PRINTF ("# pages:");
1136      fflush (stdout);
1137    } while (1 != scanf ("%i", &numPages));
1138  } while (TRIV121_MAP_SUCCESS ==
1139           triv121PgTblMap (pt, TRIV121_121_VSID, start, numPages,
1140                            TRIV121_ATTR_IO_PAGE, 2)
1141           && 0 == triv121PgTblDump (pt, (unsigned) -1, (unsigned) -1));
1142}
1143#endif
Note: See TracBrowser for help on using the repository browser.