source: rtems/c/src/lib/libcpu/powerpc/mpc6xx/mmu/pte121.c @ 73f8d93

Last change on this file since 73f8d93 was 73f8d93, checked in by Sebastian Huber <sebastian.huber@…>, on Feb 15, 2017 at 10:09:50 AM

bsps/powerpc: Fix warnings

  • Property mode set to 100644
File size: 30.9 KB
Line 
1/*
2 * Trivial page table setup for RTEMS
3 * Purpose: allow write protection of text/RO-data
4 */
5
6/*
7 * Authorship
8 * ----------
9 * This software was created by
10 *     Till Straumann <strauman@slac.stanford.edu>, 4/2002, 2003, 2004,
11 *         Stanford Linear Accelerator Center, Stanford University.
12 *
13 * Acknowledgement of sponsorship
14 * ------------------------------
15 * This software was produced by
16 *     the Stanford Linear Accelerator Center, Stanford University,
17 *         under Contract DE-AC03-76SFO0515 with the Department of Energy.
18 *
19 * Government disclaimer of liability
20 * ----------------------------------
21 * Neither the United States nor the United States Department of Energy,
22 * nor any of their employees, makes any warranty, express or implied, or
23 * assumes any legal liability or responsibility for the accuracy,
24 * completeness, or usefulness of any data, apparatus, product, or process
25 * disclosed, or represents that its use would not infringe privately owned
26 * rights.
27 *
28 * Stanford disclaimer of liability
29 * --------------------------------
30 * Stanford University makes no representations or warranties, express or
31 * implied, nor assumes any liability for the use of this software.
32 *
33 * Stanford disclaimer of copyright
34 * --------------------------------
35 * Stanford University, owner of the copyright, hereby disclaims its
36 * copyright and all other rights in this software.  Hence, anyone may
37 * freely use it for any purpose without restriction.
38 *
39 * Maintenance of notices
40 * ----------------------
41 * In the interest of clarity regarding the origin and status of this
42 * SLAC software, this and all the preceding Stanford University notices
43 * are to remain affixed to any copy or derivative of this software made
44 * or distributed by the recipient and are to be affixed to any copy of
45 * software made or distributed by the recipient that contains a copy or
46 * derivative of this software.
47 *
48 * ------------------ SLAC Software Notices, Set 4 OTT.002a, 2004 FEB 03
49 */
50
51/* Chose debugging options */
52#undef  DEBUG_MAIN              /* create a standalone (host) program for basic testing */
53#undef  DEBUG                   /* target debugging and consistency checking */
54#undef  DEBUG_EXC               /* add exception handler which reenables BAT0 and recovers from a page fault */
55
56#ifdef  DEBUG_MAIN
57#undef  DEBUG                   /* must not use these together with DEBUG_MAIN */
58#undef  DEBUG_EXC
59#endif
60
61/***************************** INCLUDE HEADERS ****************************/
62
63#ifndef DEBUG_MAIN
64#include <rtems.h>
65#include <rtems/bspIo.h>
66#include <rtems/score/percpu.h>
67#include <libcpu/cpuIdent.h>
68#include <libcpu/spr.h>
69#ifdef  DEBUG_EXC
70#include <bsp.h>
71#include <bsp/vectors.h>
72#endif
73#endif
74
75#include <stdio.h>
76#include <assert.h>
77#include <string.h>
78
79#include "pte121.h"
80
81/************************** CONSTANT DEFINITIONS **************************/
82
83/* Base 2 logs of some sizes */
84
85#ifndef DEBUG_MAIN
86
87#define LD_PHYS_SIZE    32      /* physical address space */
88#define LD_PG_SIZE              12      /* page size */
89#define LD_PTEG_SIZE    6       /* PTEG size */
90#define LD_PTE_SIZE             3       /* PTE size  */
91#define LD_SEG_SIZE             28      /* segment size */
92#define LD_MIN_PT_SIZE  16      /* minimal size of a page table */
93#define LD_HASH_SIZE    19      /* lengh of a hash */
94#define LD_VSID_SIZE    24      /* vsid bits in seg. register */
95
96#else /* DEBUG_MAIN */
97
98/* Reduced 'fantasy' sizes for testing */
99#define LD_PHYS_SIZE    32      /* physical address space */
100#define LD_PG_SIZE              6       /* page size */
101#define LD_PTEG_SIZE    5       /* PTEG size */
102#define LD_PTE_SIZE             3       /* PTE size  */
103#define LD_SEG_SIZE             28      /* segment size */
104#define LD_MIN_PT_SIZE  7       /* minimal size of a page table */
105#define LD_HASH_SIZE    19      /* lengh of a hash */
106
107#endif /* DEBUG_MAIN */
108
109/* Derived sizes */
110
111/* Size of a page index */
112#define LD_PI_SIZE              ((LD_SEG_SIZE) - (LD_PG_SIZE))
113
114/* Number of PTEs in a PTEG */
115#define PTE_PER_PTEG    (1<<((LD_PTEG_SIZE)-(LD_PTE_SIZE)))
116
117/* Segment register bits */
118#define KEY_SUP                 (1<<30) /* supervisor mode key */
119#define KEY_USR                 (1<<29) /* user mode key */
120
121/* The range of effective addresses to scan with 'tlbie'
122 * instructions in order to flush all TLBs.
123 * On the 750 and 7400, there are 128 two way I and D TLBs,
124 * indexed by EA[14:19]. Hence calling
125 *   tlbie rx
126 * where rx scans 0x00000, 0x01000, 0x02000, ... 0x3f000
127 * is sufficient to do the job
128 */
129#define NUM_TLB_PER_WAY 64      /* 750 and 7400 have 128 two way TLBs */
130#define FLUSH_EA_RANGE  (NUM_TLB_PER_WAY<<LD_PG_SIZE)
131
132/*************************** MACRO DEFINITIONS ****************************/
133
134/* Macros to split a (32bit) 'effective' address into
135 * VSID (virtual segment id) and PI (page index)
136 * using a 1:1 mapping of 'effective' to 'virtual'
137 * addresses.
138 *
139 * For 32bit addresses this looks like follows
140 * (each 'x' or '0' stands for a 'nibble' [4bits]):
141 *
142 *         32bit effective address (EA)
143 *
144 *              x x x x x x x x
145 *               |       |
146 *    0 0 0 0 0 x|x x x x|x x x
147 *       VSID    |  PI   |  PO (page offset)
148 *               |       |
149 */
150/* 1:1 VSID of an EA  */
151#define VSID121(ea) (((ea)>>LD_SEG_SIZE) & ((1<<(LD_PHYS_SIZE-LD_SEG_SIZE))-1))
152/* page index of an EA */
153#define PI121(ea)       (((ea)>>LD_PG_SIZE) & ((1<<LD_PI_SIZE)-1))
154
155/* read VSID from segment register */
156#ifndef DEBUG_MAIN
/* Read the VSID for effective address 'ea' from the segment register
 * that currently maps it ('mfsrin' selects the SR from the upper EA
 * bits), and return only the low 24 VSID bits of the SR contents.
 */
static uint32_t
seg2vsid (uint32_t ea)
{
  __asm__ volatile ("mfsrin %0, %0":"=r" (ea):"0" (ea));
  return ea & ((1 << LD_VSID_SIZE) - 1);
}
163#else
164#define seg2vsid(ea) VSID121(ea)
165#endif
166
167/* Primary and secondary PTE hash functions */
168
169/* Compute the primary hash from a VSID and a PI */
170#define PTE_HASH1(vsid, pi) (((vsid)^(pi))&((1<<LD_HASH_SIZE)-1))
171
172/* Compute the secondary hash from a primary hash */
173#define PTE_HASH2(hash1) ((~(hash1))&((1<<LD_HASH_SIZE)-1))
174
175/* Extract the abbreviated page index (which is the
176 * part of the PI which does not go into the hash
177 * under all circumstances [10 bits to -> 6bit API])
178 */
179#define API(pi) ((pi)>>((LD_MIN_PT_SIZE)-(LD_PTEG_SIZE)))
180
181
182/* Horrible Macros */
183#ifdef __rtems__
184/* must not use printf until multitasking is up */
typedef int (*PrintF) (const char *, ...);
/* Pick a safe print routine: once a thread is executing (multitasking
 * is up) use the full 'printf'; before that, fall back to 'printk'.
 */
static PrintF
whatPrintf (void)
{
  return _Thread_Executing ? printf : printk;
}
191
192#define PRINTF(args...) ((void)(whatPrintf())(args))
193#else
194#define PRINTF(args...) printf(args)
195#endif
196
197#ifdef DEBUG
198static unsigned long triv121PgTblConsistency(
199  Triv121PgTbl pt, int pass, int expect);
200
201static int consistencyPass = 0;
202#define CONSCHECK(expect) triv121PgTblConsistency(&pgTbl,consistencyPass++,(expect))
203#else
204#define CONSCHECK(expect) do {} while (0)
205#endif
206
207/**************************** TYPE DEFINITIONS ****************************/
208
209/* internal description of a trivial page table */
typedef struct Triv121PgTblRec_
{
  APte base;                    /* first PTE of the hash table */
  unsigned long size;           /* table size in bytes; 0 means 'not initialized' */
  int active;                   /* set once triv121PgTblActivate() has run */
} Triv121PgTblRec;
216
217
218/************************** FORWARD DECLARATIONS *************************/
219
220#ifdef DEBUG_EXC
221static void myhdl (BSP_Exception_frame * excPtr);
222#endif
223
224static void dumpPte (APte pte);
225
226#ifdef DEBUG
227static void
228dumpPteg (unsigned long vsid, unsigned long pi, unsigned long hash);
229#endif
230
231unsigned long
232triv121IsRangeMapped (long vsid, unsigned long start, unsigned long end);
233
234static void do_dssall (void);
235
236/**************************** STATIC VARIABLES ****************************/
237
238/* dont malloc - we might have to use this before
239 * we have malloc or even RTEMS workspace available
240 */
241static Triv121PgTblRec pgTbl = { 0 };
242
243#ifdef DEBUG_EXC
244static void *ohdl;              /* keep a pointer to the original handler */
245#endif
246
247/*********************** INLINES & PRIVATE ROUTINES ***********************/
248
249/* compute the page table entry group (PTEG) of a hash */
250static inline APte
251ptegOf (Triv121PgTbl pt, unsigned long hash)
252{
253  hash &= ((1 << LD_HASH_SIZE) - 1);
254  return (APte) (((unsigned long) pt->
255                  base) | ((hash << LD_PTEG_SIZE) & (pt->size - 1)));
256}
257
258/* see if a vsid/pi combination is already mapped
259 *
260 * RETURNS: PTE of mapping / NULL if none exists
261 *
262 * NOTE: a vsid<0 is legal and will tell this
263 *       routine that 'pi' is actually an EA to
264 *       be split into vsid and pi...
265 */
266static APte
267alreadyMapped (Triv121PgTbl pt, long vsid, unsigned long pi)
268{
269  int i;
270  unsigned long hash, api;
271  APte pte;
272
273  if (!pt->size)
274    return 0;
275
276  if (TRIV121_121_VSID == vsid) {
277    vsid = VSID121 (pi);
278    pi = PI121 (pi);
279  } else if (TRIV121_SEG_VSID == vsid) {
280    vsid = seg2vsid (pi);
281    pi = PI121 (pi);
282  }
283
284  hash = PTE_HASH1 (vsid, pi);
285  api = API (pi);
286  for (i = 0, pte = ptegOf (pt, hash); i < PTE_PER_PTEG; i++, pte++)
287    if (pte->v && pte->vsid == vsid && pte->api == api && 0 == pte->h)
288      return pte;
289  /* try the secondary hash table */
290  hash = PTE_HASH2 (hash);
291  for (i = 0, pte = ptegOf (pt, hash); i < PTE_PER_PTEG; i++, pte++)
292    if (pte->v && pte->vsid == vsid && pte->api == api && 1 == pte->h)
293      return pte;
294  return 0;
295}
296
297/* find the first available slot for  vsid/pi
298 *
299 * NOTE: it is NOT legal to pass a vsid<0 / EA combination.
300 *
301 * RETURNS free slot with the 'marked' field set. The 'h'
302 *         field is set to 0 or one, depending on whether
303 *         the slot was allocated by using the primary or
304 *         the secondary hash, respectively.
305 */
static APte
slotFor (Triv121PgTbl pt, unsigned long vsid, unsigned long pi)
{
  int i;
  unsigned long hash;
  APte pte;

  /* primary hash */
  hash = PTE_HASH1 (vsid, pi);
  /* linear search thru all buckets for this hash */
  for (i = 0, pte = ptegOf (pt, hash); i < PTE_PER_PTEG; i++, pte++) {
    if (!pte->v && !pte->marked) {
      /* found a free PTE; mark it as potentially used and return */
      pte->h = 0;               /* found by the primary hash fn */
      pte->marked = 1;
      return pte;
    }
  }

#ifdef DEBUG
  /* Strange: if the hash table was allocated big enough,
   *          this should not happen (when using a 1:1 mapping)
   *          Give them some information...
   */
  PRINTF ("## First hash bucket full - ");
  dumpPteg (vsid, pi, hash);
#endif

  /* primary PTEG is full; retry with the secondary hash */
  hash = PTE_HASH2 (hash);
#ifdef DEBUG
  PRINTF ("   Secondary pteg is 0x%08x\n", (unsigned) ptegOf (pt, hash));
#endif
  for (i = 0, pte = ptegOf (pt, hash); i < PTE_PER_PTEG; i++, pte++) {
    if (!pte->v && !pte->marked) {
      /* mark this pte as potentially used */
      pte->marked = 1;
      pte->h = 1;               /* found by the secondary hash fn */
      return pte;
    }
  }
#ifdef DEBUG
  /* Even more strange - most likely, something is REALLY messed up */
  PRINTF ("## Second hash bucket full - ");
  dumpPteg (vsid, pi, hash);
#endif
  /* both PTEGs are full; the caller must give up */
  return 0;
}
353
354/* unmark all entries */
355static void
356unmarkAll (Triv121PgTbl pt)
357{
358  unsigned long n = pt->size / sizeof (PTERec);
359  unsigned long i;
360  APte pte;
361  for (i = 0, pte = pt->base; i < n; i++, pte++)
362    pte->marked = 0;
363
364}
365
366/* calculate the minimal size of a page/hash table
367 * to map a range of 'size' bytes in EA space.
368 *
369 * RETURNS: size in 'number of bits', i.e. the
370 *          integer part of LOGbase2(minsize)
371 *          is returned.
372 * NOTE:        G3/G4 machines need at least 16 bits
373 *          (64k).
374 */
375unsigned long
376triv121PgTblLdMinSize (unsigned long size)
377{
378  unsigned long i;
379  /* round 'size' up to the next page boundary */
380  size += (1 << LD_PG_SIZE) - 1;
381  size &= ~((1 << LD_PG_SIZE) - 1);
382  /* divide by number of PTEs  and multiply
383   * by the size of a PTE.
384   */
385  size >>= LD_PG_SIZE - LD_PTE_SIZE;
386  /* find the next power of 2 >= size */
387  for (i = 0; i < LD_PHYS_SIZE; i++) {
388    if ((1 << i) >= size)
389      break;
390  }
391  /* pop up to the allowed minimum, if necessary */
392  if (i < LD_MIN_PT_SIZE)
393    i = LD_MIN_PT_SIZE;
394  return i;
395}
396
397/* initialize a trivial page table of 2^ldSize bytes
398 * at 'base' in memory.
399 *
400 * RETURNS:     OPAQUE HANDLE (not the hash table address)
401 *          or NULL on failure.
402 */
Triv121PgTbl
triv121PgTblInit (unsigned long base, unsigned ldSize)
{
  /* only one page table instance is supported (static 'pgTbl') */
  if (pgTbl.size) {
    /* already initialized */
    return 0;
  }

  if (ldSize < LD_MIN_PT_SIZE)
    return 0;                   /* too small */

  /* the table must be naturally aligned to its size */
  if (base & ((1 << ldSize) - 1))
    return 0;                   /* misaligned */

  /* This was tested on 604r, 750 and 7400.
   * On other CPUs, verify that the TLB invalidation works
   * for a new CPU variant and that it has hardware PTE lookup/
   * TLB replacement before adding it to this list.
   *
   * NOTE: The 603 features no hardware PTE lookup - and
   *       hence the page tables should NOT be used.
   *               Although lookup could be implemented in
   *               software this is probably not desirable
   *               as it could have an impact on hard realtime
   *               performance, screwing deterministic latency!
   *               (Could still be useful for debugging, though)
   */
  if ( ! ppc_cpu_has_hw_ptbl_lkup() )
    return 0;                   /* unsupported by this CPU */

  pgTbl.base = (APte) base;
  pgTbl.size = 1 << ldSize;
  /* clear all page table entries */
  memset (pgTbl.base, 0, pgTbl.size);

  CONSCHECK (0);

  /* map the page table itself 'm' and 'readonly' */
  if (triv121PgTblMap (&pgTbl,
                       TRIV121_121_VSID,
                       base,
                       (pgTbl.size >> LD_PG_SIZE),
                       TRIV121_ATTR_M, TRIV121_PP_RO_PAGE) >= 0)
    /* NOTE: triv121PgTblMap() returns TRIV121_MAP_SUCCESS (-1) on
     *       success; a value >= 0 is the page index that failed.
     */
    return 0;

  CONSCHECK ((pgTbl.size >> LD_PG_SIZE));

  return &pgTbl;
}
452
453/* return the handle of the (one and only) page table
454 * or NULL if none has been initialized yet.
455 */
456Triv121PgTbl
457triv121PgTblGet (void)
458{
459  return pgTbl.size ? &pgTbl : 0;
460}
461
462/* NOTE: this routine returns -1 on success;
463 *       on failure, the page table index for
464 *       which no PTE could be allocated is returned
465 *
466 * (Consult header about argument/return value
467 * description)
468 */
long
triv121PgTblMap (Triv121PgTbl pt,
                 long ovsid,
                 unsigned long start,
                 unsigned long numPages,
                 unsigned attributes, unsigned protection)
{
  int i, pass;
  unsigned long pi;
  APte pte;
  long vsid;
#ifdef DEBUG
  long saved_vsid = ovsid;
#endif

  /* resolve the special VSID values; 'start' is an EA in these cases */
  if (TRIV121_121_VSID == ovsid) {
    /* use 1:1 mapping */
    ovsid = VSID121 (start);
  } else if (TRIV121_SEG_VSID == ovsid) {
    ovsid = seg2vsid (start);
  }

#ifdef DEBUG
  PRINTF ("Mapping %i (0x%x) pages at 0x%08x for VSID 0x%08x\n",
          (unsigned) numPages, (unsigned) numPages,
          (unsigned) start, (unsigned) ovsid);
#endif

  /* map in two passes. During the first pass, we try
   * to claim entries as needed. The 'slotFor()' routine
   * will 'mark' the claimed entries without 'valid'ating
   * them.
   * If the mapping fails, all claimed entries are unmarked
   * and we return the PI for which allocation failed.
   *
   * Once we know that the allocation would succeed, we
   * do a second pass; during the second pass, the PTE
   * is actually written.
   *
   */
  for (pass = 0; pass < 2; pass++) {
    /* check if we would succeed during the first pass */
    for (i = 0, pi = PI121 (start), vsid = ovsid; i < numPages; i++, pi++) {
      /* page index overflowed the segment; continue in the next one */
      if (pi >= 1 << LD_PI_SIZE) {
        vsid++;
        pi = 0;
      }
      /* leave alone existing mappings for this EA */
      if (!alreadyMapped (pt, vsid, pi)) {
        if (!(pte = slotFor (pt, vsid, pi))) {
          /* no free slot found for page index 'pi' */
          unmarkAll (pt);
          return pi;
        } else {
          /* have a free slot; marked by slotFor() */
          if (pass) {
            /* second pass; do the real work */
            pte->vsid = vsid;
            /* H was set by slotFor() */
            pte->api = API (pi);
            /* set up 1:1 mapping */
            pte->rpn =
              ((((unsigned long) vsid) &
                ((1 << (LD_PHYS_SIZE - LD_SEG_SIZE)) -
                 1)) << LD_PI_SIZE) | pi;
            pte->wimg = attributes & 0xf;
            pte->pp = protection & 0x3;
            /* mark it valid */
            pte->marked = 0;
            if (pt->active) {
              /* table is live: 'v' must become visible only after all
               * other PTE fields are written, and without interruption.
               */
              uint32_t flags;
              rtems_interrupt_disable (flags);
              /* order setting 'v' after writing everything else */
              __asm__ volatile ("eieio":::"memory");
              pte->v = 1;
              __asm__ volatile ("sync":::"memory");
              rtems_interrupt_enable (flags);
            } else {
              /* table not in use by the MMU yet; plain store suffices */
              pte->v = 1;
            }

#ifdef DEBUG
            /* add paranoia */
            assert (alreadyMapped (pt, vsid, pi) == pte);
#endif
          }
        }
      }
    }
    unmarkAll (pt);
  }
#ifdef DEBUG
  {
    unsigned long failedat;
    CONSCHECK (-1);
    /* double check that the requested range is mapped */
    failedat =
      triv121IsRangeMapped (saved_vsid, start,
                            start + (1 << LD_PG_SIZE) * numPages);
    if (0x0C0C != failedat) {
      PRINTF ("triv121 mapping failed at 0x%08x\n", (unsigned) failedat);
      return PI121 (failedat);
    }
  }
#endif
  return TRIV121_MAP_SUCCESS;   /* -1 !! */
}
576
577unsigned long
578triv121PgTblSDR1 (Triv121PgTbl pt)
579{
580  return (((unsigned long) pt->base) & ~((1 << LD_MIN_PT_SIZE) - 1)) |
581    (((pt->size - 1) >> LD_MIN_PT_SIZE) &
582     ((1 << (LD_HASH_SIZE - (LD_MIN_PT_SIZE - LD_PTEG_SIZE))) - 1)
583    );
584}
585
/* Activate the page table: set up the segment registers with a 1:1
 * EA->VA mapping, flush all TLBs, load SDR1 and mark the table active.
 * All of this is done with interrupts and the MMU switched off.
 */
void
triv121PgTblActivate (Triv121PgTbl pt)
{
#ifndef DEBUG_MAIN
  unsigned long          sdr1 = triv121PgTblSDR1 (pt);
  register unsigned long tmp0 = 16;     /* initial counter value (#segment regs) */
  register unsigned long tmp1 = (KEY_USR | KEY_SUP);
  register unsigned long tmp2 = (MSR_EE | MSR_IR | MSR_DR);
#endif
  pt->active = 1;

#ifndef DEBUG_MAIN
#ifdef DEBUG_EXC
  /* install our exception handler */
  ohdl = globalExceptHdl;
  globalExceptHdl = myhdl;
  __asm__ __volatile__ ("sync"::"memory");
#endif

  /* This section of assembly code takes care of the
   * following:
   * - get MSR and switch interrupts + MMU off
   *
   * - load up the segment registers with a
   *   1:1 effective <-> virtual mapping;
   *   give user & supervisor keys
   *
   * - flush all TLBs;
   *   NOTE: the TLB flushing code is probably
   *         CPU dependent!
   *
   * - setup SDR1
   *
   * - restore original MSR
   */
  __asm__ __volatile (
    "   mtctr   %[tmp0]\n"
    /* Get MSR and switch interrupts off - just in case.
     * Also switch the MMU off; the book
     * says that SDR1 must not be changed with either
     * MSR_IR or MSR_DR set. I would guess that it could
     * be safe as long as the IBAT & DBAT mappings override
     * the page table...
     */
    "   mfmsr   %[tmp0]\n"
    "   andc    %[tmp2], %[tmp0], %[tmp2]\n"
    "   mtmsr   %[tmp2]\n"
    "   isync   \n"
    /* set up the segment registers */
    "   li              %[tmp2], 0\n"
    "1: mtsrin  %[tmp1], %[tmp2]\n"
    "   addis   %[tmp2], %[tmp2], 0x1000\n" /* address next SR */
    "   addi    %[tmp1], %[tmp1], 1\n"      /* increment VSID  */
    "   bdnz    1b\n"
    /* Now flush all TLBs, starting with the topmost index */
    "   lis             %[tmp2], %[ea_range]@h\n"
    "2: addic.  %[tmp2], %[tmp2], -%[pg_sz]\n"    /* address the next one (decrementing) */
    "   tlbie   %[tmp2]\n"             /* invalidate & repeat */
    "   bgt             2b\n"
    "   eieio   \n"
    "   tlbsync \n"
    "   sync    \n"
    /* set up SDR1 */
    "   mtspr   %[sdr1], %[sdr1val]\n"
    /* restore original MSR  */
    "   mtmsr   %[tmp0]\n"
    "   isync   \n"
      :[tmp0]"+r&"(tmp0), [tmp1]"+b&"(tmp1), [tmp2]"+b&"(tmp2)
      :[ea_range]"i"(FLUSH_EA_RANGE), [pg_sz]"i" (1 << LD_PG_SIZE),
       [sdr1]"i"(SDR1), [sdr1val]"r" (sdr1)
      :"ctr", "cc", "memory"
  );

  /* At this point, BAT0 is probably still active; it's the
   * caller's job to deactivate it...
   */
#endif
}
664
665/**************************  DEBUGGING ROUTINES  *************************/
666
667/* Exception handler to catch page faults */
668#ifdef DEBUG_EXC
669
670#define BAT_VALID_BOTH  3       /* allow user + super access */
671
/* Debug exception handler: on a DSI (exception number 3), re-enable
 * DBAT0 so memory is reachable again, print the DSISR and fake a
 * recoverable exception before chaining to the original handler.
 */
static void
myhdl (BSP_Exception_frame * excPtr)
{
  if (3 == excPtr->_EXC_number) {
    unsigned long dsisr;

    /* reactivate DBAT0 and read DSISR */
    __asm__ __volatile__ (
      "mfspr %0, %1   \n"
      "ori   %0, %0, 3\n"
      "mtspr %1, %0   \n"
      "sync\n"
      "mfspr %0, %2\n"
        :"=&r" (dsisr)
        :"i" (DBAT0U), "i" (DSISR), "i" (BAT_VALID_BOTH)
    );

    printk ("Data Access Exception (DSI) # 3\n");
    printk ("Reactivated DBAT0 mapping\n");


    printk ("DSISR 0x%08x\n", dsisr);

    printk ("revectoring to prevent default handler panic().\n");
    printk ("NOTE: exception number %i below is BOGUS\n", ASM_DEC_VECTOR);
    /* make this exception 'recoverable' for
     * the default handler by faking a decrementer
     * exception.
     * Note that the default handler's message will be
     * wrong about the exception number.
     */
    excPtr->_EXC_number = ASM_DEC_VECTOR;
  }
/* now call the original handler */
  ((void (*)()) ohdl) (excPtr);
}
708#endif
709
710
711
712#ifdef DEBUG
713/* test the consistency of the page table
714 *
715 * 'pass' is merely a number which will be printed
716 * by this routine, so the caller may give some
717 * context information.
718 *
719 * 'expected' is the number of valid (plus 'marked')
720 * entries the caller believes the page table should
721 * have. This routine complains if its count differs.
722 *
723 * It basically verifies that the topmost 20bits
724 * of all VSIDs as well as the unused bits are all
725 * zero. Then it counts all valid and all 'marked'
726 * entries, adding them up and comparing them to the
727 * 'expected' number of occupied slots.
728 *
729 * RETURNS: total number of valid plus 'marked' slots.
730 */
static unsigned long
triv121PgTblConsistency (Triv121PgTbl pt, int pass, int expected)
{
  APte pte;
  int i;
  unsigned v, m;                /* counters: valid and 'marked' entries */
  int warn = 0;
  int errs = 0;
  static int maxw = 20;         /* mute after detecting this many errors */

  PRINTF ("Checking page table at 0x%08x (size %i==0x%x)\n",
          (unsigned) pt->base, (unsigned) pt->size, (unsigned) pt->size);

  if (!pt->base || !pt->size) {
    PRINTF ("Uninitialized Page Table!\n");
    return 0;
  }

  v = m = 0;
#if 1
  /* 10/9/2002: I had machine checks crashing after this loop
   *            terminated. Maybe caused by speculative loads
   *            from beyond the valid memory area (since the
   *            page hash table sits at the top of physical
   *            memory).
   *            Very bizarre - the other loops in this file
   *            seem to be fine. Maybe there is a compiler bug??
   *            For the moment, I let the loop run backwards...
   *
   *                        Also see the comment a couple of lines down.
   */
  for (i = pt->size / sizeof (PTERec) - 1, pte = pt->base + i; i >= 0;
       i--, pte--)
#else
  for (i = 0, pte = pt->base; i < pt->size / sizeof (PTERec); i++, pte++)
#endif
  {
    int err = 0;                /* 1 == hard error, 2 == warning */
    char buf[500];
    unsigned long *lp = (unsigned long *) pte;
#if 0
    /* If I put this bogus while statement here (the body is
     * never reached), the original loop works OK
     */
    while (pte >= pt->base + pt->size / sizeof (PTERec))
      /* never reached */ ;
#endif

    if ( /* T.S: allow any VSID... (*lp & (0xfffff0 << 7)) || */ (*(lp + 1) & 0xe00)
        || (pte->v && pte->marked)) {
      /* check for vsid (without segment bits) == 0, unused bits == 0, valid && marked */
      sprintf (buf, "unused bits or v && m");
      err = 1;
    } else {
      /* non-1:1 VSID is legal but surprising; flag it as a warning only */
      if ( (*lp & (0xfffff0 << 7)) ) {
        sprintf(buf,"(warning) non-1:1 VSID found");
        err = 2;
      }
      if (pte->v)
        v++;
      if (pte->marked)
        m++;
    }
    if (err && maxw) {
      PRINTF
        ("Pass %i -- strange PTE at 0x%08x found for page index %i == 0x%08x:\n",
         pass, (unsigned) pte, i, i);
      PRINTF ("Reason: %s\n", buf);
      dumpPte (pte);
      if ( err & 2 ) {
         warn++;
      } else {
         errs++;
      }
      maxw--;
    }
  }
  if (errs) {
    PRINTF ("%i errors %s", errs, warn ? "and ":"");
  }
  if (warn) {
    PRINTF ("%i warnings ",warn);
  }
  if (errs || warn) {
    PRINTF ("found; currently %i entries marked, %i are valid\n",
            m, v);
  }
  /* total occupied slots = valid + marked; compare to the caller's count */
  v += m;
  if (maxw && expected >= 0 && expected != v) {
    /* number of occupied slots not what they expected */
    PRINTF ("Wrong # of occupied slots detected during pass");
    PRINTF ("%i; should be %i (0x%x) is %i (0x%x)\n",
            pass, expected, (unsigned) expected, v, (unsigned) v);
    maxw--;
  }
  return v;
}
828#endif
829
830/* Find the PTE for a EA and print its contents
831 * RETURNS: pte for EA or NULL if no entry was found.
832 */
833APte
834triv121DumpEa (unsigned long ea)
835{
836  APte pte;
837
838  pte =
839    alreadyMapped (&pgTbl, pgTbl.active ? TRIV121_SEG_VSID : TRIV121_121_VSID,
840                   ea);
841
842  if (pte)
843    dumpPte (pte);
844  return pte;
845}
846
847APte
848triv121FindPte (unsigned long vsid, unsigned long pi)
849{
850  return alreadyMapped (&pgTbl, vsid, pi);
851}
852
/* Invalidate the PTE that maps 'ea'. If the table is live, the TLB
 * entry for 'ea' is flushed as well, with interrupts disabled around
 * the sequence.
 * RETURNS: the (now invalid) PTE, or NULL if 'ea' was not mapped.
 */
APte
triv121UnmapEa (unsigned long ea)
{
  uint32_t flags;
  APte pte;

  if (!pgTbl.active) {
    /* table not in use by the MMU; a plain store is enough */
    pte = alreadyMapped (&pgTbl, TRIV121_121_VSID, ea);
    if (pte)                    /* alreadyMapped checks for pte->v */
      pte->v = 0;
    return pte;
  }

  pte = alreadyMapped (&pgTbl, TRIV121_SEG_VSID, ea);

  if (!pte)
    return 0;

  rtems_interrupt_disable (flags);
  pte->v = 0;
  do_dssall ();
  /* flush the TLB entry for 'ea' and wait for completion */
  __asm__ volatile ("   sync            \n\t"
                "       tlbie %0        \n\t"
                "       eieio           \n\t"
                "       tlbsync         \n\t"
                "       sync            \n\t"::"r" (ea):"memory");
  rtems_interrupt_enable (flags);
  return pte;
}
882
/* A context synchronizing jump: load 'msr' into SRR1 and the address
 * of the instruction following the 'rfi' into SRR0, then execute 'rfi'.
 * The net effect is a fully context-synchronized MSR update (e.g. for
 * switching the MMU or interrupts on/off).
 * NOTE: clobbers r3 and the link register (see clobber list).
 */
#define SYNC_LONGJMP(msr)                               \
        asm volatile(                                           \
                "       mtsrr1  %0                      \n\t"   \
                "       bl              1f                      \n\t"   \
                "1:     mflr    3                       \n\t"   \
                "       addi    3,3,1f-1b       \n\t"   \
                "       mtsrr0  3                       \n\t"   \
                "       rfi                                     \n\t"   \
                "1:                                             \n\t"   \
                :                                                               \
                :"r"(msr)                                               \
                :"3","lr","memory")
896
897/* The book doesn't mention dssall when changing PTEs
898 * but they require it for BAT changes and I guess
899 * it makes sense in the case of PTEs as well.
900 * Just do it to be on the safe side...
901 */
/* Issue a 'dssall' (stop all altivec data streams) if altivec appears
 * to be enabled; a no-op otherwise (and on psim, which lacks dssall).
 */
static void
do_dssall (void)
{
  /* Before changing BATs, 'dssall' must be issued.
   * We check MSR for MSR_VE and issue a 'dssall' if
   * MSR_VE is set hoping that
   *  a) on non-altivec CPUs MSR_VE reads as zero
   *  b) all altivec CPUs use the same bit
   *
   * NOTE: psim doesn't implement dssall so we skip if we run on psim
   */
  if ( (_read_MSR () & MSR_VE) && PPC_PSIM != get_ppc_cpu_type() ) {
    /* this construct is needed because we don't know
     * if this file is compiled with -maltivec.
     * (I plan to add altivec support outside of
     * RTEMS core and hence I'd rather not
     * rely on consistent compiler flags).
     */
#define DSSALL  0x7e00066c      /* dssall opcode */
    /* emit the raw opcode so no -maltivec is required to assemble it */
    __asm__ volatile (" .long %0"::"i" (DSSALL));
#undef  DSSALL
  }
}
925
/* Change the WIMG attributes and/or PP protection of the page mapping
 * 'ea'. Negative 'wimg'/'pp' leave the respective field untouched.
 * If the table is live, the update is done with the MMU and interrupts
 * switched off and the stale TLB entry is flushed.
 * RETURNS: the affected PTE, or NULL if 'ea' is not mapped.
 */
APte
triv121ChangeEaAttributes (unsigned long ea, int wimg, int pp)
{
  APte pte;
  unsigned long msr;

  if (!pgTbl.active) {
    /* table not used by the MMU yet; modify the PTE directly */
    pte = alreadyMapped (&pgTbl, TRIV121_121_VSID, ea);
    if (!pte)
      return 0;
    if (wimg > 0)
      pte->wimg = wimg;
    if (pp > 0)
      pte->pp = pp;
    return pte;
  }

  pte = alreadyMapped (&pgTbl, TRIV121_SEG_VSID, ea);

  if (!pte)
    return 0;

  /* nothing to change; avoid the expensive MMU round-trip */
  if (wimg < 0 && pp < 0)
    return pte;

  __asm__ volatile ("mfmsr %0":"=r" (msr));

  /* switch MMU and IRQs off */
  SYNC_LONGJMP (msr & ~(MSR_EE | MSR_DR | MSR_IR));

  /* invalidate, modify, flush the TLB entry, then re-validate */
  pte->v = 0;
  do_dssall ();
  __asm__ volatile ("sync":::"memory");
  if (wimg >= 0)
    pte->wimg = wimg;
  if (pp >= 0)
    pte->pp = pp;
  __asm__ volatile ("tlbie %0; eieio"::"r" (ea):"memory");
  pte->v = 1;
  __asm__ volatile ("tlbsync; sync":::"memory");

  /* restore, i.e., switch MMU and IRQs back on */
  SYNC_LONGJMP (msr);

  return pte;
}
972
973static void
974pgtblChangePP (Triv121PgTbl pt, int pp)
975{
976  unsigned long n = pt->size >> LD_PG_SIZE;
977  unsigned long b, i;
978
979  for (i = 0, b = (unsigned long) pt->base; i < n;
980       i++, b += (1 << LD_PG_SIZE)) {
981    triv121ChangeEaAttributes (b, -1, pp);
982  }
983}
984
985void
986triv121MakePgTblRW ()
987{
988  pgtblChangePP (&pgTbl, TRIV121_PP_RW_PAGE);
989}
990
991void
992triv121MakePgTblRO ()
993{
994  pgtblChangePP (&pgTbl, TRIV121_PP_RO_PAGE);
995}
996
997long
998triv121DumpPte (APte pte)
999{
1000  if (pte)
1001    dumpPte (pte);
1002  return 0;
1003}
1004
1005
1006#ifdef DEBUG
1007/* Dump an entire PTEG */
1008
1009static void
1010dumpPteg (unsigned long vsid, unsigned long pi, unsigned long hash)
1011{
1012  APte pte = ptegOf (&pgTbl, hash);
1013  int i;
1014  PRINTF ("hash 0x%08x, pteg 0x%08x (vsid 0x%08x, pi 0x%08x)\n",
1015          (unsigned) hash, (unsigned) pte, (unsigned) vsid, (unsigned) pi);
1016  for (i = 0; i < PTE_PER_PTEG; i++, pte++) {
1017    PRINTF ("pte 0x%08x is 0x%08x : 0x%08x\n",
1018            (unsigned) pte,
1019            (unsigned) *(unsigned long *) pte,
1020            (unsigned) *(((unsigned long *) pte) + 1));
1021  }
1022}
1023#endif
1024
/* Verify that a range of addresses is mapped in the page table.
1026 * start/end are segment offsets or EAs (if vsid has one of
1027 * the special values), respectively.
1028 *
1029 * RETURNS: address of the first page for which no
1030 *          PTE was found (i.e. page index * page size)
1031 *
1032 *          ON SUCCESS, the special value 0x0C0C ("OKOK")
1033 *          [which is not page aligned and hence is not
1034 *          a valid page address].
1035 */
1036
1037unsigned long
1038triv121IsRangeMapped (long vsid, unsigned long start, unsigned long end)
1039{
1040unsigned pi;
1041
1042  start &= ~((1 << LD_PG_SIZE) - 1);
1043  while (start < end) {
1044    if ( TRIV121_SEG_VSID != vsid && TRIV121_121_VSID != vsid )
1045      pi = PI121(start);
1046    else
1047      pi = start;
1048    if (!alreadyMapped (&pgTbl, vsid, pi))
1049      return start;
1050    start += 1 << LD_PG_SIZE;
1051  }
1052  return 0x0C0C;                /* OKOK - not on a page boundary */
1053}
1054
1055
1056#include <stdlib.h>
1057
1058/* print a PTE */
1059static void
1060dumpPte (APte pte)
1061{
1062  if (0 == ((unsigned long) pte & ((1 << LD_PTEG_SIZE) - 1)))
1063    PRINTF ("PTEG--");
1064  else
1065    PRINTF ("......");
1066  if (pte->v) {
1067    PRINTF ("VSID: 0x%08x H:%1i API: 0x%02x\n", pte->vsid, pte->h, pte->api);
1068    PRINTF ("      ");
1069    PRINTF ("RPN:  0x%08x WIMG: 0x%1x, (m %1i), pp: 0x%1x\n",
1070            pte->rpn, pte->wimg, pte->marked, pte->pp);
1071  } else {
1072    PRINTF ("xxxxxx\n");
1073    PRINTF ("      ");
1074    PRINTF ("xxxxxx\n");
1075  }
1076}
1077
1078
1079#if defined(DEBUG_MAIN)
1080/* dump page table entries from index 'from' to 'to'
1081 * The special values (unsigned)-1 are allowed which
1082 * cause the routine to dump the entire table.
1083 *
1084 * RETURNS 0
1085 */
1086int
1087triv121PgTblDump (Triv121PgTbl pt, unsigned from, unsigned to)
1088{
1089  int i;
1090  APte pte;
1091  PRINTF ("Dumping PT [size 0x%08x == %i] at 0x%08x\n",
1092          (unsigned) pt->size, (unsigned) pt->size, (unsigned) pt->base);
1093  if (from > pt->size >> LD_PTE_SIZE)
1094    from = 0;
1095  if (to > pt->size >> LD_PTE_SIZE)
1096    to = (pt->size >> LD_PTE_SIZE);
1097  for (i = from, pte = pt->base + from; i < (long) to; i++, pte++) {
1098    dumpPte (pte);
1099  }
1100  return 0;
1101}
1102
1103
1104
1105#define LD_DBG_PT_SIZE  LD_MIN_PT_SIZE
1106
1107int
1108main (int argc, char **argv)
1109{
1110  unsigned long base, start, numPages;
1111  unsigned long size = 1 << LD_DBG_PT_SIZE;
1112  Triv121PgTbl pt;
1113
1114  base = (unsigned long) malloc (size << 1);
1115
1116  assert (base);
1117
1118  /* align pt */
1119  base += size - 1;
1120  base &= ~(size - 1);
1121
1122  assert (pt = triv121PgTblInit (base, LD_DBG_PT_SIZE));
1123
1124  triv121PgTblDump (pt, (unsigned) -1, (unsigned) -1);
1125  do {
1126    do {
1127      PRINTF ("Start Address:");
1128      fflush (stdout);
1129    } while (1 != scanf ("%i", &start));
1130    do {
1131      PRINTF ("# pages:");
1132      fflush (stdout);
1133    } while (1 != scanf ("%i", &numPages));
1134  } while (TRIV121_MAP_SUCCESS ==
1135           triv121PgTblMap (pt, TRIV121_121_VSID, start, numPages,
1136                            TRIV121_ATTR_IO_PAGE, 2)
1137           && 0 == triv121PgTblDump (pt, (unsigned) -1, (unsigned) -1));
1138}
1139#endif
Note: See TracBrowser for help on using the repository browser.