source: rtems/c/src/lib/libbsp/powerpc/shared/bootloader/mm.c @ 7e85bfbe

/*
 *  mm.c -- Crude memory management for early boot.
 *
 *  Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
 *
 *  Modified to compile in RTEMS development environment
 *  by Eric Valette
 *
 *  Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 * $Id$
 */

/* This code is a crude memory manager for early boot for LinuxPPC.
 * As such, it does not try to perform many optimizations depending
 * on the processor; it only uses features which are common to
 * all processors (no BATs...).
 *
 * On PReP platforms (the only ones on which it works for now),
 * it maps 1:1 all RAM/ROM and I/O space as claimed by the
 * residual data. The holes between these areas can be virtually
 * remapped to any of these, since for some functions it is very handy
 * to have virtually contiguous but physically discontiguous memory.
 *
 * Physical memory allocation is also very crude, since it's only
 * designed to manage a small number of large chunks. For valloc/vfree
 * and palloc/pfree, the unit of allocation is the 4kB page.
 *
 * salloc/sfree have been added after tracing gunzip and seeing
 * how it performed a very large number of small allocations.
 * For these the unit of allocation is 8 bytes (the s stands for
 * small or subpage). This memory is cleared when allocated.
 *
 */
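/* A minimal usage sketch of the three allocator families described above.
 * This block is illustrative only ("buf", "window" and "small" are made-up
 * names); real callers are elsewhere in the bootloader.
 */
#if 0
        void *buf    = __palloc(4*PAGE_SIZE, 0); /* page-granular physical memory */
        void *window = valloc(2*PAGE_SIZE);      /* page-granular virtual space */
        void *small  = salloc(24);               /* 8-byte granule, zero-filled */

        sfree(small);
        vfree(window);
        pfree(buf);
#endif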

#include <rtems/bspIo.h>

#include <sys/types.h>
#include <libcpu/spr.h>
#include "bootldr.h"
#include <libcpu/mmu.h>
#include <libcpu/page.h>
#include <limits.h>

extern void (tlb_handlers)(void);
extern void (_handler_glue)(void);

/* We use our own kind of simple memory areas for the loader, but
 * we want to avoid potential clashes with kernel includes.
 * Here a map describes a contiguous area from base to end;
 * the firstpte entry corresponds to the physical address and has the
 * low-order bits set for caching and permission.
 */

typedef struct _map {
        struct _map *next;
        u_long base;
        u_long end;
        u_long firstpte;
} map;

/* The LSBs of the firstpte entries on map lists other than mappings
 * are constants which can be checked for debugging. All these constants
 * have the bit of weight 4 set; this bit is zero in the mappings list
 * entries. Actually the firstpte&7 value is:
 * - 0 or 1: should not happen
 * - 2 for RW actual virtual->physical mappings
 * - 3 for RO actual virtual->physical mappings
 * - 6 for free areas to be suballocated by salloc
 * - 7 for salloc'ated areas
 * - 4 or 5 for all others, in this case firstpte & 63 is
 *   - 4 for unused maps (on the free list)
 *   - 12 for free physical memory
 *   - 13 for physical memory in use
 *   - 20 for free virtual address space
 *   - 21 for allocated virtual address space
 *   - 28 for physical memory space suballocated by salloc
 *   - 29 for physical memory that can't be freed
 */

#define MAP_FREE_SUBS 6
#define MAP_USED_SUBS 7

#define MAP_FREE 4
#define MAP_FREE_PHYS 12
#define MAP_USED_PHYS 13
#define MAP_FREE_VIRT 20
#define MAP_USED_VIRT 21
#define MAP_SUBS_PHYS 28
#define MAP_PERM_PHYS 29
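/* A minimal sketch of how the tag bits documented above can be decoded.
 * These helpers are purely illustrative and are not used by the loader.
 */
#if 0
static inline int map_is_mapping(const map *p) {
        /* Real virtual->physical mappings never have the bit of weight 4. */
        return (p->firstpte & 4) == 0;
}

static inline int map_is_unused(const map *p) {
        /* Maps on the free list carry the MAP_FREE tag in their low 6 bits. */
        return (p->firstpte & 63) == MAP_FREE;
}
#endif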

SPR_RW(SDR1);
SPR_RO(DSISR);
SPR_RO(PPC_DAR);

/* We need a few statically allocated free maps to bootstrap the
 * memory management */
static map free_maps[4] = {{free_maps+1, 0, 0, MAP_FREE},
                           {free_maps+2, 0, 0, MAP_FREE},
                           {free_maps+3, 0, 0, MAP_FREE},
                           {NULL, 0, 0, MAP_FREE}};
struct _mm_private {
        void *sdr1;
        u_long hashmask;
        map *freemaps;     /* Pool of unused map structs */
        map *mappings;     /* Sorted list of virtual->physical mappings */
        map *physavail;    /* Unallocated physical address space */
        map *physused;     /* Allocated physical address space */
        map *physperm;     /* Permanently allocated physical space */
        map *virtavail;    /* Unallocated virtual address space */
        map *virtused;     /* Allocated virtual address space */
        map *sallocfree;   /* Free maps for salloc */
        map *sallocused;   /* Used maps for salloc */
        map *sallocphys;   /* Physical areas used by salloc */
        u_int hashcnt;     /* Used to cycle in PTEG when they overflow */
} mm_private = {hashmask: 0xffc0,
                freemaps: free_maps+0};

/* A simplified hash table entry declaration */
typedef struct _hash_entry {
        int key;
        u_long rpn;
} hash_entry;
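/* key holds the first word of a classic 32-bit PowerPC PTE: the sign bit is
 * the valid bit (hence the key < 0 / key >= 0 tests below), vsid<<7 is the
 * virtual segment ID, 0x40 marks a secondary-hash entry and the low six bits
 * are the abbreviated page index (vaddr>>22). rpn holds the second word.
 */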

void print_maps(map *, const char *);

/* The handler is used for all exceptions, although for now it is only
 * designed to properly handle MMU interrupts to fill the hash table.
 */

void _handler(int vec, ctxt *p) {
        map *area;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        u_long vaddr, cause;
        if (vec==4 || vec==7) { /* ISI exceptions are different */
                vaddr = p->nip;
                cause = p->msr;
        } else { /* Valid for DSI and alignment exceptions */
                vaddr = _read_PPC_DAR();
                cause = _read_DSISR();
        }

        if (vec==3 || vec==4) {
                /* Panic if the fault is not PTE not found. */
                if (!(cause & 0x40000000)) {
                        MMUon();
                        printk("\nPanic: vector=%x, cause=%lx\n", vec, cause);
                        hang("Memory protection violation at ", vaddr, p);
                }

                for(area=mm->mappings; area; area=area->next) {
                        if(area->base<=vaddr && vaddr<=area->end) break;
                }

                if (area) {
                        u_long hash, vsid, rpn;
                        hash_entry volatile *hte, *_hte1;
                        u_int i, alt=0, flushva;

                        vsid = _read_SR((void *)vaddr);
                        rpn = (vaddr&PAGE_MASK)-area->base+area->firstpte;
                        hash = vsid<<6;
                        hash ^= (vaddr>>(PAGE_SHIFT-6))&0x3fffc0;
                        hash &= mm->hashmask;
                        /* Find an empty entry in the PTEG, else
                         * replace a random one.
                         */
                        hte = (hash_entry *) ((u_long)(mm->sdr1)+hash);
                        for (i=0; i<8; i++) {
                                if (hte[i].key>=0) goto found;
                        }
                        hash ^= mm->hashmask;
                        alt = 0x40; _hte1 = hte;
                        hte = (hash_entry *) ((u_long)(mm->sdr1)+hash);

                        for (i=0; i<8; i++) {
                                if (hte[i].key>=0) goto found;
                        }
                        alt = 0;
                        hte = _hte1;
                        /* Choose a victim entry and replace it. There might be
                         * better policies to choose the victim, but in a boot
                         * loader we want simplicity as long as it works.
                         *
                         * We would not need to invalidate the TLB entry since
                         * the mapping is still valid. But this would be a mess
                         * when unmapping so we make sure that the TLB is a
                         * subset of the hash table under all circumstances.
                         */
                        i = mm->hashcnt;
                        mm->hashcnt = (mm->hashcnt+1)%8;
                        /* Note that the hash is already complemented here ! */
                        flushva = (~(hash<<9)^((hte[i].key)<<5)) &0x3ff000;
                        if (hte[i].key&0x40) flushva^=0x3ff000;
                        flushva |= ((hte[i].key<<21)&0xf0000000)
                          | ((hte[i].key<<22)&0x0fc00000);
                        hte[i].key=0;
                        asm volatile("sync; tlbie %0; sync" : : "r" (flushva));
                found:
                        hte[i].rpn = rpn;
                        asm volatile("eieio": : );
                        hte[i].key = 0x80000000|(vsid<<7)|alt|
                          ((vaddr>>22)&0x3f);
                        return;
                } else {
                        MMUon();
                        printk("\nPanic: vector=%x, cause=%lx\n", vec, cause);
                        hang("\nInvalid memory access attempt at ", vaddr, p);
                }
        } else {
          MMUon();
          printk("\nPanic: vector=%x, dsisr=%lx, faultaddr =%lx, msr=%lx opcode=%lx\n", vec,
                 cause, p->nip, p->msr, * ((unsigned int*) p->nip) );
          if (vec == 7) {
            unsigned int* ptr = ((unsigned int*) p->nip) - 4 * 10;
            for (; ptr <= (((unsigned int*) p->nip) + 4 * 10); ptr ++)
              printk("Hexadecimal code at address %x = %x\n", ptr, *ptr);
          }
          hang("Program or alignment exception at ", vaddr, p);
        }
}
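
/* Note on sizing: with hashmask = 0xffc0 the table holds 1024 PTEGs of eight
 * 8-byte entries, i.e. exactly the 64 kB (0x10000) block that mm_init()
 * allocates and loads into SDR1.
 */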

/* Generic routines for map handling.
 */

static inline
void free_map(map *p) {
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        if (!p) return;
        p->next=mm->freemaps;
        mm->freemaps=p;
        p->firstpte=MAP_FREE;
}

/* Sorted insertion in linked list */
static
int insert_map(map **head, map *p) {
        map *q = *head;
        if (!p) return 0;
        if (q && (q->base < p->base)) {
                for(;q->next && q->next->base<p->base; q = q->next);
                if ((q->end >= p->base) ||
                    (q->next && p->end>=q->next->base)) {
                        free_map(p);
                        printk("Overlapping areas!\n");
                        return 1;
                }
                p->next = q->next;
                q->next = p;
        } else { /* Insert at head */
                if (q && (p->end >= q->base)) {
                        free_map(p);
                        printk("Overlapping areas!\n");
                        return 1;
                }
                p->next = q;
                *head = p;
        }
        return 0;
}

/* Removal from linked list */

static
map *remove_map(map **head, map *p) {
        map *q = *head;

        if (!p || !q) return NULL;
        if (q==p) {
                *head = q->next;
                return p;
        }
        for(;q && q->next!=p; q=q->next);
        if (q) {
                q->next=p->next;
                return p;
        } else {
                return NULL;
        }
}

static
map *remove_map_at(map **head, void * vaddr) {
        map *p, *q = *head;

        if (!vaddr || !q) return NULL;
        if (q->base==(u_long)vaddr) {
                *head = q->next;
                return q;
        }
        while (q->next && q->next->base != (u_long)vaddr) q=q->next;
        p=q->next;
        if (p) q->next=p->next;
        return p;
}

static inline
map * alloc_map_page(void) {
        map *from, *p;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        /* printk("Allocating new map page !"); */
        /* Get the highest page */
        for (from=mm->physavail; from && from->next; from=from->next);
        if (!from) return NULL;

        from->end -= PAGE_SIZE;

        mm->freemaps = (map *) (from->end+1);

        for(p=mm->freemaps; p<mm->freemaps+PAGE_SIZE/sizeof(map)-1; p++) {
                p->next = p+1;
                p->firstpte = MAP_FREE;
        }
        (p-1)->next=0;

        /* Take the last one as pointer to self and insert
         * the map into the permanent map list.
         */

        p->firstpte = MAP_PERM_PHYS;
        p->base=(u_long) mm->freemaps;
        p->end = p->base+PAGE_SIZE-1;

        insert_map(&mm->physperm, p);

        if (from->end+1 == from->base)
                free_map(remove_map(&mm->physavail, from));

        return mm->freemaps;
}

static
map * alloc_map(void) {
        map *p;
        struct _mm_private * mm = (struct _mm_private *) bd->mm_private;

        p = mm->freemaps;
        if (!p) {
                p=alloc_map_page();
        }

        if(p) mm->freemaps=p->next;

        return p;
}

static
void coalesce_maps(map *p) {
        while(p) {
                if (p->next && (p->end+1 == p->next->base)) {
                        map *q=p->next;
                        p->end=q->end;
                        p->next=q->next;
                        free_map(q);
                } else {
                        p = p->next;
                }
        }
}

/* These routines are used to find the free memory zones to avoid
 * overlapping destructive copies when initializing.
 * They work from the top because of the way we want to boot.
 * In the following the term zone refers to the memory described
 * by one or several contiguous so-called segments in the
 * residual data.
 */
#define STACK_PAGES 2
static inline u_long
find_next_zone(RESIDUAL *res, u_long lowpage, u_long flags) {
        u_long i, newmin=0, size=0;
        for(i=0; i<res->ActualNumMemSegs; i++) {
                if (res->Segs[i].Usage & flags
                    && res->Segs[i].BasePage<lowpage
                    && res->Segs[i].BasePage>newmin) {
                        newmin=res->Segs[i].BasePage;
                        size=res->Segs[i].PageCount;
                }
        }
        return newmin+size;
}

static inline u_long
find_zone_start(RESIDUAL *res, u_long highpage, u_long flags) {
        u_long i;
        int progress;
        do {
                progress=0;
                for (i=0; i<res->ActualNumMemSegs; i++) {
                        if ( (res->Segs[i].BasePage+res->Segs[i].PageCount
                              == highpage)
                             && res->Segs[i].Usage & flags) {
                                highpage=res->Segs[i].BasePage;
                                progress=1;
                        }
                }
        } while(progress);
        return highpage;
}

/* The Motorola NT firmware does not provide any setting in the residual
 * data about memory segment usage. The following table provides enough
 * info so that this bootloader can work.
 */
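/* Each entry is { Usage, BasePage, PageCount }, in units of 4 kB pages,
 * in the field order copied by fix_residual() below.
 */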
MEM_MAP seg_fix[] = {
    { 0x2000, 0xFFF00, 0x00100 },
    { 0x0020, 0x02000, 0x7E000 },
    { 0x0008, 0x00800, 0x00168 },
    { 0x0004, 0x00000, 0x00005 },
    { 0x0001, 0x006F1, 0x0010F },
    { 0x0002, 0x006AD, 0x00044 },
    { 0x0010, 0x00005, 0x006A8 },
    { 0x0010, 0x00968, 0x00698 },
    { 0x0800, 0xC0000, 0x3F000 },
    { 0x0600, 0xBF800, 0x00800 },
    { 0x0500, 0x81000, 0x3E800 },
    { 0x0480, 0x80800, 0x00800 },
    { 0x0440, 0x80000, 0x00800 } };

/* The Motorola NT firmware does not set up all required info in the residual
 * data. This routine changes some things so that the bootloader and
 * Linux are happy.
 */
void
fix_residual( RESIDUAL *res )
{
#if 0
    PPC_DEVICE *hostbridge;
#endif
    int i;

    /* Missing memory segment information */
    res->ActualNumMemSegs = sizeof(seg_fix)/sizeof(MEM_MAP);
    for (i=0; i<res->ActualNumMemSegs; i++) {
        res->Segs[i].Usage = seg_fix[i].Usage;
        res->Segs[i].BasePage = seg_fix[i].BasePage;
        res->Segs[i].PageCount = seg_fix[i].PageCount;
    }
    /* The following should be fixed in the current version of the
     * kernel and of the bootloader.
     */
#if 0
    /* PPCBug has this zero */
    res->VitalProductData.CacheLineSize = 0;
    /* Motorola NT firmware sets TimeBaseDivisor to 0 */
    if ( res->VitalProductData.TimeBaseDivisor == 0 ) {
        res->VitalProductData.TimeBaseDivisor = 4000;
    }

    /* Motorola NT firmware records the PCIBridge as a "PCIDEVICE" and
     * sets "PCIBridgeDirect". This bootloader and Linux work better if
     * BusId = "PROCESSORDEVICE" and Interface = "PCIBridgeIndirect".
     */
    hostbridge=residual_find_device(PCIDEVICE, NULL,
                                        BridgeController,
                                        PCIBridge, -1, 0);
    if (hostbridge) {
        hostbridge->DeviceId.BusId = PROCESSORDEVICE;
        hostbridge->DeviceId.Interface = PCIBridgeIndirect;
    }
#endif
}

/* This routine is the first C code called with very little stack space!
 * Its goal is to find where the boot image can be moved. This will
 * be the highest address with enough room.
 */
int early_setup(u_long image_size) {
        register RESIDUAL *res = bd->residual;
        u_long minpages = PAGE_ALIGN(image_size)>>PAGE_SHIFT;

        if ( residual_fw_is_qemu( res ) ) {
                /* save command-line - QEMU firmware sets R6/R7 to
                 * commandline start/end (NON-PReP STD)
                 */
                int len = bd->r7 - bd->r6;
                if ( len > 0 ) {
                        if ( len > sizeof(bd->cmd_line) - 1 )
                                len = sizeof(bd->cmd_line) - 1;
                        codemove(bd->cmd_line, bd->r6, len, bd->cache_lsize);
                        bd->cmd_line[len] = 0;
                }
        }

        /* Fix residual if we are loaded by Motorola NT firmware */
        if ( res && res->VitalProductData.FirmwareSupplier == 0x10000 )
            fix_residual( res );

        /* FIXME: if OF we should do something different */
        if( !bd->of_entry && res &&
           res->ResidualLength <= sizeof(RESIDUAL) && res->Version == 0 ) {
                u_long lowpage=ULONG_MAX, highpage;
                u_long imghigh=0, stkhigh=0;
                /* Find the highest contiguous zone that is large enough,
                   consisting of free and BootImage sections. */
                /* Find 3 free areas of memory, one for the main image, one
                 * for the stack (STACK_PAGES), and one page to put the map
                 * structures. They are allocated from the top of memory.
                 * In most cases the stack will be put just below the image.
                 */
                while((highpage =
                       find_next_zone(res, lowpage, BootImage|Free))) {
                        lowpage=find_zone_start(res, highpage, BootImage|Free);
                        if ((highpage-lowpage)>minpages &&
                            highpage>imghigh) {
                                imghigh=highpage;
                                highpage -=minpages;
                        }
                        if ((highpage-lowpage)>STACK_PAGES &&
                            highpage>stkhigh) {
                                stkhigh=highpage;
                                highpage-=STACK_PAGES;
                        }
                }

                bd->image = (void *)((imghigh-minpages)<<PAGE_SHIFT);
                bd->stack=(void *) (stkhigh<<PAGE_SHIFT);

                /* The code mover is put at the lowest possible place
                 * of free memory. If this corresponds to the loaded boot
                 * partition image it does not matter because it overrides
                 * the unused part of it (x86 code).
                 */
                bd->mover=(void *) (lowpage<<PAGE_SHIFT);

                /* Let us flush the caches in all cases. After all it should
                 * not harm even on 601 and we don't care about performance.
                 * Right now it's easy since all processors have a line size
                 * of 32 bytes. Once again residual data has proved unreliable.
                 */
                bd->cache_lsize = 32;
        }
        /* For now we always assume that it's successful; we should
         * handle the case of insufficient memory better.
         */
        return 0;
}
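
/* For illustration only (hypothetical numbers): with a single free zone of
 * 0x4000 pages (64 MB) starting at page 0 and a 2 MB image (minpages =
 * 0x200), the loop above yields imghigh = 0x4000 and stkhigh = 0x3e00, so
 * bd->image = 0x3e00000 (the top 2 MB), bd->stack = 0x3e00000 (growing down
 * just below the image) and bd->mover = lowpage<<PAGE_SHIFT at the bottom
 * of the zone.
 */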

void * valloc(u_long size) {
        map *p, *q;
        struct _mm_private * mm = (struct _mm_private *) bd->mm_private;

        if (size==0) return NULL;
        size=PAGE_ALIGN(size)-1;
        for (p=mm->virtavail; p; p=p->next) {
                if (p->base+size <= p->end) break;
        }
        if(!p) return NULL;
        q=alloc_map();
        q->base=p->base;
        q->end=q->base+size;
        q->firstpte=MAP_USED_VIRT;
        insert_map(&mm->virtused, q);
        if (q->end==p->end) free_map(remove_map(&mm->virtavail, p));
        else p->base += size+1;
        return (void *)q->base;
}

static
void vflush(map *virtmap) {
        struct _mm_private * mm = (struct _mm_private *) bd->mm_private;
        u_long i, limit=(mm->hashmask>>3)+8;
        hash_entry volatile *p=(hash_entry *) mm->sdr1;

        /* PTE handling is simple since the processor never updates
         * the entries. Writable pages always have the C bit set and
         * all valid entries have the R bit set. From the processor's
         * point of view the hash table is read only.
         */
        for (i=0; i<limit; i++) {
                if (p[i].key<0) {
                        u_long va;
                        va = ((i<<9)^((p[i].key)<<5)) &0x3ff000;
                        if (p[i].key&0x40) va^=0x3ff000;
                        va |= ((p[i].key<<21)&0xf0000000)
                          | ((p[i].key<<22)&0x0fc00000);
                        if (va>=virtmap->base && va<=virtmap->end) {
                                p[i].key=0;
                                asm volatile("sync; tlbie %0; sync" : :
                                             "r" (va));
                        }
                }
        }
}

void vfree(void *vaddr) {
        map *physmap, *virtmap; /* Actual mappings pertaining to this vm */
        struct _mm_private * mm = (struct _mm_private *) bd->mm_private;

        /* Flush memory queues */
        asm volatile("sync": : : "memory");

        virtmap = remove_map_at(&mm->virtused, vaddr);
        if (!virtmap) return;

        /* Remove mappings corresponding to virtmap */
        for (physmap=mm->mappings; physmap; ) {
                map *nextmap=physmap->next;
                if (physmap->base>=virtmap->base
                    && physmap->base<virtmap->end) {
                        free_map(remove_map(&mm->mappings, physmap));
                }
                physmap=nextmap;
        }

        vflush(virtmap);

        virtmap->firstpte= MAP_FREE_VIRT;
        insert_map(&mm->virtavail, virtmap);
        coalesce_maps(mm->virtavail);
}

void vunmap(void *vaddr) {
        map *physmap, *virtmap; /* Actual mappings pertaining to this vm */
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        /* Flush memory queues */
        asm volatile("sync": : : "memory");

        /* vaddr must be within one of the vm areas in use and
         * then must correspond to one of the physical areas
         */
        for (virtmap=mm->virtused; virtmap; virtmap=virtmap->next) {
                if (virtmap->base<=(u_long)vaddr &&
                    virtmap->end>=(u_long)vaddr) break;
        }
        if (!virtmap) return;

        physmap = remove_map_at(&mm->mappings, vaddr);
        if(!physmap) return;
        vflush(physmap);
        free_map(physmap);
}

int vmap(void *vaddr, u_long p, u_long size) {
        map *q;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        size=PAGE_ALIGN(size);
        if(!size) return 1;
        /* Check that the requested area fits in one vm image */
        for (q=mm->virtused; q; q=q->next) {
                if ((q->base <= (u_long)vaddr) &&
                    (q->end>=(u_long)vaddr+size -1)) break;
        }
        if (!q) return 1;
        q= alloc_map();
        if (!q) return 1;
        q->base = (u_long)vaddr;
        q->end = (u_long)vaddr+size-1;
        q->firstpte = p;
        return insert_map(&mm->mappings, q);
}
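
/* A minimal usage sketch of the routines above: reserve a virtually
 * contiguous window with valloc() and back it with two possibly
 * discontiguous physical pages through vmap(). phys1 and phys2 are
 * hypothetical page-aligned physical addresses; PTE_RAM supplies the
 * cache/permission bits as in create_identity_mappings() below.
 */
#if 0
        void *v = valloc(2*PAGE_SIZE);
        if (v) {
                vmap(v, phys1|PTE_RAM, PAGE_SIZE);
                vmap((char *)v+PAGE_SIZE, phys2|PTE_RAM, PAGE_SIZE);
                /* ... use the window, then tear everything down ... */
                vfree(v);
        }
#endif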

static
void create_identity_mappings(int type, int attr) {
        u_long lowpage=ULONG_MAX, highpage;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        RESIDUAL * res=bd->residual;

        while((highpage = find_next_zone(res, lowpage, type))) {
                map *p;
                lowpage=find_zone_start(res, highpage, type);
                p=alloc_map();
                /* Do not map page 0 to catch null pointers */
                lowpage = lowpage ? lowpage : 1;
                p->base=lowpage<<PAGE_SHIFT;
                p->end=(highpage<<PAGE_SHIFT)-1;
                p->firstpte = (lowpage<<PAGE_SHIFT)|attr;
                insert_map(&mm->mappings, p);
        }
}

static inline
void add_free_map(u_long base, u_long end) {
        map *q=NULL;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        if (base<end) q=alloc_map();
        if (!q) return;
        q->base=base;
        q->end=end-1;
        q->firstpte=MAP_FREE_VIRT;
        insert_map(&mm->virtavail, q);
}

static inline
void create_free_vm(void) {
        map *p;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        u_long vaddr=PAGE_SIZE; /* Never map vaddr 0 */
        for(p=mm->mappings; p; p=p->next) {
                add_free_map(vaddr, p->base);
                vaddr=p->end+1;
        }
        /* Special end of memory case */
        if (vaddr) add_free_map(vaddr,0);
}

/* Memory management initialization.
 * Set up the mapping lists.
 */

static inline
void add_perm_map(u_long start, u_long size) {
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        map *p=alloc_map();
        p->base = start;
        p->end = start + size - 1;
        p->firstpte = MAP_PERM_PHYS;
        insert_map(& mm->physperm , p);
}

void mm_init(u_long image_size)
{
        u_long lowpage=ULONG_MAX, highpage;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        RESIDUAL * res=bd->residual;
        int i;
        map *p;

        /* The checks are simplified by the fact that the image
         * and stack area are always allocated at the upper end
         * of a free block.
         */
        while((highpage = find_next_zone(res, lowpage, BootImage|Free))) {
                lowpage=find_zone_start(res, highpage, BootImage|Free);
                if ( ( ((u_long)bd->image+PAGE_ALIGN(image_size))>>PAGE_SHIFT)
                     == highpage) {
                        highpage=(u_long)(bd->image)>>PAGE_SHIFT;
                        add_perm_map((u_long)bd->image, image_size);
                }
                if ( (( u_long)bd->stack>>PAGE_SHIFT) == highpage) {
                        highpage -= STACK_PAGES;
                        add_perm_map(highpage<<PAGE_SHIFT,
                                     STACK_PAGES*PAGE_SIZE);
                }
                /* Protect the interrupt handlers that we need ! */
                if (lowpage<2) lowpage=2;
                /* Check for the special case of full area! */
                if (highpage>lowpage) {
                        p = alloc_map();
                        p->base = lowpage<<PAGE_SHIFT;
                        p->end = (highpage<<PAGE_SHIFT)-1;
                        p->firstpte=MAP_FREE_PHYS;
                        insert_map(&mm->physavail, p);
                }
        }

        /* Allocate the hash table */
        mm->sdr1=__palloc(0x10000, PA_PERM|16);
        _write_SDR1((u_long)mm->sdr1);
        memset(mm->sdr1, 0, 0x10000);
        mm->hashmask = 0xffc0;

        /* Setup the segment registers as we want them */
        for (i=0; i<16; i++) _write_SR(i, (void *)(i<<28));
        /* Create the maps for the physical memory; firmware code does not
         * seem to be necessary. ROM is mapped read-only to reduce the risk
         * of reprogramming it because it's often Flash and some are
         * amazingly easy to overwrite.
         */
        create_identity_mappings(BootImage|Free|FirmwareCode|FirmwareHeap|
                                 FirmwareStack, PTE_RAM);
        create_identity_mappings(SystemROM, PTE_ROM);
        create_identity_mappings(IOMemory|SystemIO|SystemRegs|
                                 PCIAddr|PCIConfig|ISAAddr, PTE_IO);

        create_free_vm();

        /* Install our own MMU and trap handlers. */
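        /* On the PowerPC these offsets are the DSI (0x300), ISI (0x400),
         * alignment (0x600) and program check (0x700) vectors, matching the
         * vec values tested in _handler() above.
         */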
        codemove((void *) 0x300, _handler_glue, 0x100, bd->cache_lsize);
        codemove((void *) 0x400, _handler_glue, 0x100, bd->cache_lsize);
        codemove((void *) 0x600, _handler_glue, 0x100, bd->cache_lsize);
        codemove((void *) 0x700, _handler_glue, 0x100, bd->cache_lsize);
}

void * salloc(u_long size) {
        map *p, *q;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        if (size==0) return NULL;

        size = (size+7)&~7;

        for (p=mm->sallocfree; p; p=p->next) {
                if (p->base+size <= p->end) break;
        }
        if(!p) {
                void *m;
                m = __palloc(size, PA_SUBALLOC);
                p = alloc_map();
                /* Bail out if either the physical block or the map failed. */
                if (!m || !p) return NULL;
                p->base = (u_long) m;
                p->firstpte = MAP_FREE_SUBS;
                p->end = (u_long)m+PAGE_ALIGN(size)-1;
                insert_map(&mm->sallocfree, p);
                coalesce_maps(mm->sallocfree);
                coalesce_maps(mm->sallocphys);
        }
        q=alloc_map();
        q->base=p->base;
        q->end=q->base+size-1;
        q->firstpte=MAP_USED_SUBS;
        insert_map(&mm->sallocused, q);
        if (q->end==p->end) free_map(remove_map(&mm->sallocfree, p));
        else p->base += size;
        memset((void *)q->base, 0, size);
        return (void *)q->base;
}
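
/* For example, salloc(20) returns a 24-byte, zero-filled block: requests are
 * rounded up to the 8-byte granule and cleared before being handed out.
 */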

void sfree(void *p) {
        map *q;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        q=remove_map_at(&mm->sallocused, p);
        if (!q) return;
        q->firstpte=MAP_FREE_SUBS;
        insert_map(&mm->sallocfree, q);
        coalesce_maps(mm->sallocfree);
}

/* First/last area fit; the low bits of flags (flags & PA_ALIGN_MASK) give
 * the log2 of the required alignment. The algorithms are stupid because we
 * expect very little fragmentation of the areas, if any. The unit of
 * allocation is the page. The allocation is by default performed from
 * higher addresses down, unless flags&PA_LOW is true.
 */
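/* Example from mm_init() above: __palloc(0x10000, PA_PERM|16) takes a
 * permanent 64 kB block aligned on a 64 kB boundary (2^16), which is what
 * SDR1 requires for a hash table of that size.
 */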

void * __palloc(u_long size, int flags)
{
        u_long mask = ((1<<(flags&PA_ALIGN_MASK))-1);
        map *newmap, *frommap, *p, *splitmap=0;
        map **queue;
        u_long qflags;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        /* Asking for a size which is not a multiple of the alignment
           is likely to be an error. */

        if (size & mask) return NULL;
        size = PAGE_ALIGN(size);
        if(!size) return NULL;

        if (flags&PA_SUBALLOC) {
                queue = &mm->sallocphys;
                qflags = MAP_SUBS_PHYS;
        } else if (flags&PA_PERM) {
                queue = &mm->physperm;
                qflags = MAP_PERM_PHYS;
        } else {
                queue = &mm->physused;
                qflags = MAP_USED_PHYS;
        }
        /* We need to allocate that one now so no two allocations may attempt
         * to take the same memory simultaneously. Alloc_map_page does
         * not call back here to avoid infinite recursion in alloc_map.
         */

        if (mask&PAGE_MASK) {
                splitmap=alloc_map();
                if (!splitmap) return NULL;
        }

        for (p=mm->physavail, frommap=NULL; p; p=p->next) {
                u_long high = p->end;
                u_long limit  = ((p->base+mask)&~mask) + size-1;
                if (high>=limit && ((p->base+mask)&~mask)+size>p->base) {
                        frommap = p;
                        if (flags&PA_LOW) break;
                }
        }

        if (!frommap) {
                if (splitmap) free_map(splitmap);
                return NULL;
        }

        newmap=alloc_map();

        if (flags&PA_LOW) {
                newmap->base = (frommap->base+mask)&~mask;
        } else {
                newmap->base = (frommap->end +1 - size) & ~mask;
        }

        newmap->end = newmap->base+size-1;
        newmap->firstpte = qflags;

        /* Add a fragment if we don't allocate until the end. */

        if (splitmap) {
                splitmap->base=newmap->base+size;
                splitmap->end=frommap->end;
                splitmap->firstpte= MAP_FREE_PHYS;
                frommap->end=newmap->base-1;
        } else if (flags & PA_LOW) {
                frommap->base=newmap->base+size;
        } else {
                frommap->end=newmap->base-1;
        }

        /* Remove a fragment if it becomes empty. */
        if (frommap->base == frommap->end+1) {
                free_map(remove_map(&mm->physavail, frommap));
        }

        if (splitmap) {
                if (splitmap->base == splitmap->end+1) {
                        free_map(remove_map(&mm->physavail, splitmap));
                } else {
                        insert_map(&mm->physavail, splitmap);
                }
        }

        insert_map(queue, newmap);
        return (void *) newmap->base;

}

void pfree(void * p) {
        map *q;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        q=remove_map_at(&mm->physused, p);
        if (!q) return;
        q->firstpte=MAP_FREE_PHYS;
        insert_map(&mm->physavail, q);
        coalesce_maps(mm->physavail);
}

#ifdef DEBUG
/* Debugging functions */
void print_maps(map *chain, const char *s) {
        map *p;
        printk("%s",s);
        for(p=chain; p; p=p->next) {
                printk("    %08lx-%08lx: %08lx\n",
                       p->base, p->end, p->firstpte);
        }
}

void print_all_maps(const char * s) {
        u_long freemaps;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        map *free;
        printk("%s",s);
        print_maps(mm->mappings, "  Currently defined mappings:\n");
        print_maps(mm->physavail, "  Currently available physical areas:\n");
        print_maps(mm->physused, "  Currently used physical areas:\n");
        print_maps(mm->virtavail, "  Currently available virtual areas:\n");
        print_maps(mm->virtused, "  Currently used virtual areas:\n");
        print_maps(mm->physperm, "  Permanently used physical areas:\n");
        print_maps(mm->sallocphys, "  Physical memory used for salloc:\n");
        print_maps(mm->sallocfree, "  Memory available for salloc:\n");
        print_maps(mm->sallocused, "  Memory allocated through salloc:\n");
        for (freemaps=0, free=mm->freemaps; free; freemaps++, free=free->next);
        printk("  %ld free maps.\n", freemaps);
}

void print_hash_table(void) {
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        hash_entry *p=(hash_entry *) mm->sdr1;
        u_int i, valid=0;
        for (i=0; i<((mm->hashmask)>>3)+8; i++) {
                if (p[i].key<0) valid++;
        }
        printk("%u valid hash entries on pass 1.\n", valid);
        valid = 0;
        for (i=0; i<((mm->hashmask)>>3)+8; i++) {
                if (p[i].key<0) valid++;
        }
        printk("%u valid hash entries on pass 2.\n"
               "     vpn:rpn_attr, p/s, pteg.i\n", valid);
        for (i=0; i<((mm->hashmask)>>3)+8; i++) {
                if (p[i].key<0) {
                        u_int pteg=(i>>3);
                        u_long vpn;
                        vpn = (pteg^((p[i].key)>>7)) &0x3ff;
                        if (p[i].key&0x40) vpn^=0x3ff;
                        vpn |= ((p[i].key<<9)&0xffff0000)
                          | ((p[i].key<<10)&0xfc00);
                        printk("%08lx:%08lx, %s, %5d.%d\n",
                               vpn,  p[i].rpn, p[i].key&0x40 ? "sec" : "pri",
                               pteg, i%8);
                }
        }
}

#endif