source: rtems/c/src/lib/libbsp/powerpc/shared/bootloader/mm.c @ 116633f

Last change on this file since 116633f was 116633f, checked in by Joel Sherrill <joel.sherrill@…>, on 09/04/03 at 18:45:20

2003-09-04 Joel Sherrill <joel@…>

  • bootloader/bootldr.h, bootloader/em86.c, bootloader/em86real.S, bootloader/exception.S, bootloader/head.S, bootloader/lib.c, bootloader/misc.c, bootloader/mm.c, bootloader/pci.c, clock/p_clock.c, console/console.c, console/consoleIo.h, console/inch.c, console/keyboard.h, console/polled_io.c, include/bsp.h, irq/i8259.c, irq/irq.c, irq/irq.h, irq/irq_asm.S, irq/irq_init.c, motorola/motorola.c, motorola/motorola.h, openpic/openpic.c, openpic/openpic.h, pci/pci.c, residual/residual.c, start/start.S, startup/bspstart.c, vectors/vectors.h, vectors/vectors_init.c: URL for license changed.
  • Property mode set to 100644
File size: 27.3 KB
/*
 *  mm.c -- Crude memory management for early boot.
 *
 *  Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
 *
 *  Modified to compile in RTEMS development environment
 *  by Eric Valette
 *
 *  Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 * $Id$
 */

/* This code is a crude memory manager for early boot for LinuxPPC.
 * As such, it does not try to perform many optimizations depending
 * on the processor; it only uses features which are common to
 * all processors (no BATs...).
 *
 * On PReP platforms (the only ones on which it works for now),
 * it maps 1:1 all RAM/ROM and I/O space as claimed by the
 * residual data. The holes between these areas can be virtually
 * remapped to any of these, since for some functions it is very handy
 * to have virtually contiguous but physically discontiguous memory.
 *
 * Physical memory allocation is also very crude, since it's only
 * designed to manage a small number of large chunks. For valloc/vfree
 * and palloc/pfree, the unit of allocation is the 4kB page.
 *
 * salloc/sfree were added after tracing gunzip and seeing
 * how it performed a very large number of small allocations.
 * For these the unit of allocation is 8 bytes (the s stands for
 * small or subpage). This memory is cleared when allocated.
 *
 */
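
/* Illustrative usage sketch (added for clarity, not part of the original
 * code): valloc()/vfree() only reserve and release virtual address space,
 * vmap()/vunmap() attach physical ranges to it, and salloc()/sfree() hand
 * out small zeroed chunks, e.g.
 *
 *      void *v = valloc(2*PAGE_SIZE);            reserve virtual space
 *      vmap(v, physbase|PTE_RAM, 2*PAGE_SIZE);   map it (physbase is a
 *                                                hypothetical physical base)
 *      vunmap(v);
 *      vfree(v);
 *
 *      void *tmp = salloc(24);                   8-byte granular, zeroed
 *      sfree(tmp);
 */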

#include <rtems/bspIo.h>

#include <sys/types.h>
#include <libcpu/spr.h>
#include "bootldr.h"
#include <libcpu/mmu.h>
#include <libcpu/page.h>
#include <limits.h>

/* We use our own kind of simple memory areas for the loader, but
 * we want to avoid potential clashes with kernel includes.
 * Here a map describes a contiguous area from base to end; the
 * firstpte entry corresponds to the physical address and has the low
 * order bits set for caching and permission.
 */

typedef struct _map {
        struct _map *next;
        u_long base;
        u_long end;
        u_long firstpte;
} map;

/* The LSB of the firstpte entries on map lists other than mappings
 * are constants which can be checked for debugging. All these constants
 * have the bit of weight 4 set; this bit is zero in the mappings list entries.
 * Actually the firstpte&7 value is:
 * - 0 or 1 should not happen
 * - 2 for RW actual virtual->physical mappings
 * - 3 for RO actual virtual->physical mappings
 * - 6 for free areas to be suballocated by salloc
 * - 7 for salloc'ated areas
 * - 4 or 5 for all others, in which case firstpte & 63 is
 *   - 4 for unused maps (on the free list)
 *   - 12 for free physical memory
 *   - 13 for physical memory in use
 *   - 20 for free virtual address space
 *   - 21 for allocated virtual address space
 *   - 28 for physical memory space suballocated by salloc
 *   - 29 for physical memory that can't be freed
 */

#define MAP_FREE_SUBS 6
#define MAP_USED_SUBS 7

#define MAP_FREE 4
#define MAP_FREE_PHYS 12
#define MAP_USED_PHYS 13
#define MAP_FREE_VIRT 20
#define MAP_USED_VIRT 21
#define MAP_SUBS_PHYS 28
#define MAP_PERM_PHYS 29
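
/* For example (illustration only, no such checks exist in this file): a map
 * sitting on the salloc free list should satisfy
 * (p->firstpte & 7) == MAP_FREE_SUBS, while one on the physavail list should
 * satisfy (p->firstpte & 63) == MAP_FREE_PHYS.
 */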

SPR_RW(SDR1);
SPR_RO(DSISR);
SPR_RO(DAR);

/* We need a few statically allocated free maps to bootstrap the
 * memory management */
static map free_maps[4] = {{free_maps+1, 0, 0, MAP_FREE},
                           {free_maps+2, 0, 0, MAP_FREE},
                           {free_maps+3, 0, 0, MAP_FREE},
                           {NULL, 0, 0, MAP_FREE}};
struct _mm_private {
        void *sdr1;
        u_long hashmask;
        map *freemaps;     /* Pool of unused map structs */
        map *mappings;     /* Sorted list of virtual->physical mappings */
        map *physavail;    /* Unallocated physical address space */
        map *physused;     /* Allocated physical address space */
        map *physperm;     /* Permanently allocated physical space */
        map *virtavail;    /* Unallocated virtual address space */
        map *virtused;     /* Allocated virtual address space */
        map *sallocfree;   /* Free maps for salloc */
        map *sallocused;   /* Used maps for salloc */
        map *sallocphys;   /* Physical areas used by salloc */
        u_int hashcnt;     /* Used to cycle in PTEG when they overflow */
} mm_private = {hashmask: 0xffc0,
                freemaps: free_maps+0};

/* A simplified hash table entry declaration */
typedef struct _hash_entry {
        int key;
        u_long rpn;
} hash_entry;
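
/* Note (added for clarity): key corresponds to the first word of a classic
 * PowerPC PTE as built in _handler() below (valid bit 0x80000000, VSID,
 * H bit 0x40 and API), which is why "key < 0" is used as the "valid" test
 * throughout this file. rpn corresponds to the second PTE word: the physical
 * page number plus the attribute bits taken from map->firstpte.
 */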

void print_maps(map *, const char *);

/* The handler used for all exceptions, although for now it is only
 * designed to properly handle MMU interrupts to fill the hash table.
 */


void _handler(int vec, ctxt *p) {
        map *area;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        u_long vaddr, cause;
        if (vec==4 || vec==7) { /* ISI exceptions are different */
                vaddr = p->nip;
                cause = p->msr;
        } else { /* Valid for DSI and alignment exceptions */
                vaddr = _read_DAR();
                cause = _read_DSISR();
        }

        if (vec==3 || vec==4) {
                /* Panic unless the fault is a simple PTE miss (PTE not found). */
                if (!(cause & 0x40000000)) {
                        MMUon();
                        printk("\nPanic: vector=%x, cause=%lx\n", vec, cause);
                        hang("Memory protection violation at ", vaddr, p);
                }

                for(area=mm->mappings; area; area=area->next) {
                        if(area->base<=vaddr && vaddr<=area->end) break;
                }

                if (area) {
                        u_long hash, vsid, rpn;
                        hash_entry volatile *hte, *_hte1;
                        u_int i, alt=0, flushva;

                        vsid = _read_SR((void *)vaddr);
                        rpn = (vaddr&PAGE_MASK)-area->base+area->firstpte;
                        hash = vsid<<6;
                        hash ^= (vaddr>>(PAGE_SHIFT-6))&0x3fffc0;
                        hash &= mm->hashmask;
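                        /* (Comment added for clarity.) This is the standard
                         * PowerPC page table hash: the low-order VSID bits
                         * are XORed with the page index and masked to a byte
                         * offset into the hash table; the secondary hash used
                         * below is simply its complement within hashmask.
                         */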
                        /* Find an empty entry in the PTEG, else
                         * replace a random one.
                         */
                        hte = (hash_entry *) ((u_long)(mm->sdr1)+hash);
                        for (i=0; i<8; i++) {
                                if (hte[i].key>=0) goto found;
                        }
                        hash ^= mm->hashmask;
                        alt = 0x40; _hte1 = hte;
                        hte = (hash_entry *) ((u_long)(mm->sdr1)+hash);

                        for (i=0; i<8; i++) {
                                if (hte[i].key>=0) goto found;
                        }
                        alt = 0;
                        hte = _hte1;
                        /* Choose a victim entry and replace it. There might be
                         * better policies to choose the victim, but in a boot
                         * loader we want simplicity as long as it works.
                         *
                         * We would not need to invalidate the TLB entry since
                         * the mapping is still valid. But this would be a mess
                         * when unmapping so we make sure that the TLB is a
                         * subset of the hash table under all circumstances.
                         */
                        i = mm->hashcnt;
                        mm->hashcnt = (mm->hashcnt+1)%8;
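                        /* (Comment added for clarity.) The victim's effective
                         * address is reconstructed from the PTEG index and
                         * from the VSID/API bits saved in its key, so that
                         * the stale translation can be flushed with tlbie
                         * before the slot is reused.
                         */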
                        /* Note that the hash is already complemented here! */
                        flushva = (~(hash<<9)^((hte[i].key)<<5)) &0x3ff000;
                        if (hte[i].key&0x40) flushva^=0x3ff000;
                        flushva |= ((hte[i].key<<21)&0xf0000000)
                          | ((hte[i].key<<22)&0x0fc00000);
                        hte[i].key=0;
                        asm volatile("sync; tlbie %0; sync" : : "r" (flushva));
                found:
                        hte[i].rpn = rpn;
                        asm volatile("eieio": : );
                        hte[i].key = 0x80000000|(vsid<<7)|alt|
                          ((vaddr>>22)&0x3f);
                        return;
                } else {
                        MMUon();
                        printk("\nPanic: vector=%x, cause=%lx\n", vec, cause);
                        hang("\nInvalid memory access attempt at ", vaddr, p);
                }
        } else {
          MMUon();
          printk("\nPanic: vector=%x, dsisr=%lx, faultaddr =%lx, msr=%lx opcode=%lx\n", vec,
                 cause, p->nip, p->msr, * ((unsigned int*) p->nip) );
          if (vec == 7) {
            unsigned int* ptr = ((unsigned int*) p->nip) - 4 * 10;
            for (; ptr <= (((unsigned int*) p->nip) + 4 * 10); ptr ++)
              printk("Hexadecimal code at address %x = %x\n", ptr, *ptr);
          }
          hang("Program or alignment exception at ", vaddr, p);
        }
}

/* Generic routines for map handling.
 */

static inline
void free_map(map *p) {
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        if (!p) return;
        p->next=mm->freemaps;
        mm->freemaps=p;
        p->firstpte=MAP_FREE;
}

/* Sorted insertion in linked list */
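/* Note (added): insert_map() returns 0 on success and 1 if the new map would
 * overlap an existing area, in which case the map is returned to the free
 * pool; vmap() below simply propagates this return value.
 */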
static
int insert_map(map **head, map *p) {
        map *q = *head;
        if (!p) return 0;
        if (q && (q->base < p->base)) {
                for(;q->next && q->next->base<p->base; q = q->next);
                if ((q->end >= p->base) ||
                    (q->next && p->end>=q->next->base)) {
                        free_map(p);
                        printk("Overlapping areas!\n");
                        return 1;
                }
                p->next = q->next;
                q->next = p;
        } else { /* Insert at head */
                if (q && (p->end >= q->base)) {
                        free_map(p);
                        printk("Overlapping areas!\n");
                        return 1;
                }
                p->next = q;
                *head = p;
        }
        return 0;
}


/* Removal from linked list */

static
map *remove_map(map **head, map *p) {
        map *q = *head;

        if (!p || !q) return NULL;
        if (q==p) {
                *head = q->next;
                return p;
        }
        for(;q && q->next!=p; q=q->next);
        if (q) {
                q->next=p->next;
                return p;
        } else {
                return NULL;
        }
}

static
map *remove_map_at(map **head, void * vaddr) {
        map *p, *q = *head;

        if (!vaddr || !q) return NULL;
        if (q->base==(u_long)vaddr) {
                *head = q->next;
                return q;
        }
        while (q->next && q->next->base != (u_long)vaddr) q=q->next;
        p=q->next;
        if (p) q->next=p->next;
        return p;
}

static inline
map * alloc_map_page(void) {
        map *from, *p;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        /* printk("Allocating new map page !"); */
        /* Get the highest page */
        for (from=mm->physavail; from && from->next; from=from->next);
        if (!from) return NULL;

        from->end -= PAGE_SIZE;

        mm->freemaps = (map *) (from->end+1);

        for(p=mm->freemaps; p<mm->freemaps+PAGE_SIZE/sizeof(map)-1; p++) {
                p->next = p+1;
                p->firstpte = MAP_FREE;
        }
        (p-1)->next=0;

        /* Take the last one as pointer to self and insert
         * the map into the permanent map list.
         */

        p->firstpte = MAP_PERM_PHYS;
        p->base=(u_long) mm->freemaps;
        p->end = p->base+PAGE_SIZE-1;

        insert_map(&mm->physperm, p);

        if (from->end+1 == from->base)
                free_map(remove_map(&mm->physavail, from));

        return mm->freemaps;
}

static
map * alloc_map(void) {
        map *p;
        struct _mm_private * mm = (struct _mm_private *) bd->mm_private;

        p = mm->freemaps;
        if (!p) {
                p=alloc_map_page();
        }

        if(p) mm->freemaps=p->next;

        return p;
}

static
void coalesce_maps(map *p) {
        while(p) {
                if (p->next && (p->end+1 == p->next->base)) {
                        map *q=p->next;
                        p->end=q->end;
                        p->next=q->next;
                        free_map(q);
                } else {
                        p = p->next;
                }
        }
}

/* These routines are used to find the free memory zones to avoid
 * overlapping destructive copies when initializing.
 * They work from the top because of the way we want to boot.
 * In the following the term zone refers to the memory described
 * by one or several contiguous so-called segments in the
 * residual data.
 */
#define STACK_PAGES 2
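
/* Typical use of the two helpers below, as in early_setup() and mm_init()
 * (sketch only):
 *
 *      lowpage = ULONG_MAX;
 *      while ((highpage = find_next_zone(res, lowpage, BootImage|Free))) {
 *              lowpage = find_zone_start(res, highpage, BootImage|Free);
 *              ... the zone spans pages [lowpage, highpage) ...
 *      }
 */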
static inline u_long
find_next_zone(RESIDUAL *res, u_long lowpage, u_long flags) {
        u_long i, newmin=0, size=0;
        for(i=0; i<res->ActualNumMemSegs; i++) {
                if (res->Segs[i].Usage & flags
                    && res->Segs[i].BasePage<lowpage
                    && res->Segs[i].BasePage>newmin) {
                        newmin=res->Segs[i].BasePage;
                        size=res->Segs[i].PageCount;
                }
        }
        return newmin+size;
}

static inline u_long
find_zone_start(RESIDUAL *res, u_long highpage, u_long flags) {
        u_long i;
        int progress;
        do {
                progress=0;
                for (i=0; i<res->ActualNumMemSegs; i++) {
                        if ( (res->Segs[i].BasePage+res->Segs[i].PageCount
                              == highpage)
                             && res->Segs[i].Usage & flags) {
                                highpage=res->Segs[i].BasePage;
                                progress=1;
                        }
                }
        } while(progress);
        return highpage;
}

/* The Motorola NT firmware does not provide any setting in the residual
 * data about memory segment usage. The following table provides enough
 * info so that this bootloader can work.
 */
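/* Each entry below is apparently { Usage, BasePage, PageCount }, matching the
 * field-by-field copy in fix_residual() further down (counts are in 4 kB
 * pages).
 */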
MEM_MAP seg_fix[] = {
    { 0x2000, 0xFFF00, 0x00100 },
    { 0x0020, 0x02000, 0x7E000 },
    { 0x0008, 0x00800, 0x00168 },
    { 0x0004, 0x00000, 0x00005 },
    { 0x0001, 0x006F1, 0x0010F },
    { 0x0002, 0x006AD, 0x00044 },
    { 0x0010, 0x00005, 0x006A8 },
    { 0x0010, 0x00968, 0x00698 },
    { 0x0800, 0xC0000, 0x3F000 },
    { 0x0600, 0xBF800, 0x00800 },
    { 0x0500, 0x81000, 0x3E800 },
    { 0x0480, 0x80800, 0x00800 },
    { 0x0440, 0x80000, 0x00800 } };


/* The Motorola NT firmware does not set up all required info in the residual
 * data. This routine changes some things in a way that makes the bootloader
 * and Linux happy.
 */
void
fix_residual( RESIDUAL *res )
{
#if 0
    PPC_DEVICE *hostbridge;
#endif
    int i;

    /* Missing memory segment information */
    res->ActualNumMemSegs = sizeof(seg_fix)/sizeof(MEM_MAP);
    for (i=0; i<res->ActualNumMemSegs; i++) {
        res->Segs[i].Usage = seg_fix[i].Usage;
        res->Segs[i].BasePage = seg_fix[i].BasePage;
        res->Segs[i].PageCount = seg_fix[i].PageCount;
    }
    /* The following should be fixed in the current version of the
     * kernel and of the bootloader.
     */
#if 0
    /* PPCBug has this zero */
    res->VitalProductData.CacheLineSize = 0;
    /* Motorola NT firmware sets TimeBaseDivisor to 0 */
    if ( res->VitalProductData.TimeBaseDivisor == 0 ) {
        res->VitalProductData.TimeBaseDivisor = 4000;
    }

    /* Motorola NT firmware records the PCIBridge as a "PCIDEVICE" and
     * sets "PCIBridgeDirect". This bootloader and Linux work better if
     * BusId = "PROCESSORDEVICE" and Interface = "PCIBridgeIndirect".
     */
    hostbridge=residual_find_device(PCIDEVICE, NULL,
                                        BridgeController,
                                        PCIBridge, -1, 0);
    if (hostbridge) {
        hostbridge->DeviceId.BusId = PROCESSORDEVICE;
        hostbridge->DeviceId.Interface = PCIBridgeIndirect;
    }
#endif
}

/* This routine is the first C code called with very little stack space!
 * Its goal is to find where the boot image can be moved. This will
 * be the highest address with enough room.
 */
int early_setup(u_long image_size) {
        register RESIDUAL *res = bd->residual;
        u_long minpages = PAGE_ALIGN(image_size)>>PAGE_SHIFT;

        /* Fix residual if we are loaded by Motorola NT firmware */
        if ( res && res->VitalProductData.FirmwareSupplier == 0x10000 )
            fix_residual( res );

        /* FIXME: if OF we should do something different */
        if( !bd->of_entry && res &&
           res->ResidualLength <= sizeof(RESIDUAL) && res->Version == 0 ) {
                u_long lowpage=ULONG_MAX, highpage;
                u_long imghigh=0, stkhigh=0;
                /* Find the highest and large enough contiguous zone
                   consisting of free and BootImage sections. */
                /* Find 3 free areas of memory, one for the main image, one
                 * for the stack (STACK_PAGES), and one page for the map
                 * structures. They are allocated from the top of memory.
                 * In most cases the stack will be put just below the image.
                 */
                while((highpage =
                       find_next_zone(res, lowpage, BootImage|Free))) {
                        lowpage=find_zone_start(res, highpage, BootImage|Free);
                        if ((highpage-lowpage)>minpages &&
                            highpage>imghigh) {
                                imghigh=highpage;
                                highpage -=minpages;
                        }
                        if ((highpage-lowpage)>STACK_PAGES &&
                            highpage>stkhigh) {
                                stkhigh=highpage;
                                highpage-=STACK_PAGES;
                        }
                }

                bd->image = (void *)((imghigh-minpages)<<PAGE_SHIFT);
                bd->stack=(void *) (stkhigh<<PAGE_SHIFT);

                /* The code mover is put at the lowest possible place
                 * of free memory. If this corresponds to the loaded boot
                 * partition image it does not matter because it overwrites
                 * the unused part of it (x86 code).
                 */
                bd->mover=(void *) (lowpage<<PAGE_SHIFT);

                /* Let us flush the caches in all cases. After all it should
                 * not harm even on 601 and we don't care about performance.
                 * Right now it's easy since all processors have a line size
                 * of 32 bytes. Once again residual data has proved unreliable.
                 */
                bd->cache_lsize = 32;
        }
        /* For now we always assume that it's successful; we should
         * handle the case of insufficient memory better.
         */
        return 0;
}

void * valloc(u_long size) {
        map *p, *q;
        struct _mm_private * mm = (struct _mm_private *) bd->mm_private;

        if (size==0) return NULL;
        size=PAGE_ALIGN(size)-1;
        for (p=mm->virtavail; p; p=p->next) {
                if (p->base+size <= p->end) break;
        }
        if(!p) return NULL;
        q=alloc_map();
        q->base=p->base;
        q->end=q->base+size;
        q->firstpte=MAP_USED_VIRT;
        insert_map(&mm->virtused, q);
        if (q->end==p->end) free_map(remove_map(&mm->virtavail, p));
        else p->base += size+1;
        return (void *)q->base;
}

static
void vflush(map *virtmap) {
        struct _mm_private * mm = (struct _mm_private *) bd->mm_private;
        u_long i, limit=(mm->hashmask>>3)+8;
        hash_entry volatile *p=(hash_entry *) mm->sdr1;

        /* PTE handling is simple since the processor never updates
         * the entries. Writable pages always have the C bit set and
         * all valid entries have the R bit set. From the processor's
         * point of view the hash table is read only.
         */
        for (i=0; i<limit; i++) {
                if (p[i].key<0) {
                        u_long va;
                        va = ((i<<9)^((p[i].key)<<5)) &0x3ff000;
                        if (p[i].key&0x40) va^=0x3ff000;
                        va |= ((p[i].key<<21)&0xf0000000)
                          | ((p[i].key<<22)&0x0fc00000);
                        if (va>=virtmap->base && va<=virtmap->end) {
                                p[i].key=0;
                                asm volatile("sync; tlbie %0; sync" : :
                                             "r" (va));
                        }
                }
        }
}

void vfree(void *vaddr) {
        map *physmap, *virtmap; /* Actual mappings pertaining to this vm */
        struct _mm_private * mm = (struct _mm_private *) bd->mm_private;

        /* Flush memory queues */
        asm volatile("sync": : : "memory");

        virtmap = remove_map_at(&mm->virtused, vaddr);
        if (!virtmap) return;

        /* Remove mappings corresponding to virtmap */
        for (physmap=mm->mappings; physmap; ) {
                map *nextmap=physmap->next;
                if (physmap->base>=virtmap->base
                    && physmap->base<virtmap->end) {
                        free_map(remove_map(&mm->mappings, physmap));
                }
                physmap=nextmap;
        }

        vflush(virtmap);

        virtmap->firstpte= MAP_FREE_VIRT;
        insert_map(&mm->virtavail, virtmap);
        coalesce_maps(mm->virtavail);
}

void vunmap(void *vaddr) {
        map *physmap, *virtmap; /* Actual mappings pertaining to this vm */
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        /* Flush memory queues */
        asm volatile("sync": : : "memory");

        /* vaddr must be within one of the vm areas in use and
         * must correspond to the base of one of the mapped physical areas.
         */
        for (virtmap=mm->virtused; virtmap; virtmap=virtmap->next) {
                if (virtmap->base<=(u_long)vaddr &&
                    virtmap->end>=(u_long)vaddr) break;
        }
        if (!virtmap) return;

        physmap = remove_map_at(&mm->mappings, vaddr);
        if(!physmap) return;
        vflush(physmap);
        free_map(physmap);
}

int vmap(void *vaddr, u_long p, u_long size) {
        map *q;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        size=PAGE_ALIGN(size);
        if(!size) return 1;
        /* Check that the requested area fits in one vm area */
        for (q=mm->virtused; q; q=q->next) {
                if ((q->base <= (u_long)vaddr) &&
                    (q->end>=(u_long)vaddr+size -1)) break;
        }
        if (!q) return 1;
        q= alloc_map();
        if (!q) return 1;
        q->base = (u_long)vaddr;
        q->end = (u_long)vaddr+size-1;
        q->firstpte = p;
        return insert_map(&mm->mappings, q);
}

static
void create_identity_mappings(int type, int attr) {
        u_long lowpage=ULONG_MAX, highpage;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        RESIDUAL * res=bd->residual;

        while((highpage = find_next_zone(res, lowpage, type))) {
                map *p;
                lowpage=find_zone_start(res, highpage, type);
                p=alloc_map();
                /* Do not map page 0 to catch null pointers */
                lowpage = lowpage ? lowpage : 1;
                p->base=lowpage<<PAGE_SHIFT;
                p->end=(highpage<<PAGE_SHIFT)-1;
                p->firstpte = (lowpage<<PAGE_SHIFT)|attr;
                insert_map(&mm->mappings, p);
        }
}

static inline
void add_free_map(u_long base, u_long end) {
        map *q=NULL;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        if (base<end) q=alloc_map();
        if (!q) return;
        q->base=base;
        q->end=end-1;
        q->firstpte=MAP_FREE_VIRT;
        insert_map(&mm->virtavail, q);
}

static inline
void create_free_vm(void) {
        map *p;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        u_long vaddr=PAGE_SIZE; /* Never map vaddr 0 */
        for(p=mm->mappings; p; p=p->next) {
                add_free_map(vaddr, p->base);
                vaddr=p->end+1;
        }
        /* Special end of memory case */
        if (vaddr) add_free_map(vaddr,0);
}

/* Memory management initialization.
 * Set up the mapping lists.
 */

static inline
void add_perm_map(u_long start, u_long size) {
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        map *p=alloc_map();
        p->base = start;
        p->end = start + size - 1;
        p->firstpte = MAP_PERM_PHYS;
        insert_map(& mm->physperm , p);
}

void mm_init(u_long image_size)
{
        u_long lowpage=ULONG_MAX, highpage;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        RESIDUAL * res=bd->residual;
        extern void (tlb_handlers)(void);
        extern void (_handler_glue)(void);
        int i;
        map *p;

        /* The checks are simplified by the fact that the image
         * and stack area are always allocated at the upper end
         * of a free block.
         */
        while((highpage = find_next_zone(res, lowpage, BootImage|Free))) {
                lowpage=find_zone_start(res, highpage, BootImage|Free);
                if ( ( ((u_long)bd->image+PAGE_ALIGN(image_size))>>PAGE_SHIFT)
                     == highpage) {
                        highpage=(u_long)(bd->image)>>PAGE_SHIFT;
                        add_perm_map((u_long)bd->image, image_size);
                }
                if ( (( u_long)bd->stack>>PAGE_SHIFT) == highpage) {
                        highpage -= STACK_PAGES;
                        add_perm_map(highpage<<PAGE_SHIFT,
                                     STACK_PAGES*PAGE_SIZE);
                }
                /* Protect the interrupt handlers that we need! */
                if (lowpage<2) lowpage=2;
                /* Check for the special case of a full area! */
                if (highpage>lowpage) {
                        p = alloc_map();
                        p->base = lowpage<<PAGE_SHIFT;
                        p->end = (highpage<<PAGE_SHIFT)-1;
                        p->firstpte=MAP_FREE_PHYS;
                        insert_map(&mm->physavail, p);
                }
        }

        /* Allocate the hash table */
        mm->sdr1=__palloc(0x10000, PA_PERM|16);
        _write_SDR1((u_long)mm->sdr1);
        memset(mm->sdr1, 0, 0x10000);
        mm->hashmask = 0xffc0;
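        /* (Note added for clarity.) 0x10000 bytes is the architectural
         * minimum hash table size; the 0xffc0 mask keeps hash values within
         * its 1024 PTEGs of 64 bytes each.
         */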

        /* Setup the segment registers as we want them */
        for (i=0; i<16; i++) _write_SR(i, (void *)(i<<28));
        /* Create the maps for the physical memory; FirmwareCode does not
         * seem to be necessary. ROM is mapped read-only to reduce the risk
         * of reprogramming it because it's often Flash and some are
         * amazingly easy to overwrite.
         */
        create_identity_mappings(BootImage|Free|FirmwareCode|FirmwareHeap|
                                 FirmwareStack, PTE_RAM);
        create_identity_mappings(SystemROM, PTE_ROM);
        create_identity_mappings(IOMemory|SystemIO|SystemRegs|
                                 PCIAddr|PCIConfig|ISAAddr, PTE_IO);

        create_free_vm();

        /* Install our own MMU and trap handlers. */
        codemove((void *) 0x300, _handler_glue, 0x100, bd->cache_lsize);
        codemove((void *) 0x400, _handler_glue, 0x100, bd->cache_lsize);
        codemove((void *) 0x600, _handler_glue, 0x100, bd->cache_lsize);
        codemove((void *) 0x700, _handler_glue, 0x100, bd->cache_lsize);
}

void * salloc(u_long size) {
        map *p, *q;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        if (size==0) return NULL;

        size = (size+7)&~7;

        for (p=mm->sallocfree; p; p=p->next) {
                if (p->base+size <= p->end) break;
        }
        if(!p) {
                void *m;
                m = __palloc(size, PA_SUBALLOC);
                p = alloc_map();
                if (!m || !p) return NULL; /* both allocations must succeed */
                p->base = (u_long) m;
                p->firstpte = MAP_FREE_SUBS;
                p->end = (u_long)m+PAGE_ALIGN(size)-1;
                insert_map(&mm->sallocfree, p);
                coalesce_maps(mm->sallocfree);
                coalesce_maps(mm->sallocphys);
        }
        q=alloc_map();
        q->base=p->base;
        q->end=q->base+size-1;
        q->firstpte=MAP_USED_SUBS;
        insert_map(&mm->sallocused, q);
        if (q->end==p->end) free_map(remove_map(&mm->sallocfree, p));
        else p->base += size;
        memset((void *)q->base, 0, size);
        return (void *)q->base;
}

void sfree(void *p) {
        map *q;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        q=remove_map_at(&mm->sallocused, p);
        if (!q) return;
        q->firstpte=MAP_FREE_SUBS;
        insert_map(&mm->sallocfree, q);
        coalesce_maps(mm->sallocfree);
}

/* First/last area fit; the low bits of flags (PA_ALIGN_MASK) give the log2
 * of the required alignment. The algorithms are stupid because we expect
 * very little fragmentation of the areas, if any. The unit of allocation
 * is the page. The allocation is by default performed from higher addresses
 * down, unless flags&PA_LOW is true.
 */
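/* Example (from mm_init() above): the hash table is obtained with
 * __palloc(0x10000, PA_PERM|16), i.e. a permanently allocated 64 kB block
 * aligned on a 64 kB boundary (alignment = 1 << 16).
 */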

void * __palloc(u_long size, int flags)
{
        u_long mask = ((1<<(flags&PA_ALIGN_MASK))-1);
        map *newmap, *frommap, *p, *splitmap=0;
        map **queue;
        u_long qflags;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        /* Asking for a size which is not a multiple of the alignment
           is likely to be an error. */

        if (size & mask) return NULL;
        size = PAGE_ALIGN(size);
        if(!size) return NULL;

        if (flags&PA_SUBALLOC) {
                queue = &mm->sallocphys;
                qflags = MAP_SUBS_PHYS;
        } else if (flags&PA_PERM) {
                queue = &mm->physperm;
                qflags = MAP_PERM_PHYS;
        } else {
                queue = &mm->physused;
                qflags = MAP_USED_PHYS;
        }
        /* We need to allocate that one now so no two allocations may attempt
         * to take the same memory simultaneously. Alloc_map_page does
         * not call back here to avoid infinite recursion in alloc_map.
         */

        if (mask&PAGE_MASK) {
                splitmap=alloc_map();
                if (!splitmap) return NULL;
        }

        for (p=mm->physavail, frommap=NULL; p; p=p->next) {
                u_long high = p->end;
                u_long limit  = ((p->base+mask)&~mask) + size-1;
                if (high>=limit && ((p->base+mask)&~mask)+size>p->base) {
                        frommap = p;
                        if (flags&PA_LOW) break;
                }
        }

        if (!frommap) {
                if (splitmap) free_map(splitmap);
                return NULL;
        }

        newmap=alloc_map();

        if (flags&PA_LOW) {
                newmap->base = (frommap->base+mask)&~mask;
        } else {
                newmap->base = (frommap->end +1 - size) & ~mask;
        }

        newmap->end = newmap->base+size-1;
        newmap->firstpte = qflags;

        /* Add a fragment if we don't allocate until the end. */

        if (splitmap) {
                splitmap->base=newmap->base+size;
                splitmap->end=frommap->end;
                splitmap->firstpte= MAP_FREE_PHYS;
                frommap->end=newmap->base-1;
        } else if (flags & PA_LOW) {
                frommap->base=newmap->base+size;
        } else {
                frommap->end=newmap->base-1;
        }

        /* Remove a fragment if it becomes empty. */
        if (frommap->base == frommap->end+1) {
                free_map(remove_map(&mm->physavail, frommap));
        }

        if (splitmap) {
                if (splitmap->base == splitmap->end+1) {
                        free_map(remove_map(&mm->physavail, splitmap));
                } else {
                        insert_map(&mm->physavail, splitmap);
                }
        }

        insert_map(queue, newmap);
        return (void *) newmap->base;

}

void pfree(void * p) {
        map *q;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        q=remove_map_at(&mm->physused, p);
        if (!q) return;
        q->firstpte=MAP_FREE_PHYS;
        insert_map(&mm->physavail, q);
        coalesce_maps(mm->physavail);
}

#ifdef DEBUG
/* Debugging functions */
void print_maps(map *chain, const char *s) {
        map *p;
        printk("%s",s);
        for(p=chain; p; p=p->next) {
                printk("    %08lx-%08lx: %08lx\n",
                       p->base, p->end, p->firstpte);
        }
}

void print_all_maps(const char * s) {
        u_long freemaps;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        map *free;
        printk("%s",s);
        print_maps(mm->mappings, "  Currently defined mappings:\n");
        print_maps(mm->physavail, "  Currently available physical areas:\n");
        print_maps(mm->physused, "  Currently used physical areas:\n");
        print_maps(mm->virtavail, "  Currently available virtual areas:\n");
        print_maps(mm->virtused, "  Currently used virtual areas:\n");
        print_maps(mm->physperm, "  Permanently used physical areas:\n");
        print_maps(mm->sallocphys, "  Physical memory used for salloc:\n");
        print_maps(mm->sallocfree, "  Memory available for salloc:\n");
        print_maps(mm->sallocused, "  Memory allocated through salloc:\n");
        for (freemaps=0, free=mm->freemaps; free; freemaps++, free=free->next);
        printk("  %ld free maps.\n", freemaps);
}

void print_hash_table(void) {
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        hash_entry *p=(hash_entry *) mm->sdr1;
        u_int i, valid=0;
        for (i=0; i<((mm->hashmask)>>3)+8; i++) {
                if (p[i].key<0) valid++;
        }
        printk("%u valid hash entries on pass 1.\n", valid);
        valid = 0;
        for (i=0; i<((mm->hashmask)>>3)+8; i++) {
                if (p[i].key<0) valid++;
        }
        printk("%u valid hash entries on pass 2.\n"
               "     vpn:rpn_attr, p/s, pteg.i\n", valid);
        for (i=0; i<((mm->hashmask)>>3)+8; i++) {
                if (p[i].key<0) {
                        u_int pteg=(i>>3);
                        u_long vpn;
                        vpn = (pteg^((p[i].key)>>7)) &0x3ff;
                        if (p[i].key&0x40) vpn^=0x3ff;
                        vpn |= ((p[i].key<<9)&0xffff0000)
                          | ((p[i].key<<10)&0xfc00);
                        printk("%08lx:%08lx, %s, %5d.%d\n",
                               vpn,  p[i].rpn, p[i].key&0x40 ? "sec" : "pri",
                               pteg, i%8);
                }
        }
}

#endif