source: rtems/c/src/lib/libbsp/powerpc/shared/bootloader/mm.c @ df49c60

Last change on this file since df49c60 was acc25ee, checked in by Joel Sherrill <joel.sherrill@…>, on 12/02/99 at 14:31:19

Merge of the mcp750 and mvme2307 BSPs by Eric Valette <valette@…>.
As part of this effort, the mpc750 libcpu code is now shared with the
ppc6xx.

1/*
2 *  mm.c -- Crude memory management for early boot.
3 *
4 *  Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
5 *
6 *  Modified to compile in RTEMS development environment
7 *  by Eric Valette
8 *
9 *  Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
10 *
11 *  The license and distribution terms for this file may be
12 *  found in the file LICENSE in this distribution or at
13 *  http://www.OARcorp.com/rtems/license.html.
14 *
15 * $Id$
16 */
17
18/* This code is a crude memory manager for early boot for LinuxPPC.
19 * As such, it does not try to perform many optimizations depending
20 * on the processor, it only uses features which are common to
21 * all processors (no BATs...).
22 *
23 * On PReP platforms (the only ones on which it works for now),
24 * it maps 1:1 all RAM/ROM and I/O space as claimed by the
25 * residual data. The holes between these areas can be virtually
26 * remapped to any of these, since for some functions it is very handy
27 * to have virtually contiguous but physically discontiguous memory.
28 *
29 * Physical memory allocation is also very crude, since it's only
30 * designed to manage a small number of large chunks. For valloc/vfree
31 * and palloc/pfree, the unit of allocation is the 4kB page.
32 *
33 * The salloc/sfree pair was added after tracing gunzip and seeing
34 * it perform a very large number of small allocations.
35 * For these the unit of allocation is 8 bytes (the s stands for
36 * small or subpage). This memory is cleared when allocated.
37 *
38 */
39
40#include <sys/types.h>
41#include <libcpu/spr.h>
42#include "bootldr.h"
43#include <libcpu/mmu.h>
44#include <libcpu/page.h>
45#include <limits.h>
46
47/* We use our own kind of simple memory areas for the loader, but
48 * we want to avoid potential clashes with kernel includes.
49 * Here a map describes a contiguous area from base to end;
50 * for actual mappings the firstpte field holds the corresponding physical
51 * address with the low-order bits set for caching and permission.
52 */
53
54typedef struct _map {
55        struct _map *next;
56        u_long base;
57        u_long end;
58        u_long firstpte;
59} map;
60
61/* The LSB of the firstpte entries on map lists other than mappings
62 * are constants which can be checked for debugging. All these constants
63 * have the bit of weight 4 set; this bit is zero in the mappings list entries.
64 * Actually firstpte&7 value is:
65 * - 0 or 1 should not happen
66 * - 2 for RW actual virtual->physical mappings
67 * - 3 for RO actual virtual->physical mappings
68 * - 6 for free areas to be suballocated by salloc
69 * - 7 for salloc'ated areas
70 * - 4 or 5 for all others, in this case firstpte & 63 is
71 *   - 4 for unused maps (on the free list)
72 *   - 12 for free physical memory
73 *   - 13 for physical memory in use
74 *   - 20 for free virtual address space
75 *   - 21 for allocated virtual address space
76 *   - 28 for physical memory space suballocated by salloc
77 *   - 29 for physical memory that can't be freed
78 */
79
80#define MAP_FREE_SUBS 6
81#define MAP_USED_SUBS 7
82
83#define MAP_FREE 4     
84#define MAP_FREE_PHYS 12
85#define MAP_USED_PHYS 13
86#define MAP_FREE_VIRT 20
87#define MAP_USED_VIRT 21
88#define MAP_SUBS_PHYS 28
89#define MAP_PERM_PHYS 29
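
/* Illustrative sketch (not part of the original file): how the tag values
 * above could be decoded when debugging a map list.  The helper name is
 * hypothetical and only restates the legend in the comment above.
 */
#if 0
static const char *map_kind(const map *m) {
        switch (m->firstpte & 7) {
        case 2: return "RW virtual->physical mapping";
        case 3: return "RO virtual->physical mapping";
        case 6: return "free area for salloc";
        case 7: return "salloc'ated area";
        case 4: case 5:
                switch (m->firstpte & 63) {
                case MAP_FREE:      return "unused map";
                case MAP_FREE_PHYS: return "free physical memory";
                case MAP_USED_PHYS: return "physical memory in use";
                case MAP_FREE_VIRT: return "free virtual address space";
                case MAP_USED_VIRT: return "allocated virtual address space";
                case MAP_SUBS_PHYS: return "physical memory for salloc";
                case MAP_PERM_PHYS: return "permanently allocated memory";
                }
        }
        return "invalid tag";
}
#endif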
90
91SPR_RW(SDR1);
92SPR_RO(DSISR);
93SPR_RO(DAR);
94
95/* We need a few statically allocated free maps to bootstrap the
96 * memory management. */
97static map free_maps[4] = {{free_maps+1, 0, 0, MAP_FREE},
98                           {free_maps+2, 0, 0, MAP_FREE},
99                           {free_maps+3, 0, 0, MAP_FREE},
100                           {NULL, 0, 0, MAP_FREE}};
101struct _mm_private {
102        void *sdr1;
103        u_long hashmask;
104        map *freemaps;     /* Pool of unused map structs */
105        map *mappings;     /* Sorted list of virtual->physical mappings */
106        map *physavail;    /* Unallocated physical address space */
107        map *physused;     /* Allocated physical address space */
108        map *physperm;     /* Permanently allocated physical space */
109        map *virtavail;    /* Unallocated virtual address space */
110        map *virtused;     /* Allocated virtual address space */
111        map *sallocfree;   /* Free maps for salloc */
112        map *sallocused;   /* Used maps for salloc */
113        map *sallocphys;   /* Physical areas used by salloc */
114        u_int hashcnt;     /* Used to cycle in PTEG when they overflow */
115} mm_private = {hashmask: 0xffc0,
116                freemaps: free_maps+0};
117
118/* A simplified hash table entry declaration */
119typedef struct _hash_entry {
120        int key;
121        u_long rpn;
122} hash_entry;
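
/* Added note: as installed by _handler() below, key is the first word of a
 * classic 32-bit PowerPC PTE: 0x80000000 is the valid bit, (vsid<<7) the
 * virtual segment id, 0x40 marks a secondary-hash entry and the low six
 * bits are the abbreviated page index (vaddr>>22).  rpn is the second word:
 * the physical page address plus the low-order attribute bits copied from
 * the map's firstpte.
 */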
123
124void print_maps(map *, const char *);
125
126/* The handler used for all exceptions, although for now it is only
127 * designed to properly handle MMU exceptions by filling the hash table.
128 */
129
130
131void _handler(int vec, ctxt *p) {
132        map *area;
133        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
134        u_long vaddr, cause;
135        if (vec==4 || vec==7) { /* ISI exceptions are different */
136                vaddr = p->nip;
137                cause = p->msr;
138        } else { /* Valid for DSI and alignment exceptions */
139                vaddr = _read_DAR();
140                cause = _read_DSISR();
141        }
142
143        if (vec==3 || vec==4) {
144                /* Panic if the fault is anything but a missing PTE. */
145                if (!(cause & 0x40000000)) {
146                        MMUon();
147                        printk("\nPanic: vector=%x, cause=%lx\n", vec, cause);
148                        hang("Memory protection violation at ", vaddr, p);
149                }
150               
151                for(area=mm->mappings; area; area=area->next) {
152                        if(area->base<=vaddr && vaddr<=area->end) break;
153                }
154
155                if (area) {
156                        u_long hash, vsid, rpn;
157                        hash_entry volatile *hte, *_hte1;
158                        u_int i, alt=0, flushva;
159               
160                        vsid = _read_SR((void *)vaddr);
161                        rpn = (vaddr&PAGE_MASK)-area->base+area->firstpte;
162                        hash = vsid<<6;
163                        hash ^= (vaddr>>(PAGE_SHIFT-6))&0x3fffc0;
164                        hash &= mm->hashmask;
165                        /* Find an empty entry in the PTEG, else
166                         * replace a random one.
167                         */
168                        hte = (hash_entry *) ((u_long)(mm->sdr1)+hash);
169                        for (i=0; i<8; i++) {
170                                if (hte[i].key>=0) goto found;
171                        }
172                        hash ^= mm->hashmask;
173                        alt = 0x40; _hte1 = hte;
174                        hte = (hash_entry *) ((u_long)(mm->sdr1)+hash);
175                       
176                        for (i=0; i<8; i++) {
177                                if (hte[i].key>=0) goto found;
178                        }
179                        alt = 0;
180                        hte = _hte1;
181                        /* Choose a victim entry and replace it. There might be
182                         * better policies to choose the victim, but in a boot
183                         * loader we want simplicity as long as it works.
184                         *
185                         * We would not need to invalidate the TLB entry since
186                         * the mapping is still valid. But this would be a mess
187                         * when unmapping so we make sure that the TLB is a
188                         * subset of the hash table under all circumstances.
189                         */
190                        i = mm->hashcnt;
191                        mm->hashcnt = (mm->hashcnt+1)%8;
192                        /* Note that the hash is already complemented here ! */
193                        flushva = (~(hash<<9)^((hte[i].key)<<5)) &0x3ff000;
194                        if (hte[i].key&0x40) flushva^=0x3ff000;
195                        flushva |= ((hte[i].key<<21)&0xf0000000)
196                          | ((hte[i].key<<22)&0x0fc00000);
197                        hte[i].key=0;
198                        asm volatile("sync; tlbie %0; sync" : : "r" (flushva));
199                found:
200                        hte[i].rpn = rpn;
201                        asm volatile("eieio": : );
202                        hte[i].key = 0x80000000|(vsid<<7)|alt|
203                          ((vaddr>>22)&0x3f);
204                        return;
205                } else {
206                        MMUon();
207                        printk("\nPanic: vector=%x, cause=%lx\n", vec, cause);
208                        hang("\nInvalid memory access attempt at ", vaddr, p);
209                }
210        } else {
211          MMUon();
212          printk("\nPanic: vector=%x, dsisr=%lx, faultaddr =%lx, msr=%lx opcode=%lx\n", vec,
213                 cause, p->nip, p->msr, * ((unsigned int*) p->nip) );
214          if (vec == 7) {
215            unsigned int* ptr = ((unsigned int*) p->nip) - 4 * 10;
216            for (; ptr <= (((unsigned int*) p->nip) + 4 * 10); ptr ++)
217              printk("Hexadecimal code at address %x = %x\n", ptr, *ptr);
218          }
219          hang("Program or alignment exception at ", vaddr, p);
220        }
221}
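
/* Illustrative sketch (not from the original file): the primary and
 * secondary PTEG offsets computed inline by _handler() above, restated as
 * a stand-alone helper.  The name is hypothetical.
 */
#if 0
static u_long pteg_offset(struct _mm_private *mm, u_long vsid, u_long vaddr,
                          int secondary) {
        u_long hash = (vsid<<6) ^ ((vaddr>>(PAGE_SHIFT-6)) & 0x3fffc0);
        if (secondary) hash = ~hash;  /* the secondary hash is the complement */
        /* Each PTEG holds 8 hash_entry slots; the offset is relative to sdr1. */
        return hash & mm->hashmask;
}
#endif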
222
223/* Generic routines for map handling.
224 */
225
226static inline
227void free_map(map *p) {
228        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
229        if (!p) return;
230        p->next=mm->freemaps;
231        mm->freemaps=p;
232        p->firstpte=MAP_FREE;
233}
234
235/* Sorted insertion in linked list */
236static
237int insert_map(map **head, map *p) {
238        map *q = *head;
239        if (!p) return 0;
240        if (q && (q->base < p->base)) {
241                for(;q->next && q->next->base<p->base; q = q->next);
242                if ((q->end >= p->base) ||
243                    (q->next && p->end>=q->next->base)) {
244                        free_map(p);
245                        printk("Overlapping areas!\n");
246                        return 1;
247                }
248                p->next = q->next;
249                q->next = p;
250        } else { /* Insert at head */
251                if (q && (p->end >= q->base)) {
252                        free_map(p);
253                        printk("Overlapping areas!\n");
254                        return 1;
255                }
256                p->next = q;
257                *head = p;
258        }
259        return 0;
260}
261
262
263/* Removal from linked list */
264
265static
266map *remove_map(map **head, map *p) {
267        map *q = *head;
268
269        if (!p || !q) return NULL;
270        if (q==p) {
271                *head = q->next;
272                return p;
273        }
274        for(;q && q->next!=p; q=q->next);
275        if (q) {
276                q->next=p->next;
277                return p;
278        } else {
279                return NULL;
280        }
281}
282
283static
284map *remove_map_at(map **head, void * vaddr) {
285        map *p, *q = *head;
286
287        if (!vaddr || !q) return NULL;
288        if (q->base==(u_long)vaddr) {
289                *head = q->next;
290                return q;
291        }
292        while (q->next && q->next->base != (u_long)vaddr) q=q->next;
293        p=q->next;
294        if (p) q->next=p->next;
295        return p;
296}
297
298static inline
299map * alloc_map_page(void) {
300        map *from, *p;
301        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
302
303        /* printk("Allocating new map page !"); */
304        /* Get the highest page */
305        for (from=mm->physavail; from && from->next; from=from->next);
306        if (!from) return NULL;
307
308        from->end -= PAGE_SIZE;
309       
310        mm->freemaps = (map *) (from->end+1);
311       
312        for(p=mm->freemaps; p<mm->freemaps+PAGE_SIZE/sizeof(map)-1; p++) {
313                p->next = p+1;
314                p->firstpte = MAP_FREE;
315        } 
316        (p-1)->next=0;
317
318        /* Use the last map of the new page to describe the page itself
319         * and insert it into the permanent map list.
320         */
321
322        p->firstpte = MAP_PERM_PHYS;
323        p->base=(u_long) mm->freemaps;
324        p->end = p->base+PAGE_SIZE-1;
325       
326        insert_map(&mm->physperm, p);
327       
328        if (from->end+1 == from->base)
329                free_map(remove_map(&mm->physavail, from));
330       
331        return mm->freemaps;
332}
333
334static
335map * alloc_map(void) {
336        map *p;
337        struct _mm_private * mm = (struct _mm_private *) bd->mm_private;
338
339        p = mm->freemaps;
340        if (!p) {
341                p=alloc_map_page();
342        }
343
344        if(p) mm->freemaps=p->next;
345
346        return p;
347}
348
349static
350void coalesce_maps(map *p) {
351        while(p) {
352                if (p->next && (p->end+1 == p->next->base)) {
353                        map *q=p->next;
354                        p->end=q->end;
355                        p->next=q->next;
356                        free_map(q);
357                } else {
358                        p = p->next;
359                }
360        }
361}
362
363/* These routines are used to find the free memory zones to avoid
364 * overlapping destructive copies when initializing.
365 * They work from the top because of the way we want to boot.
366 * In the following the term zone refers to the memory described
367 * by one or several contiguous so-called segments in the
368 * residual data.
369 */
370#define STACK_PAGES 2
371static inline u_long
372find_next_zone(RESIDUAL *res, u_long lowpage, u_long flags) {
373        u_long i, newmin=0, size=0;
374        for(i=0; i<res->ActualNumMemSegs; i++) {
375                if (res->Segs[i].Usage & flags
376                    && res->Segs[i].BasePage<lowpage
377                    && res->Segs[i].BasePage>newmin) {
378                        newmin=res->Segs[i].BasePage;
379                        size=res->Segs[i].PageCount;
380                }
381        }
382        return newmin+size;
383}
384
385static inline u_long
386find_zone_start(RESIDUAL *res, u_long highpage, u_long flags) {
387        u_long i;
388        int progress;
389        do {
390                progress=0;
391                for (i=0; i<res->ActualNumMemSegs; i++) {
392                        if ( (res->Segs[i].BasePage+res->Segs[i].PageCount
393                              == highpage)
394                             && res->Segs[i].Usage & flags) {
395                                highpage=res->Segs[i].BasePage;
396                                progress=1;
397                        }
398                }
399        } while(progress);
400        return highpage;
401}
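
/* Illustrative sketch (not from the original file): how the two helpers
 * above are combined to enumerate zones from the top of memory down, as
 * done in early_setup() and mm_init().
 */
#if 0
{
        RESIDUAL *res = bd->residual;
        u_long lowpage = ULONG_MAX, highpage;
        while ((highpage = find_next_zone(res, lowpage, BootImage|Free))) {
                lowpage = find_zone_start(res, highpage, BootImage|Free);
                /* Pages [lowpage, highpage) now form one contiguous zone of
                 * BootImage/Free segments; the next iteration finds the
                 * highest zone strictly below lowpage. */
        }
}
#endif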
402
403/* The Motorola NT firmware does not provide any setting in the residual
404 * data about memory segment usage. The following table provides enough
405 * info so that this bootloader can work.
406 */
407MEM_MAP seg_fix[] = {
408    { 0x2000, 0xFFF00, 0x00100 },
409    { 0x0020, 0x02000, 0x7E000 },
410    { 0x0008, 0x00800, 0x00168 },
411    { 0x0004, 0x00000, 0x00005 },
412    { 0x0001, 0x006F1, 0x0010F },
413    { 0x0002, 0x006AD, 0x00044 },
414    { 0x0010, 0x00005, 0x006A8 },
415    { 0x0010, 0x00968, 0x00698 },
416    { 0x0800, 0xC0000, 0x3F000 },
417    { 0x0600, 0xBF800, 0x00800 },
418    { 0x0500, 0x81000, 0x3E800 },
419    { 0x0480, 0x80800, 0x00800 },
420    { 0x0440, 0x80000, 0x00800 } };
421
422
423/* The Motorola NT firmware does not set up all required info in the residual
424 * data. This routine changes some things so that the bootloader and
425 * Linux are happy.
426 */
427void
428fix_residual( RESIDUAL *res )
429{
430#if 0
431    PPC_DEVICE *hostbridge;
432#endif
433    int i;
434
435    /* Missing memory segment information */
436    res->ActualNumMemSegs = sizeof(seg_fix)/sizeof(MEM_MAP);
437    for (i=0; i<res->ActualNumMemSegs; i++) {
438        res->Segs[i].Usage = seg_fix[i].Usage;
439        res->Segs[i].BasePage = seg_fix[i].BasePage;
440        res->Segs[i].PageCount = seg_fix[i].PageCount;
441    }
442    /* The following should be fixed in the current version of the
443     * kernel and of the bootloader.
444     */
445#if 0
446    /* PPCBug has this zero */
447    res->VitalProductData.CacheLineSize = 0;
448    /* Motorola NT firmware sets TimeBaseDivisor to 0 */
449    if ( res->VitalProductData.TimeBaseDivisor == 0 ) {
450        res->VitalProductData.TimeBaseDivisor = 4000;
451    }
452
453    /* Motorola NT firmware records the PCIBridge as a "PCIDEVICE" and
454     * sets "PCIBridgeDirect". This bootloader and linux works better if
455     * BusId = "PROCESSORDEVICE" and Interface = "PCIBridgeIndirect".
456     */
457    hostbridge=residual_find_device(PCIDEVICE, NULL,
458                                        BridgeController,
459                                        PCIBridge, -1, 0);
460    if (hostbridge) {
461        hostbridge->DeviceId.BusId = PROCESSORDEVICE;
462        hostbridge->DeviceId.Interface = PCIBridgeIndirect;
463    }
464#endif
465}
466
467/* This routine is the first C code called with very little stack space!
468 * Its goal is to find where the boot image can be moved. This will
469 * be the highest address with enough room.
470 */
471int early_setup(u_long image_size) {
472        register RESIDUAL *res = bd->residual;
473        u_long minpages = PAGE_ALIGN(image_size)>>PAGE_SHIFT;
474
475        /* Fix residual if we are loaded by Motorola NT firmware */
476        if ( res && res->VitalProductData.FirmwareSupplier == 0x10000 )
477            fix_residual( res );
478
479        /* FIXME: if OF we should do something different */
480        if( !bd->of_entry && res &&
481           res->ResidualLength <= sizeof(RESIDUAL) && res->Version == 0 ) {
482                u_long lowpage=ULONG_MAX, highpage;
483                u_long imghigh=0, stkhigh=0;
484                /* Find the highest and large enough contiguous zone
485                   consisting of free and BootImage sections. */
486                /* Find 3 free areas of memory, one for the main image, one
487                 * for the stack (STACK_PAGES), and one page to hold the map
488                 * structures. They are allocated from the top of memory.
489                 * In most cases the stack will be put just below the image.
490                 */
491                while((highpage =
492                       find_next_zone(res, lowpage, BootImage|Free))) {
493                        lowpage=find_zone_start(res, highpage, BootImage|Free);
494                        if ((highpage-lowpage)>minpages &&
495                            highpage>imghigh) {
496                                imghigh=highpage;
497                                highpage -=minpages;
498                        }
499                        if ((highpage-lowpage)>STACK_PAGES &&
500                            highpage>stkhigh) {
501                                stkhigh=highpage;
502                                highpage-=STACK_PAGES;
503                        }
504                }
505
506                bd->image = (void *)((imghigh-minpages)<<PAGE_SHIFT);
507                bd->stack=(void *) (stkhigh<<PAGE_SHIFT);
508
509                /* The code mover is put at the lowest possible place
510                 * of free memory. If this corresponds to the loaded boot
511                 * partition image it does not matter because it overwrites
512                 * the unused part of it (x86 code).
513                 */
514                bd->mover=(void *) (lowpage<<PAGE_SHIFT);
515
516                /* Let us flush the caches in all cases. After all it should
517                 * not harm even on 601 and we don't care about performance.
518                 * Right now it's easy since all processors have a line size
519                 * of 32 bytes. Once again residual data has proved unreliable.
520                 */
521                bd->cache_lsize = 32;
522        }
523        /* For now we always assume that it's successful; we should
524         * handle the case of insufficient memory better.
525         */
526        return 0;
527}
528
529void * valloc(u_long size) {
530        map *p, *q;
531        struct _mm_private * mm = (struct _mm_private *) bd->mm_private;
532
533        if (size==0) return NULL;
534        size=PAGE_ALIGN(size)-1;
535        for (p=mm->virtavail; p; p=p->next) {
536                if (p->base+size <= p->end) break;
537        }
538        if(!p) return NULL;
539        q=alloc_map();
540        q->base=p->base;
541        q->end=q->base+size;
542        q->firstpte=MAP_USED_VIRT;
543        insert_map(&mm->virtused, q);
544        if (q->end==p->end) free_map(remove_map(&mm->virtavail, p));
545        else p->base += size+1;
546        return (void *)q->base;
547}
548
549static
550void vflush(map *virtmap) {
551        struct _mm_private * mm = (struct _mm_private *) bd->mm_private;
552        u_long i, limit=(mm->hashmask>>3)+8;
553        hash_entry volatile *p=(hash_entry *) mm->sdr1;
554
555        /* PTE handling is simple since the processor never updates
556         * the entries. Writable pages always have the C bit set and
557         * all valid entries have the R bit set. From the processor
558         * point of view the hash table is read only.
559         */
560        for (i=0; i<limit; i++) {
561                if (p[i].key<0) {
562                        u_long va;
563                        va = ((i<<9)^((p[i].key)<<5)) &0x3ff000;
564                        if (p[i].key&0x40) va^=0x3ff000;
565                        va |= ((p[i].key<<21)&0xf0000000)
566                          | ((p[i].key<<22)&0x0fc00000);
567                        if (va>=virtmap->base && va<=virtmap->end) {
568                                p[i].key=0;
569                                asm volatile("sync; tlbie %0; sync" : :
570                                             "r" (va));
571                        }
572                }
573        }
574}
575
576void vfree(void *vaddr) {
577        map *physmap, *virtmap; /* Actual mappings pertaining to this vm */
578        struct _mm_private * mm = (struct _mm_private *) bd->mm_private;
579       
580        /* Flush memory queues */
581        asm volatile("sync": : : "memory");
582
583        virtmap = remove_map_at(&mm->virtused, vaddr);
584        if (!virtmap) return;
585
586        /* Remove mappings corresponding to virtmap */
587        for (physmap=mm->mappings; physmap; ) {
588                map *nextmap=physmap->next;
589                if (physmap->base>=virtmap->base
590                    && physmap->base<virtmap->end) {
591                        free_map(remove_map(&mm->mappings, physmap));
592                }
593                physmap=nextmap;
594        }
595
596        vflush(virtmap);
597
598        virtmap->firstpte= MAP_FREE_VIRT;
599        insert_map(&mm->virtavail, virtmap);
600        coalesce_maps(mm->virtavail);
601}
602
603void vunmap(void *vaddr) {
604        map *physmap, *virtmap; /* Actual mappings pertaining to this vm */
605        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
606       
607        /* Flush memory queues */
608        asm volatile("sync": : : "memory");
609
610        /* vaddr must be within one of the vm areas in use and
611         * then must correspond to one of the physical areas
612         */
613        for (virtmap=mm->virtused; virtmap; virtmap=virtmap->next) {
614                if (virtmap->base<=(u_long)vaddr &&
615                    virtmap->end>=(u_long)vaddr) break;
616        }
617        if (!virtmap) return;
618
619        physmap = remove_map_at(&mm->mappings, vaddr);
620        if(!physmap) return;
621        vflush(physmap);
622        free_map(physmap);
623}
624
625int vmap(void *vaddr, u_long p, u_long size) {
626        map *q;
627        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
628
629        size=PAGE_ALIGN(size);
630        if(!size) return 1;
631        /* Check that the requested area fits in one vm image */
632        for (q=mm->virtused; q; q=q->next) {
633                if ((q->base <= (u_long)vaddr) &&
634                    (q->end>=(u_long)vaddr+size -1)) break;
635        }
636        if (!q) return 1;
637        q= alloc_map();
638        if (!q) return 1;
639        q->base = (u_long)vaddr;
640        q->end = (u_long)vaddr+size-1;
641        q->firstpte = p;
642        return insert_map(&mm->mappings, q);
643}
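
/* Illustrative sketch (not from the original file): valloc() and vmap()
 * combined to give a device region a virtual mapping.  The physical
 * address and size are made up; the attribute comes from the PTE_IO
 * constant used by mm_init() below.
 */
#if 0
{
        void *v = valloc(2*PAGE_SIZE);          /* reserve virtual space */
        if (v && vmap(v, 0x80000000|PTE_IO, 2*PAGE_SIZE) == 0) {
                /* Accesses through v now fault into _handler(), which
                 * installs the hash table entries on demand. */
        }
}
#endif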
644
645static
646void create_identity_mappings(int type, int attr) {
647        u_long lowpage=ULONG_MAX, highpage;
648        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
649        RESIDUAL * res=bd->residual;
650
651        while((highpage = find_next_zone(res, lowpage, type))) {
652                map *p;
653                lowpage=find_zone_start(res, highpage, type);
654                p=alloc_map();
655                /* Do not map page 0 to catch null pointers */
656                lowpage = lowpage ? lowpage : 1;
657                p->base=lowpage<<PAGE_SHIFT;
658                p->end=(highpage<<PAGE_SHIFT)-1;
659                p->firstpte = (lowpage<<PAGE_SHIFT)|attr;
660                insert_map(&mm->mappings, p);
661        }
662}
663
664static inline
665void add_free_map(u_long base, u_long end) {
666        map *q=NULL;
667        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
668
669        if (base<end) q=alloc_map();
670        if (!q) return;
671        q->base=base;
672        q->end=end-1;
673        q->firstpte=MAP_FREE_VIRT;
674        insert_map(&mm->virtavail, q);
675}
676
677static inline
678void create_free_vm(void) {
679        map *p;
680        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
681
682        u_long vaddr=PAGE_SIZE; /* Never map vaddr 0 */
683        for(p=mm->mappings; p; p=p->next) {
684                add_free_map(vaddr, p->base);
685                vaddr=p->end+1;
686        }
687        /* Special end of memory case */
688        if (vaddr) add_free_map(vaddr,0);
689}
690
691/* Memory management initialization.
692 * Set up the mapping lists.
693 */
694
695static inline
696void add_perm_map(u_long start, u_long size) {
697        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
698        map *p=alloc_map();
699        p->base = start;
700        p->end = start + size - 1;
701        p->firstpte = MAP_PERM_PHYS;
702        insert_map(& mm->physperm , p);
703}
704
705void mm_init(u_long image_size)
706{
707        u_long lowpage=ULONG_MAX, highpage;
708        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
709        RESIDUAL * res=bd->residual;
710        extern void (tlb_handlers)(void);
711        extern void (_handler_glue)(void);
712        int i;
713        map *p;
714
715        /* The checks are simplified by the fact that the image
716         * and stack area are always allocated at the upper end
717         * of a free block.
718         */
719        while((highpage = find_next_zone(res, lowpage, BootImage|Free))) {
720                lowpage=find_zone_start(res, highpage, BootImage|Free);
721                if ( ( ((u_long)bd->image+PAGE_ALIGN(image_size))>>PAGE_SHIFT)
722                     == highpage) {
723                        highpage=(u_long)(bd->image)>>PAGE_SHIFT;
724                        add_perm_map((u_long)bd->image, image_size);
725                }
726                if ( (( u_long)bd->stack>>PAGE_SHIFT) == highpage) {
727                        highpage -= STACK_PAGES;
728                        add_perm_map(highpage<<PAGE_SHIFT,
729                                     STACK_PAGES*PAGE_SIZE);
730                }
731                /* Protect the interrupt handlers that we need ! */
732                if (lowpage<2) lowpage=2;
733                /* Check for the special case of full area! */
734                if (highpage>lowpage) {
735                        p = alloc_map();
736                        p->base = lowpage<<PAGE_SHIFT;
737                        p->end = (highpage<<PAGE_SHIFT)-1;
738                        p->firstpte=MAP_FREE_PHYS;
739                        insert_map(&mm->physavail, p);
740                }
741        }
742
743        /* Allocate the hash table */
744        mm->sdr1=__palloc(0x10000, PA_PERM|16);
745        _write_SDR1((u_long)mm->sdr1);
746        memset(mm->sdr1, 0, 0x10000);
747        mm->hashmask = 0xffc0;
748
749        /* Setup the segment registers as we want them */
750        for (i=0; i<16; i++) _write_SR(i, (void *)(i<<28));
751        /* Create the maps for the physical memory; FirmwareCode does not
752         * seem to be necessary. ROM is mapped read-only to reduce the risk
753         * of reprogramming it because it's often Flash and some are
754         * amazingly easy to overwrite.
755         */
756        create_identity_mappings(BootImage|Free|FirmwareCode|FirmwareHeap|
757                                 FirmwareStack, PTE_RAM);
758        create_identity_mappings(SystemROM, PTE_ROM);
759        create_identity_mappings(IOMemory|SystemIO|SystemRegs|
760                                 PCIAddr|PCIConfig|ISAAddr, PTE_IO);
761
762        create_free_vm();
763       
764        /* Install our own MMU and trap handlers. */
765        codemove((void *) 0x300, _handler_glue, 0x100, bd->cache_lsize);
766        codemove((void *) 0x400, _handler_glue, 0x100, bd->cache_lsize);
767        codemove((void *) 0x600, _handler_glue, 0x100, bd->cache_lsize);
768        codemove((void *) 0x700, _handler_glue, 0x100, bd->cache_lsize);
769}
770 
771void * salloc(u_long size) {
772        map *p, *q;
773        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
774
775        if (size==0) return NULL;
776
777        size = (size+7)&~7;
778
779        for (p=mm->sallocfree; p; p=p->next) {
780                if (p->base+size <= p->end) break;
781        }
782        if(!p) {
783                void *m;
784                m = __palloc(size, PA_SUBALLOC);
785                p = alloc_map();
786                if (!m || !p) return NULL; /* fail if either allocation failed */
787                p->base = (u_long) m;
788                p->firstpte = MAP_FREE_SUBS;
789                p->end = (u_long)m+PAGE_ALIGN(size)-1;
790                insert_map(&mm->sallocfree, p);
791                coalesce_maps(mm->sallocfree);
792                coalesce_maps(mm->sallocphys);
793        };
794        q=alloc_map();
795        q->base=p->base;
796        q->end=q->base+size-1;
797        q->firstpte=MAP_USED_SUBS;
798        insert_map(&mm->sallocused, q);
799        if (q->end==p->end) free_map(remove_map(&mm->sallocfree, p));
800        else p->base += size;
801        memset((void *)q->base, 0, size);
802        return (void *)q->base;
803}
804
805void sfree(void *p) {
806        map *q;
807        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
808
809        q=remove_map_at(&mm->sallocused, p);
810        if (!q) return;
811        q->firstpte=MAP_FREE_SUBS;
812        insert_map(&mm->sallocfree, q);
813        coalesce_maps(mm->sallocfree);
814}
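
/* Illustrative sketch (not from the original file): salloc() rounds the
 * request up to a multiple of 8 bytes and clears it; sfree() returns the
 * block to the sallocfree list and coalesces neighbours.
 */
#if 0
{
        char *buf = salloc(13);   /* actually reserves 16 zeroed bytes */
        if (buf) {
                /* ... use buf ... */
                sfree(buf);
        }
}
#endif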
815
816/* first/last area fit; the low bits of flags give the log2 of the required
817 * alignment. The algorithms are stupid because we expect very little
818 * fragmentation of the areas, if any. The unit of allocation is the page.
819 * The allocation is by default performed from higher addresses down,
820 * unless flags&PA_LOW is true.
821 */
822
823void * __palloc(u_long size, int flags)
824{
825        u_long mask = ((1<<(flags&PA_ALIGN_MASK))-1);
826        map *newmap, *frommap, *p, *splitmap=0;
827        map **queue;
828        u_long qflags;
829        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
830
831        /* Asking for a size which is not a multiple of the alignment
832           is likely to be an error. */
833
834        if (size & mask) return NULL;
835        size = PAGE_ALIGN(size);
836        if(!size) return NULL;
837
838        if (flags&PA_SUBALLOC) {
839                queue = &mm->sallocphys;
840                qflags = MAP_SUBS_PHYS;
841        } else if (flags&PA_PERM) {
842                queue = &mm->physperm;
843                qflags = MAP_PERM_PHYS;
844        } else {
845                queue = &mm->physused;
846                qflags = MAP_USED_PHYS;
847        }
848        /* We need to allocate that one now so no two allocations may attempt
849         * to take the same memory simultaneously. Alloc_map_page does
850         * not call back here to avoid infinite recursion in alloc_map.
851         */
852
853        if (mask&PAGE_MASK) {
854                splitmap=alloc_map();
855                if (!splitmap) return NULL;
856        }
857
858        for (p=mm->physavail, frommap=NULL; p; p=p->next) {
859                u_long high = p->end;
860                u_long limit  = ((p->base+mask)&~mask) + size-1;
861                if (high>=limit && ((p->base+mask)&~mask)+size>p->base) {
862                        frommap = p;
863                        if (flags&PA_LOW) break;
864                }
865        }
866
867        if (!frommap) {
868                if (splitmap) free_map(splitmap);
869                return NULL; 
870        }
871       
872        newmap=alloc_map();
873       
874        if (flags&PA_LOW) {
875                newmap->base = (frommap->base+mask)&~mask;
876        } else {
877                newmap->base = (frommap->end +1 - size) & ~mask;
878        }
879
880        newmap->end = newmap->base+size-1;
881        newmap->firstpte = qflags;
882
883        /* Add a fragment if we don't allocate until the end. */
884       
885        if (splitmap) {
886                splitmap->base=newmap->base+size;
887                splitmap->end=frommap->end;
888                splitmap->firstpte= MAP_FREE_PHYS;
889                frommap->end=newmap->base-1;
890        } else if (flags & PA_LOW) {
891                frommap->base=newmap->base+size;
892        } else {
893                frommap->end=newmap->base-1;
894        }
895
896        /* Remove a fragment if it becomes empty. */
897        if (frommap->base == frommap->end+1) {
898                free_map(remove_map(&mm->physavail, frommap));
899        }
900
901        if (splitmap) {
902                if (splitmap->base == splitmap->end+1) {
903                        free_map(remove_map(&mm->physavail, splitmap));
904                } else {
905                        insert_map(&mm->physavail, splitmap); 
906                }
907        }
908
909        insert_map(queue, newmap);
910        return (void *) newmap->base;
911               
912}
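
/* Illustrative sketch (not from the original file): the low bits of flags
 * give the log2 of the required alignment, so PA_PERM|16 is how mm_init()
 * above obtains the 64kB-aligned, 64kB hash table.  The PA_LOW request is
 * a made-up example.
 */
#if 0
{
        void *ht  = __palloc(0x10000, PA_PERM|16);  /* permanent, 64kB aligned */
        void *buf = __palloc(4*PAGE_SIZE, PA_LOW);  /* 4 pages, from low memory */
        pfree(buf);  /* only default (physused) allocations can be pfree'd */
}
#endif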
913
914void pfree(void * p) {
915        map *q;
916        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
917        q=remove_map_at(&mm->physused, p);
918        if (!q) return;
919        q->firstpte=MAP_FREE_PHYS;
920        insert_map(&mm->physavail, q);
921        coalesce_maps(mm->physavail);
922}
923
924#ifdef DEBUG
925/* Debugging functions */
926void print_maps(map *chain, const char *s) {
927        map *p;
928        printk("%s",s);
929        for(p=chain; p; p=p->next) {
930                printk("    %08lx-%08lx: %08lx\n",
931                       p->base, p->end, p->firstpte);
932        }
933}
934
935void print_all_maps(const char * s) {
936        u_long freemaps;
937        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
938        map *free;
939        printk("%s",s);
940        print_maps(mm->mappings, "  Currently defined mappings:\n");
941        print_maps(mm->physavail, "  Currently available physical areas:\n");
942        print_maps(mm->physused, "  Currently used physical areas:\n");
943        print_maps(mm->virtavail, "  Currently available virtual areas:\n");
944        print_maps(mm->virtused, "  Currently used virtual areas:\n");
945        print_maps(mm->physperm, "  Permanently used physical areas:\n");
946        print_maps(mm->sallocphys, "  Physical memory used for salloc:\n");
947        print_maps(mm->sallocfree, "  Memory available for salloc:\n");
948        print_maps(mm->sallocused, "  Memory allocated through salloc:\n");
949        for (freemaps=0, free=mm->freemaps; free; freemaps++, free=free->next);
950        printk("  %ld free maps.\n", freemaps);
951}
952
953void print_hash_table(void) {
954        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
955        hash_entry *p=(hash_entry *) mm->sdr1;
956        u_int i, valid=0;
957        for (i=0; i<((mm->hashmask)>>3)+8; i++) {
958                if (p[i].key<0) valid++;
959        }
960        printk("%u valid hash entries on pass 1.\n", valid);
961        valid = 0;
962        for (i=0; i<((mm->hashmask)>>3)+8; i++) {
963                if (p[i].key<0) valid++;
964        }
965        printk("%u valid hash entries on pass 2.\n"
966               "     vpn:rpn_attr, p/s, pteg.i\n", valid);
967        for (i=0; i<((mm->hashmask)>>3)+8; i++) {
968                if (p[i].key<0) {
969                        u_int pteg=(i>>3);
970                        u_long vpn;
971                        vpn = (pteg^((p[i].key)>>7)) &0x3ff;
972                        if (p[i].key&0x40) vpn^=0x3ff;
973                        vpn |= ((p[i].key<<9)&0xffff0000)
974                          | ((p[i].key<<10)&0xfc00);
975                        printk("%08lx:%08lx, %s, %5d.%d\n",
976                               vpn,  p[i].rpn, p[i].key&0x40 ? "sec" : "pri",
977                               pteg, i%8);
978                }
979        }
980}
981
982#endif