source: rtems/c/src/lib/libbsp/powerpc/shared/bootloader/mm.c @ c499856

/*
 *  mm.c -- Crude memory management for early boot.
 *
 *  Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
 *
 *  Modified to compile in RTEMS development environment
 *  by Eric Valette
 *
 *  Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

/* This code is a crude memory manager for early boot for LinuxPPC.
 * As such, it does not try to perform many optimizations depending
 * on the processor, it only uses features which are common to
 * all processors (no BATs...).
 *
 * On PReP platforms (the only ones on which it works for now),
 * it maps 1:1 all RAM/ROM and I/O space as claimed by the
 * residual data. The holes between these areas can be virtually
 * remapped to any of these, since for some functions it is very handy
 * to have virtually contiguous but physically discontiguous memory.
 *
 * Physical memory allocation is also very crude, since it's only
 * designed to manage a small number of large chunks. For valloc/vfree
 * and palloc/pfree, the unit of allocation is the 4kB page.
 *
 * The salloc/sfree has been added after tracing gunzip and seeing
 * how it performed a very large number of small allocations.
 * For these the unit of allocation is 8 bytes (the s stands for
 * small or subpage). This memory is cleared when allocated.
 *
 */
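
/* Illustrative sketch, not part of the original bootloader: how the three
 * allocator families described above are typically combined.  The calls and
 * flag values are taken from this file; the sizes and the function name are
 * hypothetical.
 */
#if 0
static void allocator_usage_sketch(void)
{
        void *pages, *tiny, *hash;

        pages = valloc(3*PAGE_SIZE);        /* virtual space only, 4kB units;
                                             * back it with vmap() before use */
        tiny  = salloc(24);                 /* small allocation, 8-byte units,
                                             * returned zero-filled */
        hash  = __palloc(0x10000, PA_PERM|16); /* 64kB physical, 64kB-aligned,
                                                * permanent (never freed) */

        sfree(tiny);
        vfree(pages);
        (void)hash;
}
#endif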

#include <rtems/bspIo.h>

#include <sys/types.h>
#include <libcpu/spr.h>
#include "bootldr.h"
#include <libcpu/mmu.h>
#include <libcpu/page.h>
#include <limits.h>

extern void (tlb_handlers)(void);
extern void (_handler_glue)(void);

/* We use our own kind of simple memory areas for the loader, but
 * we want to avoid potential clashes with kernel includes.
 * Here a map maps contiguous areas from base to end,
 * the firstpte entry corresponds to the physical address and has the low
 * order bits set for caching and permission.
 */

typedef struct _map {
        struct _map *next;
        u_long base;
        u_long end;
        u_long firstpte;
} map;

/* The LSB of the firstpte entries on map lists other than mappings
 * are constants which can be checked for debugging. All these constants
 * have the bit of weight 4 set; this bit is zero in the mappings list entries.
 * Actually firstpte&7 value is:
 * - 0 or 1 should not happen
 * - 2 for RW actual virtual->physical mappings
 * - 3 for RO actual virtual->physical mappings
 * - 6 for free areas to be suballocated by salloc
 * - 7 for salloc'ated areas
 * - 4 or 5 for all others, in this case firstpte & 63 is
 *   - 4 for unused maps (on the free list)
 *   - 12 for free physical memory
 *   - 13 for physical memory in use
 *   - 20 for free virtual address space
 *   - 21 for allocated virtual address space
 *   - 28 for physical memory space suballocated by salloc
 *   - 29 for physical memory that can't be freed
 */

#define MAP_FREE_SUBS 6
#define MAP_USED_SUBS 7

#define MAP_FREE 4
#define MAP_FREE_PHYS 12
#define MAP_USED_PHYS 13
#define MAP_FREE_VIRT 20
#define MAP_USED_VIRT 21
#define MAP_SUBS_PHYS 28
#define MAP_PERM_PHYS 29

SPR_RW(SDR1);
SPR_RO(DSISR);
SPR_RO(PPC_DAR);

/* We need a few statically allocated free maps to bootstrap the
 * memory management */
static map free_maps[4] = {{free_maps+1, 0, 0, MAP_FREE},
                           {free_maps+2, 0, 0, MAP_FREE},
                           {free_maps+3, 0, 0, MAP_FREE},
                           {NULL, 0, 0, MAP_FREE}};
struct _mm_private {
        void *sdr1;
        u_long hashmask;
        map *freemaps;     /* Pool of unused map structs */
        map *mappings;     /* Sorted list of virtual->physical mappings */
        map *physavail;    /* Unallocated physical address space */
        map *physused;     /* Allocated physical address space */
        map *physperm;     /* Permanently allocated physical space */
        map *virtavail;    /* Unallocated virtual address space */
        map *virtused;     /* Allocated virtual address space */
        map *sallocfree;   /* Free maps for salloc */
        map *sallocused;   /* Used maps for salloc */
        map *sallocphys;   /* Physical areas used by salloc */
        u_int hashcnt;     /* Used to cycle in PTEG when they overflow */
} mm_private = {hashmask: 0xffc0,
                freemaps: free_maps+0};

/* A simplified hash table entry declaration */
typedef struct _hash_entry {
        int key;
        u_long rpn;
} hash_entry;
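
/* For reference (not in the original source): "key" mirrors the first word
 * of a PowerPC PTE as built in _handler() below: bit 0x80000000 is the valid
 * bit (which is why "key < 0" tests validity on this 32-bit int), the VSID
 * sits at (vsid<<7), 0x40 is the secondary-hash bit and the low 6 bits hold
 * the abbreviated page index (vaddr>>22).  "rpn" carries the second word,
 * i.e. the physical page address plus the cache/permission bits taken from
 * firstpte.
 */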

void print_maps(map *, const char *);

/* The handler used for all exceptions, although for now it is only
 * designed to properly handle MMU interrupts to fill the hash table.
 */

void _handler(int vec, ctxt *p) {
        map *area;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        u_long vaddr, cause;
        if (vec==4 || vec==7) { /* ISI exceptions are different */
                vaddr = p->nip;
                cause = p->msr;
        } else { /* Valid for DSI and alignment exceptions */
                vaddr = _read_PPC_DAR();
                cause = _read_DSISR();
        }

        if (vec==3 || vec==4) {
                /* Panic if the fault is not PTE not found. */
                if (!(cause & 0x40000000)) {
                        MMUon();
                        printk("\nPanic: vector=%x, cause=%lx\n", vec, cause);
                        hang("Memory protection violation at ", vaddr, p);
                }

                for(area=mm->mappings; area; area=area->next) {
                        if(area->base<=vaddr && vaddr<=area->end) break;
                }

                if (area) {
                        u_long hash, vsid, rpn;
                        hash_entry volatile *hte, *_hte1;
                        u_int i, alt=0, flushva;

                        vsid = _read_SR((void *)vaddr);
                        rpn = (vaddr&PAGE_MASK)-area->base+area->firstpte;
                        hash = vsid<<6;
                        hash ^= (vaddr>>(PAGE_SHIFT-6))&0x3fffc0;
                        hash &= mm->hashmask;
                        /* Find an empty entry in the PTEG, else
                         * replace a random one.
                         */
                        hte = (hash_entry *) ((u_long)(mm->sdr1)+hash);
                        for (i=0; i<8; i++) {
                                if (hte[i].key>=0) goto found;
                        }
                        hash ^= mm->hashmask;
                        alt = 0x40; _hte1 = hte;
                        hte = (hash_entry *) ((u_long)(mm->sdr1)+hash);

                        for (i=0; i<8; i++) {
                                if (hte[i].key>=0) goto found;
                        }
                        alt = 0;
                        hte = _hte1;
                        /* Choose a victim entry and replace it. There might be
                         * better policies to choose the victim, but in a boot
                         * loader we want simplicity as long as it works.
                         *
                         * We would not need to invalidate the TLB entry since
                         * the mapping is still valid. But this would be a mess
                         * when unmapping so we make sure that the TLB is a
                         * subset of the hash table under all circumstances.
                         */
                        i = mm->hashcnt;
                        mm->hashcnt = (mm->hashcnt+1)%8;
                        /* Note that the hash is already complemented here ! */
                        flushva = (~(hash<<9)^((hte[i].key)<<5)) &0x3ff000;
                        if (hte[i].key&0x40) flushva^=0x3ff000;
                        flushva |= ((hte[i].key<<21)&0xf0000000)
                          | ((hte[i].key<<22)&0x0fc00000);
                        hte[i].key=0;
                        asm volatile("sync; tlbie %0; sync" : : "r" (flushva));
                found:
                        hte[i].rpn = rpn;
                        asm volatile("eieio": : );
                        hte[i].key = 0x80000000|(vsid<<7)|alt|
                          ((vaddr>>22)&0x3f);
                        return;
                } else {
                        MMUon();
                        printk("\nPanic: vector=%x, cause=%lx\n", vec, cause);
                        hang("\nInvalid memory access attempt at ", vaddr, p);
                }
        } else {
          MMUon();
          printk("\nPanic: vector=%x, dsisr=%lx, faultaddr =%lx, msr=%lx opcode=%lx\n", vec,
                 cause, p->nip, p->msr, * ((unsigned int*) p->nip) );
          if (vec == 7) {
            unsigned int* ptr = ((unsigned int*) p->nip) - 4 * 10;
            for (; ptr <= (((unsigned int*) p->nip) + 4 * 10); ptr ++)
              printk("Hexadecimal code at address %x = %x\n", ptr, *ptr);
          }
          hang("Program or alignment exception at ", vaddr, p);
        }
}
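
/* Illustrative helper, not in the original file: the primary and secondary
 * PTEG offsets computed inline in _handler() above, written out as a
 * standalone (hypothetical) function for readability.
 */
#if 0
static u_long pteg_offset(u_long vsid, u_long vaddr, u_long hashmask,
                          int secondary)
{
        /* Each PTEG is 64 bytes, so the hash is kept pre-shifted by 6 and
         * used directly as a byte offset from SDR1.
         */
        u_long hash = (vsid<<6) ^ ((vaddr>>(PAGE_SHIFT-6))&0x3fffc0);
        hash &= hashmask;
        /* The secondary hash is the complement within the mask bits. */
        return secondary ? hash ^ hashmask : hash;
}
#endif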

/* Generic routines for map handling.
 */

static inline
void free_map(map *p) {
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        if (!p) return;
        p->next=mm->freemaps;
        mm->freemaps=p;
        p->firstpte=MAP_FREE;
}

/* Sorted insertion in linked list */
static
int insert_map(map **head, map *p) {
        map *q = *head;
        if (!p) return 0;
        if (q && (q->base < p->base)) {
                for(;q->next && q->next->base<p->base; q = q->next);
                if ((q->end >= p->base) ||
                    (q->next && p->end>=q->next->base)) {
                        free_map(p);
                        printk("Overlapping areas!\n");
                        return 1;
                }
                p->next = q->next;
                q->next = p;
        } else { /* Insert at head */
                if (q && (p->end >= q->base)) {
                        free_map(p);
                        printk("Overlapping areas!\n");
                        return 1;
                }
                p->next = q;
                *head = p;
        }
        return 0;
}

/* Removal from linked list */

static
map *remove_map(map **head, map *p) {
        map *q = *head;

        if (!p || !q) return NULL;
        if (q==p) {
                *head = q->next;
                return p;
        }
        for(;q && q->next!=p; q=q->next);
        if (q) {
                q->next=p->next;
                return p;
        } else {
                return NULL;
        }
}

static
map *remove_map_at(map **head, void * vaddr) {
        map *p, *q = *head;

        if (!vaddr || !q) return NULL;
        if (q->base==(u_long)vaddr) {
                *head = q->next;
                return q;
        }
        while (q->next && q->next->base != (u_long)vaddr) q=q->next;
        p=q->next;
        if (p) q->next=p->next;
        return p;
}

static inline
map * alloc_map_page(void) {
        map *from, *p;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        /* printk("Allocating new map page !"); */
        /* Get the highest page */
        for (from=mm->physavail; from && from->next; from=from->next);
        if (!from) return NULL;

        from->end -= PAGE_SIZE;

        mm->freemaps = (map *) (from->end+1);

        for(p=mm->freemaps; p<mm->freemaps+PAGE_SIZE/sizeof(map)-1; p++) {
                p->next = p+1;
                p->firstpte = MAP_FREE;
        }
        (p-1)->next=0;

        /* Take the last one as pointer to self and insert
         * the map into the permanent map list.
         */

        p->firstpte = MAP_PERM_PHYS;
        p->base=(u_long) mm->freemaps;
        p->end = p->base+PAGE_SIZE-1;

        insert_map(&mm->physperm, p);

        if (from->end+1 == from->base)
                free_map(remove_map(&mm->physavail, from));

        return mm->freemaps;
}

static
map * alloc_map(void) {
        map *p;
        struct _mm_private * mm = (struct _mm_private *) bd->mm_private;

        p = mm->freemaps;
        if (!p) {
                p=alloc_map_page();
        }

        if(p) mm->freemaps=p->next;

        return p;
}

static
void coalesce_maps(map *p) {
        while(p) {
                if (p->next && (p->end+1 == p->next->base)) {
                        map *q=p->next;
                        p->end=q->end;
                        p->next=q->next;
                        free_map(q);
                } else {
                        p = p->next;
                }
        }
}

/* These routines are used to find the free memory zones to avoid
 * overlapping destructive copies when initializing.
 * They work from the top because of the way we want to boot.
 * In the following the term zone refers to the memory described
 * by one or several contiguous so called segments in the
 * residual data.
 */
#define STACK_PAGES 2
static inline u_long
find_next_zone(RESIDUAL *res, u_long lowpage, u_long flags) {
        u_long i, newmin=0, size=0;
        for(i=0; i<res->ActualNumMemSegs; i++) {
                if (res->Segs[i].Usage & flags
                    && res->Segs[i].BasePage<lowpage
                    && res->Segs[i].BasePage>newmin) {
                        newmin=res->Segs[i].BasePage;
                        size=res->Segs[i].PageCount;
                }
        }
        return newmin+size;
}

static inline u_long
find_zone_start(RESIDUAL *res, u_long highpage, u_long flags) {
        u_long i;
        int progress;
        do {
                progress=0;
                for (i=0; i<res->ActualNumMemSegs; i++) {
                        if ( (res->Segs[i].BasePage+res->Segs[i].PageCount
                              == highpage)
                             && res->Segs[i].Usage & flags) {
                                highpage=res->Segs[i].BasePage;
                                progress=1;
                        }
                }
        } while(progress);
        return highpage;
}
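
/* Illustrative sketch, not in the original file: the top-down iteration
 * pattern built on the two helpers above, as used later in early_setup()
 * and mm_init().  The loop body is hypothetical.
 */
#if 0
static void walk_zones_top_down(RESIDUAL *res)
{
        u_long lowpage = ULONG_MAX, highpage;

        /* find_next_zone() returns the page just past the highest matching
         * segment that starts below lowpage (0 when none is left);
         * find_zone_start() then extends that zone downwards across
         * contiguous segments with matching usage flags and returns its
         * first page.
         */
        while ((highpage = find_next_zone(res, lowpage, BootImage|Free))) {
                lowpage = find_zone_start(res, highpage, BootImage|Free);
                /* ... use the zone [lowpage, highpage) ... */
        }
}
#endif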

/* The Motorola NT firmware does not provide any setting in the residual
 * data about memory segment usage. The following table provides enough
 * info so that this bootloader can work.
 */
MEM_MAP seg_fix[] = {
    { 0x2000, 0xFFF00, 0x00100 },
    { 0x0020, 0x02000, 0x7E000 },
    { 0x0008, 0x00800, 0x00168 },
    { 0x0004, 0x00000, 0x00005 },
    { 0x0001, 0x006F1, 0x0010F },
    { 0x0002, 0x006AD, 0x00044 },
    { 0x0010, 0x00005, 0x006A8 },
    { 0x0010, 0x00968, 0x00698 },
    { 0x0800, 0xC0000, 0x3F000 },
    { 0x0600, 0xBF800, 0x00800 },
    { 0x0500, 0x81000, 0x3E800 },
    { 0x0480, 0x80800, 0x00800 },
    { 0x0440, 0x80000, 0x00800 } };

/* The Motorola NT firmware does not set up all required info in the residual
 * data. This routine changes some things in a way that makes the bootloader
 * and Linux happy.
 */
void
fix_residual( RESIDUAL *res )
{
#if 0
    PPC_DEVICE *hostbridge;
#endif
    int i;

    /* Missing memory segment information */
    res->ActualNumMemSegs = sizeof(seg_fix)/sizeof(MEM_MAP);
    for (i=0; i<res->ActualNumMemSegs; i++) {
        res->Segs[i].Usage = seg_fix[i].Usage;
        res->Segs[i].BasePage = seg_fix[i].BasePage;
        res->Segs[i].PageCount = seg_fix[i].PageCount;
    }
    /* The following should be fixed in the current version of the
     * kernel and of the bootloader.
     */
#if 0
    /* PPCBug has this zero */
    res->VitalProductData.CacheLineSize = 0;
    /* Motorola NT firmware sets TimeBaseDivisor to 0 */
    if ( res->VitalProductData.TimeBaseDivisor == 0 ) {
        res->VitalProductData.TimeBaseDivisor = 4000;
    }

    /* Motorola NT firmware records the PCIBridge as a "PCIDEVICE" and
     * sets "PCIBridgeDirect". This bootloader and Linux work better if
     * BusId = "PROCESSORDEVICE" and Interface = "PCIBridgeIndirect".
     */
    hostbridge=residual_find_device(PCIDEVICE, NULL,
                                        BridgeController,
                                        PCIBridge, -1, 0);
    if (hostbridge) {
        hostbridge->DeviceId.BusId = PROCESSORDEVICE;
        hostbridge->DeviceId.Interface = PCIBridgeIndirect;
    }
#endif
}

/* This routine is the first C code called with very little stack space!
 * Its goal is to find where the boot image can be moved. This will
 * be the highest address with enough room.
 */
int early_setup(u_long image_size) {
        register RESIDUAL *res = bd->residual;
        u_long minpages = PAGE_ALIGN(image_size)>>PAGE_SHIFT;

        if ( residual_fw_is_qemu( res ) ) {
                /* save command-line - QEMU firmware sets R6/R7 to
                 * commandline start/end (NON-PReP STD)
                 */
                int len = bd->r7 - bd->r6;
                if ( len > 0 ) {
                        if ( len > sizeof(bd->cmd_line) - 1 )
                                len = sizeof(bd->cmd_line) - 1;
                        codemove(bd->cmd_line, bd->r6, len, bd->cache_lsize);
                        bd->cmd_line[len] = 0;
                }
        }

        /* Fix residual if we are loaded by Motorola NT firmware */
        if ( res && res->VitalProductData.FirmwareSupplier == 0x10000 )
            fix_residual( res );

        /* FIXME: if OF we should do something different */
        if( !bd->of_entry && res &&
           res->ResidualLength <= sizeof(RESIDUAL) && res->Version == 0 ) {
                u_long lowpage=ULONG_MAX, highpage;
                u_long imghigh=0, stkhigh=0;
                /* Find the highest and large enough contiguous zone
                   consisting of free and BootImage sections. */
                /* Find 3 free areas of memory, one for the main image, one
                 * for the stack (STACK_PAGES), and one page to put the map
                 * structures. They are allocated from the top of memory.
                 * In most cases the stack will be put just below the image.
                 */
                while((highpage =
                       find_next_zone(res, lowpage, BootImage|Free))) {
                        lowpage=find_zone_start(res, highpage, BootImage|Free);
                        if ((highpage-lowpage)>minpages &&
                            highpage>imghigh) {
                                imghigh=highpage;
                                highpage -=minpages;
                        }
                        if ((highpage-lowpage)>STACK_PAGES &&
                            highpage>stkhigh) {
                                stkhigh=highpage;
                                highpage-=STACK_PAGES;
                        }
                }

                bd->image = (void *)((imghigh-minpages)<<PAGE_SHIFT);
                bd->stack=(void *) (stkhigh<<PAGE_SHIFT);

                /* The code mover is put at the lowest possible place
                 * of free memory. If this corresponds to the loaded boot
                 * partition image it does not matter because it overwrites
                 * the unused part of it (x86 code).
                 */
                bd->mover=(void *) (lowpage<<PAGE_SHIFT);

                /* Let us flush the caches in all cases. After all it should
                 * not harm even on 601 and we don't care about performance.
                 * Right now it's easy since all processors have a line size
                 * of 32 bytes. Once again residual data has proved unreliable.
                 */
                bd->cache_lsize = 32;
        }
        /* For now we always assume that it's successful; we should
         * handle better the case of insufficient memory.
         */
        return 0;
}

void * valloc(u_long size) {
        map *p, *q;
        struct _mm_private * mm = (struct _mm_private *) bd->mm_private;

        if (size==0) return NULL;
        size=PAGE_ALIGN(size)-1;
        for (p=mm->virtavail; p; p=p->next) {
                if (p->base+size <= p->end) break;
        }
        if(!p) return NULL;
        q=alloc_map();
        q->base=p->base;
        q->end=q->base+size;
        q->firstpte=MAP_USED_VIRT;
        insert_map(&mm->virtused, q);
        if (q->end==p->end) free_map(remove_map(&mm->virtavail, p));
        else p->base += size+1;
        return (void *)q->base;
}

static
void vflush(map *virtmap) {
        struct _mm_private * mm = (struct _mm_private *) bd->mm_private;
        u_long i, limit=(mm->hashmask>>3)+8;
        hash_entry volatile *p=(hash_entry *) mm->sdr1;

        /* PTE handling is simple since the processor never updates
         * the entries. Writable pages always have the C bit set and
         * all valid entries have the R bit set. From the processor
         * point of view the hash table is read only.
         */
        for (i=0; i<limit; i++) {
                if (p[i].key<0) {
                        u_long va;
                        va = ((i<<9)^((p[i].key)<<5)) &0x3ff000;
                        if (p[i].key&0x40) va^=0x3ff000;
                        va |= ((p[i].key<<21)&0xf0000000)
                          | ((p[i].key<<22)&0x0fc00000);
                        if (va>=virtmap->base && va<=virtmap->end) {
                                p[i].key=0;
                                asm volatile("sync; tlbie %0; sync" : :
                                             "r" (va));
                        }
                }
        }
}

void vfree(void *vaddr) {
        map *physmap, *virtmap; /* Actual mappings pertaining to this vm */
        struct _mm_private * mm = (struct _mm_private *) bd->mm_private;

        /* Flush memory queues */
        asm volatile("sync": : : "memory");

        virtmap = remove_map_at(&mm->virtused, vaddr);
        if (!virtmap) return;

        /* Remove mappings corresponding to virtmap */
        for (physmap=mm->mappings; physmap; ) {
                map *nextmap=physmap->next;
                if (physmap->base>=virtmap->base
                    && physmap->base<virtmap->end) {
                        free_map(remove_map(&mm->mappings, physmap));
                }
                physmap=nextmap;
        }

        vflush(virtmap);

        virtmap->firstpte= MAP_FREE_VIRT;
        insert_map(&mm->virtavail, virtmap);
        coalesce_maps(mm->virtavail);
}

void vunmap(void *vaddr) {
        map *physmap, *virtmap; /* Actual mappings pertaining to this vm */
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        /* Flush memory queues */
        asm volatile("sync": : : "memory");

        /* vaddr must be within one of the vm areas in use and
         * then must correspond to one of the physical areas
         */
        for (virtmap=mm->virtused; virtmap; virtmap=virtmap->next) {
                if (virtmap->base<=(u_long)vaddr &&
                    virtmap->end>=(u_long)vaddr) break;
        }
        if (!virtmap) return;

        physmap = remove_map_at(&mm->mappings, vaddr);
        if(!physmap) return;
        vflush(physmap);
        free_map(physmap);
}

int vmap(void *vaddr, u_long p, u_long size) {
        map *q;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        size=PAGE_ALIGN(size);
        if(!size) return 1;
        /* Check that the requested area fits in one vm image */
        for (q=mm->virtused; q; q=q->next) {
                if ((q->base <= (u_long)vaddr) &&
                    (q->end>=(u_long)vaddr+size -1)) break;
        }
        if (!q) return 1;
        q= alloc_map();
        if (!q) return 1;
        q->base = (u_long)vaddr;
        q->end = (u_long)vaddr+size-1;
        q->firstpte = p;
        return insert_map(&mm->mappings, q);
}
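
/* Illustrative sketch, not in the original file: building a virtually
 * contiguous view of two physically discontiguous regions, the use case
 * mentioned in the header comment.  The physical addresses and the function
 * name are hypothetical; PTE_RAM supplies the cache/permission bits.
 */
#if 0
static void *map_two_chunks_sketch(void)
{
        char *va = valloc(2*PAGE_SIZE);      /* reserve virtual space only */
        if (!va) return NULL;
        /* Back each page with its own physical range; faults are then
         * resolved lazily by _handler() from mm->mappings.
         */
        if (vmap(va,             0x00800000|PTE_RAM, PAGE_SIZE) ||
            vmap(va + PAGE_SIZE, 0x00A00000|PTE_RAM, PAGE_SIZE)) {
                vfree(va);
                return NULL;
        }
        return va;
}
#endif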

static
void create_identity_mappings(int type, int attr) {
        u_long lowpage=ULONG_MAX, highpage;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        RESIDUAL * res=bd->residual;

        while((highpage = find_next_zone(res, lowpage, type))) {
                map *p;
                lowpage=find_zone_start(res, highpage, type);
                p=alloc_map();
                /* Do not map page 0 to catch null pointers */
                lowpage = lowpage ? lowpage : 1;
                p->base=lowpage<<PAGE_SHIFT;
                p->end=(highpage<<PAGE_SHIFT)-1;
                p->firstpte = (lowpage<<PAGE_SHIFT)|attr;
                insert_map(&mm->mappings, p);
        }
}

static inline
void add_free_map(u_long base, u_long end) {
        map *q=NULL;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        if (base<end) q=alloc_map();
        if (!q) return;
        q->base=base;
        q->end=end-1;
        q->firstpte=MAP_FREE_VIRT;
        insert_map(&mm->virtavail, q);
}

static inline
void create_free_vm(void) {
        map *p;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        u_long vaddr=PAGE_SIZE; /* Never map vaddr 0 */
        for(p=mm->mappings; p; p=p->next) {
                add_free_map(vaddr, p->base);
                vaddr=p->end+1;
        }
        /* Special end of memory case */
        if (vaddr) add_free_map(vaddr,0);
}

/* Memory management initialization.
 * Set up the mapping lists.
 */

static inline
void add_perm_map(u_long start, u_long size) {
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        map *p=alloc_map();
        p->base = start;
        p->end = start + size - 1;
        p->firstpte = MAP_PERM_PHYS;
        insert_map(& mm->physperm , p);
}

void mm_init(u_long image_size)
{
        u_long lowpage=ULONG_MAX, highpage;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        RESIDUAL * res=bd->residual;
        int i;
        map *p;

        /* The checks are simplified by the fact that the image
         * and stack area are always allocated at the upper end
         * of a free block.
         */
        while((highpage = find_next_zone(res, lowpage, BootImage|Free))) {
                lowpage=find_zone_start(res, highpage, BootImage|Free);
                if ( ( ((u_long)bd->image+PAGE_ALIGN(image_size))>>PAGE_SHIFT)
                     == highpage) {
                        highpage=(u_long)(bd->image)>>PAGE_SHIFT;
                        add_perm_map((u_long)bd->image, image_size);
                }
                if ( (( u_long)bd->stack>>PAGE_SHIFT) == highpage) {
                        highpage -= STACK_PAGES;
                        add_perm_map(highpage<<PAGE_SHIFT,
                                     STACK_PAGES*PAGE_SIZE);
                }
                /* Protect the interrupt handlers that we need ! */
                if (lowpage<2) lowpage=2;
                /* Check for the special case of full area! */
                if (highpage>lowpage) {
                        p = alloc_map();
                        p->base = lowpage<<PAGE_SHIFT;
                        p->end = (highpage<<PAGE_SHIFT)-1;
                        p->firstpte=MAP_FREE_PHYS;
                        insert_map(&mm->physavail, p);
                }
        }

        /* Allocate the hash table */
        mm->sdr1=__palloc(0x10000, PA_PERM|16);
        _write_SDR1((u_long)mm->sdr1);
        memset(mm->sdr1, 0, 0x10000);
        mm->hashmask = 0xffc0;

        /* Setup the segment registers as we want them */
        for (i=0; i<16; i++) _write_SR(i, (void *)(i<<28));
        /* Create the maps for the physical memory; firmware code does not
         * seem to be necessary. ROM is mapped read-only to reduce the risk
         * of reprogramming it because it's often Flash and some are
         * amazingly easy to overwrite.
         */
        create_identity_mappings(BootImage|Free|FirmwareCode|FirmwareHeap|
                                 FirmwareStack, PTE_RAM);
        create_identity_mappings(SystemROM, PTE_ROM);
        create_identity_mappings(IOMemory|SystemIO|SystemRegs|
                                 PCIAddr|PCIConfig|ISAAddr, PTE_IO);

        create_free_vm();

        /* Install our own MMU and trap handlers. */
        codemove((void *) 0x300, _handler_glue, 0x100, bd->cache_lsize);
        codemove((void *) 0x400, _handler_glue, 0x100, bd->cache_lsize);
        codemove((void *) 0x600, _handler_glue, 0x100, bd->cache_lsize);
        codemove((void *) 0x700, _handler_glue, 0x100, bd->cache_lsize);
}

void * salloc(u_long size) {
        map *p, *q;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        if (size==0) return NULL;

        size = (size+7)&~7;

        for (p=mm->sallocfree; p; p=p->next) {
                if (p->base+size <= p->end) break;
        }
        if(!p) {
                void *m;
                m = __palloc(size, PA_SUBALLOC);
                p = alloc_map();
                if (!m || !p) return NULL;
                p->base = (u_long) m;
                p->firstpte = MAP_FREE_SUBS;
                p->end = (u_long)m+PAGE_ALIGN(size)-1;
                insert_map(&mm->sallocfree, p);
                coalesce_maps(mm->sallocfree);
                coalesce_maps(mm->sallocphys);
        }
        q=alloc_map();
        q->base=p->base;
        q->end=q->base+size-1;
        q->firstpte=MAP_USED_SUBS;
        insert_map(&mm->sallocused, q);
        if (q->end==p->end) free_map(remove_map(&mm->sallocfree, p));
        else p->base += size;
        memset((void *)q->base, 0, size);
        return (void *)q->base;
}

void sfree(void *p) {
        map *q;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        q=remove_map_at(&mm->sallocused, p);
        if (!q) return;
        q->firstpte=MAP_FREE_SUBS;
        insert_map(&mm->sallocfree, q);
        coalesce_maps(mm->sallocfree);
}
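
/* Illustrative sketch, not in the original file: typical salloc()/sfree()
 * usage.  The sizes and the function name are hypothetical; requests are
 * rounded up to 8 bytes and the returned memory is already zeroed, as
 * implemented above.
 */
#if 0
static void salloc_usage_sketch(void)
{
        char *a = salloc(5);    /* occupies 8 bytes, returned zero-filled */
        char *b = salloc(100);  /* rounded up to 104 bytes */

        if (a && b) {
                /* ... use the buffers ... */
        }
        sfree(b);
        sfree(a);               /* adjacent free areas get coalesced */
}
#endif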

/* First/last area fit; the low bits of flags (flags&PA_ALIGN_MASK) give the
 * base-2 logarithm of the required alignment. The algorithms are stupid
 * because we expect very little fragmentation of the areas, if any. The unit
 * of allocation is the page. The allocation is by default performed from
 * higher addresses down, unless flags&PA_LOW is true.
 */

void * __palloc(u_long size, int flags)
{
        u_long mask = ((1<<(flags&PA_ALIGN_MASK))-1);
        map *newmap, *frommap, *p, *splitmap=0;
        map **queue;
        u_long qflags;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        /* Asking for a size which is not a multiple of the alignment
           is likely to be an error. */

        if (size & mask) return NULL;
        size = PAGE_ALIGN(size);
        if(!size) return NULL;

        if (flags&PA_SUBALLOC) {
                queue = &mm->sallocphys;
                qflags = MAP_SUBS_PHYS;
        } else if (flags&PA_PERM) {
                queue = &mm->physperm;
                qflags = MAP_PERM_PHYS;
        } else {
                queue = &mm->physused;
                qflags = MAP_USED_PHYS;
        }
        /* We need to allocate that one now so no two allocations may attempt
         * to take the same memory simultaneously. Alloc_map_page does
         * not call back here to avoid infinite recursion in alloc_map.
         */

        if (mask&PAGE_MASK) {
                splitmap=alloc_map();
                if (!splitmap) return NULL;
        }

        for (p=mm->physavail, frommap=NULL; p; p=p->next) {
                u_long high = p->end;
                u_long limit  = ((p->base+mask)&~mask) + size-1;
                if (high>=limit && ((p->base+mask)&~mask)+size>p->base) {
                        frommap = p;
                        if (flags&PA_LOW) break;
                }
        }

        if (!frommap) {
                if (splitmap) free_map(splitmap);
                return NULL;
        }

        newmap=alloc_map();

        if (flags&PA_LOW) {
                newmap->base = (frommap->base+mask)&~mask;
        } else {
                newmap->base = (frommap->end +1 - size) & ~mask;
        }

        newmap->end = newmap->base+size-1;
        newmap->firstpte = qflags;

        /* Add a fragment if we don't allocate until the end. */

        if (splitmap) {
                splitmap->base=newmap->base+size;
                splitmap->end=frommap->end;
                splitmap->firstpte= MAP_FREE_PHYS;
                frommap->end=newmap->base-1;
        } else if (flags & PA_LOW) {
                frommap->base=newmap->base+size;
        } else {
                frommap->end=newmap->base-1;
        }

        /* Remove a fragment if it becomes empty. */
        if (frommap->base == frommap->end+1) {
                free_map(remove_map(&mm->physavail, frommap));
        }

        if (splitmap) {
                if (splitmap->base == splitmap->end+1) {
                        free_map(remove_map(&mm->physavail, splitmap));
                } else {
                        insert_map(&mm->physavail, splitmap);
                }
        }

        insert_map(queue, newmap);
        return (void *) newmap->base;

}

void pfree(void * p) {
        map *q;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        q=remove_map_at(&mm->physused, p);
        if (!q) return;
        q->firstpte=MAP_FREE_PHYS;
        insert_map(&mm->physavail, q);
        coalesce_maps(mm->physavail);
}
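
/* Illustrative sketch, not in the original file: how the alignment is
 * encoded in the flags of __palloc().  The first call mirrors the hash
 * table allocation in mm_init(); the second call, its size and the function
 * name are hypothetical.
 */
#if 0
static void palloc_usage_sketch(void)
{
        /* 64kB, aligned on 64kB (1<<16), permanent: this is the SDR1 case. */
        void *hash = __palloc(0x10000, PA_PERM|16);

        /* Two pages taken from the low end of free memory (alignment bits
         * left at 0); ordinary allocations go back to physavail via pfree().
         */
        void *buf = __palloc(2*PAGE_SIZE, PA_LOW);
        if (buf) pfree(buf);

        (void)hash; /* PA_PERM allocations are never freed */
}
#endif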

#ifdef DEBUG
/* Debugging functions */
void print_maps(map *chain, const char *s) {
        map *p;
        printk("%s",s);
        for(p=chain; p; p=p->next) {
                printk("    %08lx-%08lx: %08lx\n",
                       p->base, p->end, p->firstpte);
        }
}

void print_all_maps(const char * s) {
        u_long freemaps;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        map *free;
        printk("%s",s);
        print_maps(mm->mappings, "  Currently defined mappings:\n");
        print_maps(mm->physavail, "  Currently available physical areas:\n");
        print_maps(mm->physused, "  Currently used physical areas:\n");
        print_maps(mm->virtavail, "  Currently available virtual areas:\n");
        print_maps(mm->virtused, "  Currently used virtual areas:\n");
        print_maps(mm->physperm, "  Permanently used physical areas:\n");
        print_maps(mm->sallocphys, "  Physical memory used for salloc:\n");
        print_maps(mm->sallocfree, "  Memory available for salloc:\n");
        print_maps(mm->sallocused, "  Memory allocated through salloc:\n");
        for (freemaps=0, free=mm->freemaps; free; freemaps++, free=free->next);
        printk("  %ld free maps.\n", freemaps);
}

void print_hash_table(void) {
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        hash_entry *p=(hash_entry *) mm->sdr1;
        u_int i, valid=0;
        for (i=0; i<((mm->hashmask)>>3)+8; i++) {
                if (p[i].key<0) valid++;
        }
        printk("%u valid hash entries on pass 1.\n", valid);
        valid = 0;
        for (i=0; i<((mm->hashmask)>>3)+8; i++) {
                if (p[i].key<0) valid++;
        }
        printk("%u valid hash entries on pass 2.\n"
               "     vpn:rpn_attr, p/s, pteg.i\n", valid);
        for (i=0; i<((mm->hashmask)>>3)+8; i++) {
                if (p[i].key<0) {
                        u_int pteg=(i>>3);
                        u_long vpn;
                        vpn = (pteg^((p[i].key)>>7)) &0x3ff;
                        if (p[i].key&0x40) vpn^=0x3ff;
                        vpn |= ((p[i].key<<9)&0xffff0000)
                          | ((p[i].key<<10)&0xfc00);
                        printk("%08lx:%08lx, %s, %5d.%d\n",
                               vpn,  p[i].rpn, p[i].key&0x40 ? "sec" : "pri",
                               pteg, i%8);
                }
        }
}

#endif