source: rtems/c/src/lib/libcpu/i386/page.c @ 0c0181d

Last change on this file since 0c0181d was b01abd58, checked in by Ralf Corsepius <ralf.corsepius@…>, on 10/23/11 at 06:40:09

2011-10-23 Ralf Corsépius <ralf.corsepius@…>

  • page.c: Remove unused var "dirCount".

Property mode set to 100644
File size: 13.3 KB
/*
 * $Id$
 *
 * page.c :- This file contains the implementation of the C functions
 *           that instantiate paging. More detailed information
 *           can be found on the Intel site, and more precisely in
 *           the following book:
 *
 *              Pentium Processor Family
 *              Developer's Manual
 *
 *              Volume 3 : Architecture and Programming Manual
 *
 * Copyright (C) 1999  Emmanuel Raguet (raguet@crf.canon.fr)
 *                     Canon Centre Recherche France.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <rtems.h>
#include <libcpu/cpu.h>

#define MEMORY_SIZE 0x4000000           /* 64 MB */

static int directoryEntry=0;
static int tableEntry=0;
static page_directory *pageDirectory;

extern uint32_t   bsp_mem_size;

/*************************************************************************/
/************** IT IS A ONE-TO-ONE TRANSLATION ***************************/
/*************************************************************************/


/*
 * Disable paging
 */
void _CPU_disable_paging(void)
{
  unsigned int regCr0;

  rtems_cache_flush_entire_data();
  regCr0 = i386_get_cr0();
  regCr0 &= ~(CR0_PAGING);
  i386_set_cr0( regCr0 );
}

/*
 * Enable paging
 */
void _CPU_enable_paging(void)
{
  unsigned int regCr0;

  regCr0 = i386_get_cr0();
  regCr0 |= CR0_PAGING;
  i386_set_cr0( regCr0 );
  rtems_cache_flush_entire_data();
}

/*
 * Initialize paging with a one-to-one (identity) mapping
 */

int init_paging(void)
{
  int nbPages;
  int nbInitPages;
  char *Tables;
  unsigned int regCr3;
  page_table *pageTable;
  unsigned int physPage;
  int nbTables=0;

  nbPages = ( (bsp_mem_size - 1) / PG_SIZE ) + 1;
  nbTables = ( (bsp_mem_size - 1) / FOUR_MB ) + 2;
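  /*
   * nbPages  : number of 4 KB pages needed to cover bsp_mem_size.
   * nbTables : one page table per 4 MB of memory, plus one extra page
   *            that holds the page directory itself.
   */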

  /* allocate 1 page more for page alignment */
  Tables = (char *)malloc( (nbTables + 1)*sizeof(page_table) );
  if ( Tables == NULL ){
    return -1; /* unable to allocate memory */
  }

  /* 4K-page alignment */
  Tables += (PG_SIZE - (int)Tables) & 0xFFF;

  /* Reset Tables */
  memset( Tables, 0, nbTables*sizeof(page_table) );
  pageDirectory = (page_directory *) Tables;
  pageTable     = (page_table *)((int)Tables + PG_SIZE);

  nbInitPages = 0;
  directoryEntry = 0;
  tableEntry = 0;
  physPage = 0;

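  /*
   * Build the identity mapping one 4 KB page at a time: each pass fills
   * one page table entry, and every time a table fills up (MAX_ENTRY
   * entries) the next directory entry and the next page table are used.
   */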
  while ( nbInitPages != nbPages ){
    if ( tableEntry == 0 ){
      pageDirectory->pageDirEntry[directoryEntry].bits.page_frame_address = (unsigned int)pageTable >> 12;
      pageDirectory->pageDirEntry[directoryEntry].bits.available      = 0;
      pageDirectory->pageDirEntry[directoryEntry].bits.page_size      = 0;
      pageDirectory->pageDirEntry[directoryEntry].bits.accessed       = 0;
      pageDirectory->pageDirEntry[directoryEntry].bits.cache_disable  = 0;
      pageDirectory->pageDirEntry[directoryEntry].bits.write_through  = 0;
      pageDirectory->pageDirEntry[directoryEntry].bits.user           = 1;
      pageDirectory->pageDirEntry[directoryEntry].bits.writable       = 1;
      pageDirectory->pageDirEntry[directoryEntry].bits.present        = 1;
    }
    pageTable->pageTableEntry[tableEntry].bits.page_frame_address = physPage;
    pageTable->pageTableEntry[tableEntry].bits.available      = 0;
    pageTable->pageTableEntry[tableEntry].bits.dirty          = 0;
    pageTable->pageTableEntry[tableEntry].bits.accessed       = 0;
    pageTable->pageTableEntry[tableEntry].bits.cache_disable  = 0;
    pageTable->pageTableEntry[tableEntry].bits.write_through  = 0;
    pageTable->pageTableEntry[tableEntry].bits.user           = 1;
    pageTable->pageTableEntry[tableEntry].bits.writable       = 1;
    pageTable->pageTableEntry[tableEntry].bits.present        = 1;

    physPage ++;
    tableEntry ++;

    if (tableEntry >= MAX_ENTRY){
      tableEntry = 0;
      directoryEntry ++;
      pageTable ++;
    }

    nbInitPages++;
  }

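  /*
   * Point CR3 at the (page-aligned) page directory.  PWT and PCD are
   * cleared so accesses to the directory itself are cached, write-back.
   */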
  regCr3 = (unsigned int)pageDirectory & CR3_PAGE_DIRECTORY_MASK;
  regCr3 &= ~(CR3_PAGE_WRITE_THROUGH);
  regCr3 &= ~(CR3_PAGE_CACHE_DISABLE);

  i386_set_cr3( regCr3 );

  _CPU_enable_cache();
  _CPU_enable_paging();

  return 0;
}

/*
 * Is the cache enabled?
 */
int  _CPU_is_cache_enabled(void)
{
  unsigned int regCr0;

  /* The cache is enabled when the CD (cache disable) bit is clear. */
  regCr0 = i386_get_cr0();
  return( !(regCr0 & CR0_PAGE_LEVEL_CACHE_DISABLE) );
}

/*
 * Is paging enabled?
 */
int  _CPU_is_paging_enabled(void)
{
  unsigned int regCr0;

  regCr0 = i386_get_cr0();
  return(regCr0 & CR0_PAGING);
}


/*
 * Map the given physical address range into the virtual address space
 * and return the virtual (translated) address in mappedAddress.
 */
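/*
 * Illustrative sketch only (the device address, size and flag value below
 * are hypothetical, not taken from this file): map a 64 KB region uncached
 * and writable and retrieve the virtual address chosen for it.  The flag
 * argument is ORed into the raw page table entry, so it follows the i386
 * PTE bit layout (bit 1 = writable, bit 4 = cache disable):
 *
 *   void *va;
 *   if (_CPU_map_phys_address(&va, (void *)0xFD000000, 0x10000,
 *                             0x02 | 0x10) != 0)
 *     printk("mapping failed\n");
 */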

int _CPU_map_phys_address(
  void **mappedAddress,
  void  *physAddress,
  int    size,
  int    flag
)
{
  page_table *localPageTable;
  unsigned int lastAddress, countAddress;
  char *Tables;
  linear_address virtualAddress;
  unsigned char pagingWasEnabled;

  pagingWasEnabled = 0;

  if (_CPU_is_paging_enabled()){
    pagingWasEnabled = 1;
    _CPU_disable_paging();
  }

  countAddress = (unsigned int)physAddress;
  lastAddress = (unsigned int)physAddress + (size - 1);
  virtualAddress.address = 0;

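  /*
   * New mappings are appended after the last entry already in use: the
   * global directoryEntry/tableEntry cursors advance page by page, and
   * virtualAddress records the first slot used so it can be returned to
   * the caller.
   */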
  while (1) {

    if ((countAddress & ~MASK_OFFSET) > (lastAddress & ~MASK_OFFSET))
      break;

    /* Need to allocate a new page table */
    if (pageDirectory->pageDirEntry[directoryEntry].bits.page_frame_address == 0){
      /* We allocate 2 pages to perform 4K-page alignment */
      Tables = (char *)malloc(2*sizeof(page_table));
      if ( Tables == NULL ){
        if (pagingWasEnabled)
          _CPU_enable_paging();
        return -1; /* unable to allocate memory */
      }
      /* 4K-page alignment */
      Tables += (PG_SIZE - (int)Tables) & 0xFFF;

      /* Reset Table */
      memset( Tables, 0, sizeof(page_table) );
      pageDirectory->pageDirEntry[directoryEntry].bits.page_frame_address =
        (unsigned int)Tables >> 12;
      pageDirectory->pageDirEntry[directoryEntry].bits.available      = 0;
      pageDirectory->pageDirEntry[directoryEntry].bits.page_size      = 0;
      pageDirectory->pageDirEntry[directoryEntry].bits.accessed       = 0;
      pageDirectory->pageDirEntry[directoryEntry].bits.cache_disable  = 0;
      pageDirectory->pageDirEntry[directoryEntry].bits.write_through  = 0;
      pageDirectory->pageDirEntry[directoryEntry].bits.user           = 1;
      pageDirectory->pageDirEntry[directoryEntry].bits.writable       = 1;
      pageDirectory->pageDirEntry[directoryEntry].bits.present        = 1;
    }


    localPageTable = (page_table *)(pageDirectory->
                                    pageDirEntry[directoryEntry].bits.
                                    page_frame_address << 12);

    if (virtualAddress.address == 0){
      virtualAddress.bits.directory = directoryEntry;
      virtualAddress.bits.page      = tableEntry;
      virtualAddress.bits.offset    = (unsigned int)physAddress & MASK_OFFSET;
    }

    localPageTable->pageTableEntry[tableEntry].bits.page_frame_address =
      ((unsigned int)countAddress & ~MASK_OFFSET) >> 12;
    localPageTable->pageTableEntry[tableEntry].bits.available      = 0;
    localPageTable->pageTableEntry[tableEntry].bits.dirty          = 0;
    localPageTable->pageTableEntry[tableEntry].bits.accessed       = 0;
    localPageTable->pageTableEntry[tableEntry].bits.cache_disable  = 0;
    localPageTable->pageTableEntry[tableEntry].bits.write_through  = 0;
    localPageTable->pageTableEntry[tableEntry].bits.user           = 1;
    localPageTable->pageTableEntry[tableEntry].bits.writable       = 0;
    localPageTable->pageTableEntry[tableEntry].bits.present        = 1;

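    /* The caller-supplied flag bits are ORed into the raw entry, so the
       caller can turn on writable, write-through or cache-disable. */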
    localPageTable->pageTableEntry[tableEntry].table_entry |= flag ;

    countAddress += PG_SIZE;
    tableEntry++;
    if (tableEntry >= MAX_ENTRY){
      tableEntry = 0;
      directoryEntry++;
    }
  }

  if (mappedAddress != 0)
    *mappedAddress = (void *)(virtualAddress.address);
  if (pagingWasEnabled)
    _CPU_enable_paging();
  return 0;
}

/*
 * "Compress" the directory/page table cursors: rewind directoryEntry and
 * tableEntry to just past the last present entry so that the address range
 * freed by an unmap can be reused by later mappings.
 */
static void Paging_Table_Compress(void)
{
  unsigned int dirCount, pageCount;
  page_table *localPageTable;

  if (tableEntry == 0){
    dirCount  = directoryEntry - 1;
    pageCount = MAX_ENTRY - 1;
  }
  else {
    dirCount  = directoryEntry;
    pageCount = tableEntry - 1;
  }

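  /*
   * Walk backwards from the current cursor position until a present page
   * table entry is found; the cursors are then left pointing at the entry
   * just after it (or at 0/0 if nothing is mapped any more).
   */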
  while (1){

    localPageTable = (page_table *)(pageDirectory->
                                    pageDirEntry[dirCount].bits.
                                    page_frame_address << 12);

    if (localPageTable->pageTableEntry[pageCount].bits.present == 1){
      pageCount++;
      if (pageCount >= MAX_ENTRY){
        pageCount = 0;
        dirCount++;
      }
      break;
    }


    if (pageCount == 0) {
      if (dirCount == 0){
        break;
      }
      else {
        pageCount = MAX_ENTRY - 1;
        dirCount-- ;
      }
    }
    else
      pageCount-- ;
  }

  directoryEntry = dirCount;
  tableEntry = pageCount;
}


/*
 * Unmap the virtual address range from the tables
 * (the already allocated page tables are not deallocated)
 */

int _CPU_unmap_virt_address(
  void *mappedAddress,
  int   size
)
{

  linear_address linearAddr;
  page_table *localPageTable;
  unsigned int lastAddr ;
  unsigned char pagingWasEnabled;

  pagingWasEnabled = 0;

  if (_CPU_is_paging_enabled()){
    pagingWasEnabled = 1;
    _CPU_disable_paging();
  }

  linearAddr.address = (unsigned int)mappedAddress;
  lastAddr = (unsigned int)mappedAddress + (size - 1);

  while (1){

    if ((linearAddr.address & ~MASK_OFFSET) > (lastAddr & ~MASK_OFFSET))
      break;

    if (pageDirectory->pageDirEntry[linearAddr.bits.directory].bits.present == 0){
      if (pagingWasEnabled)
        _CPU_enable_paging();
      return -1;
    }

    localPageTable = (page_table *)(pageDirectory->
                                    pageDirEntry[linearAddr.bits.directory].bits.
                                    page_frame_address << 12);

    if (localPageTable->pageTableEntry[linearAddr.bits.page].bits.present == 0){
      if (pagingWasEnabled)
        _CPU_enable_paging();
      return -1;
    }

    localPageTable->pageTableEntry[linearAddr.bits.page].bits.present = 0;

    linearAddr.address += PG_SIZE ;
  }
  Paging_Table_Compress();
  if (pagingWasEnabled)
    _CPU_enable_paging();

  return 0;
}

/*
 * Modify the PRESENT, WRITABLE, USER, WRITE_THROUGH and CACHE_DISABLE
 * flags of the page's descriptor.
 */

int _CPU_change_memory_mapping_attribute(
  void         **newAddress,
  void          *mappedAddress,
  unsigned int   size,
  unsigned int   flag
)
{

  linear_address linearAddr;
  page_table *localPageTable;
  unsigned int lastAddr ;
  unsigned char pagingWasEnabled;

  pagingWasEnabled = 0;

  if (_CPU_is_paging_enabled()){
    pagingWasEnabled = 1;
    _CPU_disable_paging();
  }

  linearAddr.address  = (unsigned int)mappedAddress;
  lastAddr = (unsigned int)mappedAddress + (size - 1);

  while (1){

    if ((linearAddr.address & ~MASK_OFFSET) > (lastAddr & ~MASK_OFFSET))
      break;

    if (pageDirectory->pageDirEntry[linearAddr.bits.directory].bits.present == 0){
      if (pagingWasEnabled)
        _CPU_enable_paging();
      return -1;
    }
    localPageTable = (page_table *)(pageDirectory->
                                    pageDirEntry[linearAddr.bits.directory].bits.
                                    page_frame_address << 12);

    if (localPageTable->pageTableEntry[linearAddr.bits.page].bits.present == 0){
      if (pagingWasEnabled)
        _CPU_enable_paging();
      return -1;
    }

    localPageTable->pageTableEntry[linearAddr.bits.page].table_entry &= ~MASK_FLAGS ;
    localPageTable->pageTableEntry[linearAddr.bits.page].table_entry |= flag ;

    linearAddr.address += PG_SIZE ;
  }

  if (newAddress != NULL)
    *newAddress = mappedAddress ;

  if (pagingWasEnabled)
    _CPU_enable_paging();

  return 0;
}

/*
 * Display the page descriptor flags (presence and CACHE_DISABLE)
 * for the whole memory.
 */

#include <rtems/bspIo.h>

int  _CPU_display_memory_attribute(void)
{
  unsigned int dirCount, pageCount;
  unsigned int regCr0;
  page_table *localPageTable;
  unsigned int prevCache;
  unsigned int prevPresent;
  unsigned int maxPage;
  unsigned char pagingWasEnabled;

  regCr0 = i386_get_cr0();

  printk("\n\n********* MEMORY CACHE CONFIGURATION *****\n");

  printk("CR0 -> paging           : %s\n",((regCr0 & CR0_PAGING) ? "ENABLE ":"DISABLE"));
  printk("       page-level cache : %s\n\n",((regCr0 & CR0_PAGE_LEVEL_CACHE_DISABLE) ? "DISABLE":"ENABLE"));

  if ((regCr0 & CR0_PAGING) == 0)
    return 0;

  prevPresent = 0;
  prevCache   = 1;
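  /*
   * prevPresent/prevCache remember the state of the previous page so that
   * only transitions (present <-> absent, cached <-> uncached) are printed.
   */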

  pagingWasEnabled = 0;

  if (_CPU_is_paging_enabled()){
    pagingWasEnabled = 1;
    _CPU_disable_paging();
  }

  for (dirCount = 0; dirCount < directoryEntry+1; dirCount++) {

    localPageTable = (page_table *)(pageDirectory->
                                    pageDirEntry[dirCount].bits.
                                    page_frame_address << 12);

    maxPage = MAX_ENTRY;
    /*if ( dirCount == (directoryEntry-1))
      maxPage = tableEntry;*/
    for (pageCount = 0; pageCount < maxPage; pageCount++) {

      if (localPageTable->pageTableEntry[pageCount].bits.present != 0){
        if (prevPresent == 0){
          prevPresent = 1;
          printk ("present page from address %x \n", ((dirCount << 22)|(pageCount << 12)));
        }
        if (prevCache != localPageTable->pageTableEntry[pageCount].bits.cache_disable ) {
          prevCache = localPageTable->pageTableEntry[pageCount].
            bits.cache_disable;
          printk ("    cache %s from %x <phy %x>\n",
                  (prevCache ? "DISABLE" : "ENABLE "),
                  ((dirCount << 22)|(pageCount << 12)),
                  localPageTable->pageTableEntry[pageCount].bits.page_frame_address << 12);
        }
      }
      else {
        if (prevPresent == 1){
          prevPresent = 0;
          printk ("Absent from %x \n", ((dirCount << 22)|(pageCount << 12)));
        }
      }
    }
  }
  if (pagingWasEnabled)
    _CPU_enable_paging();

  return 0;
}