source: rtems/bsps/aarch64/include/bsp/aarch64-mmu.h @ 22015c0

Last change on this file since 22015c0 was bfc99a6e, checked in by Chris Johns <chrisj@…>, on 05/26/22 at 06:05:09

bsp/aarch64: Flush the cache before invalidating it

  • Any page tables need to be flushed if the cache is enabled. Disabling the cache may only be available in secure mode.
/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup aarch64_start
 *
 * @brief AArch64 MMU configuration.
 */

/*
 * Copyright (C) 2021 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef LIBBSP_AARCH64_SHARED_AARCH64_MMU_H
#define LIBBSP_AARCH64_SHARED_AARCH64_MMU_H

#include <bsp/start.h>
#include <bsp/linker-symbols.h>
#include <rtems/score/aarch64-system-registers.h>
#include <bspopts.h>
#include <bsp/utility.h>
#include <libcpu/mmu-vmsav8-64.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

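/*
 * One memory region to be mapped.  The [begin, end) range is aligned out to
 * 4KiB page boundaries before mapping, and flags selects the memory
 * attributes, such as the AARCH64_MMU_* masks used below.
 */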
typedef struct {
  uintptr_t begin;
  uintptr_t end;
  uint64_t flags;
} aarch64_mmu_config_entry;

#define AARCH64_MMU_DEFAULT_SECTIONS \
  { \
    .begin = (uintptr_t) bsp_section_fast_text_begin, \
    .end = (uintptr_t) bsp_section_fast_text_end, \
    .flags = AARCH64_MMU_CODE_CACHED \
  }, { \
    .begin = (uintptr_t) bsp_section_fast_data_begin, \
    .end = (uintptr_t) bsp_section_fast_data_end, \
    .flags = AARCH64_MMU_DATA_RW_CACHED \
  }, { \
    .begin = (uintptr_t) bsp_section_start_begin, \
    .end = (uintptr_t) bsp_section_start_end, \
    .flags = AARCH64_MMU_CODE_CACHED \
  }, { \
    .begin = (uintptr_t) bsp_section_vector_begin, \
    .end = (uintptr_t) bsp_section_vector_end, \
    .flags = AARCH64_MMU_DATA_RW_CACHED \
  }, { \
    .begin = (uintptr_t) bsp_section_text_begin, \
    .end = (uintptr_t) bsp_section_text_end, \
    .flags = AARCH64_MMU_CODE_CACHED \
  }, { \
    .begin = (uintptr_t) bsp_section_rodata_begin, \
    .end = (uintptr_t) bsp_section_rodata_end, \
    .flags = AARCH64_MMU_DATA_RO_CACHED \
  }, { \
    .begin = (uintptr_t) bsp_section_data_begin, \
    .end = (uintptr_t) bsp_section_data_end, \
    .flags = AARCH64_MMU_DATA_RW_CACHED \
  }, { \
    .begin = (uintptr_t) bsp_section_bss_begin, \
    .end = (uintptr_t) bsp_section_bss_end, \
    .flags = AARCH64_MMU_DATA_RW_CACHED \
  }, { \
    .begin = (uintptr_t) bsp_section_rtemsstack_begin, \
    .end = (uintptr_t) bsp_section_rtemsstack_end, \
    .flags = AARCH64_MMU_DATA_RW_CACHED \
  }, { \
    .begin = (uintptr_t) bsp_section_work_begin, \
    .end = (uintptr_t) bsp_section_work_end, \
    .flags = AARCH64_MMU_DATA_RW_CACHED \
  }, { \
    .begin = (uintptr_t) bsp_section_stack_begin, \
    .end = (uintptr_t) bsp_section_stack_end, \
    .flags = AARCH64_MMU_DATA_RW_CACHED \
  }, { \
    .begin = (uintptr_t) bsp_section_nocache_begin, \
    .end = (uintptr_t) bsp_section_nocache_end, \
    .flags = AARCH64_MMU_DEVICE \
  }, { \
    .begin = (uintptr_t) bsp_section_nocachenoload_begin, \
    .end = (uintptr_t) bsp_section_nocachenoload_end, \
    .flags = AARCH64_MMU_DEVICE \
  }, { \
    .begin = (uintptr_t) bsp_translation_table_base, \
    .end = (uintptr_t) bsp_translation_table_end, \
    .flags = AARCH64_MMU_DATA_RW_CACHED \
  }, { \
/*
 * The vector table must be in writable and executable memory as it stores both
 * exception code and the mutable pointer to which it jumps
 */ \
    .begin = (uintptr_t) bsp_start_vector_table_begin, \
    .end = (uintptr_t) bsp_start_vector_table_end, \
    .flags = AARCH64_MMU_CODE_RW_CACHED \
  }

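/*
 * A BSP supplies the configuration table declared further below
 * (aarch64_mmu_config_table and aarch64_mmu_config_table_size).  A minimal
 * sketch of such a definition, assuming one board-specific device region;
 * BSP_UART_BASE and BSP_UART_SIZE are hypothetical placeholders, not names
 * from this header:
 *
 *   BSP_START_DATA_SECTION const aarch64_mmu_config_entry
 *   aarch64_mmu_config_table[] = {
 *     AARCH64_MMU_DEFAULT_SECTIONS,
 *     {
 *       .begin = BSP_UART_BASE,                   // hypothetical device base
 *       .end = BSP_UART_BASE + BSP_UART_SIZE,     // hypothetical device size
 *       .flags = AARCH64_MMU_DEVICE
 *     }
 *   };
 *
 *   BSP_START_DATA_SECTION const size_t aarch64_mmu_config_table_size =
 *     RTEMS_ARRAY_SIZE( aarch64_mmu_config_table );
 */
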
/* setup straight mapped block entries */
BSP_START_TEXT_SECTION static inline void aarch64_mmu_page_table_set_blocks(
  uint64_t *page_table,
  uint64_t base,
  uint32_t bits_offset,
  uint64_t default_attr
)
{
  uint64_t page_flag = 0;

  if ( bits_offset == MMU_PAGE_BITS ) {
    page_flag = MMU_DESC_TYPE_PAGE;
  }

  for ( uint64_t i = 0; i < ( 1 << MMU_BITS_PER_LEVEL ); i++ ) {
    page_table[i] = base | ( i << bits_offset );
    page_table[i] |= default_attr | page_flag;
  }
}

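/*
 * A hedged reading of the loop above, assuming the usual VMSAv8-64 4KiB
 * granule values (MMU_BITS_PER_LEVEL = 9, MMU_PAGE_BITS = 12): each call
 * fills all 512 entries of one table, so with bits_offset = 12 the table
 * straight-maps 512 contiguous 4KiB pages starting at base, and with
 * bits_offset = 21 it maps 512 contiguous 2MiB blocks.
 */
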
BSP_START_TEXT_SECTION static inline rtems_status_code
aarch64_mmu_page_table_alloc( uint64_t **page_table )
{
  /* First page table is already in use as TTB0 */
  static uintptr_t current_page_table =
    (uintptr_t) bsp_translation_table_base;

  /*
   * current_page_table holds an address, not a pointer, so adding
   * MMU_PAGE_SIZE advances by exactly one table
   */
  current_page_table += MMU_PAGE_SIZE;
  *page_table = (uint64_t *) current_page_table;

  /* Out of linker-allocated page tables? */
  uintptr_t consumed_pages = current_page_table;
  consumed_pages -= (uintptr_t) bsp_translation_table_base;
  consumed_pages /= MMU_PAGE_SIZE;

  if ( consumed_pages > AARCH64_MMU_TRANSLATION_TABLE_PAGES ) {
    *page_table = NULL;
    return RTEMS_NO_MEMORY;
  }

  return RTEMS_SUCCESSFUL;
}

BSP_START_TEXT_SECTION static inline uintptr_t aarch64_mmu_get_index(
  uintptr_t root_address,
  uintptr_t vaddr,
  uint32_t shift
)
{
  /* One table holds 2^MMU_BITS_PER_LEVEL entries, so mask to that many bits */
  uintptr_t mask = ( 1 << MMU_BITS_PER_LEVEL ) - 1;

  return ( ( vaddr - root_address ) >> shift ) & mask;
}

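/*
 * A worked example, assuming MMU_BITS_PER_LEVEL = 9 and a level-1 lookup
 * (shift = 21): with root_address 0, vaddr 0x40200000 yields
 * ( 0x40200000 >> 21 ) & 0x1ff = 0x201 & 0x1ff = 1, i.e. the second 2MiB
 * entry within the table covering that 1GiB region.
 */
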
BSP_START_TEXT_SECTION static inline rtems_status_code
aarch64_mmu_get_sub_table(
  uint64_t *page_table_entry,
  uint64_t **sub_table,
  uintptr_t physical_root_address,
  uint32_t shift
)
{
  /* check if the index already has a page table */
  if ( ( *page_table_entry & MMU_DESC_TYPE_TABLE ) == MMU_DESC_TYPE_TABLE ) {
    /* extract page table address */
    uint64_t table_pointer = *page_table_entry & MMU_DESC_PAGE_TABLE_MASK;
    /* This cast should be safe since the address was inserted in this mode */
    *sub_table = (uint64_t *) (uintptr_t) table_pointer;
  } else {
    /* allocate new page table and set block */
    rtems_status_code sc = aarch64_mmu_page_table_alloc( sub_table );

    if ( sc != RTEMS_SUCCESSFUL ) {
      return sc;
    }

    aarch64_mmu_page_table_set_blocks(
      *sub_table,
      physical_root_address,
      shift - MMU_BITS_PER_LEVEL,
      *page_table_entry & ~MMU_DESC_PAGE_TABLE_MASK
    );
    *page_table_entry = (uintptr_t) *sub_table;
    *page_table_entry |= MMU_DESC_TYPE_TABLE | MMU_DESC_VALID;
  }

  return RTEMS_SUCCESSFUL;
}
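
/*
 * Note that when a block entry is split above, the freshly allocated table
 * is seeded with smaller blocks that inherit the old entry's attribute bits
 * (everything outside MMU_DESC_PAGE_TABLE_MASK), so addresses outside the
 * range being remapped keep their previous properties.
 */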

BSP_START_TEXT_SECTION static inline rtems_status_code aarch64_mmu_map_block(
  uint64_t *page_table,
  uintptr_t root_address,
  uintptr_t addr,
  uint64_t size,
  uint32_t level,
  uint64_t flags
)
{
  uint32_t shift = ( 2 - level ) * MMU_BITS_PER_LEVEL + MMU_PAGE_BITS;
  uintptr_t granularity = 1 << shift;
  uint64_t page_flag = 0;

  if ( level == 2 ) {
    page_flag = MMU_DESC_TYPE_PAGE;
  }

  while ( size > 0 ) {
    uintptr_t index = aarch64_mmu_get_index( root_address, addr, shift );
    uintptr_t block_bottom = RTEMS_ALIGN_DOWN( addr, granularity );
    uint64_t chunk_size = granularity;

    /* check for perfect block match */
    if ( block_bottom == addr ) {
      if ( size >= chunk_size ) {
        /* when page_flag is set the last level must be a page descriptor */
        if (
          page_flag ||
          ( page_table[index] & MMU_DESC_TYPE_TABLE ) != MMU_DESC_TYPE_TABLE
        ) {
          /* no sub-table, apply block properties */
          page_table[index] = addr | flags | page_flag;
          size -= chunk_size;
          addr += chunk_size;
          continue;
        }
      } else {
        /* block starts on a boundary, but is short */
        chunk_size = size;

        /* it isn't possible to go beyond page table level 2 */
        if ( page_flag ) {
          /* no sub-table, apply block properties */
          page_table[index] = addr | flags | page_flag;
          size -= chunk_size;
          addr += chunk_size;
          continue;
        }
      }
    } else {
      uintptr_t block_top = RTEMS_ALIGN_UP( addr, granularity );
      chunk_size = block_top - addr;

      if ( chunk_size > size ) {
        chunk_size = size;
      }
    }

    /* Deal with any subtable modification */
    uintptr_t new_root_address = root_address + index * granularity;
    uint64_t *sub_table = NULL;
    rtems_status_code sc;

    sc = aarch64_mmu_get_sub_table(
      &page_table[index],
      &sub_table,
      new_root_address,
      shift
    );

    if ( sc != RTEMS_SUCCESSFUL ) {
      return sc;
    }

    sc = aarch64_mmu_map_block(
      sub_table,
      new_root_address,
      addr,
      chunk_size,
      level + 1,
      flags
    );

    if ( sc != RTEMS_SUCCESSFUL ) {
      return sc;
    }

    size -= chunk_size;
    addr += chunk_size;
  }

  return RTEMS_SUCCESSFUL;
}

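/*
 * For reference, with the 4KiB granule assumed above: shift is 30, 21 and 12
 * at levels 0, 1 and 2, so this routine maps 1GiB blocks at level 0, 2MiB
 * blocks at level 1 and 4KiB pages at level 2, recursing into sub-tables for
 * any part of the range that is not block-aligned.
 */
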
BSP_START_DATA_SECTION extern const aarch64_mmu_config_entry
  aarch64_mmu_config_table[];

BSP_START_DATA_SECTION extern const size_t
  aarch64_mmu_config_table_size;

BSP_START_TEXT_SECTION static inline void
aarch64_mmu_set_translation_table_entries(
  uint64_t *ttb,
  const aarch64_mmu_config_entry *config
)
{
  /* Force alignment to 4KiB page size */
  uintptr_t begin = RTEMS_ALIGN_DOWN( config->begin, MMU_PAGE_SIZE );
  uintptr_t end = RTEMS_ALIGN_UP( config->end, MMU_PAGE_SIZE );
  rtems_status_code sc;

  sc = aarch64_mmu_map_block(
    ttb,
    0x0,
    begin,
    end - begin,
    0,
    config->flags
  );

  if ( sc != RTEMS_SUCCESSFUL ) {
    rtems_fatal_error_occurred( sc );
  }
}

BSP_START_TEXT_SECTION static inline void aarch64_mmu_setup_translation_table(
  const aarch64_mmu_config_entry *config_table,
  size_t config_count
)
{
  size_t i;
  uint64_t *ttb = (uint64_t *) bsp_translation_table_base;

  aarch64_mmu_page_table_set_blocks(
    ttb,
    (uintptr_t) NULL,
    MMU_TOP_LEVEL_PAGE_BITS,
    0
  );

  _AArch64_Write_ttbr0_el1( (uintptr_t) ttb );

  /* Configure entries required for each memory section */
  for ( i = 0; i < config_count; ++i ) {
    aarch64_mmu_set_translation_table_entries( ttb, &config_table[i] );
  }
}

BSP_START_TEXT_SECTION static inline void
aarch64_mmu_enable( void )
{
  uint64_t sctlr;

  /* CPUECTLR_EL1.SMPEN is already set on ZynqMP and is not writable */

  /*
   * Flush the data cache before invalidating it: page tables written while
   * the cache was enabled must reach memory rather than be discarded by the
   * invalidate (see the change note above)
   */
  rtems_cache_flush_entire_data();
  rtems_cache_invalidate_entire_data();

  /* Enable MMU and cache */
  sctlr = _AArch64_Read_sctlr_el1();
  sctlr |= AARCH64_SCTLR_EL1_I | AARCH64_SCTLR_EL1_C | AARCH64_SCTLR_EL1_M;
  _AArch64_Write_sctlr_el1( sctlr );
}

BSP_START_TEXT_SECTION static inline void
aarch64_mmu_disable( void )
{
  uint64_t sctlr;

  /* Disable the MMU; the cache enable bits are left untouched */
  sctlr = _AArch64_Read_sctlr_el1();
  sctlr &= ~(AARCH64_SCTLR_EL1_M);
  _AArch64_Write_sctlr_el1( sctlr );
}

BSP_START_TEXT_SECTION static inline void aarch64_mmu_setup( void )
{
  /* Set TCR */
  /* 64GB/36 bits mappable (64-0x1c) */
  _AArch64_Write_tcr_el1(
    AARCH64_TCR_EL1_T0SZ( 0x1c ) | AARCH64_TCR_EL1_IRGN0( 0x1 ) |
    AARCH64_TCR_EL1_ORGN0( 0x1 ) | AARCH64_TCR_EL1_SH0( 0x3 ) |
    AARCH64_TCR_EL1_TG0( 0x0 )
  );

  /* Set MAIR */
  _AArch64_Write_mair_el1(
    AARCH64_MAIR_EL1_ATTR0( 0x0 ) | AARCH64_MAIR_EL1_ATTR1( 0x4 ) |
    AARCH64_MAIR_EL1_ATTR2( 0x44 ) | AARCH64_MAIR_EL1_ATTR3( 0xFF )
  );
}

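/*
 * For reference, a hedged decoding of the standard ARMv8-A encodings used
 * above: T0SZ = 0x1c gives a 36-bit (64GB) virtual address range,
 * IRGN0/ORGN0 = 0x1 select write-back write-allocate cacheable table walks,
 * SH0 = 0x3 selects inner shareable, and TG0 = 0x0 selects the 4KiB granule.
 * The MAIR attribute indices are: 0 = 0x00 Device-nGnRnE, 1 = 0x04
 * Device-nGnRE, 2 = 0x44 Normal non-cacheable, 3 = 0xFF Normal write-back
 * cacheable.
 */
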
#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* LIBBSP_AARCH64_SHARED_AARCH64_MMU_H */