Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Boot mem handling #7039

Open
wants to merge 24 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
c85ab4c
mk/clang.mk: -Wno-gnu-alignof-expression
jenswi-linaro Sep 13, 2024
d2abc41
core: arm64: increase thread stack size for debug
jenswi-linaro Sep 13, 2024
b4d4eea
core: mm: add vaddr_to_phys()
jenswi-linaro Sep 13, 2024
131e90d
core: remove CORE_MEM_TA_RAM
jenswi-linaro Sep 13, 2024
58c0db4
core: add VCORE_FREE_{PA,SZ,END_PA}
jenswi-linaro Sep 13, 2024
7f674c1
core: mm: allow unmapping VCORE_FREE
jenswi-linaro Sep 13, 2024
85789fc
core: mm: replace MEM_AREA_TA_RAM
jenswi-linaro Sep 13, 2024
ad3a65e
core: mm: unify secure core and TA memory
jenswi-linaro Sep 13, 2024
9b5af7b
core: virt: phys_mem_core_alloc() use both pools
jenswi-linaro Sep 13, 2024
2554780
core: arm: core_mmu_v7.c: increase MAX_XLAT_TABLES by 2
jenswi-linaro Sep 14, 2024
0bbdf91
core: mm: map memory using requested block size
jenswi-linaro Sep 13, 2024
0e1de01
core: arm,pager: make __vcore_init_ro_start follow __vcore_init_rx_end
jenswi-linaro Sep 14, 2024
db65607
core: mm,pager: map remaining physical memory
jenswi-linaro Sep 13, 2024
c0f389f
core: add CFG_BOOT_MEM and boot_mem_*() functions
jenswi-linaro Sep 13, 2024
585fe39
core: arm: add boot_cached_mem_end
jenswi-linaro Sep 20, 2024
8469b37
core: arm: enable CFG_BOOT_MEM unconditionally
jenswi-linaro Sep 13, 2024
8488ae8
core: mm: allocate temporary memory map array
jenswi-linaro Sep 13, 2024
9adee80
core: initialize guest physical memory early
jenswi-linaro Sep 13, 2024
8517692
core: merge core_mmu_init_phys_mem() and core_mmu_init_virtualization()
jenswi-linaro Sep 13, 2024
5cc4816
core: arm: add CFG_NS_VIRTUALIZATION boot log
jenswi-linaro Sep 20, 2024
643bb7c
[fix] core: mm: map memory using requested block size
jenswi-linaro Sep 27, 2024
50d754b
[review] core: mm: replace MEM_AREA_TA_RAM
jenswi-linaro Sep 27, 2024
6eff1c9
[review] core: mm: replace MEM_AREA_TA_RAM
jenswi-linaro Sep 27, 2024
a6a21d1
[review] core: mm: replace MEM_AREA_TA_RAM
jenswi-linaro Sep 27, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion core/arch/arm/include/kernel/thread_private_arch.h
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@
#else
#define STACK_TMP_SIZE (2048 + STACK_TMP_OFFS + CFG_STACK_TMP_EXTRA)
#endif
#if defined(CFG_CORE_SANITIZE_KADDRESS)
#if defined(CFG_CORE_SANITIZE_KADDRESS) || defined(CFG_CORE_DEBUG_CHECK_STACKS)
#define STACK_THREAD_SIZE (10240 + CFG_STACK_THREAD_EXTRA)
#else
#define STACK_THREAD_SIZE (8192 + CFG_STACK_THREAD_EXTRA)
Expand Down
120 changes: 68 additions & 52 deletions core/arch/arm/kernel/boot.c
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,12 @@ uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
DECLARE_KEEP_PAGER(sem_cpu_sync);
#endif

/*
* Must not be in .bss since it's initialized and used from assembly before
* .bss is cleared.
*/
vaddr_t boot_cached_mem_end __nex_data = 1;

static unsigned long boot_arg_fdt __nex_bss;
static unsigned long boot_arg_nsec_entry __nex_bss;
static unsigned long boot_arg_pageable_part __nex_bss;
Expand Down Expand Up @@ -381,12 +387,8 @@ static TEE_Result mmap_clear_memtag(struct tee_mmap_region *map,
void *ptr __unused)
{
switch (map->type) {
case MEM_AREA_TEE_RAM:
case MEM_AREA_TEE_RAM_RW:
case MEM_AREA_NEX_RAM_RO:
case MEM_AREA_NEX_RAM_RW:
case MEM_AREA_TEE_ASAN:
case MEM_AREA_TA_RAM:
case MEM_AREA_SEC_RAM_OVERALL:
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Asan memory is still referenced with MEM_AREA_TEE_ASAN memory area ID when CFG_WITH_PAGER is enabled. Won't this switch case need to be supported?

MEM_AREA_TEE_RAM is still used when CFG_CORE_RWDATA_NOEXEC is disabled. Is that an issue? Maybe it's a bit inconsistent to enable CFG_MEMTAG but not CFG_CORE_RWDATA_NOEXEC.

Is it OK here to no longer clear memtags for MEM_AREA_TEE_RAM_RW and MEM_AREA_NEX_RAM_RW areas?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Asan memory is still referenced with MEM_AREA_TEE_ASAN memory area ID when CFG_WITH_PAGER is enabled. Won't this switch case need to be supported?

mk/config.mk:999: *** CFG_CORE_SANITIZE_KADDRESS and CFG_MEMTAG are not compatible.  Stop.

I don't think we need to care about MEM_AREA_TEE_ASAN since it's not used with memory tagging enabled.

MEM_AREA_TEE_RAM is still used when CFG_CORE_RWDATA_NOEXEC is disabled. Is that an issue? Maybe it's a bit inconsistent to enable CFG_MEMTAG but not CFG_CORE_RWDATA_NOEXEC.

That shouldn't be a problem, everything should be covered by MEM_AREA_SEC_RAM_OVERALL nonetheless.

Is it OK here to no longer clear memtags for MEM_AREA_TEE_RAM_RW and MEM_AREA_NEX_RAM_RW areas?

The same here, it is covered by MEM_AREA_SEC_RAM_OVERALL.

However, while looking at logs etc to double-check I noticed:

D/TC:0   dump_mmap_table:923 type SEC_RAM_OVERALL va 0x12100000..0x122fffff pa 0x0e100000..0x0e2fffff size 0x00200000 (pgdir)
...
D/TC:0   mmap_clear_memtag:392 Clearing tags for VA 0x12000000..0x121fffff

We may have this error in more than one place, see commit f01690c ("core: fix mapping init debug trace") for the dump_mmap_table() print. I'll investigate further.

DMSG("Clearing tags for VA %#"PRIxVA"..%#"PRIxVA,
map->va, map->va + map->size - 1);
memtag_set_tags((void *)map->va, map->size, 0);
Expand Down Expand Up @@ -496,7 +498,7 @@ static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
#endif
}

static void init_runtime(unsigned long pageable_part)
static void init_pager_runtime(unsigned long pageable_part)
{
size_t n;
size_t init_size = (size_t)(__init_end - __init_start);
Expand All @@ -521,12 +523,6 @@ static void init_runtime(unsigned long pageable_part)

tmp_hashes = __init_end + embdata->hashes_offset;

init_asan();

/* Add heap2 first as heap1 may be too small as initial bget pool */
malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);

/*
* This needs to be initialized early to support address lookup
* in MEM_AREA_TEE_RAM
Expand All @@ -540,17 +536,17 @@ static void init_runtime(unsigned long pageable_part)
asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);

/*
* Need physical memory pool initialized to be able to allocate
* secure physical memory below.
 * The pager is about to be enabled below, any temporary boot
 * memory allocations must be removed now.
*/
core_mmu_init_phys_mem();
boot_mem_release_tmp_alloc();

carve_out_asan_mem();

mm = nex_phys_mem_ta_alloc(pageable_size);
assert(mm);
paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
pageable_size);
paged_store = phys_to_virt(tee_mm_get_smem(mm),
MEM_AREA_SEC_RAM_OVERALL, pageable_size);
/*
* Load pageable part in the dedicated allocated area:
* - Move pageable non-init part into pageable area. Note bootloader
Expand Down Expand Up @@ -652,27 +648,9 @@ static void init_runtime(unsigned long pageable_part)

print_pager_pool_size();
}
#else

static void init_runtime(unsigned long pageable_part __unused)
#else /*!CFG_WITH_PAGER*/
static void init_pager_runtime(unsigned long pageable_part __unused)
{
init_asan();

/*
* By default whole OP-TEE uses malloc, so we need to initialize
* it early. But, when virtualization is enabled, malloc is used
* only by TEE runtime, so malloc should be initialized later, for
* every virtual partition separately. Core code uses nex_malloc
* instead.
*/
#ifdef CFG_NS_VIRTUALIZATION
nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
__nex_heap_start);
#else
malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
#endif

IMSG_RAW("\n");
}
#endif

Expand Down Expand Up @@ -889,10 +867,6 @@ static void update_external_dt(void)

void init_tee_runtime(void)
{
#ifndef CFG_WITH_PAGER
/* Pager initializes TA RAM early */
core_mmu_init_phys_mem();
#endif
/*
* With virtualization we call this function when creating the
* OP-TEE partition instead.
Expand Down Expand Up @@ -923,6 +897,8 @@ void init_tee_runtime(void)

static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
{
vaddr_t va = 0;

thread_init_core_local_stacks();
/*
* Mask asynchronous exceptions before switch to the thread vector
Expand All @@ -938,14 +914,51 @@ static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
if (IS_ENABLED(CFG_CRYPTO_WITH_CE))
check_crypto_extensions();

init_asan();

/*
* Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
* set a current thread right now to avoid a chicken-and-egg problem
* (thread_init_boot_thread() sets the current thread but needs
* things set by init_runtime()).
* By default whole OP-TEE uses malloc, so we need to initialize
* it early. But, when virtualization is enabled, malloc is used
* only by TEE runtime, so malloc should be initialized later, for
* every virtual partition separately. Core code uses nex_malloc
* instead.
*/
thread_get_core_local()->curr_thread = 0;
init_runtime(pageable_part);
#ifdef CFG_WITH_PAGER
/* Add heap2 first as heap1 may be too small as initial bget pool */
malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
#endif
#ifdef CFG_NS_VIRTUALIZATION
nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
__nex_heap_start);
#else
malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
#endif

core_mmu_save_mem_map();
core_mmu_init_phys_mem();
va = boot_mem_release_unused();
if (!IS_ENABLED(CFG_WITH_PAGER)) {
/*
* We must update boot_cached_mem_end to reflect the memory
* just unmapped by boot_mem_release_unused().
*/
assert(va && va <= boot_cached_mem_end);
boot_cached_mem_end = va;
}

IMSG_RAW("\n");

if (IS_ENABLED(CFG_WITH_PAGER)) {
/*
 * Pager: init_pager_runtime() calls thread_kernel_enable_vfp()
 * so we must set a current thread right now to avoid a
 * chicken-and-egg problem (thread_init_boot_thread() sets
 * the current thread but needs things set by
 * init_pager_runtime()).
 */
thread_get_core_local()->curr_thread = 0;
init_pager_runtime(pageable_part);
}

if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
/*
Expand Down Expand Up @@ -1010,6 +1023,10 @@ void __weak boot_init_primary_late(unsigned long fdt __unused,
#ifdef CFG_CORE_ASLR
DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
(unsigned long)boot_mmu_config.map_offset, VCORE_START_VA);
#endif
#ifdef CFG_NS_VIRTUALIZATION
DMSG("NS-virtualization enabled, supporting %u guests",
CFG_VIRT_GUEST_COUNT);
#endif
if (IS_ENABLED(CFG_MEMTAG))
DMSG("Memory tagging %s",
Expand All @@ -1026,12 +1043,8 @@ void __weak boot_init_primary_late(unsigned long fdt __unused,

boot_primary_init_intc();
init_vfp_nsec();
if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
IMSG("Initializing virtualization support");
core_mmu_init_virtualization();
} else {
if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
init_tee_runtime();
}
}

/*
Expand All @@ -1040,6 +1053,9 @@ void __weak boot_init_primary_late(unsigned long fdt __unused,
*/
void __weak boot_init_primary_final(void)
{
if (!IS_ENABLED(CFG_WITH_PAGER))
boot_mem_release_tmp_alloc();

if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
call_driver_initcalls();
call_finalcalls();
Expand Down
Loading
Loading