diff --git a/core/arch/arm/mm/core_mmu_lpae.c b/core/arch/arm/mm/core_mmu_lpae.c
index 0770bb69f05..f7eb8f411cf 100644
--- a/core/arch/arm/mm/core_mmu_lpae.c
+++ b/core/arch/arm/mm/core_mmu_lpae.c
@@ -803,8 +803,7 @@ static void core_init_mmu_prtn_tee(struct mmu_partition *prtn,
 	memset(prtn->base_tables, 0, BASE_TABLE_SIZE * CFG_TEE_CORE_NB_CORE);
 
 	for (n = 0; n < mem_map->count; n++)
-		if (!core_mmu_is_dynamic_vaspace(mem_map->map + n))
-			core_mmu_map_region(prtn, mem_map->map + n);
+		core_mmu_map_region(prtn, mem_map->map + n);
 
 	/*
 	 * Primary mapping table is ready at index `get_core_pos()`
diff --git a/core/arch/arm/mm/core_mmu_v7.c b/core/arch/arm/mm/core_mmu_v7.c
index a2984b4aac9..44726895075 100644
--- a/core/arch/arm/mm/core_mmu_v7.c
+++ b/core/arch/arm/mm/core_mmu_v7.c
@@ -781,8 +781,7 @@ void core_init_mmu_prtn(struct mmu_partition *prtn, struct memory_map *mem_map)
 	memset(ttb1, 0, L1_TBL_SIZE);
 
 	for (n = 0; n < mem_map->count; n++)
-		if (!core_mmu_is_dynamic_vaspace(mem_map->map + n))
-			core_mmu_map_region(prtn, mem_map->map + n);
+		core_mmu_map_region(prtn, mem_map->map + n);
 }
 
 void core_init_mmu(struct memory_map *mem_map)
diff --git a/core/arch/riscv/mm/core_mmu_arch.c b/core/arch/riscv/mm/core_mmu_arch.c
index a81d153902e..703a4d09038 100644
--- a/core/arch/riscv/mm/core_mmu_arch.c
+++ b/core/arch/riscv/mm/core_mmu_arch.c
@@ -308,8 +308,7 @@ static void core_init_mmu_prtn_tee(struct mmu_partition *prtn,
 	memset(prtn->pool_pgts, 0, RISCV_MMU_MAX_PGTS * RISCV_MMU_PGT_SIZE);
 
 	for (n = 0; n < mem_map->count; n++)
-		if (!core_mmu_is_dynamic_vaspace(mem_map->map + n))
-			core_mmu_map_region(prtn, mem_map->map + n);
+		core_mmu_map_region(prtn, mem_map->map + n);
 
 	/*
 	 * Primary mapping table is ready at index `get_core_pos()`
diff --git a/core/mm/core_mmu.c b/core/mm/core_mmu.c
index d239cfb0412..0fe5e2aba21 100644
--- a/core/mm/core_mmu.c
+++ b/core/mm/core_mmu.c
@@ -2002,7 +2002,14 @@ void core_mmu_map_region(struct mmu_partition *prtn, struct tee_mmap_region *mm)
 				panic("Page is already mapped");
 
 			core_mmu_set_entry(&tbl_info, idx, paddr, mm->attr);
-			paddr += block_size;
+			/*
+			 * Dynamic vaspace regions don't have a physical
+			 * address initially but we need to allocate and
+			 * initialize the translation tables now for later
+			 * updates to work properly.
+			 */
+			if (paddr)
+				paddr += block_size;
 			vaddr += block_size;
 			size_left -= block_size;
 
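
Below is a minimal standalone C sketch (not OP-TEE code) of the behaviour the patch relies on: once the per-arch init loops stop skipping dynamic vaspace regions, core_mmu_map_region() also walks regions whose physical address is still 0, so their translation tables are allocated and initialized up front, while the new "if (paddr)" guard keeps that zero address from being advanced. The fake_region type, fake_map_region() helper and BLOCK_SIZE value are invented for illustration only.

/*
 * Simplified stand-in for the mapping loop; real OP-TEE types and table
 * handling are omitted.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE	0x1000UL	/* pretend small-page granule */

struct fake_region {
	const char *name;
	uintptr_t va;
	uintptr_t pa;		/* 0 for a dynamic vaspace region */
	size_t size;
	bool dynamic;
};

static void fake_map_region(struct fake_region *mm)
{
	uintptr_t vaddr = mm->va;
	uintptr_t paddr = mm->pa;
	size_t size_left = mm->size;

	while (size_left) {
		/*
		 * The real code would allocate/initialize a translation
		 * table entry here; for a dynamic vaspace region the entry
		 * is created without a usable output address yet.
		 */
		printf("%s: va 0x%lx -> pa 0x%lx%s\n", mm->name,
		       (unsigned long)vaddr, (unsigned long)paddr,
		       paddr ? "" : " (placeholder, filled in later)");

		if (paddr)	/* don't advance a not-yet-assigned PA */
			paddr += BLOCK_SIZE;
		vaddr += BLOCK_SIZE;
		size_left -= BLOCK_SIZE;
	}
}

int main(void)
{
	struct fake_region regions[] = {
		{ "TEE_RAM", 0x1000000, 0x0e100000, 2 * BLOCK_SIZE, false },
		{ "RES_VASPACE", 0x2000000, 0, 2 * BLOCK_SIZE, true },
	};

	/*
	 * Before the patch the loop skipped dynamic regions, roughly:
	 *	if (regions[n].dynamic) continue;
	 * With the patch every region is passed to the mapping code.
	 */
	for (size_t n = 0; n < sizeof(regions) / sizeof(regions[0]); n++)
		fake_map_region(&regions[n]);

	return 0;
}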