From 3b97c0a114bdc8afe9a2d37802330297e4a2a803 Mon Sep 17 00:00:00 2001
From: Jaroslaw Stelter
Date: Tue, 12 Dec 2023 12:26:49 +0100
Subject: [PATCH 1/2] zephyr: lib: alloc: Use cached memory for L3 Heap

This patch implements the recommended hardware flow for Intel ACE
platforms. The L3 heap should be accessed through cached pointers,
including its management data.

Signed-off-by: Jaroslaw Stelter
---
 zephyr/lib/alloc.c | 76 +++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 68 insertions(+), 8 deletions(-)

diff --git a/zephyr/lib/alloc.c b/zephyr/lib/alloc.c
index 4c4baa325e6e..54317f2d02ec 100644
--- a/zephyr/lib/alloc.c
+++ b/zephyr/lib/alloc.c
@@ -116,8 +116,7 @@ static inline uintptr_t get_l3_heap_start(void)
 	 *  - main_fw_load_offset
 	 *  - main fw size in manifest
 	 */
-	return (uintptr_t)z_soc_uncached_ptr((__sparse_force void __sparse_cache *)
-					     ROUND_UP(IMR_L3_HEAP_BASE, L3_MEM_PAGE_SIZE));
+	return (uintptr_t)(ROUND_UP(IMR_L3_HEAP_BASE, L3_MEM_PAGE_SIZE));
 }
 
 /**
@@ -145,14 +144,50 @@ static bool is_l3_heap_pointer(void *ptr)
 	uintptr_t l3_heap_start = get_l3_heap_start();
 	uintptr_t l3_heap_end = l3_heap_start + get_l3_heap_size();
 
-	if (is_cached(ptr))
-		ptr = z_soc_uncached_ptr((__sparse_force void __sparse_cache *)ptr);
-
 	if ((POINTER_TO_UINT(ptr) >= l3_heap_start) && (POINTER_TO_UINT(ptr) < l3_heap_end))
 		return true;
 
 	return false;
 }
+
+static void *l3_heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
+{
+	k_spinlock_key_t key;
+	void *ret;
+#if CONFIG_SYS_HEAP_RUNTIME_STATS && CONFIG_IPC_MAJOR_4
+	struct sys_memory_stats stats;
+#endif
+	if (!cpu_is_primary(arch_proc_id())) {
+		tr_err(&zephyr_tr, "L3_HEAP available only for primary core!");
+		return NULL;
+	}
+
+	key = k_spin_lock(&h->lock);
+	ret = sys_heap_aligned_alloc(&h->heap, min_align, bytes);
+	k_spin_unlock(&h->lock, key);
+
+#if CONFIG_SYS_HEAP_RUNTIME_STATS && CONFIG_IPC_MAJOR_4
+	sys_heap_runtime_stats_get(&h->heap, &stats);
+	tr_info(&zephyr_tr, "heap allocated: %u free: %u max allocated: %u",
+		stats.allocated_bytes, stats.free_bytes, stats.max_allocated_bytes);
+#endif
+
+	return ret;
+}
+
+static void l3_heap_free(struct k_heap *h, void *mem)
+{
+	if (!cpu_is_primary(arch_proc_id())) {
+		tr_err(&zephyr_tr, "L3_HEAP available only for primary core!");
+		return;
+	}
+
+	k_spinlock_key_t key = k_spin_lock(&h->lock);
+
+	sys_heap_free(&h->heap, mem);
+	k_spin_unlock(&h->lock, key);
+}
+
 #endif
 
 static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
@@ -251,6 +286,17 @@ void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
 	if (caps & SOF_MEM_CAPS_L3) {
 #if CONFIG_L3_HEAP
 		heap = &l3_heap;
+		/* Uncached L3_HEAP should not be used */
+		if (!zone_is_cached(zone)) {
+			tr_err(&zephyr_tr, "L3_HEAP available for cached zones only!");
+			return NULL;
+		}
+		ptr = (__sparse_force void *)l3_heap_alloc_aligned(heap, 0, bytes);
+
+		if (!ptr && zone == SOF_MEM_ZONE_SYS)
+			k_panic();
+
+		return ptr;
 #else
 		k_panic();
 #endif
@@ -335,10 +381,24 @@ EXPORT_SYMBOL(rzalloc);
 void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes,
 		    uint32_t align)
 {
+	struct k_heap *heap;
+
+	/* choose a heap */
+	if (caps & SOF_MEM_CAPS_L3) {
+#if CONFIG_L3_HEAP
+		heap = &l3_heap;
+		return (__sparse_force void *)l3_heap_alloc_aligned(heap, align, bytes);
+#else
+		k_panic();
+#endif
+	} else {
+		heap = &sof_heap;
+	}
+
 	if (flags & SOF_MEM_FLAG_COHERENT)
-		return heap_alloc_aligned(&sof_heap, align, bytes);
+		return heap_alloc_aligned(heap, align, bytes);
 
-	return (__sparse_force void *)heap_alloc_aligned_cached(&sof_heap, align, bytes);
+	return (__sparse_force void *)heap_alloc_aligned_cached(heap, align, bytes);
 }
 
 EXPORT_SYMBOL(rballoc_align);
@@ -352,7 +412,7 @@ void rfree(void *ptr)
 
 #if CONFIG_L3_HEAP
 	if (is_l3_heap_pointer(ptr)) {
-		heap_free(&l3_heap, ptr);
+		l3_heap_free(&l3_heap, ptr);
 		return;
 	}
 #endif
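Usage note (illustration only, not part of the patch): after this change
every L3 allocation returns a cached alias, so the caller owns any cache
maintenance needed before a non-coherent agent (DMA, ROM code on D3 exit)
reads the buffer. A minimal caller-side sketch, assuming the SOF allocator
API from <rtos/alloc.h> and Zephyr's <zephyr/cache.h>; the header paths and
the l3_buffer_example() name are assumptions for illustration:

#include <rtos/alloc.h>    /* rballoc_align(), rfree(), SOF_MEM_CAPS_L3 */
#include <zephyr/cache.h>  /* sys_cache_data_flush_range() */

static void *l3_buffer_example(size_t bytes)
{
	/* Cached, cache-line aligned allocation from the L3 (IMR) heap.
	 * This only succeeds on the primary core; on secondary cores
	 * l3_heap_alloc_aligned() logs an error and returns NULL.
	 */
	void *buf = rballoc_align(0, SOF_MEM_CAPS_L3, bytes,
				  PLATFORM_DCACHE_ALIGN);

	if (!buf)
		return NULL;

	/* ... fill the buffer through the cached alias ... */

	/* Write back before any non-coherent agent reads the buffer. */
	sys_cache_data_flush_range(buf, bytes);

	return buf;	/* release later with rfree(), which detects L3
			 * pointers and routes them to l3_heap_free()
			 */
}
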
From f3fda2105a2e60b457a74f0d14078f8dfb4f5a87 Mon Sep 17 00:00:00 2001
From: Jaroslaw Stelter
Date: Wed, 31 Jan 2024 11:24:55 +0100
Subject: [PATCH 2/2] lib: alloc: Use aligned allocation for L3_HEAP usage

The L3_HEAP is used by the library manager to allocate the library
storage buffer and by the D3 enter/exit flows to allocate the IMR
context storage buffer. Both buffers must be properly aligned, so use
the rballoc_align() routine to obtain them.

Signed-off-by: Jaroslaw Stelter
---
 src/library_manager/lib_manager.c |  8 +++-----
 zephyr/lib/cpu.c                  | 14 ++++++++++----
 2 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/src/library_manager/lib_manager.c b/src/library_manager/lib_manager.c
index a9cd472421c4..0fd6bed40020 100644
--- a/src/library_manager/lib_manager.c
+++ b/src/library_manager/lib_manager.c
@@ -539,16 +539,14 @@ static void __sparse_cache *lib_manager_allocate_store_mem(uint32_t size,
 	void __sparse_cache *local_add;
 #if CONFIG_L3_HEAP
 	uint32_t caps = SOF_MEM_CAPS_L3 | SOF_MEM_CAPS_DMA;
-
-	/* allocate new buffer: cached alias */
-	local_add = (__sparse_force void __sparse_cache *)rmalloc(SOF_MEM_ZONE_SYS, 0, caps, size);
 #else
-	uint32_t addr_align = PAGE_SZ;
 	uint32_t caps = SOF_MEM_CAPS_DMA;
+#endif
+	uint32_t addr_align = PAGE_SZ;
 
 	/* allocate new buffer: cached alias */
 	local_add = (__sparse_force void __sparse_cache *)rballoc_align(0, caps, size, addr_align);
-#endif
+
 	if (!local_add) {
 		tr_err(&lib_manager_tr, "lib_manager_allocate_store_mem(): alloc failed");
 		return NULL;
diff --git a/zephyr/lib/cpu.c b/zephyr/lib/cpu.c
index 3dde19dc38bc..87ca92b9d36f 100644
--- a/zephyr/lib/cpu.c
+++ b/zephyr/lib/cpu.c
@@ -96,10 +96,16 @@ void cpu_notify_state_entry(enum pm_state state)
 		storage_buffer_size += LP_SRAM_SIZE;
 
 		/* allocate IMR buffer and store it in the global pointer */
-		global_imr_ram_storage = rmalloc(SOF_MEM_ZONE_SYS_RUNTIME,
-						 0,
-						 SOF_MEM_CAPS_L3,
-						 storage_buffer_size);
+		global_imr_ram_storage = rballoc_align(0, SOF_MEM_CAPS_L3,
+						       storage_buffer_size,
+						       PLATFORM_DCACHE_ALIGN);
+
+		/* If there is no IMR buffer we cannot recover */
+		if (!global_imr_ram_storage) {
+			tr_err(&zephyr_tr, "failed to allocate global_imr_ram_storage");
+			k_panic();
+		}
+
 #endif /* CONFIG_ADSP_IMR_CONTEXT_SAVE */
 	}
 }
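Alignment note (illustration only, not part of the patch): both call sites
now rely on rballoc_align() honouring the requested alignment, since cache
maintenance on these buffers operates on whole cache lines. A small
self-check sketch, assuming Zephyr's IS_ALIGNED() from <zephyr/sys/util.h>
and __ASSERT() from <zephyr/sys/__assert.h>; the imr_storage_alloc() name
and header paths are assumptions for illustration:

#include <zephyr/kernel.h>        /* k_panic() */
#include <zephyr/sys/util.h>      /* IS_ALIGNED() */
#include <zephyr/sys/__assert.h>  /* __ASSERT() */
#include <rtos/alloc.h>           /* rballoc_align(), SOF_MEM_CAPS_L3 */

static void *imr_storage_alloc(size_t storage_buffer_size)
{
	void *storage = rballoc_align(0, SOF_MEM_CAPS_L3,
				      storage_buffer_size,
				      PLATFORM_DCACHE_ALIGN);

	/* Mirrors the new error handling in cpu.c: without the IMR
	 * context buffer the D3 entry flow cannot recover.
	 */
	if (!storage)
		k_panic();

	/* An unaligned buffer would let cache line writebacks touch
	 * neighbouring allocations.
	 */
	__ASSERT(IS_ALIGNED(storage, PLATFORM_DCACHE_ALIGN),
		 "IMR storage not cache-line aligned");

	return storage;
}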