From de6ed629803db0ba9172f0e0f5a36dcc9e156219 Mon Sep 17 00:00:00 2001
From: Jaroslaw Stelter
Date: Tue, 12 Dec 2023 12:26:49 +0100
Subject: [PATCH] zephyr: lib: alloc: Use cached memory for L3 Heap

This patch implements the recommended hardware flow for Intel ACE
platforms. The L3 heap should be accessed via cached pointers,
including its management data.

Signed-off-by: Jaroslaw Stelter
---
 zephyr/lib/alloc.c | 97 +++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 91 insertions(+), 6 deletions(-)

diff --git a/zephyr/lib/alloc.c b/zephyr/lib/alloc.c
index 4c4baa325e6e..6c63f6ce2330 100644
--- a/zephyr/lib/alloc.c
+++ b/zephyr/lib/alloc.c
@@ -116,8 +116,8 @@ static inline uintptr_t get_l3_heap_start(void)
 	 * - main_fw_load_offset
 	 * - main fw size in manifest
 	 */
-	return (uintptr_t)z_soc_uncached_ptr((__sparse_force void __sparse_cache *)
-					     ROUND_UP(IMR_L3_HEAP_BASE, L3_MEM_PAGE_SIZE));
+	return (uintptr_t)((__sparse_force void __sparse_cache *)
+			   ROUND_UP(IMR_L3_HEAP_BASE, L3_MEM_PAGE_SIZE));
 }
 
 /**
@@ -145,14 +145,89 @@ static bool is_l3_heap_pointer(void *ptr)
 	uintptr_t l3_heap_start = get_l3_heap_start();
 	uintptr_t l3_heap_end = l3_heap_start + get_l3_heap_size();
 
-	if (is_cached(ptr))
-		ptr = z_soc_uncached_ptr((__sparse_force void __sparse_cache *)ptr);
-
 	if ((POINTER_TO_UINT(ptr) >= l3_heap_start) && (POINTER_TO_UINT(ptr) < l3_heap_end))
 		return true;
 
 	return false;
 }
+
+/**
+ * Flush cached L3 heap management data.
+ */
+static inline void l3_heap_flush(struct k_heap *h)
+{
+	if (h == &l3_heap)
+		sys_cache_data_flush_and_invd_range((void *)get_l3_heap_start(),
+						    get_l3_heap_size());
+}
+
+/**
+ * Invalidate cached L3 heap management data.
+ */
+static inline void l3_heap_invalidate(struct k_heap *h)
+{
+	if (h == &l3_heap)
+		sys_cache_data_invd_range((void *)get_l3_heap_start(),
+					  get_l3_heap_size());
+}
+
+static void *l3_heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
+{
+	k_spinlock_key_t key;
+	void *ret;
+#if CONFIG_SYS_HEAP_RUNTIME_STATS && CONFIG_IPC_MAJOR_4
+	struct sys_memory_stats stats;
+#endif
+
+	key = k_spin_lock(&h->lock);
+	l3_heap_invalidate(h);
+	ret = sys_heap_aligned_alloc(&h->heap, min_align, bytes);
+	l3_heap_flush(h);
+	k_spin_unlock(&h->lock, key);
+
+#if CONFIG_SYS_HEAP_RUNTIME_STATS && CONFIG_IPC_MAJOR_4
+	sys_heap_runtime_stats_get(&h->heap, &stats);
+	tr_info(&zephyr_tr, "heap allocated: %u free: %u max allocated: %u",
+		stats.allocated_bytes, stats.free_bytes, stats.max_allocated_bytes);
+#endif
+
+	return ret;
+}
+
+static void __sparse_cache *l3_heap_alloc_aligned_cached(struct k_heap *h,
+							 size_t min_align, size_t bytes)
+{
+	void __sparse_cache *ptr;
+
+	/*
+	 * Zephyr sys_heap stores metadata at the start of each heap
+	 * allocation. To ensure that no allocated cached buffer shares
+	 * a cacheline with the metadata chunk, align both the start
+	 * and the size of the allocation to a cacheline. As cached
+	 * and non-cached allocations are mixed, the same rules must
+	 * be followed for both types of allocation.
+	 */
+	min_align = MAX(PLATFORM_DCACHE_ALIGN, min_align);
+	bytes = ALIGN_UP(bytes, min_align);
+
+	ptr = (__sparse_force void __sparse_cache *)l3_heap_alloc_aligned(h, min_align, bytes);
+	if (ptr)
+		ptr = z_soc_cached_ptr((__sparse_force void *)ptr);
+
+	return ptr;
+}
+
+static void l3_heap_free(struct k_heap *h, void *mem)
+{
+	k_spinlock_key_t key = k_spin_lock(&h->lock);
+
+	l3_heap_invalidate(h);
+	sys_heap_free(&h->heap, mem);
+	l3_heap_flush(h);
+	k_spin_unlock(&h->lock, key);
+}
+
 #endif
 
 static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
@@ -251,6 +326,15 @@ void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
 	if (caps & SOF_MEM_CAPS_L3) {
 #if CONFIG_L3_HEAP
 		heap = &l3_heap;
+		/* Uncached L3_HEAP must not be used */
+		if (!zone_is_cached(zone))
+			k_panic();
+
+		ptr = (__sparse_force void *)l3_heap_alloc_aligned_cached(heap, 0, bytes);
+		if (!ptr && zone == SOF_MEM_ZONE_SYS)
+			k_panic();
+
+		return ptr;
 #else
 		k_panic();
 #endif
@@ -352,7 +436,7 @@ void rfree(void *ptr)
 
 #if CONFIG_L3_HEAP
 	if (is_l3_heap_pointer(ptr)) {
-		heap_free(&l3_heap, ptr);
+		l3_heap_free(&l3_heap, ptr);
 		return;
 	}
 #endif
@@ -367,6 +451,7 @@ static int heap_init(void)
 #if CONFIG_L3_HEAP
 	sys_heap_init(&l3_heap.heap, UINT_TO_POINTER(get_l3_heap_start()),
 		      get_l3_heap_size());
+	l3_heap_flush(&l3_heap);
 #endif
 
 	return 0;
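
A minimal caller-side sketch of the intended usage after this change, for
context. It is not part of the patch: SOF_MEM_ZONE_SYS_RUNTIME is assumed
here to be one of the cached zones accepted by zone_is_cached(), and the
include paths are assumptions that may differ between SOF revisions. Note
that the invalidate-before/flush-after pairing in the patch only keeps the
heap's management data coherent; the payload of a cached buffer still has
to be written back by its user before an uncached or DMA consumer reads it.

#include <rtos/alloc.h>		/* assumed include path for rmalloc()/rfree() */
#include <zephyr/cache.h>	/* sys_cache_data_flush_range() */
#include <stdint.h>
#include <string.h>

static void l3_heap_usage_sketch(void)
{
	/* The zone must be cached: with this patch, requesting
	 * SOF_MEM_CAPS_L3 from an uncached zone panics. */
	uint8_t *buf = rmalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0,
			       SOF_MEM_CAPS_L3, 256);

	if (!buf)
		return;

	/* The returned pointer is a cached alias of IMR memory;
	 * writes land in the dcache first. */
	memset(buf, 0xaa, 256);

	/* Write back the payload explicitly before any uncached or
	 * DMA consumer reads it. */
	sys_cache_data_flush_range(buf, 256);

	/* rfree() recognizes L3 pointers via is_l3_heap_pointer()
	 * and routes them to l3_heap_free(). */
	rfree(buf);
}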