From a8ec3be3c063370dd9381ff4b48b0850eddf4928 Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Thu, 9 Jan 2025 15:41:47 +0200
Subject: [PATCH] core: introduce a system work queue, use it for EDF

Currently the EDF scheduler under Zephyr defines a delayed work queue to
be run on the primary core. However, such a work queue will be useful to
multiple subsystems. Extract it from the EDF scheduler and make it
globally available.

Signed-off-by: Guennadi Liakhovetski
---
 zephyr/CMakeLists.txt        |  1 +
 zephyr/Kconfig               |  6 ++++++
 zephyr/edf_schedule.c        | 26 +++-----------------------
 zephyr/include/sof/lib/cpu.h |  5 +++++
 zephyr/lib/primary.c         | 36 ++++++++++++++++++++++++++++++++++++
 5 files changed, 51 insertions(+), 23 deletions(-)
 create mode 100644 zephyr/lib/primary.c

diff --git a/zephyr/CMakeLists.txt b/zephyr/CMakeLists.txt
index a49a5b0ebcbb..46adcb5454fb 100644
--- a/zephyr/CMakeLists.txt
+++ b/zephyr/CMakeLists.txt
@@ -528,6 +528,7 @@ zephyr_library_sources(
 	lib/alloc.c
 	lib/cpu.c
 	lib/pm_runtime.c
+	lib/primary.c
 
 	# Common library functions - Will be moved to Zephyr over time
 	lib.c
diff --git a/zephyr/Kconfig b/zephyr/Kconfig
index 803d668857f3..957b92aeb2c1 100644
--- a/zephyr/Kconfig
+++ b/zephyr/Kconfig
@@ -89,4 +89,10 @@ config VIRTUAL_HEAP
 	help
 	  Enabling this option will use the virtual memory heap allocator to allocate
 	  buffers. It is based on a set of buffers whose size is predetermined.
+
+config SOF_PRIMARY_WQ_STACK_SZ
+	int "Primary work queue stack size"
+	default 8192
+	help
+	  Stack size for the primary core system work queue.
 endif
diff --git a/zephyr/edf_schedule.c b/zephyr/edf_schedule.c
index ca196842cb1a..51b8e9356633 100644
--- a/zephyr/edf_schedule.c
+++ b/zephyr/edf_schedule.c
@@ -13,9 +13,6 @@
 #include 
 #include 
 
-static struct k_work_q edf_workq;
-static K_THREAD_STACK_DEFINE(edf_workq_stack, 8192);
-
 /*
  * since only IPC is using the EDF scheduler - we schedule the work in the
  * next timer_domain time slice
@@ -39,9 +36,7 @@ static void edf_work_handler(struct k_work *work)
 		if (deadline > now)
 			timeout = K_TICKS(deadline - now);
 
-		k_work_reschedule_for_queue(&edf_workq,
-					    &task->z_delayed_work,
-					    timeout);
+		sof_primary_wq_reschedule(&task->z_delayed_work, timeout);
 		task->state = SOF_TASK_STATE_QUEUED;
 	} else {
 		task_complete(task);
@@ -56,9 +51,7 @@ static int schedule_edf_task(void *data, struct task *task, uint64_t start,
 	/* start time is microseconds from now */
 	k_timeout_t start_time = K_USEC(start + EDF_SCHEDULE_DELAY);
 
-	k_work_reschedule_for_queue(&edf_workq,
-				    &task->z_delayed_work,
-				    start_time);
+	sof_primary_wq_reschedule(&task->z_delayed_work, start_time);
 	task->state = SOF_TASK_STATE_QUEUED;
 
 	return 0;
@@ -100,22 +93,9 @@ static struct scheduler_ops schedule_edf_ops = {
 
 int scheduler_init_edf(void)
 {
-	struct k_thread *thread = &edf_workq.thread;
-
 	scheduler_init(SOF_SCHEDULE_EDF, &schedule_edf_ops, NULL);
 
-	k_work_queue_start(&edf_workq,
-			   edf_workq_stack,
-			   K_THREAD_STACK_SIZEOF(edf_workq_stack),
-			   EDF_ZEPHYR_PRIORITY, NULL);
-
-	k_thread_suspend(thread);
-
-	k_thread_cpu_mask_clear(thread);
-	k_thread_cpu_mask_enable(thread, PLATFORM_PRIMARY_CORE_ID);
-	k_thread_name_set(thread, "edf_workq");
-
-	k_thread_resume(thread);
+	sof_primary_wq_init();
 
 	return 0;
 }
diff --git a/zephyr/include/sof/lib/cpu.h b/zephyr/include/sof/lib/cpu.h
index c23405e85121..632ba5ac5cd3 100644
--- a/zephyr/include/sof/lib/cpu.h
+++ b/zephyr/include/sof/lib/cpu.h
@@ -93,6 +93,11 @@ static inline int cpu_secondary_cores_prepare_d0ix(void) { return 0; };
 
 #endif /* CONFIG_MULTICORE && CONFIG_SMP */
 
+#include 
+struct k_work_delayable;
+void sof_primary_wq_init(void);
+void sof_primary_wq_reschedule(struct k_work_delayable *dwork, k_timeout_t delay);
+
 #endif
 
 #endif /* __SOF_LIB_CPU_H__ */
diff --git a/zephyr/lib/primary.c b/zephyr/lib/primary.c
new file mode 100644
index 000000000000..49ece45f2e6d
--- /dev/null
+++ b/zephyr/lib/primary.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright(c) 2025 Intel Corporation.
+ */
+
+#include 
+#include 
+
+#include 
+
+static struct k_work_q sof_prime_wq;
+static K_THREAD_STACK_DEFINE(sof_prime_wq_stack, CONFIG_SOF_PRIMARY_WQ_STACK_SZ);
+static atomic_val_t sof_prime_wq_init;
+
+void sof_primary_wq_reschedule(struct k_work_delayable *dwork, k_timeout_t delay)
+{
+	k_work_reschedule_for_queue(&sof_prime_wq, dwork, delay);
+}
+
+void sof_primary_wq_init(void)
+{
+	if (atomic_set(&sof_prime_wq_init, 1))
+		return;
+
+	struct k_thread *thread = &sof_prime_wq.thread;
+
+	k_work_queue_start(&sof_prime_wq, sof_prime_wq_stack,
+			   K_THREAD_STACK_SIZEOF(sof_prime_wq_stack), 1, NULL);
+
+	k_thread_suspend(thread);
+
+	k_thread_cpu_pin(thread, PLATFORM_PRIMARY_CORE_ID);
+	k_thread_name_set(thread, "sof_prime_wq");
+
+	k_thread_resume(thread);
+}
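
Illustrative usage (not part of the patch): a minimal sketch of how another
subsystem could hand delayed work to the new primary-core work queue. The
handler, work item, start function, delay and include paths below are
assumptions made for the example only.

#include <zephyr/kernel.h>
#include <sof/lib/cpu.h>

/* hypothetical handler; runs in the primary-core work queue thread */
static void my_subsys_work_handler(struct k_work *work)
{
	ARG_UNUSED(work);

	/* do the deferred work here */
}

static K_WORK_DELAYABLE_DEFINE(my_subsys_work, my_subsys_work_handler);

static void my_subsys_start(void)
{
	/* safe to call repeatedly: initialization is guarded by an atomic flag */
	sof_primary_wq_init();

	/* (re)arm the work item to run in about 1 ms on the primary core */
	sof_primary_wq_reschedule(&my_subsys_work, K_MSEC(1));
}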