88#include <sof/audio/component.h>
99#include <sof/audio/module_adapter/module/generic.h>
1010#include <rtos/task.h>
11+ #include <rtos/userspace_helper.h>
1112#include <stdint.h>
1213#include <sof/schedule/dp_schedule.h>
1314#include <sof/schedule/ll_schedule.h>
1718#include <rtos/interrupt.h>
1819#include <zephyr/kernel.h>
1920#include <zephyr/sys_clock.h>
21+ #include <zephyr/sys/sem.h>
22+ #include <zephyr/sys/mutex.h>
2023#include <sof/lib/notifier.h>
2124#include <ipc4/base_fw.h>
2225
@@ -34,20 +37,59 @@ struct scheduler_dp_data {
3437
/* Per-task private data of the DP scheduler.
 *
 * NOTE: `thread` and `sem` are pointers so the same code path can serve both
 * privilege levels: for kernel-mode tasks they point at the embedded
 * thread_struct/sem_struct members below, while for CONFIG_USERSPACE
 * user-mode tasks they are re-pointed to dynamically allocated kernel
 * objects (k_object_alloc) in scheduler_dp_task_init().
 */
struct task_dp_pdata {
	k_tid_t thread_id;		/* zephyr thread ID */
	struct k_thread *thread;	/* pointer to the kernel's thread object */
	struct k_thread thread_struct;	/* thread object for kernel threads */
	uint32_t deadline_clock_ticks;	/* dp module deadline in Zephyr ticks */
	k_thread_stack_t __sparse_cache *p_stack;	/* pointer to thread stack */
	size_t stack_size;		/* size of the stack in bytes */
	struct k_sem *sem;		/* pointer to semaphore for task scheduling */
	struct k_sem sem_struct;	/* semaphore for task scheduling for kernel threads */
	struct processing_module *mod;	/* the module to be scheduled */
	uint32_t ll_cycles_to_start;	/* current number of LL cycles till delayed start */
};
4550
51+ #ifdef CONFIG_USERSPACE
52+ /* Single CPU-wide lock
53+ * The irq_lock is not available for USERSPACE (non-privileged) threads.
54+ * Therefore semaphore is used to control critical section.
55+ */
56+ #define DP_LOCK_INIT (i , _ ) Z_SEM_INITIALIZER(dp_lock[i], 1, 1)
57+ #define DP_LOCK_INIT_LIST LISTIFY(CONFIG_MP_MAX_NUM_CPUS, DP_LOCK_INIT, (,))
58+
59+ /* User threads don't need access to this array. Access is performed from
60+ * the kernel space via a syscall. Array must be placed in special section
61+ * to be qualified as initialized by the gen_kobject_list.py script.
62+ */
63+ static
64+ STRUCT_SECTION_ITERABLE_ARRAY (k_sem , dp_lock , CONFIG_MP_MAX_NUM_CPUS ) = { DP_LOCK_INIT_LIST };
65+
66+ static inline unsigned int scheduler_dp_lock (uint16_t core )
67+ {
68+ k_sem_take (& dp_lock [core ], K_FOREVER );
69+ return core ;
70+ }
71+
72+ static inline void scheduler_dp_unlock (unsigned int key )
73+ {
74+ k_sem_give (& dp_lock [key ]);
75+ }
76+
77+ static inline void scheduler_dp_grant (k_tid_t thread_id , uint16_t core )
78+ {
79+ k_thread_access_grant (thread_id , & dp_lock [core ]);
80+ }
81+
82+ #else /* CONFIG_USERSPACE */
83+
84+ static inline void scheduler_dp_grant (k_tid_t thread_id , uint16_t core )
85+ {
86+ }
87+
4688/* Single CPU-wide lock
4789 * as each per-core instance if dp-scheduler has separate structures, it is enough to
4890 * use irq_lock instead of cross-core spinlocks
4991 */
50- static inline unsigned int scheduler_dp_lock (void )
92+ static inline unsigned int scheduler_dp_lock (uint16_t core )
5193{
5294 return irq_lock ();
5395}
@@ -56,6 +98,7 @@ static inline void scheduler_dp_unlock(unsigned int key)
5698{
5799 irq_unlock (key );
58100}
101+ #endif
59102
60103/* dummy LL task - to start LL on secondary cores */
61104static enum task_state scheduler_dp_ll_tick_dummy (void * data )
@@ -226,7 +269,7 @@ void scheduler_dp_ll_tick(void *receiver_data, enum notify_id event_type, void *
226269 unsigned int lock_key ;
227270 struct scheduler_dp_data * dp_sch = scheduler_get_data (SOF_SCHEDULE_DP );
228271
229- lock_key = scheduler_dp_lock ();
272+ lock_key = scheduler_dp_lock (cpu_get_id () );
230273 list_for_item (tlist , & dp_sch -> tasks ) {
231274 curr_task = container_of (tlist , struct task , list );
232275 pdata = curr_task -> priv_data ;
@@ -256,7 +299,7 @@ void scheduler_dp_ll_tick(void *receiver_data, enum notify_id event_type, void *
256299
257300 /* trigger the task */
258301 curr_task -> state = SOF_TASK_STATE_RUNNING ;
259- k_sem_give (& pdata -> sem );
302+ k_sem_give (pdata -> sem );
260303 }
261304 }
262305 }
@@ -271,7 +314,7 @@ static int scheduler_dp_task_cancel(void *data, struct task *task)
271314
272315
	/* this is an async cancel - mark the task as canceled and remove it from scheduling */
274- lock_key = scheduler_dp_lock ();
317+ lock_key = scheduler_dp_lock (cpu_get_id () );
275318
276319 task -> state = SOF_TASK_STATE_CANCEL ;
277320 list_item_del (& task -> list );
@@ -281,7 +324,7 @@ static int scheduler_dp_task_cancel(void *data, struct task *task)
281324 schedule_task_cancel (& dp_sch -> ll_tick_src );
282325
283326 /* if the task is waiting on a semaphore - let it run and self-terminate */
284- k_sem_give (& pdata -> sem );
327+ k_sem_give (pdata -> sem );
285328 scheduler_dp_unlock (lock_key );
286329
287330 /* wait till the task has finished, if there was any task created */
@@ -294,6 +337,7 @@ static int scheduler_dp_task_cancel(void *data, struct task *task)
294337static int scheduler_dp_task_free (void * data , struct task * task )
295338{
296339 struct task_dp_pdata * pdata = task -> priv_data ;
340+ int ret ;
297341
298342 scheduler_dp_task_cancel (data , task );
299343
@@ -305,12 +349,19 @@ static int scheduler_dp_task_free(void *data, struct task *task)
305349 pdata -> thread_id = NULL ;
306350 }
307351
352+ #ifdef CONFIG_USERSPACE
353+ if (pdata -> sem != & pdata -> sem_struct )
354+ k_object_free (pdata -> sem );
355+ if (pdata -> thread != & pdata -> thread_struct )
356+ k_object_free (pdata -> thread );
357+ #endif
358+
308359 /* free task stack */
309- rfree ((__sparse_force void * )pdata -> p_stack );
360+ ret = user_stack_free ((__sparse_force void * )pdata -> p_stack );
310361 pdata -> p_stack = NULL ;
311362
312363 /* all other memory has been allocated as a single malloc, will be freed later by caller */
313- return 0 ;
364+ return ret ;
314365}
315366
316367/* Thread function called in component context, on target core */
@@ -329,14 +380,14 @@ static void dp_thread_fn(void *p1, void *p2, void *p3)
329380 * the thread is started immediately after creation, it will stop on semaphore
330381 * Semaphore will be released once the task is ready to process
331382 */
332- k_sem_take (& task_pdata -> sem , K_FOREVER );
383+ k_sem_take (task_pdata -> sem , K_FOREVER );
333384
334385 if (task -> state == SOF_TASK_STATE_RUNNING )
335386 state = task_run (task );
336387 else
337388 state = task -> state ; /* to avoid undefined variable warning */
338389
339- lock_key = scheduler_dp_lock ();
390+ lock_key = scheduler_dp_lock (task -> core );
340391 /*
341392 * check if task is still running, may have been canceled by external call
342393 * if not, set the state returned by run procedure
@@ -382,7 +433,7 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t sta
382433 uint64_t deadline_clock_ticks ;
383434 int ret ;
384435
385- lock_key = scheduler_dp_lock ();
436+ lock_key = scheduler_dp_lock (cpu_get_id () );
386437
387438 if (task -> state != SOF_TASK_STATE_INIT &&
388439 task -> state != SOF_TASK_STATE_CANCEL &&
@@ -392,19 +443,36 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t sta
392443 }
393444
394445 /* create a zephyr thread for the task */
395- pdata -> thread_id = k_thread_create (& pdata -> thread , (__sparse_force void * )pdata -> p_stack ,
446+ pdata -> thread_id = k_thread_create (pdata -> thread , (__sparse_force void * )pdata -> p_stack ,
396447 pdata -> stack_size , dp_thread_fn , task , NULL , NULL ,
397- CONFIG_DP_THREAD_PRIORITY , K_USER , K_FOREVER );
448+ CONFIG_DP_THREAD_PRIORITY , task -> flags , K_FOREVER );
449+ if (!pdata -> thread_id ) {
450+ tr_err (& dp_tr , "DP thread creation failed" );
451+ scheduler_dp_unlock (lock_key );
452+ return - ECHILD ;
453+ }
398454
455+ k_thread_access_grant (pdata -> thread_id , pdata -> sem );
456+ scheduler_dp_grant (pdata -> thread_id , cpu_get_id ());
399457 /* pin the thread to specific core */
400458 ret = k_thread_cpu_pin (pdata -> thread_id , task -> core );
401459 if (ret < 0 ) {
402460 tr_err (& dp_tr , "zephyr task pin to core failed" );
403461 goto err ;
404462 }
405463
406- /* start the thread, it should immediately stop at a semaphore, so clean it */
407- k_sem_reset (& pdata -> sem );
464+ #ifdef CONFIG_USERSPACE
465+ if (task -> flags & K_USER ) {
466+ ret = user_memory_init_shared (pdata -> thread_id , pdata -> mod );
467+ if (ret < 0 ) {
468+ tr_err (& dp_tr , "user_memory_init_shared() failed" );
469+ goto err ;
470+ }
471+ }
472+ #endif /* CONFIG_USERSPACE */
473+
474+ /* start the thread, it should immediately stop at a semaphore, so clean it */
475+ k_sem_init (pdata -> sem , 0 , 1 );
408476 k_thread_start (pdata -> thread_id );
409477
410478 /* if there's no DP tasks scheduled yet, run ll tick source task */
@@ -474,7 +542,8 @@ int scheduler_dp_task_init(struct task **task,
474542 const struct task_ops * ops ,
475543 struct processing_module * mod ,
476544 uint16_t core ,
477- size_t stack_size )
545+ size_t stack_size ,
546+ uint32_t options )
478547{
479548 void __sparse_cache * p_stack = NULL ;
480549 struct sys_heap * const user_heap = mod -> dev -> drv -> user_heap ;
@@ -505,9 +574,7 @@ int scheduler_dp_task_init(struct task **task,
505574 }
506575
507576 /* allocate stack - must be aligned and cached so a separate alloc */
508- stack_size = Z_KERNEL_STACK_SIZE_ADJUST (stack_size );
509- p_stack = (__sparse_force void __sparse_cache * )
510- rballoc_align (SOF_MEM_FLAG_KERNEL , stack_size , Z_KERNEL_STACK_OBJ_ALIGN );
577+ p_stack = user_stack_allocate (stack_size , options );
511578 if (!p_stack ) {
512579 tr_err (& dp_tr , "stack alloc failed" );
513580 ret = - ENOMEM ;
@@ -516,33 +583,57 @@ int scheduler_dp_task_init(struct task **task,
516583
517584 /* internal SOF task init */
518585 ret = schedule_task_init (& task_memory -> task , uid , SOF_SCHEDULE_DP , 0 , ops -> run ,
519- mod , core , 0 );
586+ mod , core , options );
520587 if (ret < 0 ) {
521588 tr_err (& dp_tr , "schedule_task_init failed" );
522589 goto err ;
523590 }
524591
592+ /* Point to ksem semaphore for kernel threads synchronization */
593+ /* It will be overwritten for K_USER threads to dynamic ones. */
594+ task_memory -> pdata .sem = & task_memory -> pdata .sem_struct ;
595+ task_memory -> pdata .thread = & task_memory -> pdata .thread_struct ;
596+
597+ #ifdef CONFIG_USERSPACE
598+ if (options & K_USER ) {
599+ task_memory -> pdata .sem = k_object_alloc (K_OBJ_SEM );
600+ if (!task_memory -> pdata .sem ) {
601+ tr_err (& dp_tr , "Semaphore object allocation failed" );
602+ ret = - ENOMEM ;
603+ goto err ;
604+ }
605+
606+ task_memory -> pdata .thread = k_object_alloc (K_OBJ_THREAD );
607+ if (!task_memory -> pdata .thread ) {
608+ tr_err (& dp_tr , "Thread object allocation failed" );
609+ ret = - ENOMEM ;
610+ goto err ;
611+ }
612+ }
613+ #endif /* CONFIG_USERSPACE */
614+
525615 /* initialize other task structures */
526616 task_memory -> task .ops .complete = ops -> complete ;
527617 task_memory -> task .ops .get_deadline = ops -> get_deadline ;
528618 task_memory -> task .state = SOF_TASK_STATE_INIT ;
529619 task_memory -> task .core = core ;
530620
531- /* initialize semaprhore */
532- k_sem_init (& task_memory -> pdata .sem , 0 , 1 );
533-
534621 /* success, fill the structures */
535622 task_memory -> task .priv_data = & task_memory -> pdata ;
536623 task_memory -> pdata .p_stack = p_stack ;
537624 task_memory -> pdata .stack_size = stack_size ;
538625 task_memory -> pdata .mod = mod ;
539626 * task = & task_memory -> task ;
540627
541-
542628 return 0 ;
543629err :
544630 /* cleanup - free all allocated resources */
545- rfree ((__sparse_force void * )p_stack );
631+ if (user_stack_free ((__sparse_force void * )p_stack ))
632+ tr_err (& dp_tr , "user_stack_free failed!" );
633+
634+ /* k_object_free looks for a pointer in the list, any invalid value can be passed */
635+ k_object_free (task_memory -> pdata .sem );
636+ k_object_free (task_memory -> pdata .thread );
546637 module_driver_heap_free (user_heap , task_memory );
547638 return ret ;
548639}
@@ -555,7 +646,7 @@ void scheduler_get_task_info_dp(struct scheduler_props *scheduler_props, uint32_
555646 struct scheduler_dp_data * dp_sch =
556647 (struct scheduler_dp_data * )scheduler_get_data (SOF_SCHEDULE_DP );
557648
558- lock_key = scheduler_dp_lock ();
649+ lock_key = scheduler_dp_lock (cpu_get_id () );
559650 scheduler_get_task_info (scheduler_props , data_off_size , & dp_sch -> tasks );
560651 scheduler_dp_unlock (lock_key );
561652}
0 commit comments