Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions src/include/sof/ipc/common.h
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,7 @@ struct ipc {

#ifdef __ZEPHYR__
struct k_work_delayable z_delayed_work;
struct k_work_q ipc_send_wq;
#endif

void *private;
Expand Down
11 changes: 4 additions & 7 deletions src/ipc/ipc-common.c
Original file line number Diff line number Diff line change
Expand Up @@ -171,7 +171,6 @@ void ipc_send_queued_msg(void)
}

#ifdef __ZEPHYR__
static struct k_work_q ipc_send_wq;
static K_THREAD_STACK_DEFINE(ipc_send_wq_stack, CONFIG_STACK_SIZE_IPC_TX);
#endif

Expand All @@ -184,7 +183,7 @@ static void schedule_ipc_worker(void)
#ifdef __ZEPHYR__
struct ipc *ipc = ipc_get();

k_work_schedule_for_queue(&ipc_send_wq, &ipc->z_delayed_work, K_USEC(IPC_PERIOD_USEC));
k_work_schedule_for_queue(&ipc->ipc_send_wq, &ipc->z_delayed_work, K_USEC(IPC_PERIOD_USEC));
#endif
}

Expand Down Expand Up @@ -306,12 +305,10 @@ int ipc_init(struct sof *sof)
#endif

#ifdef __ZEPHYR__
struct k_thread *thread = &ipc_send_wq.thread;
struct k_thread *thread = &sof->ipc->ipc_send_wq.thread;

k_work_queue_start(&ipc_send_wq,
ipc_send_wq_stack,
K_THREAD_STACK_SIZEOF(ipc_send_wq_stack),
1, NULL);
k_work_queue_start(&sof->ipc->ipc_send_wq, ipc_send_wq_stack,
K_THREAD_STACK_SIZEOF(ipc_send_wq_stack), 1, NULL);

k_thread_suspend(thread);

Expand Down
54 changes: 28 additions & 26 deletions src/ipc/ipc4/logging.c
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
#include <ipc4/base_fw.h>
#include <ipc4/error_status.h>
#include <ipc4/logging.h>
#include <rtos/kernel.h>
#if !CONFIG_LIBRARY
#include <zephyr/logging/log_backend.h>
#include <zephyr/logging/log.h>
Expand Down Expand Up @@ -51,19 +52,18 @@ LOG_MODULE_REGISTER(mtrace, CONFIG_SOF_LOG_LEVEL);
*/
#define IPC4_MTRACE_AGING_TIMER_MIN_MS 100

SOF_DEFINE_REG_UUID(mtrace_task);

static uint64_t mtrace_notify_last_sent;

static uint32_t mtrace_bytes_pending;

static uint32_t mtrace_aging_timer = IPC4_MTRACE_NOTIFY_AGING_TIMER_MS;

struct task mtrace_task;

#define MTRACE_IPC_CORE PLATFORM_PRIMARY_CORE_ID

static void mtrace_log_hook(size_t written, size_t space_left)
static struct k_mutex log_mutex;
static struct k_work_delayable log_work;

static void mtrace_log_hook_unlocked(size_t written, size_t space_left)
{
uint64_t delta;

Expand All @@ -84,36 +84,41 @@ static void mtrace_log_hook(size_t written, size_t space_left)
ipc_send_buffer_status_notify();
mtrace_notify_last_sent = k_uptime_get();
mtrace_bytes_pending = 0;
} else if (mtrace_bytes_pending) {
k_work_schedule_for_queue(&ipc_get()->ipc_send_wq, &log_work,
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This looks good. The ipc send_wq is pinned to core 0, so this does indeed work.

K_MSEC(mtrace_aging_timer - delta));
}
}

static enum task_state mtrace_task_run(void *data)
static void mtrace_log_hook(size_t written, size_t space_left)
{
if (k_uptime_get() - mtrace_notify_last_sent >= mtrace_aging_timer &&
mtrace_bytes_pending)
mtrace_log_hook(0, 0);
k_mutex_lock(&log_mutex, K_FOREVER);

mtrace_log_hook_unlocked(written, space_left);

/* task will be re-run based on mtrace_task_deadline() */
return SOF_TASK_STATE_RESCHEDULE;
k_mutex_unlock(&log_mutex);
}

static uint64_t mtrace_task_deadline(void *data)
static void log_work_handler(struct k_work *work)
{
return k_uptime_ticks() + k_ms_to_ticks_ceil64(mtrace_aging_timer);
k_mutex_lock(&log_mutex, K_FOREVER);

if (k_uptime_get() - mtrace_notify_last_sent >= mtrace_aging_timer &&
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why do you need to check this? Shouldn't Zephyr's delayable work call this handler no sooner than the timeout?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

.. and there is yet another check in mtrace_log_hook_unlocked(). This check looks quite redundant to me too.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think both are needed. This one is needed because it could happen that after the work was queued another log was issued and then the space was tight so an IPC got sent out and the time updated, so now the time isn't larger than the threshold. And the other one is needed for normal hook operation - if it isn't called from the delayed work, but enough time has passed since the last notification, we'll notify now.

mtrace_bytes_pending)
mtrace_log_hook_unlocked(0, 0);

k_mutex_unlock(&log_mutex);
}

static struct k_work_sync ipc4_log_work_sync;

int ipc4_logging_enable_logs(bool first_block,
bool last_block,
uint32_t data_offset_or_size,
const char *data)
{
const struct log_backend *log_backend = log_backend_adsp_mtrace_get();
const struct ipc4_log_state_info *log_state;
struct task_ops ops = {
.run = mtrace_task_run,
.get_deadline = mtrace_task_deadline,
.complete = NULL,
};

if (!(first_block && last_block)) {
LOG_ERR("log_state data is expected to be sent as one chunk");
Expand All @@ -136,6 +141,9 @@ int ipc4_logging_enable_logs(bool first_block,
if (log_state->enable) {
adsp_mtrace_log_init(mtrace_log_hook);

k_mutex_init(&log_mutex);
k_work_init_delayable(&log_work, log_work_handler);

log_backend_activate(log_backend, mtrace_log_hook);

mtrace_aging_timer = log_state->aging_timer_period;
Expand All @@ -144,14 +152,10 @@ int ipc4_logging_enable_logs(bool first_block,
LOG_WRN("Too small aging timer value, limiting to %u\n",
mtrace_aging_timer);
}

schedule_task_init_edf(&mtrace_task, SOF_UUID(mtrace_task_uuid),
&ops, NULL, MTRACE_IPC_CORE, 0);
schedule_task(&mtrace_task, mtrace_aging_timer * 1000, 0);
} else {
k_work_flush_delayable(&log_work, &ipc4_log_work_sync);
adsp_mtrace_log_init(NULL);
log_backend_deactivate(log_backend);
schedule_task_cancel(&mtrace_task);
}

return 0;
Expand Down Expand Up @@ -208,9 +212,7 @@ int ipc4_logging_enable_logs(bool first_block,

int ipc4_logging_shutdown(void)
{
struct ipc4_log_state_info log_state = { 0 };

/* log_state.enable set to 0 above */
struct ipc4_log_state_info log_state = { .enable = 0, };

return ipc4_logging_enable_logs(true, true, sizeof(log_state), (char *)&log_state);
}
1 change: 0 additions & 1 deletion uuid-registry.txt
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,6 @@ bc06c037-12aa-417c-9a9789282e321a76 mixer
39656eb2-3b71-4049-8d3ff92cd5c43c09 mixin
3c56505a-24d7-418f-bddcc1f5a3ac2ae0 mixout
ee2585f2-e7d8-43dc-90ab4224e00c3e84 modules
bb2aa22e-1ab6-4650-85016e67fcc04f4e mtrace_task
0d9f2256-8e4f-47b3-8448239a334f1191 multiband_drc
c607ff4d-9cb6-49dc-b6787da3c63ea557 mux
64ce6e35-857a-4878-ace8e2a2f42e3069 mux4
Expand Down
Loading