Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
88 changes: 69 additions & 19 deletions include/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -6,33 +6,83 @@
#ifndef SCHED_H_
#define SCHED_H_

#include <thread.h>
#include <types.h>

typedef enum {
SSI_SOFTIRQ, /* Kernel thread */
SSI_INTR_THREAD,
SSI_ROOT_THREAD,
SSI_IPC_THREAD,
SSI_NORMAL_THREAD,
SSI_IDLE,
/* Forward declaration - full definition in thread.h */
struct tcb;

NUM_SCHED_SLOTS
} sched_slot_id_t;
/**
* @file sched.h
* @brief Priority Bitmap Scheduler
*
* 32-level priority scheduler with O(1) highest-priority selection.
* Uses Cortex-M CLZ instruction for efficient bitmap scanning.
*
* Priority 0 is highest, priority 31 is lowest (idle).
* Multiple threads at same priority use round-robin scheduling.
*/

/* Number of priority levels (0 = highest, 31 = lowest) */
#define SCHED_PRIORITY_LEVELS 32

struct sched_slot;
/* Priority assignments for system threads */
#define SCHED_PRIO_SOFTIRQ 0 /* Kernel softirq thread */
#define SCHED_PRIO_INTR 1 /* Interrupt handler threads */
#define SCHED_PRIO_ROOT 2 /* Root thread */
#define SCHED_PRIO_IPC 3 /* IPC fast path */
#define SCHED_PRIO_NORMAL_MIN 4 /* Normal threads start here */
#define SCHED_PRIO_NORMAL_MAX 30 /* Normal threads end here */
#define SCHED_PRIO_IDLE 31 /* Idle thread (always lowest) */

typedef tcb_t *(*sched_handler_t)(struct sched_slot *slot);
/* Default priority for user threads */
#define SCHED_PRIO_DEFAULT 16

typedef struct sched_slot {
tcb_t *ss_scheduled;
sched_handler_t ss_handler;
} sched_slot_t;
/**
 * Linked list node for a scheduler ready queue.
 *
 * Intrusive doubly-linked list node: it is embedded directly in the
 * TCB (see struct tcb in thread.h), so enqueueing a thread never
 * allocates memory.
 */
typedef struct sched_link {
	struct sched_link *prev, *next;	/* neighbours in the same priority-level ready queue */
} sched_link_t;

/* Scheduler initialization */
void sched_init(void);

tcb_t* schedule_select(void);
/* Core scheduling functions */
struct tcb *schedule_select(void);
int schedule(void);
void sched_slot_dispatch(sched_slot_id_t slot_id, tcb_t *thread);
void sched_slot_set_handler(sched_slot_id_t slot_id, sched_handler_t handler);

/**
* Enqueue thread to ready queue at its priority level.
* Thread must have valid priority set.
*/
void sched_enqueue(struct tcb *thread);

/**
* Dequeue thread from ready queue.
* Called when thread blocks or is destroyed.
*/
void sched_dequeue(struct tcb *thread);

/**
* Check if thread is currently in a ready queue.
*/
int sched_is_queued(struct tcb *thread);

/**
* Yield current thread's timeslice.
* Rotates thread to back of its priority queue for round-robin.
*/
void sched_yield(void);

/**
* Change thread priority safely.
* Handles queue migration atomically if thread is queued.
* Use this instead of directly modifying thread->priority.
*
* @param thread Thread to modify
* @param new_prio New priority level (0 = highest, 31 = lowest)
*/
void sched_set_priority(struct tcb *thread, uint8_t new_prio);

#endif /* SCHED_H_ */
23 changes: 16 additions & 7 deletions include/thread.h
Original file line number Diff line number Diff line change
Expand Up @@ -85,13 +85,22 @@ typedef struct {
* Contains pointers to thread's UTCB (User TCB) and address space
*/
struct tcb {
l4_thread_t t_globalid;
l4_thread_t t_localid;

thread_state_t state;

memptr_t stack_base;
size_t stack_size;
/* Hot scheduler fields - Cache Line 0 */
struct {
struct tcb *prev, *next;
} sched_link; /* 8 bytes (0-7) */

thread_state_t state; /* 4 bytes (8-11) */
uint8_t priority; /* 1 byte (12) - effective priority */
uint8_t base_priority; /* 1 byte (13) - natural priority */
uint8_t _sched_pad[2]; /* 2 bytes (14-15) - Alignment */

l4_thread_t t_globalid; /* 4 bytes (16-19) */
l4_thread_t t_localid; /* 4 bytes (20-23) */

memptr_t stack_base; /* 4 bytes (24-27) */
size_t stack_size; /* 4 bytes (28-31) */
/* End of Cache Line 0 (32 bytes) */

context_t ctx;

Expand Down
38 changes: 7 additions & 31 deletions kernel/interrupt.c
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
#include <thread.h>
#include <ipc.h>
#include <sched.h>
#include <platform/irq.h>
#include <interrupt.h>
#include <interrupt_ipc.h>
Expand Down Expand Up @@ -72,18 +73,6 @@ static void user_irq_queue_push(struct user_irq *uirq)
}
}

/*
 * Remove and return the oldest pending user IRQ from the queue.
 *
 * Returns NULL when the queue is empty.  The popped node's next
 * pointer is cleared so the node can safely be re-queued later.
 */
static struct user_irq *user_irq_queue_pop(void)
{
	struct user_irq *head;

	if (user_irq_queue_is_empty())
		return NULL;

	head = user_irq_queue.head;
	user_irq_queue.head = head->next;
	head->next = NULL;
	return head;
}

static void user_irq_queue_delete(int irq)
{
struct user_irq *uirq = user_irqs[irq];
Expand Down Expand Up @@ -173,6 +162,11 @@ static int irq_handler_enable(int irq)

irq_handler_ipc(uirq);

/* Wake up the interrupt thread directly */
thr->priority = SCHED_PRIO_INTR;
thr->state = T_RUNNABLE;
sched_enqueue(thr);

return 0;
}

Expand All @@ -191,24 +185,6 @@ static void irq_schedule(int irq)
irq_handler_enable(irq);
}

/*
 * Scheduler handler for the interrupt-thread slot.
 *
 * With interrupts disabled, pops one pending user IRQ; if its handler
 * thread is blocked waiting for the IRQ IPC (T_RECV_BLOCKED), marks
 * the thread runnable and dispatches it into the SSI_INTR_THREAD slot.
 *
 * Returns the IRQ's handler thread, or NULL when no IRQ was pending
 * (or the popped IRQ has no thread attached).  Note the thread is
 * returned even when it was not in T_RECV_BLOCKED state, matching the
 * slot-handler contract.
 */
static tcb_t *irq_handler_sched(struct sched_slot *slot)
{
	struct user_irq *uirq;
	tcb_t *thread = NULL;

	irq_disable();

	uirq = user_irq_queue_pop();
	if (uirq != NULL)
		thread = uirq->thr;

	if (thread != NULL && thread->state == T_RECV_BLOCKED) {
		thread->state = T_RUNNABLE;
		sched_slot_dispatch(SSI_INTR_THREAD, thread);
	}

	irq_enable();

	return thread;
}

void __interrupt_handler(int irq)
{
struct user_irq *uirq = user_irq_fetch(irq);
Expand All @@ -227,7 +203,6 @@ void __interrupt_handler(int irq)
void interrupt_init(void)
{
user_irq_reset_all();
sched_slot_set_handler(SSI_INTR_THREAD, irq_handler_sched);
}

INIT_HOOK(interrupt_init, INIT_LEVEL_KERNEL_EARLY);
Expand Down Expand Up @@ -300,6 +275,7 @@ void user_interrupt_handler_update(tcb_t *thr)
/* reply ipc immediately */
irq_handler_ipc(uirq);
thr->state = T_RUNNABLE;
sched_enqueue(thr);
break;
}
break;
Expand Down
53 changes: 36 additions & 17 deletions kernel/ipc.c
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,16 @@ extern tcb_t *caller;
extern tcb_t *thread_map[];
extern int thread_count;

/**
 * Mark a thread runnable and hand it to the scheduler.
 *
 * Helper used after IPC operations that unblock a thread.  The state
 * is updated before sched_enqueue() is called — NOTE(review): assumed
 * the ready queue must only hold T_RUNNABLE threads; keep this order.
 */
static inline void thread_make_runnable(tcb_t *thread)
{
	thread->state = T_RUNNABLE;
	sched_enqueue(thread);
}

uint32_t ipc_read_mr(tcb_t *from, int i)
{
if (i >= 8)
Expand Down Expand Up @@ -179,25 +189,26 @@ static void do_ipc(tcb_t *from, tcb_t *to)

to->utcb->sender = from->t_globalid;

to->state = T_RUNNABLE;
/* Temporarily boost receiver priority for IPC fast path.
* base_priority is preserved; effective priority restored
* when thread is descheduled (in thread_switch).
*/
sched_set_priority(to, SCHED_PRIO_IPC);
thread_make_runnable(to);
to->ipc_from = L4_NILTHREAD;
((uint32_t *) to->ctx.sp)[REG_R0] = from->t_globalid;

/* If from has receive phases, lock myself */
from_recv_tid = ((uint32_t *) from->ctx.sp)[REG_R1];
if (from_recv_tid == L4_NILTHREAD) {
from->state = T_RUNNABLE;
thread_make_runnable(from);
} else {
from->state = T_RECV_BLOCKED;
from->ipc_from = from_recv_tid;

dbg_printf(DL_IPC, "IPC: %t receiving\n", from->t_globalid);
}

/* Dispatch communicating threads */
sched_slot_dispatch(SSI_NORMAL_THREAD, from);
sched_slot_dispatch(SSI_IPC_THREAD, to);

dbg_printf(DL_IPC,
"IPC: %t→%t done\n", from->t_globalid, to->t_globalid);
}
Expand All @@ -207,6 +218,9 @@ uint32_t ipc_timeout(void *data)
ktimer_event_t *event = (ktimer_event_t *) data;
tcb_t *thr = (tcb_t *) event->data;

dbg_printf(DL_KDB, "IPC: timeout tid=%t st=%d\n",
thr->t_globalid, thr->state);

if (thr->timeout_event == (uint32_t)data) {

if (thr->state == T_RECV_BLOCKED)
Expand All @@ -215,7 +229,7 @@ uint32_t ipc_timeout(void *data)
if (thr->state == T_SEND_BLOCKED)
user_ipc_error(thr, UE_IPC_TIMEOUT | UE_IPC_PHASE_SEND);

thr->state = T_RUNNABLE;
thread_make_runnable(thr);
thr->timeout_event = 0;
}

Expand All @@ -233,6 +247,9 @@ static void sys_ipc_timeout(uint32_t timeout)

kevent = ktimer_event_create(ticks, ipc_timeout, caller);

dbg_printf(DL_KDB, "IPC: sched timeout ticks=%d ev=%p\n",
ticks, kevent);

caller->timeout_event = (uint32_t) kevent;
}

Expand All @@ -247,6 +264,8 @@ void sys_ipc(uint32_t *param1)

if (to_tid == L4_NILTHREAD &&
from_tid == L4_NILTHREAD) {
dbg_printf(DL_KDB, "IPC: sleep tid=%t timeout=%p\n",
caller->t_globalid, timeout);
caller->state = T_INACTIVE;
if (timeout)
sys_ipc_timeout(timeout);
Expand All @@ -258,11 +277,11 @@ void sys_ipc(uint32_t *param1)

if (to_tid == TID_TO_GLOBALID(THREAD_LOG)) {
user_log(caller);
caller->state = T_RUNNABLE;
thread_make_runnable(caller);
return;
} else if (to_tid == TID_TO_GLOBALID(THREAD_IRQ_REQUEST)) {
user_interrupt_config(caller);
caller->state = T_RUNNABLE;
thread_make_runnable(caller);
return;
} else if (to_thr &&
(to_thr->state == T_RECV_BLOCKED ||
Expand Down Expand Up @@ -297,7 +316,7 @@ void sys_ipc(uint32_t *param1)
to_tid, sp - stack_size, mp ? mp->name : "N/A");
user_ipc_error(caller,
UE_IPC_ABORTED | UE_IPC_PHASE_SEND);
caller->state = T_RUNNABLE;
thread_make_runnable(caller);
return;
}

Expand All @@ -317,10 +336,10 @@ void sys_ipc(uint32_t *param1)
(void *) ipc_read_mr(caller, 1),
regs, to_thr);

caller->state = T_RUNNABLE;
thread_make_runnable(caller);

/* Start thread */
to_thr->state = T_RUNNABLE;
thread_make_runnable(to_thr);

return;
} else {
Expand All @@ -340,7 +359,7 @@ void sys_ipc(uint32_t *param1)
if (typed_last > IPC_MR_COUNT) {
user_ipc_error(caller,
UE_IPC_MSG_OVERFLOW | UE_IPC_PHASE_SEND);
caller->state = T_RUNNABLE;
thread_make_runnable(caller);
return;
}

Expand All @@ -365,7 +384,7 @@ void sys_ipc(uint32_t *param1)
dbg_printf(DL_IPC,
"IPC: REJECT unaligned map to INACTIVE %p\n",
map_base);
caller->state = T_RUNNABLE;
thread_make_runnable(caller);
return;
}

Expand All @@ -385,7 +404,7 @@ void sys_ipc(uint32_t *param1)
}

/* Keep thread INACTIVE, sender continues */
caller->state = T_RUNNABLE;
thread_make_runnable(caller);
return;
}
} else {
Expand All @@ -400,7 +419,7 @@ void sys_ipc(uint32_t *param1)
caller->t_globalid, to_tid);
user_ipc_error(caller,
UE_IPC_ABORTED | UE_IPC_PHASE_SEND);
caller->state = T_RUNNABLE;
thread_make_runnable(caller);
return;
}

Expand All @@ -414,7 +433,7 @@ void sys_ipc(uint32_t *param1)
caller->t_globalid, to_tid);
user_ipc_error(caller,
UE_IPC_ABORTED | UE_IPC_PHASE_SEND);
caller->state = T_RUNNABLE;
thread_make_runnable(caller);
return;
}

Expand Down
Loading
Loading