// rt/src/rt.c

#include <rt/atomic.h>
#include <rt/container.h>
#include <rt/context.h>
#include <rt/cycle.h>
#include <rt/idle.h>
#include <rt/list.h>
#include <rt/log.h>
#include <rt/mpu.h>
#include <rt/mutex.h>
#include <rt/sem.h>
#include <rt/start.h>
#include <rt/syscall.h>
#include <rt/task.h>
#include <rt/tick.h>
#include <rt/trap.h>
static inline struct rt_task *task_from_list(const struct rt_list *l)
{
return rt_container_of(l, struct rt_task, list);
}
static inline struct rt_task *task_from_sleep_list(const struct rt_list *l)
{
return rt_container_of(l, struct rt_task, sleep_list);
}
static inline struct rt_mutex *mutex_from_list(const struct rt_list *l)
{
return rt_container_of(l, struct rt_mutex, list);
}
static bool task_priority_less_than(const struct rt_list *a,
const struct rt_list *b)
{
return task_from_list(a)->priority < task_from_list(b)->priority;
}
static void insert_by_priority(struct rt_list *list, struct rt_task *task)
{
rt_list_insert_by(list, &task->list, task_priority_less_than);
}
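/* Ready tasks are tracked per priority: rt_ready_bits has a bit set for each
 * priority level with at least one ready task, and rt_ready_lists[p] points
 * at the list node of the task at the front of priority p's ready list, or is
 * NULL when no task at that priority is ready. */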
RT_MPU_PRIV_BSS(rt_ready_bits)
static uint32_t rt_ready_bits = 0;
RT_MPU_PRIV_BSS(rt_ready_lists)
static struct rt_list *rt_ready_lists[RT_TASK_PRIORITY_MAX + 1];
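/* Return the numerically lowest priority that currently has a ready task;
 * lower priority values run first. */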
static uint32_t min_ready_priority(void)
{
#if RT_TASK_READY_CTZ_ENABLE
return (uint32_t)__builtin_ctz(rt_ready_bits);
#else // !RT_TASK_READY_CTZ_ENABLE
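/* Fallback when the count-trailing-zeros builtin isn't used: isolate the
 * lowest set bit and use a de Bruijn multiply to look up its index. */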
static const unsigned char debruijn_ctz[32] = {
0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9,
};
const uint32_t min_bit = rt_ready_bits & -rt_ready_bits;
return (uint32_t)debruijn_ctz[(min_bit * UINT32_C(0x077CB531)) >> 27];
#endif // RT_TASK_READY_CTZ_ENABLE
}
static bool mutex_priority_less_than(const struct rt_list *a,
const struct rt_list *b)
{
const struct rt_mutex *const ma = mutex_from_list(a);
const struct rt_mutex *const mb = mutex_from_list(b);
// Only mutexes that have waiters should be compared.
return task_from_list(rt_list_front(&ma->wait_list))->priority <
task_from_list(rt_list_front(&mb->wait_list))->priority;
}
static void insert_mutex_by_priority(struct rt_list *list,
struct rt_mutex *mutex)
{
rt_list_insert_by(list, &mutex->list, mutex_priority_less_than);
}
RT_MPU_PRIV_BSS(rt_pending_syscalls)
static rt_atomic(struct rt_syscall_record *) rt_pending_syscalls = NULL;
RT_TASK(rt_idle, RT_STACK_MIN, RT_TASK_PRIORITY_IDLE);
/* rt_active_task must be readable from user code.
* Task structures themselves are privileged. */
struct rt_task *rt_active_task = NULL;
void rt_task_yield(void)
{
rt_syscall_0(RT_SYSCALL_YIELD);
}
__attribute__((noreturn)) void rt_task_exit(void)
{
rt_logf("syscall: %s exit\n", rt_task_name());
rt_syscall_0(RT_SYSCALL_EXIT);
// Should not be reached.
rt_trap();
}
struct rt_task *rt_task_self(void)
{
return rt_active_task;
}
const char *rt_task_name(void)
{
return rt_active_task->name;
}
void rt_task_ready(struct rt_task *task)
{
task->state = RT_TASK_STATE_READY;
struct rt_list *const list = rt_ready_lists[task->priority];
if (list == NULL)
{
rt_ready_lists[task->priority] = &task->list;
rt_ready_bits |= UINT32_C(1) << task->priority;
}
else
{
rt_list_push_back(list, &task->list);
}
}
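/* Remove a task from its priority's ready list. If it was the only ready task
 * at that priority, clear the list pointer and the ready bit; if it was the
 * front task, advance the front pointer before unlinking it. */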
static void task_unready(struct rt_task *task)
{
if (rt_list_is_empty(&task->list))
{
rt_ready_lists[task->priority] = NULL;
rt_ready_bits &= ~(UINT32_C(1) << task->priority);
}
else
{
if (rt_ready_lists[task->priority] == &task->list)
{
rt_ready_lists[task->priority] = task->list.next;
}
rt_list_remove(&task->list);
}
}
static void task_wait(struct rt_task *task, struct rt_list *list)
{
task_unready(task);
insert_by_priority(list, task);
}
RT_MPU_PRIV_BSS(rt_context_prev)
void **rt_context_prev;
void *rt_start_context(void)
{
#if RT_CYCLE_ENABLE
rt_cycle_init();
#endif // RT_CYCLE_ENABLE
rt_task_cycle_resume();
// Initially all tasks are ready, including the idle task.
struct rt_task *const first_task =
task_from_list(rt_ready_lists[min_ready_priority()]);
first_task->state = RT_TASK_STATE_RUNNING;
rt_active_task = first_task;
rt_logf("rt_start_context: %s with priority %u\n", first_task->name,
first_task->priority);
return first_task->ctx;
}
static void *sched(void)
{
struct rt_task *const next_task =
task_from_list(rt_ready_lists[min_ready_priority()]);
next_task->state = RT_TASK_STATE_RUNNING;
if (next_task == rt_active_task)
{
// The same task should still run, so no context switch is required.
return NULL;
}
rt_context_prev = &rt_active_task->ctx;
rt_active_task = next_task;
rt_logf("sched: switching to %s with priority %u\n", rt_task_name(),
next_task->priority);
return next_task->ctx;
}
RT_MPU_PRIV_BSS(rt_woken_tick)
static unsigned long rt_woken_tick;
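/* Wake ticks are compared relative to rt_woken_tick so the ordering stays
 * correct when the tick counter wraps around. */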
static bool wake_tick_less_than(const struct rt_list *a,
const struct rt_list *b)
{
return (task_from_sleep_list(a)->wake_tick - rt_woken_tick) <
(task_from_sleep_list(b)->wake_tick - rt_woken_tick);
}
RT_MPU_PRIV_DATA(rt_sleep_list)
static RT_LIST(rt_sleep_list);
static void sleep_until(struct rt_task *task, unsigned long wake_tick)
{
task->wake_tick = wake_tick;
rt_list_insert_by(&rt_sleep_list, &task->sleep_list, wake_tick_less_than);
}
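/* Wake tasks blocked on the semaphore while there are more recorded waiters
 * than waits left unsatisfied by the semaphore's value (a negative value
 * counts unsatisfied waits). */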
static void wake_sem_waiters(struct rt_sem *sem)
{
int waiters = -rt_atomic_load(&sem->value, RT_ATOMIC_ACQUIRE);
if (waiters < 0)
{
waiters = 0;
}
while (sem->num_waiters > (size_t)waiters)
{
struct rt_task *const task =
task_from_list(rt_list_front(&sem->wait_list));
rt_list_remove(&task->list);
rt_list_remove(&task->sleep_list);
task->blocker.sem = NULL;
rt_task_ready(task);
--sem->num_waiters;
}
}
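/* Attempt to lock the mutex on behalf of new_holder. Returns true if the lock
 * was acquired; otherwise ensures the waited bit is set in the holder word
 * before returning false. */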
static bool trylock_or_wait(struct rt_mutex *mutex, uintptr_t new_holder)
{
uintptr_t e = rt_atomic_load(&mutex->holder, RT_ATOMIC_RELAXED);
bool unlocked;
do
{
unlocked = e == RT_MUTEX_UNLOCKED;
if (!unlocked)
{
if ((e & RT_MUTEX_WAITED_MASK) != 0)
{
// The mutex is locked and already waited on, so we are done.
break;
}
else
{
// If the mutex is not waited on, we need to set the waited bit.
new_holder = e | RT_MUTEX_WAITED_MASK;
}
}
/* Note that we ignore the case where the mutex is already held by
 * new_holder. The mutex's public interfaces already allow it to be
 * re-locked by its current holder, or trap if the mutex is not
 * recursive. This function is only invoked when a locking task needs
 * to make a system call because it couldn't lock the mutex, or when a
 * task in a mutex's wait list is being awoken, which can only occur if
 * it previously failed to acquire the mutex. */
} while (!rt_atomic_compare_exchange_weak(&mutex->holder, &e, new_holder,
RT_ATOMIC_ACQUIRE,
RT_ATOMIC_RELAXED));
return unlocked;
}
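/* After an unlock, attempt to hand the mutex to its highest-priority waiter
 * and make that task ready. */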
static void wake_mutex_waiter(struct rt_mutex *mutex)
{
if (rt_list_is_empty(&mutex->wait_list))
{
/* If the mutex has no waiters, there's nothing to do. This can happen
* if the holder unlocked while there were still waiters, but they
* timed out before the unlock syscall ran. */
return;
}
/* Acquire the mutex on behalf of the first waiter. This should succeed
* because the mutex was just unlocked and no other tasks have run yet,
* unless an interrupt ran and took the mutex in between these two steps
* and then refused to release it. */
struct rt_task *const task =
task_from_list(rt_list_front(&mutex->wait_list));
const bool has_other_waiters = task->list.next != &mutex->wait_list;
uintptr_t new_holder = (uintptr_t)task;
if (has_other_waiters)
{
new_holder |= RT_MUTEX_WAITED_MASK;
}
if (trylock_or_wait(mutex, new_holder))
{
rt_list_remove(&task->list);
rt_list_remove(&task->sleep_list);
task->blocker.mutex = NULL;
rt_task_ready(task);
if (has_other_waiters)
{
insert_mutex_by_priority(&task->mutex_list, mutex);
/* The new holder is the highest-priority waiter, so recalculating the
 * donated priority here is not necessary, but one of the remaining
 * waiters may receive donated priority in the future, which would then
 * need to be donated onward to the new holder. */
}
}
}
/* Update the task's donated priority based on the mutexes it holds, and return
 * the next mutex that needs its donated priority recalculated, or NULL if no
 * further donation adjustment is necessary. */
static struct rt_mutex *task_donate(struct rt_task *task)
{
// Recalculate the task's priority starting from its base priority.
uint32_t priority = task->base_priority;
/* If the task is holding any donating mutexes, donate the highest priority
* among them to this task if necessary. */
if (!rt_list_is_empty(&task->mutex_list))
{
struct rt_mutex *const next_mutex =
mutex_from_list(rt_list_front(&task->mutex_list));
const uint32_t donated_priority =
task_from_list(rt_list_front(&next_mutex->wait_list))->priority;
if (priority > donated_priority)
{
priority = donated_priority;
}
}
if (priority == task->priority)
{
// The task priority didn't change; nothing else to do.
return NULL;
}
if ((task->state == RT_TASK_STATE_RUNNING) ||
(task->state == RT_TASK_STATE_READY))
{
task_unready(task);
task->priority = priority;
rt_task_ready(task);
}
else if ((task->state == RT_TASK_STATE_BLOCKED_ON_SEM_WAIT) ||
(task->state == RT_TASK_STATE_BLOCKED_ON_SEM_TIMEDWAIT))
{
task->priority = priority;
rt_list_remove(&task->list);
insert_by_priority(&task->blocker.sem->wait_list, task);
}
else if ((task->state == RT_TASK_STATE_BLOCKED_ON_MUTEX_LOCK) ||
(task->state == RT_TASK_STATE_BLOCKED_ON_MUTEX_TIMEDLOCK))
{
task->priority = priority;
rt_list_remove(&task->list);
insert_by_priority(&task->blocker.mutex->wait_list, task);
return task->blocker.mutex;
}
return NULL;
}
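/* Propagate priority donation starting from the given mutex: re-sort it in
 * its holder's mutex list, update the holder's priority, and continue along
 * the chain if the holder is itself blocked on another mutex. */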
static void mutex_donate(struct rt_mutex *mutex)
{
do
{
uintptr_t holder = rt_atomic_load(&mutex->holder, RT_ATOMIC_RELAXED);
if ((holder == RT_MUTEX_UNLOCKED) ||
(holder == RT_MUTEX_HOLDER_INTERRUPT))
{
/* If the mutex is not held or held by an interrupt, then no
* donation is needed. */
return;
}
struct rt_task *const task =
(struct rt_task *)(holder & RT_MUTEX_HOLDER_MASK);
if (!rt_list_is_empty(&mutex->wait_list))
{
// Re-sort the mutex in the holder's mutex list.
rt_list_remove(&mutex->list);
insert_mutex_by_priority(&task->mutex_list, mutex);
}
/* Update the holder's priority and get the next mutex to calculate
* donation on, if any. */
mutex = task_donate(task);
} while (mutex != NULL);
}
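/* Advance rt_woken_tick to the current tick count, waking sleeping tasks
 * whose wake tick has been reached and timing out any expired timed
 * semaphore or mutex waits. */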
static void tick_syscall(void)
{
const unsigned long ticks_to_advance = rt_tick_count() - rt_woken_tick;
while (!rt_list_is_empty(&rt_sleep_list))
{
struct rt_task *const task =
task_from_sleep_list(rt_list_front(&rt_sleep_list));
if (ticks_to_advance < (task->wake_tick - rt_woken_tick))
{
break;
}
// Check if the task is blocked on a timed operation.
if (task->state == RT_TASK_STATE_BLOCKED_ON_SEM_TIMEDWAIT)
{
/* If the waking task was blocked on a sem_timedwait, remove it
* from the semaphore's wait list. */
rt_list_remove(&task->list);
struct rt_sem *const sem = task->blocker.sem;
task->blocker.sem = NULL;
rt_sem_add_n(sem, 1);
--sem->num_waiters;
/* TODO: Is wake_sem_waiters necessary here?
 * Example: two tasks are waiting on a semaphore, one times out, and a
 * post occurs at the same time. Will the task that didn't time out
 * always wake without this? */
wake_sem_waiters(sem);
*task->syscall_return = 0;
task->syscall_return = NULL;
}
else if (task->state == RT_TASK_STATE_BLOCKED_ON_MUTEX_TIMEDLOCK)
{
/* If the task was blocked on a mutex_timedlock, remove it from
* the mutex's wait list and re-calculate donated priorities. */
rt_list_remove(&task->list);
struct rt_mutex *const mutex = task->blocker.mutex;
task->blocker.mutex = NULL;
/* If the mutex now has no waiters, clear the waited bit and
* remove it from the holder's mutex list. */
if (rt_list_is_empty(&mutex->wait_list))
{
rt_atomic_fetch_and(&mutex->holder, RT_MUTEX_HOLDER_MASK,
RT_ATOMIC_RELAXED);
rt_list_remove(&mutex->list);
}
mutex_donate(mutex);
*task->syscall_return = 0;
task->syscall_return = NULL;
}
rt_list_remove(&task->sleep_list);
rt_task_ready(task);
}
rt_woken_tick += ticks_to_advance;
}
// Unprivileged tasks need to read the tick count.
static rt_atomic_ulong rt_tick;
RT_MPU_PRIV_BSS(rt_tick_pending)
static rt_atomic_flag rt_tick_pending;
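/* Increment the tick count and pend a tick syscall if one isn't already
 * pending; ticks that elapse before the syscall runs are coalesced and
 * accounted for in tick_syscall. */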
void rt_tick_advance(void)
{
const unsigned long old_tick =
rt_atomic_fetch_add(&rt_tick, 1, RT_ATOMIC_RELAXED);
RT_MPU_PRIV_DATA(rt_tick_record)
static struct rt_syscall_record rt_tick_record = {
.syscall = RT_SYSCALL_PENDABLE_TICK,
};
if (!rt_atomic_flag_test_and_set(&rt_tick_pending, RT_ATOMIC_ACQUIRE))
{
(void)old_tick;
rt_logf("syscall: tick %lu\n", old_tick + 1);
rt_syscall_push(&rt_tick_record);
rt_syscall_pend();
}
}
unsigned long rt_tick_count(void)
{
return rt_atomic_load(&rt_tick, RT_ATOMIC_RELAXED);
}
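/* Push a record onto the lock-free stack of pending syscalls using a
 * compare-and-swap loop; rt_syscall_run_pending takes the whole stack at
 * once. */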
void rt_syscall_push(struct rt_syscall_record *record)
{
record->next = rt_atomic_load(&rt_pending_syscalls, RT_ATOMIC_RELAXED);
while (!rt_atomic_compare_exchange_weak(&rt_pending_syscalls, &record->next,
record, RT_ATOMIC_RELEASE,
RT_ATOMIC_RELAXED))
{
}
}
void rt_task_cycle_pause(void)
{
#if RT_TASK_CYCLE_ENABLE
// TODO: Make this safe to call from any interrupt.
const uint32_t task_cycles = rt_cycle() - rt_active_task->start_cycle;
rt_active_task->total_cycles += task_cycles;
#endif // RT_TASK_CYCLE_ENABLE
}
void rt_task_cycle_resume(void)
{
#if RT_TASK_CYCLE_ENABLE
rt_active_task->start_cycle = rt_cycle();
#endif // RT_TASK_CYCLE_ENABLE
}
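/* Rotate the ready list at the minimum ready priority so that tasks of equal
 * priority run round-robin. */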
static void yield(void)
{
struct rt_list **const list = &rt_ready_lists[min_ready_priority()];
task_from_list(*list)->state = RT_TASK_STATE_READY;
*list = (*list)->next;
}
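/* Handle a syscall made directly by the active task. Returns the context to
 * switch to, or NULL if the active task should keep running. */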
void *rt_syscall_run(enum rt_syscall syscall, uintptr_t arg0, uintptr_t arg1,
uintptr_t arg2)
{
rt_task_cycle_pause();
switch (syscall)
{
case RT_SYSCALL_SLEEP:
{
const unsigned long ticks = arg0;
rt_active_task->state = RT_TASK_STATE_ASLEEP;
task_unready(rt_active_task);
sleep_until(rt_active_task, rt_woken_tick + ticks);
break;
}
case RT_SYSCALL_SLEEP_PERIODIC:
{
const unsigned long last_wake_tick = arg0, period = arg1,
ticks_since_last_wake =
rt_woken_tick - last_wake_tick;
/* If there have been at least as many ticks as the period since the
* last wake, then the desired wake up tick has already occurred. */
if (ticks_since_last_wake < period)
{
rt_active_task->state = RT_TASK_STATE_ASLEEP;
task_unready(rt_active_task);
sleep_until(rt_active_task, last_wake_tick + period);
}
break;
}
case RT_SYSCALL_SEM_WAIT:
{
struct rt_sem *const sem = (struct rt_sem *)arg0;
rt_active_task->state = RT_TASK_STATE_BLOCKED_ON_SEM_WAIT;
rt_active_task->blocker.sem = sem;
task_wait(rt_active_task, &sem->wait_list);
++sem->num_waiters;
/* Evaluate semaphore wakes here as well in case a post occurred
* before the wait syscall was handled. */
wake_sem_waiters(sem);
break;
}
case RT_SYSCALL_SEM_TIMEDWAIT:
{
struct rt_sem *const sem = (struct rt_sem *)arg0;
const unsigned long ticks = arg1;
rt_active_task->state = RT_TASK_STATE_BLOCKED_ON_SEM_TIMEDWAIT;
rt_active_task->blocker.sem = sem;
rt_active_task->syscall_return = (uintptr_t *)arg2;
task_wait(rt_active_task, &sem->wait_list);
sleep_until(rt_active_task, rt_woken_tick + ticks);
++sem->num_waiters;
wake_sem_waiters(sem);
break;
}
case RT_SYSCALL_SEM_POST:
{
struct rt_sem *const sem = (struct rt_sem *)arg0;
rt_sem_add_n(sem, (int)arg1);
wake_sem_waiters(sem);
break;
}
case RT_SYSCALL_MUTEX_LOCK:
{
/* Try to lock the mutex again. The mutex might have become unlocked if
 * a context switch occurred after the fast path failed but before the
 * lock syscall was handled, and the holder unlocked it. If this attempt
 * fails, we know there is a holder and this task must block. */
struct rt_mutex *const mutex = (struct rt_mutex *)arg0;
if (trylock_or_wait(mutex, (uintptr_t)rt_active_task))
{
break;
}
rt_active_task->state = RT_TASK_STATE_BLOCKED_ON_MUTEX_LOCK;
rt_active_task->blocker.mutex = mutex;
task_wait(rt_active_task, &mutex->wait_list);
/* When adding a new waiter, we must donate its priority to the
* task holding the mutex, and transitively to any mutexes that
* task is blocked on. */
mutex_donate(mutex);
break;
}
case RT_SYSCALL_MUTEX_TIMEDLOCK:
{
struct rt_mutex *const mutex = (struct rt_mutex *)arg0;
if (trylock_or_wait(mutex, (uintptr_t)rt_active_task))
{
break;
}
const unsigned long ticks = arg1;
rt_active_task->state = RT_TASK_STATE_BLOCKED_ON_MUTEX_TIMEDLOCK;
rt_active_task->blocker.mutex = mutex;
rt_active_task->syscall_return = (uintptr_t *)arg2;
task_wait(rt_active_task, &mutex->wait_list);
sleep_until(rt_active_task, rt_woken_tick + ticks);
mutex_donate(mutex);
break;
}
case RT_SYSCALL_MUTEX_UNLOCK:
{
struct rt_mutex *const mutex = (struct rt_mutex *)arg0;
rt_atomic_store(&mutex->holder, 0, RT_ATOMIC_RELEASE);
rt_list_remove(&mutex->list);
/* When unlocking, the only donated priority that can change is the
 * unlocking task's, because the task isn't blocked waiting on any
 * other mutex; otherwise it wouldn't be running. */
task_donate(rt_active_task);
wake_mutex_waiter(mutex);
break;
}
case RT_SYSCALL_YIELD:
yield();
break;
case RT_SYSCALL_EXIT:
rt_active_task->state = RT_TASK_STATE_EXITED;
task_unready(rt_active_task);
break;
}
void *const new_ctx = sched();
rt_task_cycle_resume();
return new_ctx;
}
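/* Handle all syscall records pended from interrupts, then reschedule. Returns
 * the context to switch to, or NULL if no context switch is needed. */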
void *rt_syscall_run_pending(void)
{
rt_task_cycle_pause();
/* Take all elements on the pending syscall stack at once. Syscalls added
* after this step will be on a new stack. */
struct rt_syscall_record *record =
rt_atomic_exchange(&rt_pending_syscalls, NULL, RT_ATOMIC_ACQUIRE);
while (record != NULL)
{
/* Store the next record in the list now because some syscall records
* may be re-enabled immediately after they are handled. */
struct rt_syscall_record *next_record = record->next;
switch (record->syscall)
{
case RT_SYSCALL_PENDABLE_SEM_POST:
{
struct rt_sem *const sem = record->args.sem_post.sem;
/* Allow another post syscall from an interrupt to occur while
* wakes are evaluated so that no posts are missed. */
rt_atomic_flag_clear(&sem->post_pending, RT_ATOMIC_RELEASE);
rt_sem_add_n(sem, record->args.sem_post.n);
wake_sem_waiters(sem);
break;
}
case RT_SYSCALL_PENDABLE_TICK:
rt_atomic_flag_clear(&rt_tick_pending, RT_ATOMIC_RELEASE);
// Only force a yield among tasks that are already awake.
yield();
tick_syscall();
break;
}
record = next_record;
}
void *const new_ctx = sched();
rt_task_cycle_resume();
return new_ctx;
}