// rt/src/rt.c
#include <rt/atomic.h>
#include <rt/container.h>
#include <rt/context.h>
#include <rt/cycle.h>
#include <rt/idle.h>
#include <rt/list.h>
#include <rt/log.h>
#include <rt/mpu.h>
#include <rt/mutex.h>
#include <rt/sem.h>
#include <rt/start.h>
#include <rt/syscall.h>
#include <rt/task.h>
#include <rt/tick.h>
#include <rt/trap.h>
static inline struct rt_task *task_from_list(const struct rt_list *l)
{
return rt_container_of(l, struct rt_task, list);
}
static inline struct rt_task *task_from_sleep_list(const struct rt_list *l)
{
return rt_container_of(l, struct rt_task, sleep_list);
}
static inline struct rt_mutex *mutex_from_list(const struct rt_list *l)
{
return rt_container_of(l, struct rt_mutex, list);
}
static bool task_priority_less_than(const struct rt_list *a,
const struct rt_list *b)
{
return task_from_list(a)->priority < task_from_list(b)->priority;
}
static void insert_by_priority(struct rt_list *list, struct rt_task *task)
{
rt_list_insert_by(list, &task->list, task_priority_less_than);
}
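
/* Ready tasks are kept in one intrusive circular list per priority level,
 * with rt_ready_bits tracking which levels are non-empty (bit p is set when
 * priority p has at least one ready task). Lower priority values are more
 * urgent, so the scheduler always runs from the lowest set bit. */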
RT_MPU_PRIV_BSS(rt_ready_bits)
static uint32_t rt_ready_bits = 0;
RT_MPU_PRIV_BSS(rt_ready_lists)
static struct rt_list *rt_ready_lists[RT_TASK_PRIORITY_MAX + 1];
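
// Index of the lowest set bit of rt_ready_bits: the most urgent ready priority.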
static uint32_t min_ready_priority(void)
{
#if RT_TASK_READY_CTZ_ENABLE
return (uint32_t)__builtin_ctz(rt_ready_bits);
#else // !RT_TASK_READY_CTZ_ENABLE
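/* Fall back to a de Bruijn multiply: isolate the lowest set bit, multiply by
 * the de Bruijn constant 0x077CB531, and use the top 5 bits of the 32-bit
 * product to index a lookup table. For example, rt_ready_bits == 0x24 gives
 * min_bit == 4, (4 * 0x077CB531) >> 27 == 3, and debruijn_ctz[3] == 2. */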
static const unsigned char debruijn_ctz[32] = {
0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9,
};
const uint32_t min_bit = rt_ready_bits & -rt_ready_bits;
return debruijn_ctz[(min_bit * 0x077CB531U) >> 27];
#endif // RT_TASK_READY_CTZ_ENABLE
}
static bool mutex_priority_less_than(const struct rt_list *a,
const struct rt_list *b)
{
const struct rt_mutex *const ma = mutex_from_list(a);
const struct rt_mutex *const mb = mutex_from_list(b);
// Only mutexes that have waiters should be compared.
return task_from_list(rt_list_front(&ma->wait_list))->priority <
task_from_list(rt_list_front(&mb->wait_list))->priority;
}
static void insert_mutex_by_priority(struct rt_list *list,
struct rt_mutex *mutex)
{
rt_list_insert_by(list, &mutex->list, mutex_priority_less_than);
}
RT_MPU_PRIV_BSS(rt_pending_syscalls)
static rt_atomic(struct rt_syscall_record *) rt_pending_syscalls = NULL;
RT_TASK(rt_idle, RT_STACK_MIN, RT_TASK_PRIORITY_IDLE);
/* rt_active_task must be readable from user code.
* Task structures themselves are privileged. */
static struct rt_task *rt_active_task = NULL;
void rt_task_yield(void)
{
rt_syscall(NULL);
}
struct rt_task *rt_task_self(void)
{
return rt_active_task;
}
const char *rt_task_name(void)
{
return rt_active_task->name;
}
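
/* Each ready list is headed directly by the first ready task's list node (no
 * sentinel), so adding a task to an empty priority level makes that task the
 * head and marks the level as ready in rt_ready_bits. */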
void rt_task_ready(struct rt_task *task)
{
task->state = RT_TASK_STATE_READY;
struct rt_list *const list = rt_ready_lists[task->priority];
if (list == NULL)
{
rt_ready_lists[task->priority] = &task->list;
rt_ready_bits |= 1U << task->priority;
}
else
{
rt_list_push_back(list, &task->list);
}
}
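
/* Remove a task from its ready list. If it is the only ready task at its
 * priority, the whole level becomes empty and its ready bit is cleared;
 * otherwise the head is advanced past the task if necessary before unlinking
 * it. */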
static void task_unready(struct rt_task *task)
{
if (rt_list_is_empty(&task->list))
{
rt_ready_lists[task->priority] = NULL;
rt_ready_bits &= ~(1U << task->priority);
}
else
{
if (rt_ready_lists[task->priority] == &task->list)
{
rt_ready_lists[task->priority] = task->list.next;
}
rt_list_remove(&task->list);
}
}
static void task_wait(struct rt_task *task, struct rt_list *list)
{
task_unready(task);
insert_by_priority(list, task);
task->wait_list_head = list;
}
__attribute__((noreturn)) void rt_task_exit(void)
{
rt_logf("syscall: %s exit\n", rt_task_name());
struct rt_syscall_record exit_record;
exit_record.op = RT_SYSCALL_TASK_EXIT;
rt_syscall(&exit_record);
// Should not be reached.
rt_trap();
}
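
/* Where the context-switch code should save the outgoing task's context;
 * sched() points this at the previously-active task's ctx field before
 * returning the next context to load. */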
RT_MPU_PRIV_BSS(rt_context_prev)
void **rt_context_prev;
#if RT_MPU_ENABLE
RT_MPU_PRIV_BSS(rt_mpu_config)
struct rt_mpu_config *rt_mpu_config;
#endif
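
/* Select the highest-priority ready task as the first task to run and return
 * its context for the initial context switch. */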
void *rt_start_context(void)
{
#if RT_CYCLE_ENABLE
rt_cycle_init();
#endif
rt_task_cycle_resume();
// Initially all tasks are ready, including the idle task.
struct rt_task *const first_task =
task_from_list(rt_ready_lists[min_ready_priority()]);
first_task->state = RT_TASK_STATE_RUNNING;
rt_active_task = first_task;
#if RT_MPU_ENABLE
rt_mpu_config = &first_task->mpu_config;
#endif
rt_logf("rt_start_context: %s with priority %u\n", rt_task_name(),
first_task->priority);
return first_task->ctx;
}
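
/* Choose the next task to run. On a yield, the most urgent ready list is
 * rotated first so that tasks sharing a priority run round-robin. Returns the
 * next context to load, or NULL if the active task should keep running. */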
static void *sched(bool yield)
{
struct rt_list **const list = &rt_ready_lists[min_ready_priority()];
if (yield)
{
task_from_list(*list)->state = RT_TASK_STATE_READY;
*list = (*list)->next;
}
struct rt_task *const next_task = task_from_list(*list);
next_task->state = RT_TASK_STATE_RUNNING;
if (next_task == rt_active_task)
{
// The same task should still run, so no context switch is required.
return NULL;
}
rt_context_prev = &rt_active_task->ctx;
rt_active_task = next_task;
#if RT_MPU_ENABLE
rt_mpu_config = &next_task->mpu_config;
#endif
rt_logf("sched: switching to %s with priority %u\n", rt_task_name(),
next_task->priority);
return next_task->ctx;
}
RT_MPU_PRIV_BSS(rt_woken_tick)
static unsigned long rt_woken_tick;
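
/* Order sleeping tasks by their unsigned distance from rt_woken_tick so the
 * sleep list stays sorted even when the tick counter wraps. For example, with
 * rt_woken_tick == ULONG_MAX, a wake tick of 1 (distance 2) sorts before a
 * wake tick of 7 (distance 8). */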
static bool wake_tick_less_than(const struct rt_list *a,
const struct rt_list *b)
{
return (task_from_sleep_list(a)->wake_tick - rt_woken_tick) <
(task_from_sleep_list(b)->wake_tick - rt_woken_tick);
}
RT_MPU_PRIV_DATA(rt_sleep_list)
static RT_LIST(rt_sleep_list);
static void sleep_until(struct rt_task *task, unsigned long wake_tick)
{
task->wake_tick = wake_tick;
rt_list_insert_by(&rt_sleep_list, &task->sleep_list, wake_tick_less_than);
}
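
/* A negative semaphore value records how many tasks should be blocked on it.
 * Wake tasks from the front of the wait list (highest priority first) until
 * the number of actual waiters no longer exceeds that count. */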
static void wake_sem_waiters(struct rt_sem *sem)
{
int waiters = -rt_atomic_load(&sem->value, RT_ATOMIC_ACQUIRE);
if (waiters < 0)
{
waiters = 0;
}
while (sem->num_waiters > (size_t)waiters)
{
struct rt_task *const task =
task_from_list(rt_list_front(&sem->wait_list));
rt_list_remove(&task->list);
rt_list_remove(&task->sleep_list);
task->wait_list_head = NULL;
rt_task_ready(task);
--sem->num_waiters;
}
}
static void wake_mutex_waiter(struct rt_mutex *mutex)
{
if (rt_list_is_empty(&mutex->wait_list))
{
// If the mutex has no waiters, there's nothing to do.
return;
}
// Attempt to acquire the mutex on behalf of the first waiter.
struct rt_task *const task =
task_from_list(rt_list_front(&mutex->wait_list));
const bool has_new_holder = rt_mutex_trylock_with_task(mutex, task);
if (has_new_holder)
{
rt_list_remove(&task->list);
rt_list_remove(&task->sleep_list);
task->wait_list_head = NULL;
task->blocking_mutex = NULL;
rt_task_ready(task);
}
if (!rt_list_is_empty(&mutex->wait_list))
{
/* If the mutex still has waiters, we need to set the waited bit so the
* new holder will make a system call on unlock. */
rt_atomic_fetch_or(&mutex->holder, RT_MUTEX_WAITED_MASK,
RT_ATOMIC_RELAXED);
if (has_new_holder)
{
insert_mutex_by_priority(&task->mutex_list, mutex);
/* The new holder was the highest-priority waiter, so its donated
 * priority doesn't need to be recalculated here, but one of the
 * remaining waiters may have a higher priority donated to it later, so
 * the mutex must stay in the new holder's mutex list. */
}
}
}
/* Update the task's donated priority based on the mutexes it holds, and return
* whether the task's effective priority changed. */
static bool task_donate(struct rt_task *task)
{
// Recalculate the task's priority starting from its base priority.
uint32_t priority = task->base_priority;
/* If the task is holding any donating mutexes, donate the highest priority
* among them to this task if necessary. */
if (!rt_list_is_empty(&task->mutex_list))
{
struct rt_mutex *const next_mutex =
mutex_from_list(rt_list_front(&task->mutex_list));
const uint32_t donated_priority =
task_from_list(rt_list_front(&next_mutex->wait_list))->priority;
if (priority > donated_priority)
{
priority = donated_priority;
}
}
if (priority == task->priority)
{
// The task priority didn't change; nothing else to do.
return false;
}
/* If the task's priority changed and it is in a ready list, re-insert it
* by its new priority. */
if ((task->state == RT_TASK_STATE_RUNNING) ||
(task->state == RT_TASK_STATE_READY))
{
task_unready(task);
task->priority = priority;
rt_task_ready(task);
}
else
{
task->priority = priority;
// If the task is in a wait list, re-insert it by its new priority.
if (task->wait_list_head != NULL)
{
rt_list_remove(&task->list);
insert_by_priority(task->wait_list_head, task);
}
}
return true;
}
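
/* Propagate priority donation along a chain of mutex holders: re-sort the
 * mutex in its holder's held-mutex list, recompute the holder's effective
 * priority, and if that changed and the holder is itself blocked on another
 * mutex, continue with that mutex. Iterating rather than recursing keeps the
 * stack usage constant for arbitrarily long chains. */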
static void mutex_donate(struct rt_mutex *mutex)
{
do
{
struct rt_task *holder = rt_mutex_holder(mutex);
if (holder == NULL)
{
// If the mutex is not held then no donation is needed.
return;
}
if (!rt_list_is_empty(&mutex->wait_list))
{
// Re-sort the mutex in the holder's mutex list.
rt_list_remove(&mutex->list);
insert_mutex_by_priority(&holder->mutex_list, mutex);
}
// Update the holder's priority. If it didn't change, we're done.
if (!task_donate(holder))
{
return;
}
/* If the holder changed priority and is itself blocked on another
* mutex, we need to propagate the new priority to that mutex. */
mutex = holder->blocking_mutex;
} while (mutex != NULL);
}
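
/* Advance rt_woken_tick to the current tick count, waking every sleeping task
 * whose wake tick has been reached. A task that was blocked with a timeout is
 * also removed from the semaphore or mutex it was waiting on, and its syscall
 * record's sem/mutex argument is set to NULL so the task can see that the
 * operation timed out. */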
static void tick_syscall(void)
{
const unsigned long ticks_to_advance = rt_tick_count() - rt_woken_tick;
while (!rt_list_is_empty(&rt_sleep_list))
{
struct rt_task *const task =
task_from_sleep_list(rt_list_front(&rt_sleep_list));
if (ticks_to_advance < (task->wake_tick - rt_woken_tick))
{
break;
}
// Check if the task is blocked on a timed operation.
if (task->wait_list_head != NULL)
{
// Unblock the task.
rt_list_remove(&task->list);
task->wait_list_head = NULL;
if (task->blocking_mutex != NULL)
{
/* If the task was blocked on a mutex_timedlock, remove it from
* the mutex's wait list and re-calculate donated priorities. */
struct rt_mutex *const mutex = task->blocking_mutex;
task->blocking_mutex = NULL;
/* If the mutex now has no waiters, clear the waited bit and
* remove it from the holder's mutex list. */
if (rt_list_is_empty(&mutex->wait_list))
{
rt_atomic_fetch_and(&mutex->holder, RT_MUTEX_HOLDER_MASK,
RT_ATOMIC_RELAXED);
rt_list_remove(&mutex->list);
}
mutex_donate(mutex);
*task->timeout_ptr.mutex = NULL;
task->timeout_ptr.mutex = NULL;
}
else
{
/* If the waking task was blocked on a sem_timedwait, remove it
* from the semaphore's wait list. */
struct rt_sem *const sem = *task->timeout_ptr.sem;
rt_sem_add_n(sem, 1);
--sem->num_waiters;
/* TODO: Is wake_sem_waiters necessary here?
* Example: Two tasks are waiting on a semaphore and one
* times out and a post occurs at the same time. Will the task
* that didn't time out always wake without this? */
wake_sem_waiters(sem);
*task->timeout_ptr.sem = NULL;
task->timeout_ptr.sem = NULL;
}
}
rt_list_remove(&task->sleep_list);
rt_task_ready(task);
}
rt_woken_tick += ticks_to_advance;
}
// Unprivileged tasks need to read the tick count.
static rt_atomic_ulong rt_tick;
RT_MPU_PRIV_BSS(rt_tick_pending)
static rt_atomic_flag rt_tick_pending;
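
/* Advance the tick count and pend a tick syscall. The pending flag ensures at
 * most one tick record is in flight at a time; any additional ticks that
 * elapse before it is handled are picked up by tick_syscall, which advances
 * rt_woken_tick by the full difference. */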
void rt_tick_advance(void)
{
const unsigned long old_tick =
rt_atomic_fetch_add(&rt_tick, 1, RT_ATOMIC_RELAXED);
RT_MPU_PRIV_DATA(rt_tick_record)
static struct rt_syscall_record rt_tick_record = {
.op = RT_SYSCALL_TICK,
};
if (!rt_atomic_flag_test_and_set(&rt_tick_pending, RT_ATOMIC_ACQUIRE))
{
(void)old_tick;
rt_logf("syscall: tick %lu\n", old_tick + 1);
rt_syscall_push(&rt_tick_record);
rt_syscall_pend();
}
}
unsigned long rt_tick_count(void)
{
return rt_atomic_load(&rt_tick, RT_ATOMIC_RELAXED);
}
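
/* Lock-free push of a syscall record onto the pending stack: point the
 * record's next at the current head and publish it with a compare-exchange.
 * On failure the compare-exchange reloads the current head into record->next
 * and the loop retries. */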
void rt_syscall_push(struct rt_syscall_record *record)
{
record->next = rt_atomic_load(&rt_pending_syscalls, RT_ATOMIC_RELAXED);
while (!rt_atomic_compare_exchange_weak(&rt_pending_syscalls, &record->next,
record, RT_ATOMIC_RELEASE,
RT_ATOMIC_RELAXED))
{
}
}
void rt_task_cycle_pause(void)
{
#if RT_TASK_CYCLE_ENABLE
// TODO: Make this safe to call from any interrupt.
const uint32_t task_cycles = rt_cycle() - rt_active_task->start_cycle;
rt_active_task->total_cycles += task_cycles;
#endif
}
void rt_task_cycle_resume(void)
{
#if RT_TASK_CYCLE_ENABLE
rt_active_task->start_cycle = rt_cycle();
#endif
}
static void rt_syscall_exec(struct rt_syscall_record *record)
{
switch (record->op)
{
case RT_SYSCALL_TICK:
rt_atomic_flag_clear(&rt_tick_pending, RT_ATOMIC_RELEASE);
tick_syscall();
break;
case RT_SYSCALL_TASK_SLEEP:
{
const unsigned long ticks = record->args.task_sleep.ticks;
rt_active_task->state = RT_TASK_STATE_ASLEEP;
task_unready(rt_active_task);
sleep_until(rt_active_task, rt_woken_tick + ticks);
break;
}
case RT_SYSCALL_TASK_SLEEP_PERIODIC:
{
const unsigned long last_wake_tick =
record->args.task_sleep_periodic.last_wake_tick,
period = record->args.task_sleep_periodic.period,
ticks_since_last_wake =
rt_woken_tick - last_wake_tick;
/* If at least a full period has elapsed since the last wake, the
 * desired wake-up tick has already passed, so don't sleep at all. */
if (ticks_since_last_wake < period)
{
rt_active_task->state = RT_TASK_STATE_ASLEEP;
task_unready(rt_active_task);
sleep_until(rt_active_task, last_wake_tick + period);
}
break;
}
case RT_SYSCALL_SEM_WAIT:
{
struct rt_sem *const sem = record->args.sem_wait.sem;
rt_active_task->state = RT_TASK_STATE_BLOCKED;
task_wait(rt_active_task, &sem->wait_list);
++sem->num_waiters;
/* Evaluate semaphore wakes here as well in case a post occurred
* before the wait syscall was handled. */
wake_sem_waiters(sem);
break;
}
case RT_SYSCALL_SEM_TIMEDWAIT:
{
struct rt_sem *const sem = record->args.sem_timedwait.sem;
const unsigned long ticks = record->args.sem_timedwait.ticks;
rt_active_task->state = RT_TASK_STATE_BLOCKED_TIMEOUT;
rt_active_task->timeout_ptr.sem = &record->args.sem_timedwait.sem;
task_wait(rt_active_task, &sem->wait_list);
sleep_until(rt_active_task, rt_woken_tick + ticks);
++sem->num_waiters;
wake_sem_waiters(sem);
break;
}
case RT_SYSCALL_SEM_POST:
{
struct rt_sem *const sem = record->args.sem_post.sem;
/* Allow another post syscall from an interrupt to occur while
* wakes are evaluated so that no posts are missed. */
if (record == &sem->post_record)
{
rt_atomic_flag_clear(&sem->post_pending, RT_ATOMIC_RELEASE);
}
rt_sem_add_n(sem, record->args.sem_post.n);
wake_sem_waiters(sem);
break;
}
case RT_SYSCALL_MUTEX_LOCK:
{
struct rt_mutex *const mutex = record->args.mutex_lock.mutex;
rt_active_task->state = RT_TASK_STATE_BLOCKED;
rt_active_task->blocking_mutex = mutex;
task_wait(rt_active_task, &mutex->wait_list);
/* When adding a new waiter, we must donate its priority to the
* task holding the mutex, and transitively to any mutexes that
* task is blocked on. */
mutex_donate(mutex);
/* A wake attempt is required here and in TIMEDLOCK because the mutex
 * might have been unlocked after this task's fast-path lock failed but
 * before its lock syscall was handled, if an unrelated context switch
 * to the holder occurred and the holder unlocked. The holder sees that
 * the mutex has a pending waiter and makes an unlock syscall, but this
 * task isn't on the wait list at that point, so the wake must be
 * re-attempted here. */
wake_mutex_waiter(mutex);
break;
}
case RT_SYSCALL_MUTEX_TIMEDLOCK:
{
struct rt_mutex *const mutex = record->args.mutex_timedlock.mutex;
const unsigned long ticks = record->args.mutex_timedlock.ticks;
rt_active_task->state = RT_TASK_STATE_BLOCKED_TIMEOUT;
rt_active_task->blocking_mutex = mutex;
rt_active_task->timeout_ptr.mutex = &record->args.mutex_timedlock.mutex;
task_wait(rt_active_task, &mutex->wait_list);
sleep_until(rt_active_task, rt_woken_tick + ticks);
mutex_donate(mutex);
wake_mutex_waiter(mutex);
break;
}
case RT_SYSCALL_MUTEX_UNLOCK:
{
struct rt_mutex *const mutex = record->args.mutex_unlock.mutex;
rt_atomic_store(&mutex->holder, 0, RT_ATOMIC_RELEASE);
rt_list_remove(&mutex->list);
/* When unlocking, the only donated priority that can change is the
* unlocking task's, because the task isn't blocked waiting on any
* other mutex, otherwise it wouldn't be running. */
task_donate(rt_active_task);
wake_mutex_waiter(mutex);
break;
}
case RT_SYSCALL_TASK_READY:
rt_task_ready(record->args.task_ready.task);
break;
case RT_SYSCALL_TASK_EXIT:
rt_active_task->state = RT_TASK_STATE_EXITED;
break;
}
}
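
/* Run one syscall on behalf of the active task (a NULL record is a plain
 * yield) and return the next context to load, or NULL if no context switch is
 * needed. */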
void *rt_syscall_run(struct rt_syscall_record *record)
{
rt_task_cycle_pause();
const bool yield = record == NULL;
if (!yield)
{
rt_syscall_exec(record);
}
void *const new_ctx = sched(yield);
rt_task_cycle_resume();
return new_ctx;
}
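
/* Drain the records pushed with rt_syscall_push (e.g., from interrupts) and
 * then reschedule. Only a tick record forces a yield, so round-robin rotation
 * among same-priority tasks happens on ticks rather than on every pended
 * syscall. */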
void *rt_syscall_run_pending(void)
{
rt_task_cycle_pause();
/* Take all elements on the pending syscall stack at once. Syscalls added
* after this step will be on a new stack. */
struct rt_syscall_record *record =
rt_atomic_exchange(&rt_pending_syscalls, NULL, RT_ATOMIC_ACQUIRE);
bool yield = false;
while (record != NULL)
{
/* Store the next record in the list now because some syscall records
* may be re-enabled immediately after they are handled. */
struct rt_syscall_record *next_record = record->next;
if (record->op == RT_SYSCALL_TICK)
{
yield = true;
}
rt_syscall_exec(record);
record = next_record;
}
void *const new_ctx = sched(yield);
rt_task_cycle_resume();
return new_ctx;
}