#include <rt/atomic.h>
#include <rt/container.h>
#include <rt/context.h>
#include <rt/cycle.h>
#include <rt/idle.h>
#include <rt/interrupt.h>
#include <rt/list.h>
#include <rt/log.h>
#include <rt/mutex.h>
#include <rt/sem.h>
#include <rt/start.h>
#include <rt/syscall.h>
#include <rt/task.h>
#include <rt/tick.h>

static inline struct rt_task *task_from_list(const struct rt_list *l)
{
    return rt_container_of(l, struct rt_task, list);
}

static inline struct rt_task *task_from_sleep_list(const struct rt_list *l)
{
    return rt_container_of(l, struct rt_task, sleep_list);
}

static inline struct rt_mutex *mutex_from_list(const struct rt_list *l)
{
    return rt_container_of(l, struct rt_mutex, list);
}

static bool task_priority_greater_than(const struct rt_list *a,
                                       const struct rt_list *b)
{
    return task_from_list(a)->priority > task_from_list(b)->priority;
}

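/* Wait lists (and the single ready list when RT_TASK_READY_CLZ_ENABLE is 0)
 * are kept sorted in descending priority order, so the front of a list is
 * always its highest-priority task. task->list_head records which list the
 * task is on so it can be re-inserted if its priority changes. */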
static void insert_by_priority(struct rt_list *list, struct rt_task *task)
{
    rt_list_insert_by(list, &task->list, task_priority_greater_than);
    task->list_head = list;
}

#if RT_TASK_READY_CLZ_ENABLE
RT_MPU_PRIV_BSS(rt_ready_bits)
static unsigned rt_ready_bits = 0;

RT_MPU_PRIV_BSS(rt_ready_lists)
static struct rt_list rt_ready_lists[RT_TASK_MAX_PRIORITY + 1];

/* Ensure that the ready lists are initialized before any task constructors
 * execute, because they will insert tasks into the ready lists. */
__attribute__((constructor(RT_TASK_CONSTRUCTOR_PRIORITY - 1))) static void
rt_init_ready_lists(void)
{
    for (size_t i = 0; i <= RT_TASK_MAX_PRIORITY; ++i)
    {
        rt_list_init(&rt_ready_lists[i]);
    }
}
#else
RT_MPU_PRIV_DATA(rt_ready_list)
static RT_LIST(rt_ready_list);
#endif

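/* With RT_TASK_READY_CLZ_ENABLE, rt_ready_bits has one bit set for each
 * priority level that currently has at least one ready task (rt_task_ready
 * sets the bit, task_unready clears it when that level's list empties).
 * __builtin_clz locates the most significant set bit, so the highest ready
 * priority is found in constant time instead of by scanning a sorted list. */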
static struct rt_task *next_ready_task(void)
{
#if RT_TASK_READY_CLZ_ENABLE
    if (rt_ready_bits == 0)
    {
        return NULL;
    }
    const unsigned max_ready_priority =
        RT_TASK_MAX_PRIORITY - (unsigned)__builtin_clz(rt_ready_bits);
    return task_from_list(rt_list_front(&rt_ready_lists[max_ready_priority]));
#else
    if (rt_list_is_empty(&rt_ready_list))
    {
        return NULL;
    }
    return task_from_list(rt_list_front(&rt_ready_list));
#endif
}

static bool mutex_priority_greater_than(const struct rt_list *a,
                                        const struct rt_list *b)
{
    const struct rt_mutex *ma = mutex_from_list(a);
    const struct rt_mutex *mb = mutex_from_list(b);
    /* Only mutexes that have waiters should be compared. */
    return task_from_list(rt_list_front(&ma->wait_list))->priority >
           task_from_list(rt_list_front(&mb->wait_list))->priority;
}

static void insert_mutex_by_priority(struct rt_list *list,
                                     struct rt_mutex *mutex)
{
    rt_list_insert_by(list, &mutex->list, mutex_priority_greater_than);
}

/* rt_pending_syscalls can be updated from user code, so we don't put it
 * in RT_MPU_PRIV_BSS. */
static struct rt_syscall_record *_Atomic rt_pending_syscalls = NULL;

RT_TASK(rt_idle, RT_STACK_MIN, 0);

/* rt_active_task must be readable from user code. */
static struct rt_task *rt_active_task = NULL;

void rt_task_yield(void)
{
    rt_syscall();
}

struct rt_task *rt_task_self(void)
{
    return rt_active_task;
}

const char *rt_task_name(void)
{
    return rt_active_task->name;
}

void rt_task_ready(struct rt_task *task)
{
    task->state = RT_TASK_STATE_READY;
#if RT_TASK_READY_CLZ_ENABLE
    struct rt_list *list = &rt_ready_lists[task->priority];
    rt_list_push_back(list, &task->list);
    rt_ready_bits |= 1U << task->priority;
    task->list_head = list;
#else
    insert_by_priority(&rt_ready_list, task);
#endif
}

static void task_unready(struct rt_task *task)
{
    rt_list_remove(&task->list);
#if RT_TASK_READY_CLZ_ENABLE
    if (rt_list_is_empty(task->list_head))
    {
        rt_ready_bits &= ~(1U << task->priority);
    }
#endif
    task->list_head = NULL;
}

__attribute__((noreturn)) void rt_task_exit(void)
{
    rt_logf("syscall: %s exit\n", rt_task_name());
    struct rt_syscall_record exit_record = {
        .op = RT_SYSCALL_TASK_EXIT,
    };
    rt_syscall_push(&exit_record);
    rt_syscall();

    /* Should not be reached. */
    for (;;)
    {
    }
}

RT_MPU_PRIV_BSS(rt_context_prev)
void **rt_context_prev;
#if RT_MPU_ENABLE
RT_MPU_PRIV_BSS(rt_mpu_config)
struct rt_mpu_config *rt_mpu_config;
#endif

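/* Pick the first task to run when the scheduler starts: take the
 * highest-priority ready task off its ready list, make it the active task,
 * and return its saved context so execution can begin there. */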
void *rt_start_context(void)
{
#if RT_CYCLE_ENABLE
    rt_cycle_init();
#endif
    rt_task_cycle_resume();

    struct rt_task *const first_task = next_ready_task();
    task_unready(first_task);

    rt_active_task = first_task;
    first_task->state = RT_TASK_STATE_RUNNING;

#if RT_MPU_ENABLE
    rt_mpu_config = &first_task->mpu_config;
#endif

    rt_logf("rt_start_context: %s with priority %u\n", rt_task_name(),
            first_task->priority);

    return first_task->ctx;
}

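/* Decide whether a context switch is needed. Returns the context of the next
 * task to run, or NULL if the active task should keep running. When yield is
 * true, the active task also gives way to a ready task of equal priority. */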
static void *sched(bool yield)
{
    struct rt_task *next_task = next_ready_task();
    if (!next_task)
    {
        /*
         * Note: if a task other than the idle task is running, the ready
         * list can never be empty, because the idle task is ready whenever
         * it isn't running. Reaching this point therefore means the idle
         * task is the active task, and its state doesn't need to be checked
         * or adjusted here, because the idle task is always RUNNING. Only
         * active tasks other than idle can have some other state at this
         * point.
         */
        rt_logf("sched: no new tasks to run, continuing %s\n", rt_task_name());
        return NULL;
    }

    /* If the active task invoked a system call to suspend itself, its state
     * will be something other than RUNNING here. */
    const bool still_running = rt_active_task->state == RT_TASK_STATE_RUNNING;

    /* If the active task is still running and has higher priority than the
     * next task, then continue executing the active task. If not yielding,
     * then also continue the active task even if the next task has equal
     * priority. */
    if (still_running &&
        ((rt_active_task->priority > next_task->priority) ||
         (!yield && (rt_active_task->priority == next_task->priority))))
    {
        rt_logf("sched: %s is still highest priority (%u %s %u)\n",
                rt_task_name(), rt_active_task->priority, yield ? ">" : "≥",
                next_task->priority);
        return NULL;
    }

    /* The next task will be used, so remove it from the corresponding ready
     * list and clear the ready bit for its priority if necessary. */
    task_unready(next_task);

    /* If a task made a system call to suspend itself but was then woken up by
     * its own or another system call and is still the highest priority task,
     * it should continue running, so don't context switch. */
    if (rt_active_task == next_task)
    {
        rt_logf("sched: %s was suspended and reawakened\n", rt_task_name());
        rt_active_task->state = RT_TASK_STATE_RUNNING;
        return NULL;
    }

    /* If the active task is still running but we are switching to a new task,
     * add the active task to the ready list and mark it as READY. */
    if (still_running)
    {
        rt_logf("sched: %s is still runnable\n", rt_task_name());
        rt_task_ready(rt_active_task);
    }

    rt_context_prev = &rt_active_task->ctx;
    next_task->state = RT_TASK_STATE_RUNNING;
    rt_active_task = next_task;

#if RT_MPU_ENABLE
    rt_mpu_config = &rt_active_task->mpu_config;
#endif

    rt_logf("sched: switching to %s with priority %u\n", rt_task_name(),
            rt_active_task->priority);

    return rt_active_task->ctx;
}

/*
 * These globals may only be manipulated in the system call handler.
 */
RT_MPU_PRIV_BSS(rt_woken_tick)
static unsigned long rt_woken_tick;

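/* Compare wake ticks relative to rt_woken_tick using unsigned subtraction so
 * that the ordering stays correct even when the tick counter wraps around.
 * E.g., if rt_woken_tick == ULONG_MAX and a task's wake_tick is 1, the
 * difference is 2, which sorts it ahead of a task waking much later. */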
static bool wake_tick_less_than(const struct rt_list *a,
                                const struct rt_list *b)
{
    return (task_from_sleep_list(a)->wake_tick - rt_woken_tick) <
           (task_from_sleep_list(b)->wake_tick - rt_woken_tick);
}

RT_MPU_PRIV_DATA(rt_sleep_list)
static RT_LIST(rt_sleep_list);

static void sleep_until(struct rt_task *task, unsigned long wake_tick)
{
    task->wake_tick = wake_tick;
    rt_list_insert_by(&rt_sleep_list, &task->sleep_list, wake_tick_less_than);
}

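/* A semaphore value of -N means N waiters remain unsatisfied, so wake tasks
 * from the front of the wait list until only that many are left blocked (or
 * until no waiters remain). */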
static void wake_sem_waiters(struct rt_sem *sem)
{
    int waiters = -rt_atomic_load(&sem->value, RT_ATOMIC_RELAXED);
    if (waiters < 0)
    {
        waiters = 0;
    }
    while (sem->num_waiters > (size_t)waiters)
    {
        struct rt_task *const task =
            task_from_list(rt_list_front(&sem->wait_list));
        rt_list_remove(&task->list);
        rt_list_remove(&task->sleep_list);
        rt_task_ready(task);
        --sem->num_waiters;
    }
}

static void wake_mutex_waiter(struct rt_mutex *mutex)
{
    if (rt_list_is_empty(&mutex->wait_list))
    {
        /* If the mutex has no waiters, there's nothing to do. */
        return;
    }

    /* Attempt to acquire the mutex on behalf of the first waiter. */
    struct rt_task *const task =
        task_from_list(rt_list_front(&mutex->wait_list));

    const bool has_new_holder = rt_mutex_trylock_with_task(mutex, task);

    if (has_new_holder)
    {
        rt_list_remove(&task->list);
        rt_list_remove(&task->sleep_list);
        task->blocking_mutex = NULL;
        rt_task_ready(task);
    }

    if (!rt_list_is_empty(&mutex->wait_list))
    {
        /* If the mutex still has waiters, we need to set the waiter bit so
         * the new holder will make a system call on unlock. */
        rt_atomic_fetch_or(&mutex->holder, RT_MUTEX_WAITER_MASK,
                           RT_ATOMIC_RELAXED);
        if (has_new_holder)
        {
            insert_mutex_by_priority(&task->mutex_list, mutex);
            /* The new holder is the highest priority among these waiters, so
             * recalculating the donated priority here is not necessary, but
             * one of the waiters may have priority donated to it in the
             * future. */
        }
    }
}

/* Update the task's donated priority based on the mutexes it holds, and return
 * whether the task's effective priority changed. */
static bool task_donate(struct rt_task *task)
{
    /* Recalculate the task's priority starting from its base priority. */
    unsigned int priority = task->base_priority;

    /* If the task is holding any donating mutexes, donate the highest priority
     * among them to this task if necessary. */
    if (!rt_list_is_empty(&task->mutex_list))
    {
        struct rt_mutex *const next_mutex =
            mutex_from_list(rt_list_front(&task->mutex_list));
        const unsigned int donated_priority =
            task_from_list(rt_list_front(&next_mutex->wait_list))->priority;
        if (priority < donated_priority)
        {
            priority = donated_priority;
        }
    }

    if (priority == task->priority)
    {
        /* The task priority didn't change; nothing else to do. */
        return false;
    }

    /* If the task's priority changed and it is in a wait list or a ready list,
     * re-insert it by its new priority. */
    if (task->state == RT_TASK_STATE_READY)
    {
        task_unready(task);
        task->priority = priority;
        rt_task_ready(task);
    }
    else
    {
        /* The task might be the active task (e.g., if its priority is being
         * lowered after unlocking a mutex). */
        task->priority = priority;
        if (task->list_head != NULL)
        {
            rt_list_remove(&task->list);
            insert_by_priority(task->list_head, task);
        }
    }

    return true;
}

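/* Propagate priority donation along a chain of blocked tasks: donate the
 * highest waiter's priority to this mutex's holder, and if the holder's
 * priority changed and it is itself blocked on another mutex, repeat for that
 * mutex, until a holder's priority is unchanged or the chain ends. */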
static void mutex_donate(struct rt_mutex *mutex)
{
    do
    {
        const uintptr_t holder =
            rt_atomic_load(&mutex->holder, RT_ATOMIC_RELAXED) &
            ~RT_MUTEX_WAITER_MASK;
        if (holder == 0)
        {
            /* If the mutex is not held then no donation is needed. */
            return;
        }

        struct rt_task *const task = (struct rt_task *)holder;

        if (!rt_list_is_empty(&mutex->wait_list))
        {
            /* Re-sort the mutex in the holder's mutex list. */
            rt_list_remove(&mutex->list);
            insert_mutex_by_priority(&task->mutex_list, mutex);
        }

        /* Update the holder's priority. If it didn't change, we're done. */
        if (!task_donate(task))
        {
            return;
        }

        /* If the holder changed priority and is itself blocked on another
         * mutex, we need to propagate the new priority to that mutex. */
        mutex = task->blocking_mutex;
    } while (mutex != NULL);
}

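/* Handle a tick system call: advance rt_woken_tick to the current tick count
 * and wake every sleeping task whose wake tick has been reached, cleaning up
 * any timed semaphore or mutex wait that expired along the way. */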
static void tick_syscall(void)
{
    const unsigned long ticks_to_advance = rt_tick_count() - rt_woken_tick;
    while (!rt_list_is_empty(&rt_sleep_list))
    {
        struct rt_task *const task =
            task_from_sleep_list(rt_list_front(&rt_sleep_list));
        if (ticks_to_advance < (task->wake_tick - rt_woken_tick))
        {
            break;
        }

        /* Check if the task is blocked on a timed operation. */
        if (!rt_list_is_empty(&task->list))
        {
            /* Unblock the task. */
            rt_list_remove(&task->list);
            if (task->blocking_mutex != NULL)
            {
                /* If the task was blocked on a mutex_timedlock, remove it
                 * from the mutex's wait list and re-calculate donated
                 * priorities. */
                struct rt_mutex *const mutex = task->blocking_mutex;
                task->blocking_mutex = NULL;
                /* If the mutex now has no waiters, clear the waiter bit and
                 * remove it from the holder's mutex list. */
                if (rt_list_is_empty(&mutex->wait_list))
                {
                    rt_atomic_fetch_and(&mutex->holder, ~RT_MUTEX_WAITER_MASK,
                                        RT_ATOMIC_RELAXED);
                    rt_list_remove(&mutex->list);
                }
                mutex_donate(mutex);
                *task->timeout_ptr.mutex = NULL;
                task->timeout_ptr.mutex = NULL;
            }
            else
            {
                /* If the waking task was blocked on a sem_timedwait, remove
                 * it from the semaphore's wait list. */
                struct rt_sem *const sem = *task->timeout_ptr.sem;
                rt_sem_add_n(sem, 1);
                --sem->num_waiters;
                /* TODO: Is wake_sem_waiters necessary here?
                 * Example: Two tasks are waiting on a semaphore and one
                 * times out and a post occurs at the same time. Will the task
                 * that didn't time out always wake without this? */
                wake_sem_waiters(sem);
                *task->timeout_ptr.sem = NULL;
                task->timeout_ptr.sem = NULL;
            }
        }
        rt_list_remove(&task->sleep_list);
        rt_task_ready(task);
    }
    rt_woken_tick += ticks_to_advance;
}

/* Unprivileged tasks need to read the tick count. */
static rt_atomic_ulong rt_tick;

RT_MPU_PRIV_BSS(rt_tick_pending)
static rt_atomic_flag rt_tick_pending;

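/* Advance the tick count by one and, if a tick system call is not already
 * pending, push a single statically-allocated tick record. The
 * rt_tick_pending flag is cleared again when the record is handled in
 * rt_syscall_run, so at most one tick record is ever in flight regardless of
 * how many ticks elapse before the handler runs. */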
void rt_tick_advance(void)
{
    const unsigned long old_tick =
        rt_atomic_fetch_add(&rt_tick, 1, RT_ATOMIC_RELAXED);

    RT_MPU_PRIV_DATA(rt_tick_record)
    static struct rt_syscall_record rt_tick_record = {
        .next = NULL,
        .op = RT_SYSCALL_TICK,
    };

    if (!rt_atomic_flag_test_and_set(&rt_tick_pending, RT_ATOMIC_ACQUIRE))
    {
        (void)old_tick;
        rt_logf("syscall: tick %lu\n", old_tick + 1);
        rt_syscall_push(&rt_tick_record);
        rt_syscall_pend();
    }
}

unsigned long rt_tick_count(void)
{
    return rt_atomic_load(&rt_tick, RT_ATOMIC_RELAXED);
}

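/* Push a syscall record onto the pending stack with a lock-free
 * compare-and-swap loop (a Treiber stack). This is safe to call concurrently
 * from tasks and interrupts; rt_syscall_run later takes the whole stack in a
 * single atomic exchange. */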
void rt_syscall_push(struct rt_syscall_record *record)
{
    record->next = rt_atomic_load(&rt_pending_syscalls, RT_ATOMIC_RELAXED);
    while (!rt_atomic_compare_exchange_weak(&rt_pending_syscalls,
                                            &record->next, record,
                                            RT_ATOMIC_RELEASE,
                                            RT_ATOMIC_RELAXED))
    {
    }
}

void rt_task_cycle_pause(void)
{
#if RT_TASK_CYCLE_ENABLE
    /* TODO: Make this safe to call from any interrupt. */
    const uint32_t task_cycles = rt_cycle() - rt_active_task->start_cycle;
    rt_active_task->total_cycles += task_cycles;
#endif
}

void rt_task_cycle_resume(void)
{
#if RT_TASK_CYCLE_ENABLE
    rt_active_task->start_cycle = rt_cycle();
#endif
}

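/* The system call handler: drain the pending syscall stack, apply each record
 * to the scheduler's state, then run the scheduler. Returns the context to
 * switch to, or NULL if the active task should continue. An empty stack means
 * the active task trapped without pushing a record (as rt_task_yield does),
 * which is treated as a yield. */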
void *rt_syscall_run(void)
{
    rt_task_cycle_pause();

    /*
     * Take all elements on the pending syscall stack at once. Syscalls added
     * after this step will be on a new stack.
     */
    struct rt_syscall_record *record =
        rt_atomic_exchange(&rt_pending_syscalls, NULL, RT_ATOMIC_ACQUIRE);
    bool yield = record == NULL;
    while (record != NULL)
    {
        /* Store the next record in the list now because some syscall records
         * may be re-enabled immediately after they are handled. */
        struct rt_syscall_record *next_record = record->next;
        switch (record->op)
        {
        case RT_SYSCALL_TICK:
            rt_atomic_flag_clear(&rt_tick_pending, RT_ATOMIC_RELEASE);
            tick_syscall();
            yield = true;
            break;
        case RT_SYSCALL_TASK_SLEEP:
        {
            const unsigned long ticks = record->args.task_sleep.ticks;
            rt_active_task->state = RT_TASK_STATE_ASLEEP;
            sleep_until(rt_active_task, rt_woken_tick + ticks);
            break;
        }
        case RT_SYSCALL_TASK_SLEEP_PERIODIC:
        {
            const unsigned long
                last_wake_tick =
                    record->args.task_sleep_periodic.last_wake_tick,
                period = record->args.task_sleep_periodic.period,
                ticks_since_last_wake = rt_woken_tick - last_wake_tick;
            /* If there have been at least as many ticks as the period since
             * the last wake, then the desired wake-up tick has already
             * occurred. */
            if (ticks_since_last_wake < period)
            {
                rt_active_task->state = RT_TASK_STATE_ASLEEP;
                sleep_until(rt_active_task, last_wake_tick + period);
            }
            break;
        }
        case RT_SYSCALL_SEM_WAIT:
        {
            struct rt_sem *const sem = record->args.sem_wait.sem;
            rt_active_task->state = RT_TASK_STATE_BLOCKED;
            insert_by_priority(&sem->wait_list, rt_active_task);
            ++sem->num_waiters;
            /* Evaluate semaphore wakes here as well in case a post occurred
             * before the wait syscall was handled. */
            wake_sem_waiters(sem);
            break;
        }
        case RT_SYSCALL_SEM_TIMEDWAIT:
        {
            struct rt_sem *const sem = record->args.sem_timedwait.sem;
            const unsigned long ticks = record->args.sem_timedwait.ticks;
            rt_active_task->state = RT_TASK_STATE_BLOCKED_TIMEOUT;
            rt_active_task->timeout_ptr.sem = &record->args.sem_timedwait.sem;
            insert_by_priority(&sem->wait_list, rt_active_task);
            sleep_until(rt_active_task, rt_woken_tick + ticks);
            ++sem->num_waiters;
            wake_sem_waiters(sem);
            break;
        }
        case RT_SYSCALL_SEM_POST:
        {
            struct rt_sem *const sem = record->args.sem_post.sem;
            /* Allow another post syscall from an interrupt to occur while
             * wakes are evaluated so that no posts are missed. */
            if (record == &sem->post_record)
            {
                rt_atomic_flag_clear(&sem->post_pending, RT_ATOMIC_RELEASE);
            }
            rt_sem_add_n(sem, record->args.sem_post.n);
            wake_sem_waiters(sem);
            break;
        }
        case RT_SYSCALL_MUTEX_LOCK:
        {
            struct rt_mutex *const mutex = record->args.mutex_lock.mutex;
            rt_active_task->state = RT_TASK_STATE_BLOCKED;
            rt_active_task->blocking_mutex = mutex;
            insert_by_priority(&mutex->wait_list, rt_active_task);
            /* When adding a new waiter, we must donate its priority to the
             * task holding the mutex, and transitively to any mutexes that
             * task is blocked on. */
            mutex_donate(mutex);
            wake_mutex_waiter(mutex);
            break;
        }
        case RT_SYSCALL_MUTEX_TIMEDLOCK:
        {
            struct rt_mutex *const mutex = record->args.mutex_timedlock.mutex;
            const unsigned long ticks = record->args.mutex_timedlock.ticks;
            rt_active_task->state = RT_TASK_STATE_BLOCKED_TIMEOUT;
            rt_active_task->blocking_mutex = mutex;
            rt_active_task->timeout_ptr.mutex =
                &record->args.mutex_timedlock.mutex;
            insert_by_priority(&mutex->wait_list, rt_active_task);
            sleep_until(rt_active_task, rt_woken_tick + ticks);
            mutex_donate(mutex);
            wake_mutex_waiter(mutex);
            break;
        }
        case RT_SYSCALL_MUTEX_UNLOCK:
        {
            struct rt_mutex *const mutex = record->args.mutex_unlock.mutex;
            rt_atomic_store(&mutex->holder, 0, RT_ATOMIC_RELEASE);
            rt_list_remove(&mutex->list);
            /* When unlocking, the only donated priority that can change is
             * the unlocking task's. */
            task_donate(rt_active_task);
            wake_mutex_waiter(mutex);
            break;
        }
        case RT_SYSCALL_TASK_READY:
            rt_task_ready(record->args.task_ready.task);
            break;
        case RT_SYSCALL_TASK_EXIT:
            rt_active_task->state = RT_TASK_STATE_EXITED;
            break;
        }
        record = next_record;
    }

    void *const new_ctx = sched(yield);
    rt_task_cycle_resume();
    return new_ctx;
}