// rt_mutex — mutex lock/unlock implementation for the rt kernel.
#include <rt/mutex.h>
|
|
|
|
#include <rt/assert.h>
|
|
#include <rt/interrupt.h>
|
|
#include <rt/log.h>
|
|
#include <rt/syscall.h>
|
|
#include <rt/task.h>
|
|
|
|
#include <stddef.h>
|
|
|
|
/* Initialize a mutex to the unlocked state.
 *
 * wait_list starts empty (tasks blocked on this mutex); list is presumably
 * the link into a holder's list of held mutexes — TODO confirm against the
 * rt_task/syscall side. The holder word is published last with release
 * ordering so any context that subsequently acquires the mutex observes
 * the initialized lists. */
void rt_mutex_init(struct rt_mutex *mutex)
{
    rt_list_init(&mutex->wait_list);
    rt_list_init(&mutex->list);
    rt_atomic_store(&mutex->holder, RT_MUTEX_UNLOCKED, RT_ATOMIC_RELEASE);
}
|
|
|
|
/* Attempt to acquire the mutex on behalf of new_holder, which is either a
 * task pointer or the RT_MUTEX_HOLDER_INTERRUPT sentinel.
 *
 * Returns true if the mutex was newly acquired, or if new_holder already
 * holds it and the recursion level was bumped; false if another context
 * holds it. */
static bool trylock(struct rt_mutex *mutex, uintptr_t new_holder)
{
    uintptr_t e = RT_MUTEX_UNLOCKED;
    /* Acquire on success so the critical section cannot be reordered before
     * the lock; relaxed on failure — e is only compared below, not used to
     * synchronize. */
    if (rt_atomic_compare_exchange(&mutex->holder, &e, new_holder,
                                   RT_ATOMIC_ACQUIRE, RT_ATOMIC_RELAXED))
    {
        rt_logf("%s mutex lock success\n",
                new_holder == RT_MUTEX_HOLDER_INTERRUPT
                    ? "interrupt"
                    : ((const struct rt_task *)new_holder)->name);
        return true;
    }
    /* CAS failed, so e now holds the current holder word. Mask off the
     * low flag bits (presumably a waiters flag — TODO confirm
     * RT_MUTEX_HOLDER_MASK semantics) before comparing holders. */
    if ((e & RT_MUTEX_HOLDER_MASK) == new_holder)
    {
        // Already holding this mutex.
        /* A negative level appears to mark a non-recursive mutex —
         * relocking one is a programming error. Non-atomic access to
         * level is safe here: only the holder touches it, and the check
         * above proved we are the holder. */
        rt_assert(mutex->level >= 0,
                  "locked a non-recursive mutex more than once");
        ++mutex->level;
        return true;
    }
    return false;
}
|
|
|
|
static inline uintptr_t task_or_interrupt_ptr(void)
|
|
{
|
|
return rt_interrupt_is_active() ? RT_MUTEX_HOLDER_INTERRUPT
|
|
: (uintptr_t)rt_task_self();
|
|
}
|
|
|
|
bool rt_mutex_trylock(struct rt_mutex *mutex)
|
|
{
|
|
return trylock(mutex, task_or_interrupt_ptr());
|
|
}
|
|
|
|
void rt_mutex_lock(struct rt_mutex *mutex)
|
|
{
|
|
rt_assert(!rt_interrupt_is_active(), "mutex lock from an interrupt");
|
|
if (!trylock(mutex, (uintptr_t)rt_task_self()))
|
|
{
|
|
rt_syscall_mutex_lock(mutex);
|
|
}
|
|
}
|
|
|
|
bool rt_mutex_timedlock(struct rt_mutex *mutex, unsigned long ticks)
|
|
{
|
|
rt_assert(!rt_interrupt_is_active() || (ticks == 0),
|
|
"mutex timedlock from an interrupt");
|
|
return trylock(mutex, task_or_interrupt_ptr()) ||
|
|
((ticks != 0) && rt_syscall_mutex_timedlock(mutex, ticks));
|
|
}
|
|
|
|
/* Release the mutex from the current context (task or interrupt).
 *
 * A recursive unlock just decrements the level. The final unlock takes a
 * fast-path CAS back to RT_MUTEX_UNLOCKED; if that fails because the
 * holder word carries extra state, it falls back to the unlock syscall. */
void rt_mutex_unlock(struct rt_mutex *mutex)
{
    if (mutex->level > 0)
    {
        // Mutex is recursive and has been locked more than once.
        /* Non-atomic decrement is race-free: only the holder reaches
         * this path. */
        --mutex->level;
        return;
    }

    const uintptr_t ptr = task_or_interrupt_ptr();
    uintptr_t e = ptr;
    /* Release on success publishes the critical section to the next
     * acquirer; relaxed on failure — e is only compared afterward. The
     * CAS fails when the holder word is not exactly ptr, i.e. extra bits
     * are set (presumably a waiters flag — TODO confirm against
     * RT_MUTEX_HOLDER_MASK) or we do not hold the mutex at all. */
    if (rt_atomic_compare_exchange(&mutex->holder, &e, RT_MUTEX_UNLOCKED,
                                   RT_ATOMIC_RELEASE, RT_ATOMIC_RELAXED))
    {
        rt_logf("%s mutex unlock with no waiters\n",
                rt_interrupt_is_active() ? "interrupt" : rt_task_name());
        // Unlock with no waiters, nothing to do.
        return;
    }

    /* Masked comparison: anything other than "we hold it, with flags set"
     * is a caller bug. */
    rt_assert((e & RT_MUTEX_HOLDER_MASK) == ptr,
              "unlock while not holding the mutex");

    /* This path should only be taken by a task, as any interrupt that unlocks
     * must have done a successful trylock. There should never be any waiting
     * tasks in this case, since an interrupt has been active for the entire
     * duration that the mutex was held. */
    rt_logf("%s mutex unlock\n", rt_task_name());
    rt_syscall_mutex_unlock(mutex);
}
|