rt/src/sem.c

#include <rt/sem.h>
#include <rt/interrupt.h>
#include <rt/log.h>
#include <rt/task.h>

#include <limits.h>
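
/* Initialize a semaphore with an initial count and a maximum value that
 * posts will not raise the count above. */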
void rt_sem_init_max(struct rt_sem *sem, int count, int max)
{
    rt_atomic_store(&sem->value, count, RT_ATOMIC_RELAXED);
    sem->max_value = max;
    rt_list_init(&sem->wait_list);
    sem->post_record.args.sem_post.sem = sem;
    sem->post_record.syscall = RT_SYSCALL_PENDABLE_SEM_POST;
    sem->num_waiters = 0;
    rt_atomic_flag_clear(&sem->post_pending, RT_ATOMIC_RELEASE);
}
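
/* Initialize a counting semaphore with an effectively unbounded maximum. */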
void rt_sem_init(struct rt_sem *sem, int count)
{
    rt_sem_init_max(sem, count, INT_MAX);
}
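
/* Initialize a binary semaphore: the count starts at 0 and saturates at 1. */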
void rt_sem_init_binary(struct rt_sem *sem)
{
    rt_sem_init_max(sem, 0, 1);
}
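
/* Return value + n, saturating at max rather than overflowing. */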
static int new_value(int value, int n, int max)
{
    if (value <= max - n)
    {
        return value + n;
    }
    return max;
}
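
/* Perform a post through a system call, choosing the mechanism based on
 * whether we are running in an interrupt or in a task. */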
static void sem_post_syscall(struct rt_sem *sem, int n)
{
    /* In an interrupt, we need to use the post system call record attached
     * to the semaphore because the interrupt will return before system
     * calls are handled and the stack frame won't be live anymore. */
    if (rt_interrupt_is_active())
    {
        /* If the semaphore's post record is already pending, don't attempt
         * to use it again. The interrupt that is using it will still cause
         * the post to occur, so no posts are missed in this case. Instead,
         * just add to the semaphore value directly. The system call will
         * run after this increment has taken effect. */
        if (rt_atomic_flag_test_and_set(&sem->post_pending,
                                        RT_ATOMIC_ACQUIRE))
        {
            rt_sem_add_n(sem, n);
            return;
        }
        sem->post_record.args.sem_post.n = n;
        rt_syscall_push(&sem->post_record);
        rt_syscall_pend();
    }
    else
    {
        rt_syscall_sem_post(sem, n);
    }
}
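
/* Post n to the semaphore. The value is updated directly on the fast path;
 * if there are waiters, the post is deferred to a system call so that wakes
 * are resolved before other tasks can take the semaphore. */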
void rt_sem_post_n(struct rt_sem *sem, int n)
{
    int value = rt_atomic_load(&sem->value, RT_ATOMIC_RELAXED);
    rt_logf("%s sem post %d, old value %d\n", rt_task_name(), n, value);
    do
    {
        if (value < 0)
        {
            /* If the value is negative, then the post needs to happen in a
             * system call because there are waiters. Adding to the
             * semaphore value directly when there are waiters can result in
             * priority inversion if a context switch occurs before wakes
             * are resolved but after the value is incremented, and the
             * semaphore is decremented on the fast path by another task
             * that is lower priority than a previous waiter. */
            sem_post_syscall(sem, n);
            return;
        }
    } while (!rt_atomic_compare_exchange_weak(&sem->value, &value,
                                              new_value(value, n,
                                                        sem->max_value),
                                              RT_ATOMIC_RELEASE,
                                              RT_ATOMIC_RELAXED));
}
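
/* Post a single count to the semaphore. */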
void rt_sem_post(struct rt_sem *sem)
{
    rt_sem_post_n(sem, 1);
}
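
/* Attempt to take the semaphore without blocking. Returns true if a count
 * was taken. */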
bool rt_sem_trywait(struct rt_sem *sem)
{
    int value = rt_atomic_load(&sem->value, RT_ATOMIC_RELAXED);
    do
    {
        if (value <= 0)
        {
            rt_logf("%s sem trywait failed, value %d\n", rt_task_name(),
                    value);
            return false;
        }
    } while (!rt_atomic_compare_exchange_weak(&sem->value, &value, value - 1,
                                              RT_ATOMIC_ACQUIRE,
                                              RT_ATOMIC_RELAXED));
    rt_logf("%s sem trywait, new value %d\n", rt_task_name(), value - 1);
    return true;
}
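
/* Take the semaphore, blocking in a system call if no count is available. */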
void rt_sem_wait(struct rt_sem *sem)
{
    if (!rt_sem_trywait(sem))
    {
        rt_syscall_sem_wait(sem);
    }
}
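
/* Take the semaphore, blocking for at most the given number of ticks.
 * Returns true if a count was taken before the timeout. */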
bool rt_sem_timedwait(struct rt_sem *sem, unsigned long ticks)
{
    return rt_sem_trywait(sem) ||
           ((ticks != 0) && rt_syscall_sem_timedwait(sem, ticks));
}
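
/* Add n to the semaphore value directly without waking any waiters; used
 * when the semaphore's post record is already pending in a system call. */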
void rt_sem_add_n(struct rt_sem *sem, int n)
{
    int value = rt_atomic_load(&sem->value, RT_ATOMIC_RELAXED);
    while (!rt_atomic_compare_exchange_weak(&sem->value, &value,
                                            new_value(value, n,
                                                      sem->max_value),
                                            RT_ATOMIC_RELEASE,
                                            RT_ATOMIC_RELAXED))
    {
    }
}
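
/* Usage sketch (illustrative only; uart_isr, reader, and rx_sem are
 * hypothetical and not part of this file). An interrupt handler posts the
 * semaphore, which takes the sem_post_syscall path using the semaphore's
 * own post record; a task blocks in rt_sem_wait until a count arrives:
 *
 *     static struct rt_sem rx_sem;
 *
 *     void uart_isr(void)
 *     {
 *         // Runs with rt_interrupt_is_active() true, so the post is
 *         // deferred via the semaphore's pendable post record.
 *         rt_sem_post(&rx_sem);
 *     }
 *
 *     static void reader(void *arg)
 *     {
 *         rt_sem_init(&rx_sem, 0);
 *         for (;;)
 *         {
 *             rt_sem_wait(&rx_sem); // blocks until the ISR posts
 *             // ... consume received data ...
 *         }
 *     }
 */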