/* rt/src/rwlock.c — reader-writer lock implementation. */
#include <rt/rwlock.h>
#include <rt/interrupt.h>
#include <rt/task.h>
#include <rt/tick.h>
#include <limits.h>
void rt_rwlock_init(struct rt_rwlock *lock)
{
    /* Start with no readers, no writer, and no pending writer. */
    rt_atomic_store(&lock->value, 0, RT_ATOMIC_RELEASE);
    rt_mutex_init(&lock->mutex);
    rt_cond_init(&lock->cond);
}
/* Layout of lock->value: the top bit is set while a writer holds the lock. */
#define WRITE_MASK (1U << ((sizeof(unsigned) * CHAR_BIT) - 1))
/* Second-highest bit: a writer is waiting for the active readers to drain. */
#define WRITE_PENDING_MASK (1U << ((sizeof(unsigned) * CHAR_BIT) - 2))
/* All remaining low bits hold the count of active readers. */
#define READER_MASK (~(WRITE_MASK | WRITE_PENDING_MASK))
void rt_rwlock_rdlock(struct rt_rwlock *lock)
{
    /* Fast path: grab a read slot without touching the mutex. */
    if (!rt_rwlock_tryrdlock(lock))
    {
        /* A writer holds the lock, so block on the mutex. Once it can be
         * acquired, the writer is gone; register as a reader while holding
         * it (the mutex acquisition provides the needed ordering). */
        rt_mutex_lock(&lock->mutex);
        rt_atomic_fetch_add(&lock->value, 1, RT_ATOMIC_RELAXED);
        rt_mutex_unlock(&lock->mutex);
    }
}
bool rt_rwlock_tryrdlock(struct rt_rwlock *lock)
{
    /* Attempt to bump the reader count; fail if a writer currently holds
     * the lock. Acquire ordering on success pairs with the writer's
     * release in rt_rwlock_wrunlock. */
    unsigned v = rt_atomic_load(&lock->value, RT_ATOMIC_RELAXED);
    for (;;)
    {
        if ((v & WRITE_MASK) != 0U)
        {
            /* A writer owns the lock. */
            return false;
        }
        if (rt_atomic_compare_exchange_weak(&lock->value, &v, v + 1U,
                                            RT_ATOMIC_ACQUIRE,
                                            RT_ATOMIC_RELAXED))
        {
            return true;
        }
        /* CAS failed; v was reloaded, try again. */
    }
}
bool rt_rwlock_timedrdlock(struct rt_rwlock *lock, unsigned long ticks)
{
    /* Fast path: take a read slot immediately if no writer holds the lock. */
    if (!rt_rwlock_tryrdlock(lock))
    {
        /* A writer holds the lock; wait on the mutex up to the deadline.
         * Acquiring it means the writer has released. */
        if (!rt_mutex_timedlock(&lock->mutex, ticks))
        {
            return false;
        }
        rt_atomic_fetch_add(&lock->value, 1, RT_ATOMIC_RELAXED);
        rt_mutex_unlock(&lock->mutex);
    }
    return true;
}
void rt_rwlock_rdunlock(struct rt_rwlock *lock)
{
const unsigned value =
rt_atomic_fetch_sub(&lock->value, 1, RT_ATOMIC_RELAXED);
if ((value == (WRITE_PENDING_MASK | 1U)) && !rt_interrupt_is_active())
{
/* If this is the last reader and a writer is pending, signal that it
* can re-attempt to wrlock. */
rt_cond_signal(&lock->cond);
}
}
void rt_rwlock_wrlock(struct rt_rwlock *lock)
{
rt_mutex_lock(&lock->mutex);
unsigned value = rt_atomic_load(&lock->value, RT_ATOMIC_RELAXED);
for (;;)
{
unsigned newvalue, readers;
do
{
readers = value & READER_MASK;
if (readers > 0)
{
/* Set a write pending bit if there are readers, so the last
* reader knows to take the lock and signal. */
newvalue = value | WRITE_PENDING_MASK;
}
else
{
newvalue = WRITE_MASK;
}
} while (!rt_atomic_compare_exchange_weak(&lock->value, &value,
newvalue, RT_ATOMIC_RELAXED,
RT_ATOMIC_RELAXED));
if (readers == 0)
{
// If there were no readers, we have exclusive access.
return;
}
do
{
// Wait until there are no readers.
rt_cond_wait(&lock->cond, &lock->mutex);
value = rt_atomic_load(&lock->value, RT_ATOMIC_RELAXED);
} while ((value & READER_MASK) != 0);
}
}
bool rt_rwlock_trywrlock(struct rt_rwlock *lock)
{
if (!rt_mutex_trylock(&lock->mutex))
{
return false;
}
unsigned value = rt_atomic_load(&lock->value, RT_ATOMIC_RELAXED);
do
{
if ((value & READER_MASK) != 0)
{
// There are readers.
rt_mutex_unlock(&lock->mutex);
return false;
}
} while (!rt_atomic_compare_exchange_weak(&lock->value, &value, WRITE_MASK,
RT_ATOMIC_RELAXED,
RT_ATOMIC_RELAXED));
return true;
}
bool rt_rwlock_timedwrlock(struct rt_rwlock *lock, unsigned long ticks)
{
if (rt_rwlock_trywrlock(lock))
{
return true;
}
const unsigned long start_tick = rt_tick_count();
if (!rt_mutex_timedlock(&lock->mutex, ticks))
{
return false;
}
unsigned long mid_tick = rt_tick_count();
const unsigned long lock_ticks = mid_tick - start_tick;
if (lock_ticks < ticks)
{
ticks -= lock_ticks;
}
else
{
ticks = 0;
}
unsigned value = rt_atomic_load(&lock->value, RT_ATOMIC_RELAXED);
for (;;)
{
unsigned newvalue, readers;
do
{
readers = value & READER_MASK;
if (readers > 0)
{
/* Set a write pending bit if there are readers, so the last
* reader knows to take the lock and signal. */
newvalue = value | WRITE_PENDING_MASK;
}
else
{
newvalue = value | WRITE_MASK;
}
} while (!rt_atomic_compare_exchange_weak(&lock->value, &value,
newvalue, RT_ATOMIC_RELAXED,
RT_ATOMIC_RELAXED));
if (readers == 0)
{
// If there were no readers, we have exclusive access.
return true;
}
do
{
// Wait until there are no readers.
if (!rt_cond_timedwait(&lock->cond, &lock->mutex, ticks))
{
return false;
}
const unsigned long end_tick = rt_tick_count();
const unsigned long wait_ticks = end_tick - mid_tick;
mid_tick = end_tick;
if (wait_ticks < ticks)
{
ticks -= wait_ticks;
}
else
{
ticks = 0;
}
value = rt_atomic_load(&lock->value, RT_ATOMIC_RELAXED);
} while ((value & READER_MASK) != 0);
}
}
void rt_rwlock_wrunlock(struct rt_rwlock *lock)
{
    /* Clear the whole value (write bit, pending bit, reader count) with
     * release ordering so the writer's accesses are published to the next
     * acquirer. */
    rt_atomic_store(&lock->value, 0, RT_ATOMIC_RELEASE);
    /* NOTE(review): looks defensive — while WRITE_MASK is held this writer
     * also holds the mutex, so no writer should be blocked in cond_wait at
     * this point; confirm whether any waiter can actually be woken here. */
    rt_cond_signal(&lock->cond);
    /* Finally release the mutex, letting blocked writers and slow-path
     * readers through. */
    rt_mutex_unlock(&lock->mutex);
}
void rt_rwlock_unlock(struct rt_rwlock *lock)
{
    /* Generic unlock: decide whether the caller holds the lock for writing
     * or reading. A write lock is always held together with the internal
     * mutex, so inspect the mutex's holder word. */
    const uintptr_t holder =
        rt_atomic_load(&lock->mutex.holder, RT_ATOMIC_RELAXED) &
        RT_MUTEX_HOLDER_MASK;
    if ((holder == (uintptr_t)rt_task_self()) ||
        (holder == RT_MUTEX_HOLDER_INTERRUPT))
    {
        /* The current task (or an interrupt — presumably the one running
         * now; confirm RT_MUTEX_HOLDER_INTERRUPT semantics against rt_mutex)
         * owns the mutex, so this must be a write unlock. */
        rt_rwlock_wrunlock(lock);
    }
    else
    {
        /* Otherwise the caller can only be holding a read slot. */
        rt_rwlock_rdunlock(lock);
    }
}