rework rwlock and add {try,timed}{rd,wr}lock and {rd,wr}unlock

Chris Copeland 2023-10-24 23:52:25 -07:00
parent 4192c24579
commit 313bda2380
Signed by: chrisnc
GPG Key ID: 14550DA72485DF30
4 changed files with 257 additions and 47 deletions

View File

@@ -28,14 +28,13 @@ static void writer(void)
         ++x;
         ++y;
         rt_rwlock_unlock(&lock);
-        rt_task_sleep(1);
     }
 }
 
 static void timeout(void)
 {
     rt_task_drop_privilege();
-    rt_task_sleep(1000);
+    rt_task_sleep(100);
     rt_trap();
 }
 
@@ -43,5 +42,5 @@ static void timeout(void)
 
 RT_TASK(reader, STACK_SIZE, 1);
 RT_TASK(reader, STACK_SIZE, 1);
-RT_TASK(writer, STACK_SIZE, 1);
+RT_TASK(writer, STACK_SIZE, 2);
 RT_TASK(timeout, RT_STACK_MIN, 2);
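
The reader tasks registered above are not shown in this diff. For context, a hypothetical sketch of a reader body consistent with the writer hunk; the declarations mirror what the example file presumably contains, and none of this is part of the commit:

#include <rt/rwlock.h>
#include <rt/task.h>

static RT_RWLOCK(lock);
static int x, y;

static void reader(void)
{
    rt_task_drop_privilege();
    for (;;)
    {
        rt_rwlock_rdlock(&lock);
        /* x and y are only updated together while the write lock is held,
         * so a reader must never observe them unequal. */
        if (x != y)
        {
            rt_trap();
        }
        rt_rwlock_unlock(&lock);
    }
}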

View File

@@ -1,5 +1,6 @@
 #pragma once
 
+#include <rt/atomic.h>
 #include <rt/cond.h>
 #include <rt/mutex.h>
 
@@ -13,21 +14,33 @@ void rt_rwlock_init(struct rt_rwlock *lock);
 
 void rt_rwlock_rdlock(struct rt_rwlock *lock);
+
+bool rt_rwlock_tryrdlock(struct rt_rwlock *lock);
+
+bool rt_rwlock_timedrdlock(struct rt_rwlock *lock, unsigned long ticks);
+
+void rt_rwlock_rdunlock(struct rt_rwlock *lock);
 
 void rt_rwlock_wrlock(struct rt_rwlock *lock);
+
+bool rt_rwlock_trywrlock(struct rt_rwlock *lock);
+
+bool rt_rwlock_timedwrlock(struct rt_rwlock *lock, unsigned long ticks);
+
+void rt_rwlock_wrunlock(struct rt_rwlock *lock);
 
 void rt_rwlock_unlock(struct rt_rwlock *lock);
 
 struct rt_rwlock
 {
+    rt_atomic_uint value;
     struct rt_mutex mutex;
-    struct rt_cond rcond, wcond;
-    int num_readers, num_writers;
+    struct rt_cond cond;
 };
 
 #define RT_RWLOCK_INIT(name) \
     { \
-        .mutex = RT_MUTEX_INIT(name.mutex), .rcond = RT_COND_INIT(name.rcond), \
-        .wcond = RT_COND_INIT(name.wcond), .num_readers = 0, .num_writers = 0, \
+        .value = 0U, .mutex = RT_MUTEX_INIT(name.mutex), \
+        .cond = RT_COND_INIT(name.cond), \
     }
 
 #define RT_RWLOCK(name) struct rt_rwlock name = RT_RWLOCK_INIT(name)
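
Taken together, the header now exposes blocking, nonblocking, and timed acquisition for each side, plus side-specific unlocks. A minimal usage sketch, not part of the commit; the lock name, the guarded data, and the 10-tick budget are illustrative:

#include <stdbool.h>

#include <rt/rwlock.h>

static RT_RWLOCK(db_lock);
static int db_value;

static int read_db(void)
{
    /* Shared access: any number of readers may hold the lock at once. */
    rt_rwlock_rdlock(&db_lock);
    const int v = db_value;
    rt_rwlock_rdunlock(&db_lock);
    return v;
}

static bool update_db(int v)
{
    /* Exclusive access, but give up rather than block past 10 ticks. */
    if (!rt_rwlock_timedwrlock(&db_lock, 10))
    {
        return false;
    }
    db_value = v;
    rt_rwlock_wrunlock(&db_lock);
    return true;
}

rt_rwlock_unlock is retained for callers that release a guard without tracking which side they hold; as the implementation below shows, it dispatches on whether the calling task holds the lock's mutex.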

View File

@@ -7,10 +7,13 @@ use core::{
 
 use crate::{
     bindings::{
-        rt_cond, rt_mutex, rt_rwlock, rt_rwlock_rdlock, rt_rwlock_unlock, rt_rwlock_wrlock,
+        rt_cond, rt_mutex, rt_rwlock, rt_rwlock_rdlock, rt_rwlock_rdunlock, rt_rwlock_timedrdlock,
+        rt_rwlock_timedwrlock, rt_rwlock_tryrdlock, rt_rwlock_trywrlock, rt_rwlock_wrlock,
+        rt_rwlock_wrunlock,
     },
     list::list_init,
     sync::semaphore::c_sem_init,
+    tick::Utick,
 };
 
 pub struct RwLock<T: ?Sized> {
@@ -35,14 +38,10 @@ impl<T> RwLock<T> {
                 list: list_init(&l.mutex.list),
                 level: -1,
             },
-            rcond: rt_cond {
-                sem: c_sem_init(&l.rcond.sem, 0, 0),
+            cond: rt_cond {
+                sem: c_sem_init(&l.cond.sem, 0, 0),
             },
-            wcond: rt_cond {
-                sem: c_sem_init(&l.wcond.sem, 0, 0),
-            },
-            num_readers: 0,
-            num_writers: 0,
+            value: 0,
         }),
         _pin_marker: PhantomPinned,
         data: UnsafeCell::new(t),
@@ -58,6 +57,22 @@ impl<T: ?Sized> RwLock<T> {
         RwLockReadGuard::new(self)
     }
 
+    pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
+        if unsafe { rt_rwlock_tryrdlock(self.l.get()) } {
+            Some(RwLockReadGuard::new(self))
+        } else {
+            None
+        }
+    }
+
+    pub fn timed_read(&self, ticks: Utick) -> Option<RwLockReadGuard<T>> {
+        if unsafe { rt_rwlock_timedrdlock(self.l.get(), ticks) } {
+            Some(RwLockReadGuard::new(self))
+        } else {
+            None
+        }
+    }
+
     pub fn write(&self) -> RwLockWriteGuard<'_, T> {
         unsafe {
             rt_rwlock_wrlock(self.l.get());
@@ -65,6 +80,22 @@ impl<T: ?Sized> RwLock<T> {
         RwLockWriteGuard::new(self)
     }
 
+    pub fn try_write(&self) -> Option<RwLockWriteGuard<T>> {
+        if unsafe { rt_rwlock_trywrlock(self.l.get()) } {
+            Some(RwLockWriteGuard::new(self))
+        } else {
+            None
+        }
+    }
+
+    pub fn timed_write(&self, ticks: Utick) -> Option<RwLockWriteGuard<T>> {
+        if unsafe { rt_rwlock_timedwrlock(self.l.get(), ticks) } {
+            Some(RwLockWriteGuard::new(self))
+        } else {
+            None
+        }
+    }
+
     // Probably not useful because this RwLock must be static.
     pub fn get_mut(&mut self) -> &mut T {
         self.data.get_mut()
@@ -104,7 +135,7 @@ impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
 
 impl<T: ?Sized> Drop for RwLockReadGuard<'_, T> {
     #[inline]
     fn drop(&mut self) {
-        unsafe { rt_rwlock_unlock(self.lock.l.get()) }
+        unsafe { rt_rwlock_rdunlock(self.lock.l.get()) }
     }
 }
@@ -147,7 +178,7 @@ impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
 
 impl<T: ?Sized> Drop for RwLockWriteGuard<'_, T> {
     #[inline]
     fn drop(&mut self) {
-        unsafe { rt_rwlock_unlock(self.lock.l.get()) }
+        unsafe { rt_rwlock_wrunlock(self.lock.l.get()) }
     }
 }

View File

@@ -1,61 +1,228 @@
 #include <rt/rwlock.h>
 
+#include <rt/interrupt.h>
 #include <rt/task.h>
+#include <rt/tick.h>
 
 void rt_rwlock_init(struct rt_rwlock *lock)
 {
     rt_mutex_init(&lock->mutex);
-    rt_cond_init(&lock->rcond);
-    rt_cond_init(&lock->wcond);
-    lock->num_readers = 0;
-    lock->num_writers = 0;
+    rt_cond_init(&lock->cond);
+    rt_atomic_store(&lock->value, 0, RT_ATOMIC_RELEASE);
 }
 
+#define WRITE_MASK (1U << ((sizeof(unsigned) * CHAR_BIT) - 1))
+#define WRITE_PENDING_MASK (1U << ((sizeof(unsigned) * CHAR_BIT) - 2))
+#define READER_MASK (~(WRITE_MASK | WRITE_PENDING_MASK))
+
 void rt_rwlock_rdlock(struct rt_rwlock *lock)
 {
-    rt_mutex_lock(&lock->mutex);
-    while (lock->num_writers > 0)
+    if (rt_rwlock_tryrdlock(lock))
     {
-        rt_cond_wait(&lock->rcond, &lock->mutex);
+        return;
     }
-    ++lock->num_readers;
+    /* If the lock is held by a writer, block on the mutex. Once it can be
+     * acquired, the writer is gone. */
+    rt_mutex_lock(&lock->mutex);
+    rt_atomic_fetch_add(&lock->value, 1, RT_ATOMIC_RELAXED);
     rt_mutex_unlock(&lock->mutex);
 }
 
+bool rt_rwlock_tryrdlock(struct rt_rwlock *lock)
+{
+    unsigned value = rt_atomic_load(&lock->value, RT_ATOMIC_RELAXED);
+    do
+    {
+        // Fail if a writer currently holds the lock.
+        if ((value & WRITE_MASK) != 0)
+        {
+            return false;
+        }
+    } while (!rt_atomic_compare_exchange_weak(&lock->value, &value, value + 1,
+                                              RT_ATOMIC_ACQUIRE,
+                                              RT_ATOMIC_RELAXED));
+    return true;
+}
+
+bool rt_rwlock_timedrdlock(struct rt_rwlock *lock, unsigned long ticks)
+{
+    if (rt_rwlock_tryrdlock(lock))
+    {
+        return true;
+    }
+    /* As in rdlock, wait for the writer to release the mutex, but give up
+     * after the timeout. */
+    if (!rt_mutex_timedlock(&lock->mutex, ticks))
+    {
+        return false;
+    }
+    rt_atomic_fetch_add(&lock->value, 1, RT_ATOMIC_RELAXED);
+    rt_mutex_unlock(&lock->mutex);
+    return true;
+}
+
+void rt_rwlock_rdunlock(struct rt_rwlock *lock)
+{
+    const unsigned value =
+        rt_atomic_fetch_sub(&lock->value, 1, RT_ATOMIC_RELAXED);
+    if ((value & WRITE_PENDING_MASK) != 0 && !rt_interrupt_is_active())
+    {
+        /* Note: WRITE_PENDING_MASK can only be set by a task, and only when
+         * another task held a read lock at the time wrlock was attempted, so
+         * if this unlock is running in an interrupt, there is a reader in
+         * task context that can deliver the signal instead. */
+        rt_mutex_lock(&lock->mutex);
+        rt_cond_signal(&lock->cond);
+        rt_mutex_unlock(&lock->mutex);
+    }
+}
+
 void rt_rwlock_wrlock(struct rt_rwlock *lock)
 {
     rt_mutex_lock(&lock->mutex);
-    ++lock->num_writers;
-    while (lock->num_readers > 0)
+    unsigned value = rt_atomic_load(&lock->value, RT_ATOMIC_RELAXED);
+    for (;;)
     {
-        rt_cond_wait(&lock->wcond, &lock->mutex);
+        unsigned newvalue, readers;
+        do
+        {
+            readers = value & READER_MASK;
+            if (readers > 0)
+            {
+                /* Set a write pending bit if there are readers, so the last
+                 * reader knows to take the lock and signal. */
+                newvalue = value | WRITE_PENDING_MASK;
+            }
+            else
+            {
+                newvalue = value | WRITE_MASK;
+            }
+        } while (!rt_atomic_compare_exchange_weak(&lock->value, &value,
+                                                  newvalue, RT_ATOMIC_RELAXED,
+                                                  RT_ATOMIC_RELAXED));
+        if (readers == 0)
+        {
+            // If there were no readers, we have exclusive access.
+            return;
+        }
+        do
+        {
+            // Wait until there are no readers.
+            rt_cond_wait(&lock->cond, &lock->mutex);
+            value = rt_atomic_load(&lock->value, RT_ATOMIC_RELAXED);
+        } while ((value & READER_MASK) != 0);
     }
 }
 
+bool rt_rwlock_trywrlock(struct rt_rwlock *lock)
+{
+    if (!rt_mutex_trylock(&lock->mutex))
+    {
+        return false;
+    }
+    unsigned value = rt_atomic_load(&lock->value, RT_ATOMIC_RELAXED);
+    do
+    {
+        if ((value & READER_MASK) != 0)
+        {
+            // There are readers.
+            rt_mutex_unlock(&lock->mutex);
+            return false;
+        }
+    } while (!rt_atomic_compare_exchange_weak(&lock->value, &value, WRITE_MASK,
+                                              RT_ATOMIC_RELAXED,
+                                              RT_ATOMIC_RELAXED));
+    return true;
+}
+
+bool rt_rwlock_timedwrlock(struct rt_rwlock *lock, unsigned long ticks)
+{
+    if (rt_rwlock_trywrlock(lock))
+    {
+        return true;
+    }
+    const unsigned long start_tick = rt_tick_count();
+    if (!rt_mutex_timedlock(&lock->mutex, ticks))
+    {
+        return false;
+    }
+    /* Charge the time spent acquiring the mutex against the timeout. */
+    unsigned long mid_tick = rt_tick_count();
+    const unsigned long lock_ticks = mid_tick - start_tick;
+    if (lock_ticks < ticks)
+    {
+        ticks -= lock_ticks;
+    }
+    else
+    {
+        ticks = 0;
+    }
+    unsigned value = rt_atomic_load(&lock->value, RT_ATOMIC_RELAXED);
+    for (;;)
+    {
+        unsigned newvalue, readers;
+        do
+        {
+            readers = value & READER_MASK;
+            if (readers > 0)
+            {
+                /* Set a write pending bit if there are readers, so the last
+                 * reader knows to take the lock and signal. */
+                newvalue = value | WRITE_PENDING_MASK;
+            }
+            else
+            {
+                newvalue = value | WRITE_MASK;
+            }
+        } while (!rt_atomic_compare_exchange_weak(&lock->value, &value,
+                                                  newvalue, RT_ATOMIC_RELAXED,
+                                                  RT_ATOMIC_RELAXED));
+        if (readers == 0)
+        {
+            // If there were no readers, we have exclusive access.
+            return true;
+        }
+        do
+        {
+            // Wait until there are no readers.
+            if (!rt_cond_timedwait(&lock->cond, &lock->mutex, ticks))
+            {
+                return false;
+            }
+            /* Charge the wait against the remaining timeout as well. */
+            const unsigned long end_tick = rt_tick_count();
+            const unsigned long wait_ticks = end_tick - mid_tick;
+            mid_tick = end_tick;
+            if (wait_ticks < ticks)
+            {
+                ticks -= wait_ticks;
+            }
+            else
+            {
+                ticks = 0;
+            }
+            value = rt_atomic_load(&lock->value, RT_ATOMIC_RELAXED);
+        } while ((value & READER_MASK) != 0);
+    }
+}
+
+void rt_rwlock_wrunlock(struct rt_rwlock *lock)
+{
+    rt_atomic_store(&lock->value, 0, RT_ATOMIC_RELEASE);
+    rt_cond_signal(&lock->cond);
+    rt_mutex_unlock(&lock->mutex);
+}
+
 void rt_rwlock_unlock(struct rt_rwlock *lock)
 {
-    const bool is_writer =
-        (uintptr_t)rt_task_self() ==
-        (rt_atomic_load(&lock->mutex.holder, RT_ATOMIC_RELAXED) &
-         RT_MUTEX_HOLDER_MASK);
-    if (is_writer)
+    /* A writer holds the mutex for its whole critical section, so if the
+     * calling task is the mutex holder, this was a write lock. */
+    if (rt_mutex_holder(&lock->mutex) == rt_task_self())
     {
-        --lock->num_writers;
-        if (lock->num_writers == 0)
-        {
-            rt_cond_broadcast(&lock->rcond);
-        }
+        rt_rwlock_wrunlock(lock);
     }
     else
     {
-        rt_mutex_lock(&lock->mutex);
-        --lock->num_readers;
-        if (lock->num_readers == 0)
-        {
-            rt_cond_broadcast(&lock->wcond);
-        }
+        rt_rwlock_rdunlock(lock);
     }
-    rt_mutex_unlock(&lock->mutex);
 }
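
To make the value encoding concrete: the low bits count readers, the top bit marks a writer holding the lock, and the next bit marks a writer waiting for readers to drain; on a 32-bit unsigned that is bit 31, bit 30, and a 30-bit reader count. A standalone sketch of the arithmetic, not part of the commit; it duplicates the masks above and includes <limits.h> explicitly, which the masks' use of CHAR_BIT requires to be in scope:

#include <limits.h>
#include <stdio.h>

#define WRITE_MASK (1U << ((sizeof(unsigned) * CHAR_BIT) - 1))
#define WRITE_PENDING_MASK (1U << ((sizeof(unsigned) * CHAR_BIT) - 2))
#define READER_MASK (~(WRITE_MASK | WRITE_PENDING_MASK))

int main(void)
{
    unsigned value = 0U;

    value += 3;                  /* three rdlocks each add 1 to the count */
    value |= WRITE_PENDING_MASK; /* a wrlock attempt finds readers and parks */
    printf("readers=%u pending=%d held=%d\n", value & READER_MASK,
           (value & WRITE_PENDING_MASK) != 0, (value & WRITE_MASK) != 0);

    value -= 3;         /* each rdunlock subtracts 1 and sees the pending bit;
                         * the last reader out signals the parked writer */
    value = WRITE_MASK; /* the writer's CAS then claims exclusive ownership */
    printf("readers=%u pending=%d held=%d\n", value & READER_MASK,
           (value & WRITE_PENDING_MASK) != 0, (value & WRITE_MASK) != 0);
    return 0;
}

This prints "readers=3 pending=1 held=0" and then "readers=0 pending=0 held=1", tracing the hand-off that rt_rwlock_rdunlock and the wrlock CAS loop perform between them.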