refactor MPU interface, simplify start and unify creation of idle task

Chris Copeland 2023-10-07 12:48:02 -07:00
parent 092ced3ecf
commit c4abb96de1
Signed by: chrisnc
GPG Key ID: 14550DA72485DF30
60 changed files with 448 additions and 482 deletions


@ -26,6 +26,14 @@ AddOption(
help="enable sanitizers",
)
AddOption(
"--log",
dest="log",
default=False,
action="store_true",
help="enable console log",
)
if GetOption("sanitize"):
llvm_flags.append("-fsanitize=address,undefined")
@ -63,6 +71,9 @@ env = Environment(
RANLIBCOMSTR="ranlib $TARGET",
)
if GetOption("log"):
env.Append(CPPDEFINES={"RT_LOG_ENABLE": 1})
if sys.platform == "darwin":
env["LINKCOM"] = (
"$LINK -o ${TARGETS[0]} -Wl,-map,${TARGETS[1]}"


@ -5,10 +5,10 @@
#include <rt/interrupt.h>
#include <rt/log.h>
#include <rt/mpu.h>
#include <rt/rt.h>
#include <rt/stack.h>
#include <rt/syscall.h>
#include <rt/task.h>
#include <rt/trap.h>
#define PROFILE_R (__ARM_ARCH_PROFILE == 'R')
#define PROFILE_M (__ARM_ARCH_PROFILE == 'M')
@ -159,9 +159,8 @@ __attribute__((noreturn, weak)) void rt_idle(void)
}
}
__attribute__((noreturn)) void rt_start(void)
void rt_cycle_init(void)
{
#if RT_CYCLE_ENABLE
#if PROFILE_R
// Enable counters and reset the cycle counter.
pmcr_oreq(PMCR_E | PMCR_C);
@ -173,51 +172,9 @@ __attribute__((noreturn)) void rt_start(void)
DEMCR |= DEMCR_TRCENA;
DWT_CTRL |= DWT_CTRL_CYCCNTENA;
#endif // PROFILE
#endif // RT_CYCLE_ENABLE
rt_task_cycle_resume();
static RT_STACK(idle_task_stack, RT_STACK_MIN);
#if RT_MPU_ENABLE
struct rt_mpu_config *const idle_mpu_config = &rt_task_self()->mpu_config;
rt_mpu_config_init(idle_mpu_config);
rt_mpu_config_set(idle_mpu_config, RT_MPU_TASK_REGION_START_ID,
(uintptr_t)idle_task_stack, sizeof idle_task_stack,
RT_MPU_STACK_ATTR);
#endif
#if V8M
// If supported, set the process stack pointer limit.
__asm__("msr psplim, %0" : : "r"(idle_task_stack));
#endif
#if PROFILE_R
/* Switch to system mode and set the stack pointer to the top of the idle
* task stack. */
__asm__("cps %0; mov sp, %1"
:
: "i"(MODE_SYS), "r"(&idle_task_stack[sizeof idle_task_stack]));
#elif PROFILE_M
/* Set the process stack pointer to the top of the idle task stack, switch
* to the process stack pointer, and synchronize instructions.
* NOTE: this also clears the FPCA bit, but floating point context need not
* be preserved between the caller of rt_start and the idle task. */
__asm__("msr psp, %0; msr control, %1; isb"
:
: "r"(&idle_task_stack[sizeof idle_task_stack]),
"r"(CONTROL_SPSEL));
#endif
// Flush memory and enable interrupts.
__asm__("dsb; cpsie i" ::: "memory");
rt_task_yield();
rt_idle();
}
__attribute__((noreturn, weak)) void rt_stop(void)
__attribute__((noreturn, weak)) void rt_trap(void)
{
for (;;)
{
@ -285,7 +242,7 @@ __attribute__((weak)) void rt_assert(bool condition, const char *msg)
if (!condition)
{
rt_assert_msg = msg;
rt_stop();
rt_trap();
}
}


@ -22,7 +22,7 @@ extern "C" {
#define RT_MPU_TASK_REGION_START_ID \
(RT_MPU_NUM_REGIONS - RT_MPU_NUM_TASK_REGIONS)
#if RT_MPU_ENABLE && !defined(__ASSEMBLER__)
#if !defined(__ASSEMBLER__)
#include <stddef.h>
#include <stdint.h>
@ -158,60 +158,35 @@ struct rt_mpu
(RT_MPU_ATTR_RW | RT_MPU_ATTR_XN | RT_MPU_ATTR_CACHED_WB_RWALLOC | \
RT_MPU_ATTR_ENABLE)
#if __ARM_ARCH_PROFILE == 'R'
// v7-r doesn't use the lower 5 bits of the base address register.
#define RT_MPU_VALID (UINT32_C(0))
#define RT_MPU_REGION_MASK (UINT32_C(0))
#elif __ARM_ARCH_PROFILE == 'M'
#define RT_MPU_VALID (UINT32_C(1) << 4)
#define RT_MPU_REGION_MASK (UINT32_C(0xF))
#endif
#define RT_MPU_PERIPHERAL_ATTR \
(RT_MPU_ATTR_XN | RT_MPU_ATTR_RW | RT_MPU_ATTR_DEVICE | RT_MPU_ATTR_ENABLE)
#define RT_MPU_BASE_ADDR(id, start_addr) \
(((id)&RT_MPU_REGION_MASK) | RT_MPU_VALID | (start_addr))
#define RT_MPU_ATTR_SIZE(start_addr, size, attr) \
(RT_MPU_SIZEBITS(size) << 1 | RT_MPU_SRD((start_addr), (size)) << 8 | \
#define RT_MPU_ATTR_SIZE(addr, size, attr) \
(RT_MPU_SIZEBITS(size) << 1 | RT_MPU_SRD((addr), (size)) << 8 | \
((attr) & ~(((size) == 0) ? RT_MPU_ATTR_ENABLE : UINT32_C(0))))
static inline void rt_mpu_config_set(struct rt_mpu_config *config, uint32_t id,
uintptr_t start_addr, size_t size,
uint32_t attr)
{
const uint32_t index = id - RT_MPU_TASK_REGION_START_ID;
config->regions[index].base_addr = RT_MPU_BASE_ADDR(id, start_addr);
config->regions[index].attr_size = RT_MPU_ATTR_SIZE(start_addr, size, attr);
}
static inline void rt_mpu_config_init(struct rt_mpu_config *config)
{
for (uint32_t i = 0; i < RT_MPU_NUM_TASK_REGIONS; ++i)
{
/* Initialize the region number and valid bit for all task regions even
* if the region will never be enabled, so that the configurations can
* be applied safely. If the region registers are 0, the
* previously-used MPU region number will be used when each
* configuration is applied, disabling already-configured regions. */
config->regions[i].base_addr =
RT_MPU_BASE_ADDR(RT_MPU_TASK_REGION_START_ID + i, 0);
config->regions[i].attr_size = RT_MPU_ATTR_SIZE(0, 0, 0);
#define RT_MPU_REGION(addr, size, attr) \
{ \
.base_addr = (uintptr_t)(addr), \
.attr_size = RT_MPU_ATTR_SIZE((uintptr_t)(addr), size, attr), \
}
}
static inline void rt_mpu_region_set(uint32_t id, uintptr_t start_addr,
size_t size, uint32_t attr)
static inline void rt_mpu_region_set(uint32_t id, uintptr_t addr, size_t size,
uint32_t attr)
{
#if __ARM_ARCH_PROFILE == 'R'
rgnr_set(id);
drbar_set(start_addr);
const uint32_t attr_size = RT_MPU_ATTR_SIZE(start_addr, size, attr);
drbar_set(addr);
const uint32_t attr_size = RT_MPU_ATTR_SIZE(addr, size, attr);
// The size, subregion disable, and enable bit are in drsr.
drsr_set(attr_size & UINT32_C(0xFFFF));
dracr_set(attr_size >> 16);
#elif __ARM_ARCH_PROFILE == 'M'
RT_MPU_REGS->regions[0].base_addr = RT_MPU_BASE_ADDR(id, start_addr);
RT_MPU_REGS->regions[0].attr_size =
RT_MPU_ATTR_SIZE(start_addr, size, attr);
#define RT_MPU_REGION_ID_VALID (UINT32_C(1) << 4)
#define RT_MPU_REGION_ID_MASK (UINT32_C(0xF))
RT_MPU_REGS->regions[0].base_addr =
addr | (RT_MPU_REGION_ID_MASK & id) | RT_MPU_REGION_ID_VALID;
RT_MPU_REGS->regions[0].attr_size = RT_MPU_ATTR_SIZE(addr, size, attr);
#endif
}
@ -307,41 +282,37 @@ struct rt_mpu
#define RT_MPU_ATTR_WT_RWALLOC \
RT_MPU_ATTR_INDIRECT(RT_MPU_ATTR_WT(1, 1), RT_MPU_ATTR_WT(1, 1))
// The stack will use the indirect attributes for index 0.
/* The preset MPU attributes for stack and peripherals will use indirect
* attributes 0 and 1 respectively. */
#define RT_MPU_STACK_ATTR \
(RT_MPU_ATTR_RW | RT_MPU_ATTR_XN | RT_MPU_ATTR_PXN | RT_MPU_ATTR_ENABLE)
(RT_MPU_ATTR_INDEX(0) | RT_MPU_ATTR_RW | RT_MPU_ATTR_XN | \
RT_MPU_ATTR_PXN | RT_MPU_ATTR_ENABLE)
#define RT_MPU_BASE_ADDR(start_addr, attr) \
(((start_addr)&RT_MPU_ADDR_MASK) | ((attr)&RT_MPU_ATTR_MASK))
#define RT_MPU_PERIPHERAL_ATTR \
(RT_MPU_ATTR_INDEX(1) | RT_MPU_ATTR_XN | RT_MPU_ATTR_RW | \
RT_MPU_ATTR_PXN | RT_MPU_ATTR_ENABLE)
#define RT_MPU_LIMIT_ADDR(start_addr, size, attr) \
((((start_addr) + (size)-1) & RT_MPU_ADDR_MASK) | \
#define RT_MPU_BASE_ADDR(addr, attr) \
(((addr) & RT_MPU_ADDR_MASK) | ((attr) & RT_MPU_ATTR_MASK))
#define RT_MPU_LIMIT_ADDR(addr, size, attr) \
((((addr) + (size)-1) & RT_MPU_ADDR_MASK) | \
((((attr) & ~(((size) == 0) ? RT_MPU_ATTR_ENABLE : UINT32_C(0))) >> \
RT_MPU_ATTR_RLAR_SHIFT) & \
RT_MPU_ATTR_MASK))
static inline void rt_mpu_config_init(struct rt_mpu_config *config)
{
memset(config, 0, sizeof *config);
}
#define RT_MPU_REGION(addr, size, attr) \
{ \
.base_addr = RT_MPU_BASE_ADDR((uintptr_t)(addr), attr), \
.limit_addr = RT_MPU_LIMIT_ADDR((uintptr_t)(addr), size, attr), \
}
static inline void rt_mpu_config_set(struct rt_mpu_config *config, uint32_t id,
uintptr_t start_addr, size_t size,
static inline void rt_mpu_region_set(uint32_t id, uintptr_t addr, size_t size,
uint32_t attr)
{
const uint32_t index = id - RT_MPU_TASK_REGION_START_ID;
config->regions[index].base_addr = RT_MPU_BASE_ADDR(start_addr, attr);
config->regions[index].limit_addr =
RT_MPU_LIMIT_ADDR(start_addr, size, attr);
}
static inline void rt_mpu_region_set(uint32_t id, uintptr_t start_addr,
size_t size, uint32_t attr)
{
RT_MPU_REGS->number = id;
RT_MPU_REGS->regions[0].base_addr = RT_MPU_BASE_ADDR(start_addr, attr);
RT_MPU_REGS->regions[0].limit_addr =
RT_MPU_LIMIT_ADDR(start_addr, size, attr);
RT_MPU_REGS->regions[0].base_addr = RT_MPU_BASE_ADDR(addr, attr);
RT_MPU_REGS->regions[0].limit_addr = RT_MPU_LIMIT_ADDR(addr, size, attr);
}
static inline void rt_mpu_attr_init(void)
@ -387,7 +358,39 @@ static inline void rt_mpu_enable(void)
#define RT_MPU_PRIV_BSS(name) __attribute__((section(".priv_bss." #name)))
#define RT_MPU_PRIV_DATA(name) __attribute__((section(".priv_data." #name)))
#endif // RT_MPU_ENABLE
#if __ARM_ARCH_PROFILE == 'M' && __ARM_ARCH <= 7
/* In armv6-m and armv7-m, the MPU region configurations must also contain
* the region ID and an ID valid bit, otherwise the region number register
* that was last set will be used when modifying the region. */
#define RT_MPU_CONFIG_POSTINIT(config) \
do \
{ \
for (size_t i = 0; i < RT_MPU_NUM_TASK_REGIONS; ++i) \
{ \
const uint32_t id = RT_MPU_TASK_REGION_START_ID + i; \
(config)->regions[i].base_addr |= \
(RT_MPU_REGION_ID_MASK & id) | RT_MPU_REGION_ID_VALID; \
} \
} while (0)
#else // __ARM_ARCH_PROFILE != 'M' || __ARM_ARCH >= 8
#define RT_MPU_CONFIG_POSTINIT(config) \
do \
{ \
} while (0)
#endif
#define RT_MPU_CONFIG_INIT(config, ...) \
do \
{ \
const struct rt_mpu_region regions[] = {__VA_ARGS__}; \
for (size_t i = 0; i < sizeof regions / sizeof regions[0]; ++i) \
{ \
(config)->regions[i] = regions[i]; \
} \
RT_MPU_CONFIG_POSTINIT(config); \
} while (0)
#endif // !defined(__ASSEMBLER__)
#ifdef __cplusplus
}
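
A minimal sketch of how the new macro-based MPU configuration composes, assuming an MPU-enabled build (RT_MPU_ENABLE) with at least two task regions; the peripheral base address and size are made-up values, and this is the same call the variadic RT_TASK macro makes for each statically created task:

#include <rt/mpu.h>
#include <rt/stack.h>

#define EXAMPLE_UART_BASE 0x40001000UL // hypothetical peripheral address
#define EXAMPLE_UART_SIZE 0x1000UL

static RT_STACK(worker_stack, RT_STACK_MIN);
static struct rt_mpu_config worker_mpu;

static void worker_mpu_setup(void)
{
    /* Region 0 holds the task stack; each additional RT_MPU_REGION entry
     * fills the next task region. On armv6-m/armv7-m, RT_MPU_CONFIG_POSTINIT
     * then ORs in the region ID and valid bit as described above. */
    RT_MPU_CONFIG_INIT(&worker_mpu,
                       RT_MPU_REGION(worker_stack, sizeof worker_stack,
                                     RT_MPU_STACK_ATTR),
                       RT_MPU_REGION(EXAMPLE_UART_BASE, EXAMPLE_UART_SIZE,
                                     RT_MPU_PERIPHERAL_ATTR));
}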


@ -114,3 +114,38 @@ rt_syscall_handler:
bx r0
.size rt_syscall_handler, .-rt_syscall_handler
.section .text.rt_start,"ax",%progbits
.global rt_start
.type rt_start, %function
rt_start:
bl rt_start_context
mpuconfigure
mov r1, r0
adds r0, CONTEXT_SIZE - 16
ldmia r0!, {r4-r7}
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
ldmia r1!, {controltemp psplimtemp r4-r7}
msr psp, r0
setpsplim
movs r0, 2
msr control, r0
isb
// Once we are using the process stack pointer, interrupts can be enabled.
cpsie i
// Copy pc to the psr slot so it can be popped.
ldr r0, [sp, 24]
str r0, [sp, 28]
ldr r0, [sp, 16]
mov r12, r0
ldr r0, [sp, 20]
mov lr, r0
pop {r0-r3}
add sp, 12
pop {pc}
.size rt_start, .-rt_start


@ -150,3 +150,23 @@ rt_syscall_handler:
return
.size rt_syscall_handler, .-rt_syscall_handler
.section .text.rt_start,"ax",%progbits
.global rt_start
.type rt_start, %function
rt_start:
bl rt_start_context
mpuconfigure
loadregs
msr psp, r0
setpsplim
movs r0, 2
msr control, r0
isb
// Once we are using the process stack pointer, interrupts can be enabled.
cpsie i
pop {r0-r3, r12, lr}
ldr pc, [sp], 8
.size rt_start, .-rt_start


@ -143,3 +143,25 @@ rt_syscall_handler_svc:
rfeia sp!
.size rt_syscall_handler, .-rt_syscall_handler
.section .text.rt_start,"ax",%progbits
.global rt_start
.type rt_start, %function
rt_start:
bl rt_start_context
cps MODE_SYS
mpuconfigure
mov sp, r0
#ifdef __ARM_FP
pop {r1, r4-r11}
mcr p15, 0, r1, cr1, cr0, 2
#else // !defined(__ARM_FP)
pop {r4-r11}
#endif // __ARM_FP
pop {r0-r3, r12, lr}
rfeia sp!
.size rt_start, .-rt_start


@ -4,8 +4,9 @@
#include <rt/idle.h>
#include <rt/interrupt.h>
#include <rt/log.h>
#include <rt/rt.h>
#include <rt/start.h>
#include <rt/syscall.h>
#include <rt/trap.h>
#include <rt/stack.h>
#include <rt/task.h>
@ -38,9 +39,6 @@ struct pthread_arg
bool has_arg;
};
static pthread_t main_thread;
static volatile bool rt_started = false;
static void block_all_signals(sigset_t *old_sigset)
{
sigset_t blocked_sigset;
@ -72,8 +70,12 @@ void rt_logf(const char *format, ...)
void rt_assert(bool condition, const char *msg)
{
(void)msg;
assert(condition);
if (!condition)
{
fprintf(stderr, "%s\n", msg);
fflush(stderr);
abort();
}
}
__attribute__((noreturn)) static void *pthread_fn(void *arg)
@ -145,11 +147,7 @@ void *rt_context_init_arg(void (*fn)(uintptr_t), uintptr_t arg, void *stack,
static void syscall(void)
{
// syscalls made before rt_start are deferred.
if (rt_started)
{
pthread_kill(pthread_self(), SIGSYSCALL);
}
pthread_kill(pthread_self(), SIGSYSCALL);
}
void rt_syscall(void)
@ -234,7 +232,31 @@ __attribute__((noreturn)) void rt_idle(void)
}
}
void rt_start(void)
__attribute__((noreturn)) void rt_trap(void)
{
exit(0);
}
void rt_task_drop_privilege(void)
{
}
uint32_t rt_cycle(void)
{
#if defined(__aarch64__)
uint64_t cycles;
__asm__ __volatile__("mrs %0, cntvct_el0" : "=r"(cycles));
return (uint32_t)cycles;
#elif defined(__x86_64__)
uint32_t cycles;
__asm__ __volatile__("rdtsc" : "=a"(cycles) : : "edx");
return cycles;
#else
return 0;
#endif
}
__attribute__((noreturn)) void rt_start(void)
{
#ifdef __APPLE__
fprintf(stderr, "running rt on macOS is not supported due to a bug in "
@ -242,12 +264,6 @@ void rt_start(void)
#endif
block_all_signals(NULL);
rt_task_cycle_resume();
static RT_STACK(idle_task_stack, RT_STACK_MIN);
pthread_t idle_thread = (pthread_t)rt_context_init(rt_idle, idle_task_stack,
sizeof idle_task_stack);
/* The tick handler must block SIGSYSCALL. */
struct sigaction tick_action = {
.sa_handler = tick_handler,
@ -287,69 +303,8 @@ void rt_start(void)
};
setitimer(ITIMER_REAL, &timer, NULL);
main_thread = pthread_self();
rt_started = true;
// Start the idle task.
pthread_kill(idle_thread, SIGRESUME);
// Sending a SIGRESUME to the main thread stops the scheduler.
sigset_t resume_sigset;
sigemptyset(&resume_sigset);
sigaddset(&resume_sigset, SIGRESUME);
int sig;
sigwait(&resume_sigset, &sig);
// Prevent new SIGTICKs.
static const struct timeval zero = {
.tv_sec = 0,
.tv_usec = 0,
};
timer.it_interval = zero;
timer.it_value = zero;
setitimer(ITIMER_REAL, &timer, NULL);
// Change handler to SIG_IGN to drop any pending signals.
struct sigaction action = {.sa_handler = SIG_IGN};
sigemptyset(&action.sa_mask);
sigaction(SIGTICK, &action, NULL);
sigaction(SIGRESUME, &action, NULL);
sigaction(SIGSYSCALL, &action, NULL);
unblock_all_signals();
// Restore the default handlers.
action.sa_handler = SIG_DFL;
sigaction(SIGTICK, &action, NULL);
sigaction(SIGRESUME, &action, NULL);
sigaction(SIGSYSCALL, &action, NULL);
}
void rt_stop(void)
{
block_all_signals(NULL);
pthread_kill(main_thread, SIGRESUME);
}
void rt_task_drop_privilege(void)
{
}
uint32_t rt_cycle(void)
{
#if defined(__aarch64__)
uint64_t cycles;
__asm__ __volatile__("mrs %0, cntvct_el0" : "=r"(cycles));
return (uint32_t)cycles;
#elif defined(__x86_64__)
uint32_t cycles;
__asm__ __volatile__("rdtsc" : "=a"(cycles) : : "edx");
return cycles;
#else
return 0;
#endif
pthread_kill((pthread_t)rt_start_context(), SIGRESUME);
pthread_exit(NULL);
}
__attribute__((weak)) int main(void)


@ -1,7 +1,7 @@
#include <rt/mutex.hpp>
#include <rt/rt.hpp>
#include <rt/sem.hpp>
#include <rt/task.hpp>
#include <rt/trap.hpp>
#include <rt/assert.h>
@ -11,14 +11,14 @@ static unsigned x = 0;
#define NUM_TASKS 3
#define ITERATIONS 10000U
static void stop_last(void)
static void trap_last(void)
{
static rt::sem stop_sem(NUM_TASKS - 1);
/* Only the last task to finish will call rt_stop. */
if (!stop_sem.trywait())
static rt::sem trap_sem(NUM_TASKS - 1);
/* Only the last task to finish will call rt::trap. */
if (!trap_sem.trywait())
{
rt_assert(x == (ITERATIONS * NUM_TASKS), "x has the wrong value");
rt::stop();
rt::trap();
}
}
@ -30,7 +30,7 @@ static void increment_lock(void)
rt::lock_guard lock(mutex);
++x;
}
stop_last();
trap_last();
}
static void increment_trylock(void)
@ -45,7 +45,7 @@ static void increment_trylock(void)
rt::lock_guard lock(mutex, rt::adopt_lock);
++x;
}
stop_last();
trap_last();
}
static void increment_timedlock(void)
@ -59,7 +59,7 @@ static void increment_timedlock(void)
rt::lock_guard lock(mutex, rt::adopt_lock);
++x;
}
stop_last();
trap_last();
}
static void timeout(void)


@ -1,6 +1,6 @@
#include <rt/notify.hpp>
#include <rt/rt.hpp>
#include <rt/task.hpp>
#include <rt/trap.hpp>
#include <rt/assert.h>
@ -32,7 +32,7 @@ static void waiter(void)
rt_assert(!note.timedwait(value, 10), "wait didn't time out");
rt::stop();
rt::trap();
}
RT_TASK(notifier, RT_STACK_MIN, 1);


@ -1,7 +1,7 @@
#include <rt/once.hpp>
#include <rt/rt.hpp>
#include <rt/sem.hpp>
#include <rt/task.hpp>
#include <rt/trap.hpp>
#include <rt/assert.h>
#include <rt/atomic.h>
@ -23,7 +23,7 @@ static void oncer(void)
sem.wait();
rt_assert(rt_atomic_load(&x, RT_ATOMIC_RELAXED) == 1,
"x has the wrong value");
rt::stop();
rt::trap();
}
static void twicer(void)


@ -1,6 +1,6 @@
#include <rt/queue.hpp>
#include <rt/rt.hpp>
#include <rt/task.hpp>
#include <rt/trap.hpp>
#include <rt/assert.h>
#include <rt/atomic.h>
@ -45,7 +45,7 @@ static void timeout(void)
{
rt::task::drop_privilege();
rt::task::sleep(1000);
rt::stop();
rt::trap();
}
RT_TASK(popper, RT_STACK_MIN, 1);


@ -1,6 +1,6 @@
#include <rt/rt.hpp>
#include <rt/rwlock.hpp>
#include <rt/task.hpp>
#include <rt/trap.hpp>
#include <rt/assert.h>
@ -36,10 +36,9 @@ static void timeout(void)
{
rt::task::drop_privilege();
rt::task::sleep(1000);
rt::stop();
rt::trap();
}
RT_TASK(reader, RT_STACK_MIN, 1);
RT_TASK(reader, RT_STACK_MIN, 1);
RT_TASK(reader, RT_STACK_MIN, 1);
RT_TASK(writer, RT_STACK_MIN, 1);


@ -1,6 +1,6 @@
#include <rt/rt.hpp>
#include <rt/sem.hpp>
#include <rt/task.hpp>
#include <rt/trap.hpp>
#include <rt/assert.h>
@ -30,7 +30,7 @@ static void waiter(void)
rt_assert(!sem.timedwait(10), "wait didn't time out");
rt::stop();
rt::trap();
}
RT_TASK(poster, RT_STACK_MIN, 1);


@ -1,7 +1,7 @@
#include <rt/rt.hpp>
#include <rt/sem.hpp>
#include <rt/task.hpp>
#include <rt/tick.hpp>
#include <rt/trap.hpp>
#include <rt/assert.h>
@ -18,11 +18,11 @@ static void sleep_periodic(uintptr_t period)
"woke up at the wrong tick");
}
/* Only the second task to finish will call rt_stop. */
static rt::sem stop_sem(1);
if (!stop_sem.trywait())
/* Only the second task to finish will call rt::trap. */
static rt::sem trap_sem(1);
if (!trap_sem.trywait())
{
rt::stop();
rt::trap();
}
}


@ -1,7 +1,7 @@
#include "water.hpp"
#include <rt/rt.hpp>
#include <rt/task.hpp>
#include <rt/trap.hpp>
#include <rt/assert.h>
#include <rt/atomic.h>
@ -26,7 +26,7 @@ static void timeout(void)
const uint32_t h = rt_atomic_load(&hydrogen_bonded, RT_ATOMIC_RELAXED);
const uint32_t o = rt_atomic_load(&oxygen_bonded, RT_ATOMIC_RELAXED);
/* The oxygen or hydrogen may not have bonded by the time rt_stop is called
/* The oxygen or hydrogen may not have bonded by the time rt::trap is called
* after making a water molecule, so allow for o and h to be one molecule's
* worth below expected value or exactly equal to it. */
const uint32_t o_lo = w - 1;
@ -39,7 +39,7 @@ static void timeout(void)
rt_assert(h >= h_lo, "not enough hydrogen was bonded");
rt_assert(h <= h_hi, "too much hydrogen was bonded");
rt::stop();
rt::trap();
}
static void oxygen_loop(void)


@ -1,18 +0,0 @@
#pragma once
#include <rt/rt.h>
namespace rt
{
static inline void start(void) noexcept
{
rt_start();
}
static inline void stop(void) noexcept
{
rt_stop();
}
} // namespace rt

cxx/include/rt/trap.hpp (new file, 13 lines)

@ -0,0 +1,13 @@
#pragma once
#include <rt/trap.h>
namespace rt
{
[[noreturn]] static inline void trap() noexcept
{
rt_trap();
}
} // namespace rt


@ -1,8 +1,8 @@
#include <rt/cycle.h>
#include <rt/log.h>
#include <rt/mutex.h>
#include <rt/rt.h>
#include <rt/task.h>
#include <rt/trap.h>
static volatile uint32_t start_cycle = 0;
static volatile uint32_t cycles = 0;
@ -22,7 +22,7 @@ static void task1(void)
rt_task_sleep(10);
rt_mutex_lock(&mutex);
cycles = rt_cycle() - start_cycle;
rt_stop();
rt_trap();
}
RT_TASK(task0, RT_STACK_MIN, 1);


@ -1,8 +1,8 @@
#include <rt/cycle.h>
#include <rt/log.h>
#include <rt/notify.h>
#include <rt/rt.h>
#include <rt/task.h>
#include <rt/trap.h>
static volatile uint32_t start_cycle = 0;
static volatile uint32_t cycles = 0;
@ -19,7 +19,7 @@ static void waiter(void)
{
rt_notify_wait(&note);
cycles = rt_cycle() - start_cycle;
rt_stop();
rt_trap();
}
RT_TASK(poster, RT_STACK_MIN, 1);


@ -1,8 +1,8 @@
#include <rt/cycle.h>
#include <rt/log.h>
#include <rt/queue.h>
#include <rt/rt.h>
#include <rt/task.h>
#include <rt/trap.h>
static volatile uint32_t start_cycle = 0;
static volatile uint32_t cycles = 0;
@ -21,7 +21,7 @@ static void popper(void)
int x;
rt_queue_pop(&queue, &x);
cycles = rt_cycle() - start_cycle;
rt_stop();
rt_trap();
}
RT_TASK(popper, RT_STACK_MIN, 2);


@ -1,8 +1,8 @@
#include <rt/cycle.h>
#include <rt/log.h>
#include <rt/rt.h>
#include <rt/sem.h>
#include <rt/task.h>
#include <rt/trap.h>
static volatile uint32_t start_cycle = 0;
static volatile uint32_t cycles = 0;
@ -19,7 +19,7 @@ static void waiter(void)
{
rt_sem_wait(&sem);
cycles = rt_cycle() - start_cycle;
rt_stop();
rt_trap();
}
RT_TASK(poster, RT_STACK_MIN, 1);


@ -1,7 +1,7 @@
#include <rt/cycle.h>
#include <rt/log.h>
#include <rt/rt.h>
#include <rt/task.h>
#include <rt/trap.h>
static volatile uint32_t start_cycle = 0;
static volatile uint32_t cycles = 0;
@ -15,7 +15,7 @@ static void sleep(void)
static void task1(void)
{
cycles = rt_cycle() - start_cycle;
rt_stop();
rt_trap();
}
RT_TASK(sleep, RT_STACK_MIN, 2);


@ -1,7 +1,7 @@
#include <rt/cycle.h>
#include <rt/log.h>
#include <rt/rt.h>
#include <rt/task.h>
#include <rt/trap.h>
static volatile uint32_t start_cycle = 0;
static volatile uint32_t cycles = 0;
@ -15,7 +15,7 @@ static void task0(void)
static void task1(void)
{
cycles = rt_cycle() - start_cycle;
rt_stop();
rt_trap();
}
/* NOTE: Tasks of equal priority will initially be executed in the order they


@ -1,8 +1,8 @@
#include <rt/assert.h>
#include <rt/mutex.h>
#include <rt/rt.h>
#include <rt/sem.h>
#include <rt/task.h>
#include <rt/trap.h>
#define MAX_SEQ 9
@ -17,7 +17,7 @@ static void sequence(int s)
++seq;
if (seq > MAX_SEQ)
{
rt_stop();
rt_trap();
}
}
@ -67,20 +67,11 @@ static void donator(void)
sequence(6);
rt_mutex_unlock(&mutex0);
sequence(7);
if (!rt_mutex_timedlock(&mutex2, 10))
{
sequence(9);
}
}
static void timeout(void)
{
rt_task_sleep(100);
rt_assert(false, "timed out");
rt_assert(!rt_mutex_timedlock(&mutex2, 10), "donator timedlock succeeded");
sequence(9);
}
RT_TASK(locker0, RT_STACK_MIN, 1);
RT_TASK(locker1, RT_STACK_MIN, 2);
RT_TASK(spinner, RT_STACK_MIN, 3);
RT_TASK(donator, RT_STACK_MIN, 4);
RT_TASK(timeout, RT_STACK_MIN, 5);


@ -1,10 +1,10 @@
#include <rt/rt.h>
#include <rt/task.h>
#include <rt/trap.h>
static void empty(void)
{
rt_task_drop_privilege();
rt_stop();
rt_trap();
}
RT_TASK(empty, RT_STACK_MIN, 1);


@ -1,5 +1,5 @@
#include <rt/rt.h>
#include <rt/task.h>
#include <rt/trap.h>
static volatile float v;
@ -21,7 +21,7 @@ static void timeout(void)
{
rt_task_drop_privilege();
rt_task_sleep(100);
rt_stop();
rt_trap();
}
/* These tasks use floating-point, so give them a larger stack size if the


@ -1,9 +1,9 @@
#include <rt/assert.h>
#include <rt/log.h>
#include <rt/mutex.h>
#include <rt/rt.h>
#include <rt/sem.h>
#include <rt/task.h>
#include <rt/trap.h>
static RT_MUTEX(mutex);
static unsigned x = 0;
@ -11,14 +11,14 @@ static unsigned x = 0;
#define NUM_TASKS 3
#define ITERATIONS 10000U
static void stop_last(void)
static void trap_last(void)
{
static RT_SEM(stop_sem, NUM_TASKS - 1);
/* Only the last task to finish will call rt_stop. */
if (!rt_sem_trywait(&stop_sem))
static RT_SEM(trap_sem, NUM_TASKS - 1);
/* Only the last task to finish will call rt_trap. */
if (!rt_sem_trywait(&trap_sem))
{
rt_assert(x == (ITERATIONS * NUM_TASKS), "x has the wrong value");
rt_stop();
rt_trap();
}
}
@ -31,7 +31,7 @@ static void increment_lock(void)
++x;
rt_mutex_unlock(&mutex);
}
stop_last();
trap_last();
}
static void increment_trylock(void)
@ -46,7 +46,7 @@ static void increment_trylock(void)
++x;
rt_mutex_unlock(&mutex);
}
stop_last();
trap_last();
}
static void increment_timedlock(void)
@ -60,7 +60,7 @@ static void increment_timedlock(void)
++x;
rt_mutex_unlock(&mutex);
}
stop_last();
trap_last();
}
static void timeout(void)


@ -1,7 +1,7 @@
#include <rt/assert.h>
#include <rt/notify.h>
#include <rt/rt.h>
#include <rt/task.h>
#include <rt/trap.h>
static const int n = 10;
static RT_NOTIFY(note, 0);
@ -32,7 +32,7 @@ static void waiter(void)
rt_assert(!rt_notify_timedwait(&note, &value, 10), "wait didn't time out");
rt_stop();
rt_trap();
}
RT_TASK(notifier, RT_STACK_MIN, 1);


@ -2,8 +2,8 @@
#include <rt/atomic.h>
#include <rt/log.h>
#include <rt/once.h>
#include <rt/rt.h>
#include <rt/task.h>
#include <rt/trap.h>
#define ITERATIONS 10000UL
@ -39,7 +39,7 @@ static void oncer_reset(void)
}
rt_assert(rt_atomic_load(&x, RT_ATOMIC_RELAXED) == ITERATIONS,
"x was not incremented enough");
rt_stop();
rt_trap();
}
RT_TASK(oncer, RT_STACK_MIN, 1);


@ -2,8 +2,8 @@
#include <rt/atomic.h>
#include <rt/log.h>
#include <rt/queue.h>
#include <rt/rt.h>
#include <rt/task.h>
#include <rt/trap.h>
#include <stdint.h>
@ -45,7 +45,7 @@ static void timeout(void)
{
rt_task_drop_privilege();
rt_task_sleep(1000);
rt_stop();
rt_trap();
}
RT_TASK(popper, RT_STACK_MIN, 1);


@ -1,8 +1,8 @@
#include <rt/assert.h>
#include <rt/log.h>
#include <rt/rt.h>
#include <rt/rwlock.h>
#include <rt/task.h>
#include <rt/trap.h>
static RT_RWLOCK(lock);
static unsigned x = 0;
@ -36,10 +36,9 @@ static void timeout(void)
{
rt_task_drop_privilege();
rt_task_sleep(1000);
rt_stop();
rt_trap();
}
RT_TASK(reader, RT_STACK_MIN, 1);
RT_TASK(reader, RT_STACK_MIN, 1);
RT_TASK(reader, RT_STACK_MIN, 1);
RT_TASK(writer, RT_STACK_MIN, 1);


@ -1,7 +1,7 @@
#include <rt/assert.h>
#include <rt/rt.h>
#include <rt/sem.h>
#include <rt/task.h>
#include <rt/trap.h>
static const int n = 10;
static RT_SEM(sem, 0);
@ -29,7 +29,7 @@ static void waiter(void)
rt_assert(!rt_sem_timedwait(&sem, 10), "wait didn't time out");
rt_stop();
rt_trap();
}
RT_TASK(poster, RT_STACK_MIN, 1);


@ -1,5 +1,5 @@
#include <rt/rt.h>
#include <rt/task.h>
#include <rt/trap.h>
static void simple(uintptr_t arg)
{
@ -11,7 +11,7 @@ static void simple(uintptr_t arg)
if (arg == 1)
{
rt_stop();
rt_trap();
}
}


@ -1,10 +1,10 @@
#include <rt/assert.h>
#include <rt/atomic.h>
#include <rt/log.h>
#include <rt/rt.h>
#include <rt/sem.h>
#include <rt/task.h>
#include <rt/tick.h>
#include <rt/trap.h>
static const int nloops = 5;
@ -19,11 +19,11 @@ static void sleep_periodic(uintptr_t period)
"woke up at the wrong tick");
}
/* Only the second task to finish will call rt_stop. */
static RT_SEM(stop_sem, 1);
if (!rt_sem_trywait(&stop_sem))
/* Only the second task to finish will call rt_trap. */
static RT_SEM(trap_sem, 1);
if (!rt_sem_trywait(&trap_sem))
{
rt_stop();
rt_trap();
}
}


@ -3,8 +3,8 @@
#include <rt/assert.h>
#include <rt/atomic.h>
#include <rt/log.h>
#include <rt/rt.h>
#include <rt/task.h>
#include <rt/trap.h>
static rt_atomic_uint32_t hydrogen_bonded = 0;
static rt_atomic_uint32_t oxygen_bonded = 0;
@ -26,7 +26,7 @@ static void timeout(void)
const uint32_t h = rt_atomic_load(&hydrogen_bonded, RT_ATOMIC_RELAXED);
const uint32_t o = rt_atomic_load(&oxygen_bonded, RT_ATOMIC_RELAXED);
/* The oxygen or hydrogen may not have bonded by the time rt_stop is called
/* The oxygen or hydrogen may not have bonded by the time rt_trap is called
* after making a water molecule, so allow for o and h to be one molecule's
* worth below expected value or exactly equal to it. */
const uint32_t o_lo = w - 1;
@ -39,7 +39,7 @@ static void timeout(void)
rt_assert(h >= h_lo, "not enough hydrogen was bonded");
rt_assert(h <= h_hi, "too much hydrogen was bonded");
rt_stop();
rt_trap();
}
static void oxygen_loop(void)


@ -19,6 +19,13 @@ void *rt_context_init(void (*fn)(void), void *stack, size_t stack_size);
void *rt_context_init_arg(void (*fn)(uintptr_t), uintptr_t arg, void *stack,
size_t stack_size);
/*
* Start execution from the given context. An implementation can assume that
* the context represents the beginning of a function call. This should only be
* called by rt_start.
*/
__attribute__((noreturn)) void rt_context_start(void *ctx);
/*
* Pointer to the previous task's context field, used to store the suspending
* context during a context switch.
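As a reading aid for how these two hooks fit together: a hypothetical C-only port could build rt_start directly on top of them, which is what the arm ports added below do in assembly (the POSIX port instead signals the first task's thread). A sketch of that contract, not an implementation from this commit:

#include <rt/context.h>
#include <rt/start.h>

__attribute__((noreturn)) void rt_start(void)
{
    // Select and activate the highest-priority ready task...
    void *const first_ctx = rt_start_context();
    // ...then jump into it as if it were the start of a function call.
    rt_context_start(first_ctx);
}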


@ -11,6 +11,11 @@ extern "C" {
#define RT_CYCLE_ENABLE 0
#endif
/*
* Initialize any hardware necessary for cycle counting.
*/
void rt_cycle_init(void);
/*
* Get the current CPU cycle.
*/
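
A small usage sketch: with rt_cycle_init handled by rt_start_context in this commit (when RT_CYCLE_ENABLE is set), callers only ever read the counter. The helper below is hypothetical and do_work is a placeholder:

#include <rt/cycle.h>
#include <stdint.h>

static uint32_t cycles_for(void (*do_work)(void))
{
    const uint32_t start = rt_cycle();
    do_work();
    // Unsigned subtraction stays correct across one counter wraparound.
    return rt_cycle() - start;
}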


@ -13,15 +13,10 @@ extern "C" {
#include <rt/arch/mpu.h>
#else
/* Provide no-op versions for the static task initialization macros when
* there's no MPU. */
#define rt_mpu_config_init(config) \
do \
{ \
} while (0)
#else /* !RT_MPU_ENABLE */
#define rt_mpu_config_set(config, id, start_addr, size, attr) \
/* Provide no-op macros for when there's no MPU. */
#define RT_MPU_CONFIG_INIT(config, ...) \
do \
{ \
} while (0)
@ -29,7 +24,7 @@ extern "C" {
#define RT_MPU_PRIV_DATA(name)
#define RT_MPU_PRIV_BSS(name)
#endif
#endif /* RT_MPU_ENABLE */
#ifdef __cplusplus
}


@ -1,22 +0,0 @@
#ifndef RT_H
#define RT_H
#ifdef __cplusplus
extern "C" {
#endif
/*
* Start rt. (Enable interrupts, turn the active context into the idle task.)
*/
void rt_start(void);
/*
* Stop rt.
*/
void rt_stop(void);
#ifdef __cplusplus
}
#endif
#endif /* RT_H */

include/rt/start.h (new file, 10 lines)

@ -0,0 +1,10 @@
#ifndef RT_START_H
#define RT_START_H
/* Start rt. Generally should be called from assembly after initialization. */
__attribute__((noreturn)) void rt_start(void);
/* Get the first task context to run. Called by rt_start. */
void *rt_start_context(void);
#endif /* RT_START_H */
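
For ports where calling rt_start from C rather than assembly is acceptable, the hand-off can be as simple as the sketch below; board_init is a hypothetical placeholder for whatever startup work precedes the scheduler:

#include <rt/start.h>

extern void board_init(void); // hypothetical: clocks, drivers, etc.

int main(void)
{
    board_init();
    rt_start(); // noreturn: the first ready task runs next
}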


@ -28,7 +28,7 @@
#if RT_TASK_READY_CLZ_ENABLE
#define RT_TASK_MAX_PRIORITY ((sizeof(unsigned) * CHAR_BIT) - 1)
#else
#define RT_TASK_MAX_PRIORITY UINT_MAX
#define RT_TASK_MAX_PRIORITY (UINT_MAX - 1)
#endif
#ifdef __cplusplus
@ -43,25 +43,6 @@ extern "C" {
#error "To use task cycle counts, the cycle counter must be enabled."
#endif
#define RT_TASK_CONSTRUCTOR_PRIORITY 1000
struct rt_task;
/*
* Initialize a task that runs fn() on the given stack, and make it runnable.
* This function can be used before or after rt_start.
*/
void rt_task_init(struct rt_task *task, void (*fn)(void), const char *name,
unsigned priority, void *stack, size_t stack_size);
/*
* Initialize a task that runs fn(arg) on the given stack, and make it runnable.
* This function can be used before or after rt_start.
*/
void rt_task_init_arg(struct rt_task *task, void (*fn)(uintptr_t),
uintptr_t arg, const char *name, unsigned priority,
void *stack, size_t stack_size);
/*
* Yield the core to another task of the same priority. If the current task is
* still the highest priority, it will continue executing.
@ -123,6 +104,9 @@ struct rt_task
unsigned base_priority;
enum rt_task_state state;
unsigned long wake_tick;
#if RT_MPU_ENABLE
struct rt_mpu_config mpu_config;
#endif
struct rt_mutex *blocking_mutex;
union
{
@ -133,9 +117,6 @@ struct rt_task
#if RT_TASK_CYCLE_ENABLE
uint64_t total_cycles;
uint32_t start_cycle;
#endif
#if RT_MPU_ENABLE
struct rt_mpu_config mpu_config;
#endif
const char *name;
};
@ -146,27 +127,31 @@ struct rt_task
*/
void rt_task_ready(struct rt_task *task);
#define RT_TASK_INIT(name_, name_str, priority_, state_) \
#define RT_TASK_INIT(name_, name_str, priority_, ...) \
{ \
.list = RT_LIST_INIT(name_.list), \
.sleep_list = RT_LIST_INIT(name_.sleep_list), .priority = (priority_), \
.base_priority = (priority_), .state = (state_), \
.base_priority = (priority_), .state = RT_TASK_STATE_INIT, \
.mutex_list = RT_LIST_INIT(name_.mutex_list), .name = (name_str), \
}
#define RT_CAT_(a, b) a##b
#define RT_CAT(a, b) RT_CAT_(a, b)
#define RT_TASK_CONSTRUCTOR_PRIORITY 1000
/* Use a constructor with a priority based on __COUNTER__ so that tasks created
* in the same file with equal priority are added to the ready list in the
* order they appear in the file. Use % 60000 to ensure that the overall
* constructor priority does not wrap around at 2^16 and is always greater than
* the priority of the list initialization constructor. */
#define RT_TASK_COMMON(fn, stack_size, priority, name, ctx_init) \
RT_TASK_COMMON_(fn, stack_size, priority, name, ctx_init, __COUNTER__)
#define RT_TASK_COMMON(fn, stack_size, priority, name, ctx_init, ...) \
RT_TASK_COMMON_(fn, stack_size, priority, name, ctx_init, __COUNTER__, \
__VA_ARGS__)
#define RT_TASK_COMMON_(fn, stack_size, priority, name, ctx_init, counter) \
#define RT_TASK_COMMON_(fn, stack_size, priority, name, ctx_init, counter, \
...) \
__attribute__((constructor(RT_TASK_CONSTRUCTOR_PRIORITY + \
(counter % 60000)))) static void \
RT_CAT(fn##_task_init_, counter)(void) \
@ -174,16 +159,17 @@ void rt_task_ready(struct rt_task *task);
static RT_STACK(fn##_task_stack, stack_size); \
RT_MPU_PRIV_DATA(fn##_task) \
static struct rt_task fn##_task = \
RT_TASK_INIT(fn##_task, name, priority, RT_TASK_STATE_INIT); \
RT_TASK_INIT(fn##_task, name, priority); \
RT_MPU_CONFIG_INIT(&fn##_task.mpu_config, \
RT_MPU_REGION(fn##_task_stack, \
sizeof fn##_task_stack, \
RT_MPU_STACK_ATTR), \
__VA_ARGS__); \
fn##_task.ctx = ctx_init; \
rt_mpu_config_init(&fn##_task.mpu_config); \
rt_mpu_config_set(&fn##_task.mpu_config, RT_MPU_TASK_REGION_START_ID, \
(uintptr_t)fn##_task_stack, stack_size, \
RT_MPU_STACK_ATTR); \
rt_task_ready(&fn##_task); \
} \
rt_static_assert(priority <= RT_TASK_MAX_PRIORITY, \
"the priority of task " #fn ", " #priority \
rt_static_assert((intmax_t)(priority) <= (intmax_t)RT_TASK_MAX_PRIORITY, \
"the priority of task \"" name "\", " #priority \
", is higher than the maximum allowed")
/*
@ -191,15 +177,17 @@ void rt_task_ready(struct rt_task *task);
* with the given priority. To create new tasks after rt is running, use
* rt_task_init*.
*/
#define RT_TASK(fn, stack_size, priority) \
#define RT_TASK(fn, stack_size, priority, ...) \
RT_TASK_COMMON(fn, stack_size, priority, #fn, \
rt_context_init((fn), fn##_task_stack, \
sizeof fn##_task_stack))
sizeof fn##_task_stack), \
__VA_ARGS__)
#define RT_TASK_ARG(fn, arg, stack_size, priority) \
#define RT_TASK_ARG(fn, arg, stack_size, priority, ...) \
RT_TASK_COMMON(fn, stack_size, priority, #fn "(" #arg ")", \
rt_context_init_arg((fn), (arg), fn##_task_stack, \
sizeof fn##_task_stack))
sizeof fn##_task_stack), \
__VA_ARGS__)
#ifdef __cplusplus
}
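
A sketch of the new variadic RT_TASK form, which forwards any extra RT_MPU_REGION entries to RT_MPU_CONFIG_INIT after the stack region; the task body and peripheral window are invented for illustration and assume an MPU-enabled target with at least two task regions:

#include <rt/mpu.h>
#include <rt/task.h>

static void logger(void)
{
    for (;;)
    {
        // ... touch the peripheral granted by the extra region ...
        rt_task_yield();
    }
}

// Stack region plus one extra region for a (hypothetical) peripheral window.
RT_TASK(logger, RT_STACK_MIN, 1,
        RT_MPU_REGION(0x40002000UL, 0x1000UL, RT_MPU_PERIPHERAL_ATTR));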

include/rt/trap.h (new file, 14 lines)

@ -0,0 +1,14 @@
#ifndef RT_TRAP_H
#define RT_TRAP_H
#ifdef __cplusplus
extern "C" {
#endif
__attribute__((noreturn)) void rt_trap(void);
#ifdef __cplusplus
}
#endif
#endif /* RT_TRAP_H */


@ -12,7 +12,7 @@ rt::mutex!(MUTEX2);
fn sequence(s: i32) {
assert_eq!(s, SEQ.load(Ordering::Relaxed), "sequence out of order");
if SEQ.fetch_add(1, Ordering::Relaxed) >= MAX_SEQ {
rt::stop();
rt::trap();
}
}


@ -2,7 +2,7 @@
fn empty() {
rt::task::drop_privilege();
rt::stop();
rt::trap();
}
rt::task!(empty, rt::task::STACK_MIN, 1);


@ -21,7 +21,7 @@ fn f(arg: u32) {
fn timeout() {
rt::task::drop_privilege();
rt::task::sleep(100);
rt::stop();
rt::trap();
}
rt::task!(f(1), 2 * rt::task::STACK_MIN, 1);


@ -5,15 +5,15 @@ rt::mutex!(MUTEX, u32, 0);
const NUM_TASKS: u32 = 3;
const ITERATIONS: u32 = 10000;
fn stop_last() {
rt::semaphore!(STOP_SEM, NUM_TASKS as i32 - 1);
if !STOP_SEM.try_wait() {
fn trap_last() {
rt::semaphore!(TRAP_SEM, NUM_TASKS as i32 - 1);
if !TRAP_SEM.try_wait() {
assert_eq!(
*MUTEX.try_lock().expect("mutex should be unlocked"),
ITERATIONS * NUM_TASKS,
"the mutex did not contain the expected value"
);
rt::stop();
rt::trap();
}
}
@ -22,7 +22,7 @@ fn increment_lock() {
for _ in 0..ITERATIONS {
*MUTEX.lock() += 1;
}
stop_last();
trap_last();
}
fn increment_trylock() {
@ -37,7 +37,7 @@ fn increment_trylock() {
};
*guard += 1;
}
stop_last();
trap_last();
}
fn increment_timedlock() {
@ -50,7 +50,7 @@ fn increment_timedlock() {
};
*guard += 1;
}
stop_last();
trap_last();
}
fn timeout() {


@ -25,7 +25,7 @@ fn waiter() {
}
assert!(NOTE.timed_wait(10).is_none(), "wait didn't time out");
rt::stop();
rt::trap();
}
rt::task!(notifier, rt::task::STACK_MIN, 1);


@ -20,15 +20,15 @@ fn oncer() {
}
}
fn oncer_stop() {
fn oncer_trap() {
oncer();
assert_eq!(
X.load(Ordering::Relaxed),
1,
"X did not have the expected value"
);
rt::stop();
rt::trap();
}
rt::task!(oncer, rt::task::STACK_MIN, 1);
rt::task!(oncer_stop, rt::task::STACK_MIN, 1);
rt::task!(oncer_trap, rt::task::STACK_MIN, 1);


@ -39,7 +39,7 @@ fn popper() {
fn timeout() {
rt::task::drop_privilege();
rt::task::sleep(1000);
rt::stop();
rt::trap();
}
rt::task!(pusher(0), rt::task::STACK_MIN, 1);


@ -29,7 +29,7 @@ fn writer() {
fn timeout() {
rt::task::drop_privilege();
rt::task::sleep(50);
rt::stop();
rt::trap();
}
rt::task!(reader, rt::task::STACK_MIN, 1);


@ -21,7 +21,7 @@ fn waiter() {
}
assert!(!SEM.timed_wait(10), "wait didn't time out");
rt::stop();
rt::trap();
}
rt::task!(poster, rt::task::STACK_MIN, 1);


@ -7,7 +7,7 @@ fn simple(arg: usize) {
}
if arg == 1 {
rt::stop();
rt::trap();
}
}


@ -15,10 +15,10 @@ fn sleep_periodic(period: rt::tick::Utick) {
);
}
// Only the second task to finish will call rt_stop.
rt::semaphore!(STOP_SEM, 1);
if !STOP_SEM.try_wait() {
rt::stop();
// Only the second task to finish will call rt::trap.
rt::semaphore!(TRAP_SEM, 1);
if !TRAP_SEM.try_wait() {
rt::trap();
}
}


@ -14,7 +14,7 @@ fn timeout() {
let h = HYDROGEN_BONDED.load(Ordering::Relaxed);
let o = OXYGEN_BONDED.load(Ordering::Relaxed);
/* The oxygen or hydrogen may not have bonded by the time rt_stop is called
/* The oxygen or hydrogen may not have bonded by the time rt::trap is called
* after making a water molecule, so allow for o and h to be one molecule's
* worth below expected value or exactly equal to it. */
let o_lo = w - 1;
@ -27,7 +27,7 @@ fn timeout() {
assert!(h >= h_lo, "not enough hydrogen was bonded");
assert!(h <= h_hi, "too much hydrogen was bonded");
rt::stop();
rt::trap();
}
fn oxygen_loop() {


@ -1,4 +1,4 @@
#![no_std]
#![cfg_attr(not(test), no_std)]
pub mod cycle;
pub mod sync;
@ -8,21 +8,32 @@ pub mod tick;
mod bindings;
mod list;
use bindings::{rt_start, rt_stop};
use bindings::rt_trap;
pub fn start() {
unsafe { rt_start() }
}
pub fn stop() {
unsafe { rt_stop() }
pub fn trap() -> ! {
unsafe { rt_trap() }
}
/*
* These items must be re-exported so rt's macros can use them. The task macro uses
* rt_syscall_record to make a task ready.
* These items must be re-exported so rt's macros can use them. The task macro
* uses rt_syscall_record to make a task ready.
*/
#[doc(hidden)]
pub use bindings::rt_syscall_record;
#[doc(hidden)]
pub use paste;
#[cfg(test)]
use crate::bindings::rt_start_context;
#[cfg(test)]
use std::sync::Once;
#[cfg(test)]
static TEST_INIT: Once = Once::new();
#[cfg(test)]
fn test_init() {
TEST_INIT.call_once(|| {
unsafe { rt_start_context() };
});
}


@ -137,8 +137,11 @@ macro_rules! mutex {
#[cfg(test)]
mod tests {
use crate::test_init;
#[test]
fn fast_path() {
test_init();
mutex!(MUTEX, i32, 0);
*MUTEX.lock() += 1;
assert!(*MUTEX.lock() == 1);
@ -146,6 +149,7 @@ mod tests {
#[test]
fn try_lock() {
test_init();
mutex!(MUTEX, i32, 0);
{
let guard = MUTEX.try_lock();


@ -43,8 +43,11 @@ macro_rules! once {
mod tests {
#[test]
fn fast_path() {
let mut x = 0i32;
once!(ONCE);
ONCE.call_once(|| {});
ONCE.call_once(|| { x += 1; });
assert!(ONCE.is_completed());
ONCE.call_once(|| { x += 1; });
assert_eq!(x, 1);
}
}


@ -159,8 +159,11 @@ macro_rules! rwlock {
#[cfg(test)]
mod tests {
use crate::test_init;
#[test]
fn fast_path() {
test_init();
rwlock!(LOCK, i32, 0);
*LOCK.write() += 1;
assert!(*LOCK.read() == 1);


@ -5,8 +5,9 @@
#include <rt/mutex.h>
#include <rt/notify.h>
#include <rt/queue.h>
#include <rt/rt.h>
#include <rt/rwlock.h>
#include <rt/sem.h>
#include <rt/start.h>
#include <rt/task.h>
#include <rt/tick.h>
#include <rt/trap.h>

src/rt.c

@ -1,14 +1,14 @@
#include <rt/rt.h>
#include <rt/atomic.h>
#include <rt/container.h>
#include <rt/context.h>
#include <rt/cycle.h>
#include <rt/idle.h>
#include <rt/interrupt.h>
#include <rt/list.h>
#include <rt/log.h>
#include <rt/mutex.h>
#include <rt/sem.h>
#include <rt/start.h>
#include <rt/syscall.h>
#include <rt/task.h>
#include <rt/tick.h>
@ -41,7 +41,7 @@ static void insert_by_priority(struct rt_list *list, struct rt_task *task)
}
#if RT_TASK_READY_CLZ_ENABLE
RT_MPU_PRIV_DATA(rt_ready_bits)
RT_MPU_PRIV_BSS(rt_ready_bits)
static unsigned rt_ready_bits = 0;
RT_MPU_PRIV_BSS(rt_ready_lists)
@ -101,31 +101,26 @@ static void insert_mutex_by_priority(struct rt_list *list,
* in RT_MPU_PRIV_BSS. */
static struct rt_syscall_record *_Atomic rt_pending_syscalls = NULL;
/* The idle task is initially running.
* The caller of rt_start() will become the idle task. */
static struct rt_task rt_idle_task =
RT_TASK_INIT(rt_idle_task, "idle", 0, RT_TASK_STATE_RUNNING);
RT_TASK(rt_idle, RT_STACK_MIN, 0);
/* Unprivileged tasks need to read the active task pointer to access their
* syscall record and name. Some platforms support privileged read+write
* and unprivileged read access permissions, but not all. */
static struct rt_task *rt_active_task = &rt_idle_task;
/* rt_active_task must be readable from user code. */
static struct rt_task *rt_active_task = NULL;
void rt_task_yield(void)
{
rt_syscall();
}
const char *rt_task_name(void)
{
return rt_active_task->name;
}
struct rt_task *rt_task_self(void)
{
return rt_active_task;
}
const char *rt_task_name(void)
{
return rt_active_task->name;
}
void rt_task_ready(struct rt_task *task)
{
task->state = RT_TASK_STATE_READY;
@ -173,6 +168,29 @@ RT_MPU_PRIV_BSS(rt_mpu_config)
struct rt_mpu_config *rt_mpu_config;
#endif
void *rt_start_context(void)
{
#if RT_CYCLE_ENABLE
rt_cycle_init();
#endif
rt_task_cycle_resume();
struct rt_task *const first_task = next_ready_task();
task_unready(first_task);
rt_active_task = first_task;
first_task->state = RT_TASK_STATE_RUNNING;
#if RT_MPU_ENABLE
rt_mpu_config = &first_task->mpu_config;
#endif
rt_logf("rt_start_context: %s with priority %u\n", rt_task_name(),
first_task->priority);
return first_task->ctx;
}
static void *sched(bool yield)
{
struct rt_task *next_task = next_ready_task();
@ -231,8 +249,8 @@ static void *sched(bool yield)
}
rt_context_prev = &rt_active_task->ctx;
next_task->state = RT_TASK_STATE_RUNNING;
rt_active_task = next_task;
rt_active_task->state = RT_TASK_STATE_RUNNING;
#if RT_MPU_ENABLE
rt_mpu_config = &rt_active_task->mpu_config;
@ -654,61 +672,3 @@ void *rt_syscall_run(void)
rt_task_cycle_resume();
return new_ctx;
}
static void task_init(struct rt_task *task, const char *name, unsigned priority,
void *stack, size_t stack_size)
{
rt_logf("%s created\n", name);
rt_list_init(&task->list);
rt_list_init(&task->sleep_list);
task->ctx = NULL;
task->list_head = NULL;
task->priority = priority;
task->base_priority = priority;
task->state = RT_TASK_STATE_INIT;
task->wake_tick = 0;
task->blocking_mutex = NULL;
task->timeout_ptr.sem = NULL;
rt_list_init(&task->mutex_list);
#if RT_TASK_CYCLE_ENABLE
task->total_cycles = 0;
task->start_cycle = 0;
#endif
#if RT_MPU_ENABLE
rt_mpu_config_init(&task->mpu_config);
rt_mpu_config_set(&task->mpu_config, RT_MPU_TASK_REGION_START_ID,
(uintptr_t)stack, stack_size, RT_MPU_STACK_ATTR);
#else
(void)stack;
(void)stack_size;
#endif
task->name = name;
struct rt_syscall_record ready_record = {
.args.task_ready = {task},
.op = RT_SYSCALL_TASK_READY,
};
rt_syscall_push(&ready_record);
if (rt_interrupt_is_active())
{
rt_syscall_pend();
}
else
{
rt_syscall();
}
}
void rt_task_init(struct rt_task *task, void (*fn)(void), const char *name,
unsigned priority, void *stack, size_t stack_size)
{
task->ctx = rt_context_init(fn, stack, stack_size);
task_init(task, name, priority, stack, stack_size);
}
void rt_task_init_arg(struct rt_task *task, void (*fn)(uintptr_t),
uintptr_t arg, const char *name, unsigned priority,
void *stack, size_t stack_size)
{
task->ctx = rt_context_init_arg(fn, arg, stack, stack_size);
task_init(task, name, priority, stack, stack_size);
}