clean up comments, use #pragma once everywhere

Chris Copeland 2023-10-14 13:50:37 -07:00
parent 786a75e251
commit d8182e4f1c
Signed by: chrisnc
GPG Key ID: 14550DA72485DF30
44 changed files with 217 additions and 414 deletions

View File

@ -81,7 +81,6 @@ struct context
#elif PROFILE_M
#define CONTROL_NPRIV (UINT32_C(1) << 0)
#define CONTROL_SPSEL (UINT32_C(1) << 1)
#define PSR_THUMB (UINT32_C(1) << 24)
@ -205,11 +204,9 @@ static inline uint32_t spsr_mode(void)
static inline bool mode_is_interrupt(uint32_t mode)
{
/*
* NOTE: this assumes that nested interrupts don't use system mode.
/* NOTE: this assumes that nested interrupts don't use system mode.
* Interrupt nesting should use supervisor mode, which doesn't require each
* task stack to accommodate interrupts.
*/
* task stack to accommodate interrupts. */
return (mode != MODE_USR) && (mode != MODE_SYS);
}
#endif
@ -311,11 +308,9 @@ void rt_task_drop_privilege(void)
#if PROFILE_R
__asm__("dsb; cps %0" : : "i"(MODE_USR) : "memory");
#elif PROFILE_M && (__ARM_ARCH >= 7) && RT_MPU_ENABLE
/*
* We only track per-task privilege in M-profile if the MPU is enabled.
/* We only track per-task privilege in M-profile if the MPU is enabled.
* Also, dropping privilege on v6-m prevents the use of atomic operations,
* which require interrupt masking on that architecture.
*/
* which require interrupt masking on that architecture. */
uint32_t control;
__asm__ __volatile__("mrs %0, control" : "=r"(control));
__asm__("dsb; msr control, %0; isb"

View File

@ -40,15 +40,13 @@ extern "C" {
#if __ARM_ARCH == 6 || __ARM_ARCH == 7
/*
* In armv{6,7}, MPU regions have power-of-2 sizes with 8 subregions of equal
/* In armv{6,7}, MPU regions have power-of-2 sizes with 8 subregions of equal
* size. armv6 supports regions with size 256 or greater, and armv7 supports
* regions with size 32 or greater, but only supports subregions for regions
* with size 256 or greater. An MPU region of size 32 can be achieved with a
* 256-byte region and one active subregion, so just use >= 256 bytes always.
* Let SIZE_MAX represent the maximum-size region (4GB), even though it is
* off-by-one.
*/
* off-by-one. */
#define RT_MPU_SIZEBITS(n) \
(((n) <= 256) ? UINT32_C(7) \
: ((n) == SIZE_MAX) ? UINT32_C(31) \
@ -58,18 +56,16 @@ extern "C" {
#define RT_MPU_SUBREGION_SIZE(n) (RT_MPU_REGION_SIZE(n) / 8)
/*
* Only use subregions when the size is known at compile-time, to avoid
* run-time division during configurations. Static task stacks should have this
* property. Also require n > 0 because the calculations don't work for n == 0.
*/
/* Only use subregions when the size is known at compile-time, to avoid
* run-time division during configurations. Task stacks should have this
* property. Require n > 0 because the calculations don't work for n == 0. */
#define RT_MPU_USE_SUBREGIONS(n) \
(__builtin_constant_p(n) && ((n) > 0) && ((n) != SIZE_MAX))
#define RT_MPU_SUBREGIONS(n) \
(RT_MPU_USE_SUBREGIONS(n) ? ((((n)-1) / RT_MPU_SUBREGION_SIZE(n)) + 1) : 8)
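As a worked example of the subregion arithmetic above (a sketch with illustrative sizes, not the real macros): a 640-byte task stack needs a 1024-byte region, the smallest power of two that holds it, so each of the 8 subregions is 128 bytes and only 5 of them need to be enabled.
/* Illustrative compile-time checks of the subregion math (C11). */
_Static_assert((((640 - 1) / 128) + 1) == 5,
               "640-byte stack: 1024-byte region, 5 of 8 subregions enabled");
_Static_assert((((1024 - 1) / 128) + 1) == 8,
               "1024-byte stack: 1024-byte region, all 8 subregions enabled");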
/* Calculate in which subregion an address resides, given the region size. */
// Calculate in which subregion an address resides, given the region size.
#define RT_MPU_SUBREGION_OFFSET(a, n) \
((((a) & ~(RT_MPU_SUBREGION_SIZE(n) - 1)) - \
((a) & ~(RT_MPU_REGION_SIZE(n) - 1))) / \
@ -192,10 +188,8 @@ static inline void rt_mpu_region_set(uint32_t id, uintptr_t addr, size_t size,
#elif __ARM_ARCH == 8
/*
* In armv8, MPU regions are defined by start and end addresses that are
* multiples of 32 bytes.
*/
/* In armv8, MPU regions are defined by start and end addresses that are
* multiples of 32 bytes. */
#define RT_MPU_ALIGN(n) 32UL
struct rt_mpu_region
@ -329,9 +323,9 @@ static inline void rt_mpu_attr_set(uint32_t index, uint32_t attr)
*mair = (*mair & ~mask) | (attr << shift);
}
#else /* __ARM_ARCH */
#else // __ARM_ARCH < 6 || __ARM_ARCH > 8
#error "Unsupported __ARM_ARCH for MPU configuration."
#endif
#endif // __ARM_ARCH
static inline void rt_mpu_enable(void)
{

View File

@ -21,20 +21,16 @@ extern "C" {
#define RT_STACK_ALIGN(n) 8UL
#endif
/*
* A register context takes 16-18 words depending on the architecture
/* A register context takes 16-18 words depending on the architecture
* subvariant. Pushing all callee-saved registers once requires an
* additional 8 words.
*/
* additional 8 words. */
#ifndef RT_STACK_MIN
#define RT_STACK_MIN 128
#endif
/*
* With hardware floating-point, the context is larger by 34-36 words: 32 sp/16
/* With hardware floating-point, the context is larger by 34-36 words: 32 sp/16
* dp registers, 1-3 floating-point state registers (fpscr on M and R, fpexc,
* cpacr on R).
*/
* cpacr on R). */
#ifndef RT_STACK_FP_MIN
#ifdef __ARM_FP
#define RT_STACK_FP_MIN 384
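For a rough sense of these defaults: the worst case described above is 18 context words plus 8 callee-saved words, or 26 words (104 bytes) without floating point, and 26 + 36 = 62 words (248 bytes) with it, so 128 and 384 bytes leave some slack before the task's own call frames are counted. Because both minimums sit behind #ifndef, an application can raise them; a minimal sketch, assuming the definitions are visible before rt/stack.h is pulled in (the values are arbitrary):
/* In a project-wide configuration header, or as equivalent -D compiler flags: */
#define RT_STACK_MIN 256
#define RT_STACK_FP_MIN 512

#include <rt/stack.h>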

View File

@ -1,12 +1,10 @@
@ vim:ft=arm
/*
* This variant of the syscall handler supports armv6-m and armv8-m.base.
/* This variant of the syscall handler supports armv6-m and armv8-m.base.
*
* armv8-m.base also supports cbz while armv6-m does not, but otherwise they
* support the same subset of armv{7,8}-m that makes the save/restore procedure
* different, mainly due to the lack of stmdb and ldm.w/stm.w.
*/
* different, mainly due to the lack of stmdb and ldm.w/stm.w. */
#include <rt/mpu.h>
@ -31,22 +29,24 @@
.exitm
.endif
#if __ARM_ARCH == 8
/* In armv8-m, the region number isn't part of the base address
* register and must be set separately. */
mov r3, \r & 0xFC
str r3, [r2]
#endif
#endif // __ARM_ARCH == 8
ldmia r1!, {r4-r5}
str r4, [r2, 4]
str r5, [r2, 8]
mpuset (\r + 1), (\n - 1)
.endm
#else
#else // !RT_MPU_ENABLE
#define CONTROL_SIZE 0
#define controltemp
#define getcontrol
#define setcontrol
#define mpuconfigure
#endif
#endif // RT_MPU_ENABLE
#if __ARM_ARCH == 8
#define PSPLIM_SIZE 4
@ -54,13 +54,13 @@
#define getpsplim mrs r3, psplim
#define setpsplim msr psplim, r3
#define returnifnull cbz r0, .Lreturn
#else /* v6 */
#else // __ARM_ARCH == 6
#define PSPLIM_SIZE 0
#define psplimtemp
#define getpsplim
#define setpsplim
#define returnifnull cmp r0, 0; beq .Lreturn
#endif
#endif // __ARM_ARCH
#define CONTEXT_SIZE (32 + PSPLIM_SIZE + CONTROL_SIZE)
@ -132,18 +132,22 @@ rt_start:
ldmia r1!, {controltemp psplimtemp r4-r7}
msr psp, r0
setpsplim
// Set the SPSEL bit in control to switch to the process stack pointer.
movs r0, 2
msr control, r0
isb
// Once we are using the process stack pointer, interrupts can be enabled.
cpsie i
// Copy pc to the psr slot so it can be popped.
ldr r0, [sp, 24]
str r0, [sp, 28]
/* Manually perform an exception return stack pop. This ignores the
* context's initial PSR, but the current PSR works as well. We can't pop
* all of the registers at once in v6/v8-m.base, so we need to load r12 and
* lr individually and copy pc to the psr slot so it can be popped. */
ldr r0, [sp, 16]
mov r12, r0
ldr r0, [sp, 20]
mov lr, r0
ldr r0, [sp, 24]
str r0, [sp, 28]
pop {r0-r3}
add sp, 12
pop {pc}

View File

@ -1,37 +1,31 @@
@ vim:ft=arm
/*
* This variant of the syscall handler supports armv{7,8}-m{,+nofp}.
*/
// This variant of the syscall handler supports armv{7,8}-m{,+nofp}.
#include <rt/mpu.h>
#include "exc_return.h"
#ifdef __ARM_FP
/*
* For floating-point, save and restore lr, which contains the exception return
/* For floating-point, save and restore lr, which contains the exception return
* value, and use the floating-point context bit of this value to decide if the
* non-volatile fp context should be saved/restored as well.
*/
* non-volatile fp context should be saved/restored as well. */
#define excreturn ,lr
#define pushlr str lr, [sp, #-8]! // Push only lr but keep sp 8-byte aligned.
#define poplr ldr lr, [sp], 8
#define savefp tst lr, 0x10; it eq; vstmdbeq r1!, {s16-s31}
#define loadfp tst lr, 0x10; it eq; vldmiaeq r0!, {s16-s31}
#define return bx lr
#else
/*
* With nofp, lr can be clobbered and reloaded from an immediate because the
* exception return value for tasks is a constant.
*/
#else // !defined(__ARM_FP)
/* With nofp, lr can be clobbered and reloaded from an immediate because the
* exception return value for tasks is a constant. */
#define excreturn
#define pushlr
#define poplr
#define savefp
#define loadfp
#define return mov r0, TASK_INITIAL_EXC_RETURN; bx r0
#endif
#endif // __ARM_FP
#if RT_MPU_ENABLE
#define controltemp r2,
@ -67,46 +61,46 @@
#if __ARM_ARCH == 8
add r3, r2, 4
stmia r3, {r4-r9}
#else
#else // __ARM_ARCH == 7
stmia r2, {r4-r9}
#endif
#endif // __ARM_ARCH
mpuset (\r + 3), (\n - 3)
.elseif ((\r % 4) == 2) && (\n >= 2)
ldmia r1!, {r4-r7}
#if __ARM_ARCH == 8
add r3, r2, 8
stmia r3, {r4-r7}
#else
#else // __ARM_ARCH == 7
stmia r2, {r4-r7}
#endif
#endif // __ARM_ARCH
mpuset (\r + 2), (\n - 2)
.else
ldmia r1!, {r4-r5}
#if __ARM_ARCH == 8
strd r4, r5, [r2, (\r % 4) * 4]
#else
#else // __ARM_ARCH == 7
strd r4, r5, [r2]
#endif
#endif // __ARM_ARCH
mpuset (\r + 1), (\n - 1)
.endif
.endm
#else
#else // !RT_MPU_ENABLE
#define controltemp
#define getcontrol
#define setcontrol
#define mpuconfigure
#endif
#endif // RT_MPU_ENABLE
#if __ARM_ARCH == 8
#define psplimtemp r3,
#define getpsplim mrs r3, psplim
#define setpsplim msr psplim, r3
#else // v7
#else // __ARM_ARCH == 7
#define psplimtemp
#define getpsplim
#define setpsplim
#endif
#endif // __ARM_ARCH
#define saveregs stmdb r1!, {controltemp psplimtemp r4-r11 excreturn}
#define loadregs ldmia r0!, {controltemp psplimtemp r4-r11 excreturn}
@ -161,11 +155,14 @@ rt_start:
loadregs
msr psp, r0
setpsplim
// Set the SPSEL bit in control to switch to the process stack pointer.
movs r0, 2
msr control, r0
isb
// Once we are using the process stack pointer, interrupts can be enabled.
cpsie i
/* Manually perform an exception return stack pop. This ignores the
* context's initial PSR, but the current PSR works as well. */
pop {r0-r3, r12, lr}
ldr pc, [sp], 8

View File

@ -4,11 +4,9 @@
#include "vic.h"
/*
* This file provides the entry and exit sequences for the syscall handler
/* This file provides the entry and exit sequences for the syscall handler
* specific to the Vectored Interrupt Manager (VIM) and the System Software
* Interrupt (SSI), as implemented on the TI Hercules family (TMS570/RM4).
*/
* Interrupt (SSI), as implemented on the TI Hercules family (TMS570/RM4). */
#define VIM_REQENASET 0xFFFFFE30
#define VIM_REQENACLR 0xFFFFFE40
@ -19,11 +17,9 @@
#define SSI_REQENACLR (VIM_REQENACLR + SSI_CHANNEL_REG_OFFSET)
#define SSIVEC 0xFFFFFFF4
/*
* Use the address of the system control registers as a base address because it
/* Use the address of the system control registers as a base address because it
* can be loaded in a single instruction in both arm and thumb, and it allows
* (+/-) 8-bit immediate offsets to access both the VIM and SSI registers.
*/
* (+/-) 8-bit immediate offsets to access both the VIM and SSI registers. */
#define SYS_BASE 0xFFFFFF00
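To make the offset claim concrete, the register addresses above all sit within 0xFF bytes of SYS_BASE; a small illustrative check in C (not part of this assembly file):
/* Each VIM/SSI register is reachable from SYS_BASE with a (+/-) 8-bit offset. */
_Static_assert(0xFFFFFF00u - 0xFFFFFE30u == 0xD0u, "VIM_REQENASET = SYS_BASE - 0xD0");
_Static_assert(0xFFFFFF00u - 0xFFFFFE40u == 0xC0u, "VIM_REQENACLR = SYS_BASE - 0xC0");
_Static_assert(0xFFFFFFF4u - 0xFFFFFF00u == 0xF4u, "SSIVEC = SYS_BASE + 0xF4");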
.macro vic_syscall_start

View File

@ -1,10 +1,8 @@
#pragma once
/*
* The SSI should be routed to channel 126, which is the lowest priority.
/* The SSI should be routed to channel 126, which is the lowest priority.
* This maps to bit 30 of the REQENA*3 registers.
* Note: this is not the SSI's default VIM channel.
*/
* Note: this is not the SSI's default VIM channel. */
#ifndef RT_VIC_SSI_CHANNEL
#define RT_VIC_SSI_CHANNEL 126
#endif

View File

@ -2,11 +2,9 @@
@ vim:ft=arm
/*
* This file provides the entry and exit sequences for the syscall handler
/* This file provides the entry and exit sequences for the syscall handler
* specific to the Vectored Interrupt Manager (VIM) and the Software
* IRQ (SW_IRQ), as implemented on the TI Sitara family (AM26x).
*/
* IRQ (SW_IRQ), as implemented on the TI Sitara family (AM26x). */
#include "vic.h"
#include "../mode.h"
@ -25,13 +23,11 @@
str r0, [r0]
.endm
/*
* To keep the VIM management code for the IRQ simpler, we provide a separate
/* To keep the VIM management code for the IRQ simpler, we provide a separate
* SVC handler that just triggers the SW IRQ and returns.
* An alternative would be to set the VIM IRQ priority mask in
* vic_syscall_start and clear it in vic_syscall_finish. This step is only
* required when handling an SVC, but it would affect syscall IRQs as well.
*/
* required when handling an SVC, but it would affect syscall IRQs as well. */
.macro vic_syscall_pend
push {r0, r1}
@ -63,12 +59,10 @@ rt_syscall_handler_svc:
.size rt_syscall_handler_svc, .-rt_syscall_handler_svc
/*
* At the cost of 2 extra instructions and 16 bytes of per-task stack space,
/* At the cost of 2 extra instructions and 16 bytes of per-task stack space,
* the SVC handler can re-enable interrupts to minimize the latency of other
* interrupts. An application can choose either depending on system
* requirements.
*/
* requirements. */
.section .text.rt_syscall_handler_svc_ie,"ax",%progbits
.global rt_syscall_handler_svc_ie
.type rt_syscall_handler_svc_ie, %function

View File

@ -1,9 +1,7 @@
#pragma once
/*
* The SW IRQ should be given the lowest priority.
* Its ID is 129 on AM263x. Override it if needed.
*/
/* The SW IRQ should be given the lowest priority.
* Its ID is 129 on AM263x. Override it if needed. */
#ifndef RT_VIC_SW_IRQ
#define RT_VIC_SW_IRQ 129
#endif
@ -15,10 +13,8 @@
#define RT_VIC_SW_IRQ_REG_OFFSET ((RT_VIC_SW_IRQ / 32) * 0x20)
#define RT_VIC_SW_IRQ_GROUP (RT_VIC_VIM_GROUP + RT_VIC_SW_IRQ_REG_OFFSET)
/*
* The Sitara VIM requires IRQ handler addresses to be 4-byte aligned, even if
* they are thumb-encoded. (The least-significant 2 bits are ignored.)
*/
/* The Sitara VIM requires IRQ handler addresses to be 4-byte aligned, even if
* they are thumb-encoded. (The least-significant 2 bits are ignored.) */
#define RT_VIC_IRQ_ALIGN 4
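One way application code might satisfy the alignment requirement when defining a handler whose address is placed in the VIM table; the handler name is hypothetical, and rt's own handlers may already take care of this:
/* Force 4-byte alignment even when the function is thumb-encoded. */
__attribute__((aligned(RT_VIC_IRQ_ALIGN))) void board_uart_irq(void)
{
    /* acknowledge and service the peripheral */
}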
#ifndef __ASSEMBLER__

View File

@ -1,13 +1,11 @@
@ vim:ft=arm
/*
* The interrupt management code for the syscall handler depends on both the
/* The interrupt management code for the syscall handler depends on both the
* type of interrupt controller and the source of the syscall interrupt, so we
* provide a way to extend this code with assembly macros.
* vic_syscall_start needs to mask and clear the syscall interrupt such that
* another syscall cannot occur when IRQs are re-enabled. vic_syscall_finish
* must unmask the syscall interrupt. Both are run with IRQs disabled.
*/
* must unmask the syscall interrupt. Both are run with IRQs disabled. */
#include <rt/mpu.h>

View File

@ -14,7 +14,7 @@ static unsigned x = 0;
static void trap_last(void)
{
static rt::sem trap_sem(NUM_TASKS - 1);
/* Only the last task to finish will call rt::trap. */
// Only the last task to finish will call rt::trap.
if (!trap_sem.trywait())
{
rt_assert(x == (ITERATIONS * NUM_TASKS), "x has the wrong value");

View File

@ -18,7 +18,7 @@ static void sleep_periodic(uintptr_t period)
"woke up at the wrong tick");
}
/* Only the second task to finish will call rt::trap. */
// Only the second task to finish will call rt::trap.
static rt::sem trap_sem(1);
if (!trap_sem.trywait())
{

View File

@ -14,7 +14,7 @@ static unsigned x = 0;
static void trap_last(void)
{
static RT_SEM(trap_sem, NUM_TASKS - 1);
/* Only the last task to finish will call rt_trap. */
// Only the last task to finish will call rt_trap.
if (!rt_sem_trywait(&trap_sem))
{
rt_assert(x == (ITERATIONS * NUM_TASKS), "x has the wrong value");

View File

@ -19,7 +19,7 @@ static void sleep_periodic(uintptr_t period)
"woke up at the wrong tick");
}
/* Only the second task to finish will call rt_trap. */
// Only the second task to finish will call rt_trap.
static RT_SEM(trap_sem, 1);
if (!rt_sem_trywait(&trap_sem))
{

View File

@ -1,10 +1,7 @@
#ifndef WATER_H
#define WATER_H
#pragma once
void hydrogen(void);
void oxygen(void);
void make_water(void);
#endif /* WATER_H */

View File

@ -1,5 +1,4 @@
#ifndef RT_ASSERT_H
#define RT_ASSERT_H
#pragma once
#include <stdbool.h>
@ -18,5 +17,3 @@ void rt_assert(bool condition, const char *msg);
#ifdef __cplusplus
}
#endif
#endif /* RT_ASSERT_H */

View File

@ -1,5 +1,4 @@
#ifndef RT_ATOMIC_H
#define RT_ATOMIC_H
#pragma once
#include <stdatomic.h>
#include <stdbool.h>
@ -50,13 +49,10 @@ typedef rt_atomic(uint32_t) rt_atomic_uint32_t;
#define RT_ATOMIC_ACQ_REL memory_order_acq_rel
#define RT_ATOMIC_SEQ_CST memory_order_seq_cst
/*
* Work around a bug in gcc where atomic_flag operations silently don't
/* Work around a bug in gcc where atomic_flag operations silently don't
* generate atomic code on armv6-m rather than failing to link. The equivalent
* atomic_exchange operations on an atomic_bool work.
*
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107567
*/
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107567 */
typedef rt_atomic_bool rt_atomic_flag;
#define RT_ATOMIC_FLAG_INIT false
#define rt_atomic_flag_test_and_set(f, mo) atomic_exchange_explicit(f, true, mo)
@ -65,5 +61,3 @@ typedef rt_atomic_bool rt_atomic_flag;
#ifdef __cplusplus
}
#endif
#endif /* RT_ATOMIC_H */

View File

@ -1,5 +1,4 @@
#ifndef RT_BARRIER_H
#define RT_BARRIER_H
#pragma once
#include <rt/cond.h>
#include <rt/mutex.h>
@ -14,11 +13,9 @@ struct rt_barrier;
void rt_barrier_init(struct rt_barrier *barrier, unsigned int count);
/*
* Blocks until count threads have called it, at which point it returns
* true to one of those threads and false to the others, and resets to its
* initial state, waiting for another count threads.
*/
/* Block until count threads have called rt_barrier_wait. The function will
* then return true to one of the threads in the group and reset to its initial
* state, waiting for another count threads. */
bool rt_barrier_wait(struct rt_barrier *barrier);
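A minimal usage sketch, assuming four participating tasks; the setup function, worker body, and count are hypothetical, and only the rt_barrier_init/rt_barrier_wait calls come from this header:
#include <rt/barrier.h>

static struct rt_barrier rendezvous;

void rendezvous_setup(void) /* called once, e.g. before rt_start */
{
    rt_barrier_init(&rendezvous, 4);
}

static void worker(void)
{
    for (;;)
    {
        /* ... produce this task's share of the work ... */
        if (rt_barrier_wait(&rendezvous))
        {
            /* exactly one of the four tasks gets true each round and can
             * combine or publish the results before the next round starts */
        }
    }
}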
struct rt_barrier
@ -40,5 +37,3 @@ struct rt_barrier
#ifdef __cplusplus
}
#endif
#endif /* RT_BARRIER_H */

View File

@ -1,5 +1,4 @@
#ifndef RT_COND_H
#define RT_COND_H
#pragma once
#include <rt/sem.h>
@ -38,5 +37,3 @@ struct rt_cond
#ifdef __cplusplus
}
#endif
#endif /* RT_COND_H */

View File

@ -1,5 +1,4 @@
#ifndef RT_CONTAINER_H
#define RT_CONTAINER_H
#pragma once
#include <stddef.h>
#include <stdint.h>
@ -14,5 +13,3 @@ extern "C" {
#ifdef __cplusplus
}
#endif
#endif /* RT_CONTAINER_H */

View File

@ -1,5 +1,4 @@
#ifndef RT_CONTEXT_H
#define RT_CONTEXT_H
#pragma once
#include <stddef.h>
#include <stdint.h>
@ -8,32 +7,22 @@
extern "C" {
#endif
/*
* Initialize the given stack with a new context that will execute fn().
*/
// Initialize the given stack with a new context that will execute fn().
void *rt_context_init(void (*fn)(void), void *stack, size_t stack_size);
/*
* Initialize the given stack with a new context that will execute fn(arg).
*/
// Initialize the given stack with a new context that will execute fn(arg).
void *rt_context_init_arg(void (*fn)(uintptr_t), uintptr_t arg, void *stack,
size_t stack_size);
/*
* Start execution from the given context. An implementation can assume that
/* Start execution from the given context. An implementation can assume that
* the context represents the beginning of a function call. This should only be
* called by rt_start.
*/
* called by rt_start. */
__attribute__((noreturn)) void rt_context_start(void *ctx);
/*
* Pointer to the previous task's context field, used to store the suspending
* context during a context switch.
*/
/* Pointer to the previous task's context field, used to store the suspending
* context during a context switch. */
extern void **rt_context_prev;
#ifdef __cplusplus
}
#endif
#endif /* RT_CONTEXT_H */

View File

@ -1,5 +1,4 @@
#ifndef RT_CYCLE_H
#define RT_CYCLE_H
#pragma once
#include <stdint.h>
@ -11,30 +10,20 @@ extern "C" {
#define RT_CYCLE_ENABLE 0
#endif
/*
* Initialize any hardware necessary for cycle counting.
*/
// Initialize any hardware necessary for cycle counting.
void rt_cycle_init(void);
/*
* Get the current CPU cycle.
*/
// Get the current CPU cycle.
uint32_t rt_cycle(void);
/*
* Pause attributing CPU cycles to the current task.
* NOTE: currently this can only be safely used from the syscall handler.
*/
/* Pause attributing CPU cycles to the current task.
* NOTE: currently this can only be safely used from the syscall handler. */
void rt_task_cycle_pause(void);
/*
* Resume attributing CPU cycles to the current task.
* NOTE: currently this can only be safely used from the syscall handler.
*/
/* Resume attributing CPU cycles to the current task.
* NOTE: currently this can only be safely used from the syscall handler. */
void rt_task_cycle_resume(void);
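A small sketch of measuring a code section with the cycle counter, assuming rt_cycle_init was called once during startup; the helper below is illustrative:
#include <rt/cycle.h>
#include <stdint.h>

static uint32_t cycles_for(void (*fn)(void))
{
    const uint32_t start = rt_cycle();
    fn();
    /* Unsigned subtraction gives the elapsed count even across wraparound. */
    return rt_cycle() - start;
}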
#ifdef __cplusplus
}
#endif
#endif /* RT_CYCLE_H */

View File

@ -1,5 +1,4 @@
#ifndef RT_IDLE_H
#define RT_IDLE_H
#pragma once
#ifdef __cplusplus
extern "C" {
@ -10,5 +9,3 @@ void rt_idle(void);
#ifdef __cplusplus
}
#endif
#endif /* RT_IDLE_H */

View File

@ -1,5 +1,4 @@
#ifndef RT_INTERRUPT_H
#define RT_INTERRUPT_H
#pragma once
#include <stdbool.h>
@ -7,13 +6,9 @@
extern "C" {
#endif
/*
* Returns true if called from within an interrupt.
*/
// Returns true if called from within an interrupt.
bool rt_interrupt_is_active(void);
#ifdef __cplusplus
}
#endif
#endif /* RT_INTERRUPT_H */

View File

@ -1,5 +1,4 @@
#ifndef RT_LIST_H
#define RT_LIST_H
#pragma once
#include <stdbool.h>
@ -22,9 +21,7 @@ struct rt_list *rt_list_front(const struct rt_list *list);
void rt_list_push_back(struct rt_list *list, struct rt_list *node);
/*
* Insert node into list at the first position where cmp(node, node->next).
*/
// Insert node into list at the first position where cmp(node, node->next).
void rt_list_insert_by(struct rt_list *list, struct rt_list *node,
bool (*cmp)(const struct rt_list *,
const struct rt_list *));
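For illustration, a comparator that keeps a list sorted by a numeric key; the item struct and the container-of arithmetic are hypothetical application code, not part of rt:
#include <rt/list.h>
#include <stdbool.h>
#include <stddef.h>

struct item
{
    struct rt_list node;
    int key;
};

static bool item_less(const struct rt_list *a, const struct rt_list *b)
{
    const struct item *ia =
        (const struct item *)((const char *)a - offsetof(struct item, node));
    const struct item *ib =
        (const struct item *)((const char *)b - offsetof(struct item, node));
    return ia->key < ib->key;
}

/* rt_list_insert_by(&list, &it->node, item_less) places the node at the first
 * position where its key is less than the next node's key, so the list stays
 * in ascending key order and equal keys keep their insertion order. */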
@ -42,5 +39,3 @@ void rt_list_insert_by(struct rt_list *list, struct rt_list *node,
#ifdef __cplusplus
}
#endif
#endif /* RT_LIST_H */

View File

@ -1,5 +1,4 @@
#ifndef RT_LOG_H
#define RT_LOG_H
#pragma once
#ifdef __cplusplus
extern "C" {
@ -18,5 +17,3 @@ __attribute__((format(printf, 1, 2))) void rt_logf(const char *fmt, ...);
#ifdef __cplusplus
}
#endif
#endif /* RT_LOG_H */

View File

@ -1,5 +1,4 @@
#ifndef RT_MPU_H
#define RT_MPU_H
#pragma once
#ifdef __cplusplus
extern "C" {
@ -13,9 +12,9 @@ extern "C" {
#include <rt/arch/mpu.h>
#else /* !RT_MPU_ENABLE */
#else // !RT_MPU_ENABLE
/* Provide no-op macros for when there's no MPU. */
// Provide no-op macros for when there's no MPU.
#define RT_MPU_CONFIG_INIT(config, ...) \
do \
{ \
@ -24,10 +23,8 @@ extern "C" {
#define RT_MPU_PRIV_DATA(name)
#define RT_MPU_PRIV_BSS(name)
#endif /* RT_MPU_ENABLE */
#endif // RT_MPU_ENABLE
#ifdef __cplusplus
}
#endif
#endif /* RT_MPU_H */

View File

@ -1,5 +1,4 @@
#ifndef RT_MUTEX_H
#define RT_MUTEX_H
#pragma once
#include <rt/list.h>
#include <rt/sem.h>
@ -37,14 +36,10 @@ struct rt_mutex
#define RT_MUTEX_WAITER_MASK ((uintptr_t)1)
/*
* Try to lock the mutex from a given task.
* This function should not be called directly.
*/
/* Try to lock the mutex from a given task.
* This function should not be called directly. */
bool rt_mutex_trylock_with_task(struct rt_mutex *mutex, struct rt_task *task);
#ifdef __cplusplus
}
#endif
#endif /* RT_MUTEX_H */

View File

@ -1,5 +1,4 @@
#ifndef RT_NOTIFY_H
#define RT_NOTIFY_H
#pragma once
#include <rt/atomic.h>
#include <rt/sem.h>
@ -26,66 +25,44 @@ void rt_notify_init(struct rt_notify *note, uint32_t value);
#define RT_NOTIFY(name, value_) \
struct rt_notify name = RT_NOTIFY_INIT(name, value_)
/*
* Notify without changing the notification value.
*/
// Notify without changing the notification value.
void rt_notify_post(struct rt_notify *note);
/*
* Notify and |= the notification value.
*/
// Notify and |= the notification value.
void rt_notify_or(struct rt_notify *note, uint32_t value);
/*
* Notify and += the notification value.
*/
// Notify and += the notification value.
void rt_notify_add(struct rt_notify *note, uint32_t value);
/*
* Notify and set the notification value unconditionally.
*/
// Notify and set the notification value unconditionally.
void rt_notify_set(struct rt_notify *note, uint32_t value);
/*
* Block until notified, and return the notification value. Only one task may
* call rt_notify_*wait* for each struct rt_notify.
*/
/* Block until notified, and return the notification value. Only one task may
* call rt_notify_*wait* for each struct rt_notify. */
uint32_t rt_notify_wait(struct rt_notify *note);
/*
* Block the current task until notified, and &= ~clear the notification value.
* The returned value is before clearing.
*/
/* Block the current task until notified, and &= ~clear the notification value.
* The returned value is before clearing. */
uint32_t rt_notify_wait_clear(struct rt_notify *note, uint32_t clear);
/*
* Get the notification value if one is pending. Returns false if there is no
* notification pending.
*/
/* Get the notification value if one is pending. Returns false if there is no
* notification pending. */
bool rt_notify_trywait(struct rt_notify *note, uint32_t *value);
/*
* rt_notify_trywait and &= ~clear the notification value after fetching it.
*/
// rt_notify_trywait and &= ~clear the notification value after fetching it.
bool rt_notify_trywait_clear(struct rt_notify *note, uint32_t *value,
uint32_t clear);
/*
* Wait for a pending notification until a timeout expires and get the value if
/* Wait for a pending notification until a timeout expires and get the value if
* a notification occurs. Returns false if there is no notification before the
* timeout expires.
*/
* timeout expires. */
bool rt_notify_timedwait(struct rt_notify *note, uint32_t *value,
unsigned long ticks);
/*
* rt_notify_timedwait and &= ~clear the notification value after fetching it.
*/
// rt_notify_timedwait and &= ~clear the notification value after fetching it.
bool rt_notify_timedwait_clear(struct rt_notify *note, uint32_t *value,
uint32_t clear, unsigned long ticks);
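A usage sketch tying these together: one task consumes event bits that other code sets; the bit names and producer are hypothetical, and only a single task calls the wait functions, as required above:
#include <rt/notify.h>
#include <stdint.h>

#define EVENT_RX (UINT32_C(1) << 0)
#define EVENT_TX (UINT32_C(1) << 1)

RT_NOTIFY(events, 0);

static void producer(void)
{
    /* ... */
    rt_notify_or(&events, EVENT_RX); /* set a bit and wake the waiter */
}

static void consumer(void)
{
    for (;;)
    {
        /* Returns the value before clearing the requested bits. */
        const uint32_t pending =
            rt_notify_wait_clear(&events, EVENT_RX | EVENT_TX);
        if ((pending & EVENT_RX) != 0)
        {
            /* handle receive */
        }
        if ((pending & EVENT_TX) != 0)
        {
            /* handle transmit-complete */
        }
    }
}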
#ifdef __cplusplus
}
#endif
#endif /* RT_NOTIFY_H */

View File

@ -1,5 +1,4 @@
#ifndef RT_ONCE_H
#define RT_ONCE_H
#pragma once
#include <rt/atomic.h>
#include <rt/mutex.h>
@ -10,11 +9,9 @@ extern "C" {
struct rt_once;
/*
* Call fn exactly once among all callers using the same struct rt_once.
/* Call fn exactly once among all callers using the same struct rt_once.
* Regardless of which caller actually executes fn, rt_once_call only returns
* for any caller after fn has returned.
*/
* for any caller after fn has returned. */
static inline void rt_once_call(struct rt_once *once, void (*fn)(void));
struct rt_once
@ -30,9 +27,7 @@ struct rt_once
#define RT_ONCE(name) struct rt_once name = RT_ONCE_INIT(name)
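A usage sketch: lazily initializing something shared exactly once regardless of which task arrives first; driver_init and driver_use are hypothetical:
#include <rt/once.h>

static void driver_init(void)
{
    /* one-time hardware or bookkeeping setup */
}

RT_ONCE(driver_once);

void driver_use(void)
{
    /* Every caller returns only after driver_init has completed. */
    rt_once_call(&driver_once, driver_init);
    /* ... use the driver ... */
}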
/*
* The slow path for rt_once. Do not call this directly.
*/
// The slow path for rt_once. Do not call this directly.
void rt_once_call_slow(struct rt_once *once, void (*fn)(void));
static inline void rt_once_call(struct rt_once *once, void (*fn)(void))
@ -46,5 +41,3 @@ static inline void rt_once_call(struct rt_once *once, void (*fn)(void))
#ifdef __cplusplus
}
#endif
#endif /* RT_ONCE_H */

View File

@ -1,15 +1,12 @@
#ifndef RT_QUEUE_H
#define RT_QUEUE_H
#pragma once
/*
* A bounded, multi-producer, multi-consumer, lock-free queue that supports
/* A bounded, multi-producer, multi-consumer, lock-free queue that supports
* blocking, timed, and non-blocking push, pop, and peek. The queue supports as
* many threads/interrupts accessing it simultaneously as there are slots in
* the queue. Additional queue operations will block, time out, or fail until a
* previous thread has completed its operation. A push will block, time out, or
* fail if there is no space left in the queue, and likewise for pop/peek if
* the queue is empty.
*/
* the queue is empty. */
#include <rt/assert.h>
#include <rt/atomic.h>
@ -79,5 +76,3 @@ struct rt_queue
#ifdef __cplusplus
}
#endif
#endif /* RT_QUEUE_H */

View File

@ -1,5 +1,4 @@
#ifndef RT_RWLOCK_H
#define RT_RWLOCK_H
#pragma once
#include <rt/cond.h>
#include <rt/mutex.h>
@ -36,5 +35,3 @@ struct rt_rwlock
#ifdef __cplusplus
}
#endif
#endif /* RT_RWLOCK_H */

View File

@ -1,5 +1,4 @@
#ifndef RT_SEM_H
#define RT_SEM_H
#pragma once
#include <rt/atomic.h>
#include <rt/list.h>
@ -77,5 +76,3 @@ struct rt_sem
#ifdef __cplusplus
}
#endif
#endif /* RT_SEM_H */

View File

@ -1,5 +1,4 @@
#ifndef RT_STACK_H
#define RT_STACK_H
#pragma once
#include <rt/arch/stack.h>
@ -31,5 +30,3 @@ extern "C" {
#ifdef __cplusplus
}
#endif
#endif /* RT_STACK_H */

View File

@ -1,10 +1,7 @@
#ifndef RT_START_H
#define RT_START_H
#pragma once
/* Start rt. Generally should be called from assembly after initialization. */
// Start rt. Generally should be called from assembly after initialization.
__attribute__((noreturn)) void rt_start(void);
/* Get the first task context to run. Called by rt_start. */
// Get the first task context to run. Called by rt_start.
void *rt_start_context(void);
#endif /* RT_START_H */

View File

@ -1,42 +1,43 @@
#ifndef RT_SYSCALL_H
#define RT_SYSCALL_H
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
struct rt_task;
enum rt_syscall_op
{
/* Processes a tick. */
// Processes a tick.
RT_SYSCALL_TICK,
/* Sleep from a task. */
// Sleep from a task.
RT_SYSCALL_TASK_SLEEP,
RT_SYSCALL_TASK_SLEEP_PERIODIC,
/* Wait on a semaphore from a task. */
// Wait on a semaphore from a task.
RT_SYSCALL_SEM_WAIT,
RT_SYSCALL_SEM_TIMEDWAIT,
/* Post a semaphore from a task or interrupt. */
// Post a semaphore from a task or interrupt.
RT_SYSCALL_SEM_POST,
/* Lock a mutex from a task. */
// Lock a mutex from a task.
RT_SYSCALL_MUTEX_LOCK,
RT_SYSCALL_MUTEX_TIMEDLOCK,
/* Unlock a mutex from a task. */
// Unlock a mutex from a task.
RT_SYSCALL_MUTEX_UNLOCK,
/* Add a task to the ready list. */
// Add a task to the ready list.
RT_SYSCALL_TASK_READY,
/* Exit from a task. */
// Exit from a task.
RT_SYSCALL_TASK_EXIT,
};
struct rt_sem;
struct rt_mutex;
struct rt_task;
union rt_syscall_args
{
struct rt_syscall_args_task_sleep
@ -88,38 +89,29 @@ struct rt_syscall_record
enum rt_syscall_op op;
};
/* Enqueue a system call record. It will be processed once the system call
* handler is triggered, either by rt_syscall or rt_syscall_pend. */
void rt_syscall_push(struct rt_syscall_record *record);
/*
* Architecture-dependent handler for syscalls. This will call rt_syscall_run
* and perform a context switch if necessary. The syscall interrupt must be
* masked and cleared before calling rt_syscall_run.
*/
void rt_syscall_handler(void);
/*
* Architecture-dependent trigger for the syscall handler to be called from
/* Architecture-dependent trigger for the syscall handler to be called from
* tasks. This should cause the syscall handler to run before this function
* returns.
*/
* returns. */
void rt_syscall(void);
/*
* Architecture-dependent trigger for the syscall handler that can only be
/* Architecture-dependent trigger for the syscall handler that can only be
* called from an interrupt. If called from an interrupt, the syscall should
* run once no other interrupts are running, but before another task gets to
* run.
*/
* run. */
void rt_syscall_pend(void);
/*
* Perform all pending system calls and return a new context to execute or NULL
* if no context switch is required.
*/
/* Perform all pending system calls and return a new context to execute or NULL
* if no context switch is required. */
void *rt_syscall_run(void);
/* Architecture-dependent handler for syscalls. This will call rt_syscall_run
* and perform a context switch if necessary. */
void rt_syscall_handler(void);
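A conceptual sketch of the relationship between the handler and rt_syscall_run; real handlers are architecture-specific assembly, and the context save/restore is only indicated by comments here:
void example_syscall_handler(void) /* illustrative, not a real port */
{
    void *next = rt_syscall_run(); /* process pending records, pick a context */
    if (next != NULL)
    {
        /* Save the outgoing task's registers, store the suspending context
         * through rt_context_prev (see rt/context.h), then resume `next`. */
    }
}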
#ifdef __cplusplus
}
#endif
#endif /* RT_SYSCALL_H */

View File

@ -1,5 +1,4 @@
#ifndef RT_TASK_H
#define RT_TASK_H
#pragma once
#include <rt/assert.h>
#include <rt/context.h>
@ -43,44 +42,28 @@ extern "C" {
#error "To use task cycle counts, the cycle counter must be enabled."
#endif
/*
* Yield the core to another task of the same priority. If the current task is
* still the highest priority, it will continue executing.
*/
/* Yield the core to another task of the same priority. If the current task is
* still the highest priority, it will continue executing. */
void rt_task_yield(void);
/*
* Sleep the current task for a given number of ticks.
*/
// Sleep the current task for a given number of ticks.
void rt_task_sleep(unsigned long ticks);
/*
* Sleep the current task until *last_wake_tick + period.
* *last_wake_tick will be set to the next wakeup tick.
*/
/* Sleep the current task until *last_wake_tick + period. *last_wake_tick will
* be set to the next wakeup tick. */
void rt_task_sleep_periodic(unsigned long *last_wake_tick,
unsigned long period);
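A sketch of a task that runs every 10 ticks without accumulating drift; the task name, priority, and period are arbitrary, and seeding last_wake from rt_tick_count is one reasonable choice:
#include <rt/stack.h>
#include <rt/task.h>
#include <rt/tick.h>

static void control_loop(void)
{
    unsigned long last_wake = rt_tick_count();
    for (;;)
    {
        rt_task_sleep_periodic(&last_wake, 10);
        /* periodic work here; wakeups stay on the 10-tick grid even if the
         * work takes a varying (but smaller) number of ticks */
    }
}

RT_TASK(control_loop, RT_STACK_MIN, 2);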
/*
* Exit from the current task. This will be called automatically when a task
* function returns.
*/
// Exit from the current task. Returning from a task function also exits.
__attribute__((noreturn)) void rt_task_exit(void);
/*
* Get the name of the current task.
*/
// Get the name of the current task.
const char *rt_task_name(void);
/*
* Get the current task.
*/
// Get the current task.
struct rt_task *rt_task_self(void);
/*
* On architectures that have privilege levels, make the current task
* unprivileged.
*/
// Make the current task unprivileged. (This is a no-op on some architectures.)
void rt_task_drop_privilege(void);
enum rt_task_state
@ -121,10 +104,8 @@ struct rt_task
const char *name;
};
/*
* Add a task to the ready list. This function may only be called in the system
* call handler or before rt_start.
*/
/* Add a task to the ready list. This function may only be called in the system
* call handler or before rt_start. */
void rt_task_ready(struct rt_task *task);
#define RT_TASK_INIT(name_, name_str, priority_, ...) \
@ -172,11 +153,9 @@ void rt_task_ready(struct rt_task *task);
"the priority of task \"" name "\", " #priority \
", is higher than the maximum allowed")
/*
* Statically create a task that runs fn on a stack of at least stack_size,
* with the given priority. To create new tasks after rt is running, use
* rt_task_init*.
*/
/* Create a task that runs fn on a stack of at least stack_size, with the given
* priority. Additional arguments are MPU regions that will be active while the
* task is running. */
#define RT_TASK(fn, stack_size, priority, ...) \
RT_TASK_COMMON(fn, stack_size, priority, #fn, \
rt_context_init((fn), fn##_task_stack, \
@ -192,5 +171,3 @@ void rt_task_ready(struct rt_task *task);
#ifdef __cplusplus
}
#endif
#endif /* RT_TASK_H */

View File

@ -1,22 +1,15 @@
#ifndef RT_TICK_H
#define RT_TICK_H
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
/*
* Advance to the next tick. Should be called periodically.
*/
// Advance to the next tick. Should be called periodically.
void rt_tick_advance(void);
/*
* Return the current tick.
*/
// Return the current tick.
unsigned long rt_tick_count(void);
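A minimal sketch of driving the tick from a periodic timer interrupt; SysTick_Handler is only a common Cortex-M vector name, not something this header mandates:
#include <rt/tick.h>

void SysTick_Handler(void)
{
    rt_tick_advance(); /* one call per tick period */
}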
#ifdef __cplusplus
}
#endif
#endif /* RT_TICK_H */

View File

@ -1,5 +1,4 @@
#ifndef RT_TRAP_H
#define RT_TRAP_H
#pragma once
#ifdef __cplusplus
extern "C" {
@ -10,5 +9,3 @@ __attribute__((noreturn)) void rt_trap(void);
#ifdef __cplusplus
}
#endif
#endif /* RT_TRAP_H */

View File

@ -14,10 +14,8 @@ pub fn trap() -> ! {
unsafe { rt_trap() }
}
/*
* These items must be re-exported so rt's macros can use them. The task macro
* uses rt_syscall_record to make a task ready.
*/
/* These items must be re-exported so rt's macros can use them. The task macro
* uses rt_syscall_record to make a task ready. */
#[cfg(test)]
use std::sync::Once;

View File

@ -69,7 +69,7 @@ void rt_mutex_unlock(struct rt_mutex *mutex)
RT_ATOMIC_RELAXED))
{
rt_logf("%s mutex unlock with no waiters\n", rt_task_name());
/* Unlock with no waiters, nothing to do. */
// Unlock with no waiters, nothing to do.
return;
}

View File

@ -3,11 +3,11 @@
void rt_once_call_slow(struct rt_once *once, void (*fn)(void))
{
rt_mutex_lock(&once->mutex);
/* A mutex has acquire-release semantics, so we can load relaxed here. */
// A mutex has acquire-release semantics, so we can load relaxed here.
if (rt_atomic_load(&once->done, RT_ATOMIC_RELAXED) == 0)
{
fn();
/* This release pairs with the acquire in the fast path. */
// This release pairs with the acquire in the fast path.
rt_atomic_store(&once->done, 1, RT_ATOMIC_RELEASE);
}
rt_mutex_unlock(&once->mutex);

View File

@ -6,19 +6,19 @@
#include <limits.h>
#include <string.h>
/* An empty slot ready to be pushed. */
// An empty slot ready to be pushed.
#define SLOT_EMPTY 0x00U
/* An empty slot that has been claimed by a pusher. */
// An empty slot that has been claimed by a pusher.
#define SLOT_PUSH 0x05U
/* A full slot that has been claimed by a popper/peeker. */
// A full slot that has been claimed by a popper/peeker.
#define SLOT_POP 0x0AU
/* An empty slot that has been skipped by a popper. */
// An empty slot that has been skipped by a popper.
#define SLOT_SKIPPED 0x0CU
/* A full slot ready to be popped. */
// A full slot ready to be popped.
#define SLOT_FULL 0x0FU
#define SLOT_STATE_MASK 0x0FU
@ -260,7 +260,7 @@ static void peek(struct rt_queue *queue, void *elem)
}
}
}
/* After peeking, another popper/peeker may run. */
// After peeking, another popper/peeker may run.
rt_sem_post(&queue->pop_sem);
}

View File

@ -87,7 +87,7 @@ static bool mutex_priority_greater_than(const struct rt_list *a,
{
const struct rt_mutex *ma = mutex_from_list(a);
const struct rt_mutex *mb = mutex_from_list(b);
/* Only mutexes that have waiters should be compared. */
// Only mutexes that have waiters should be compared.
return task_from_list(rt_list_front(&ma->wait_list))->priority >
task_from_list(rt_list_front(&mb->wait_list))->priority;
}
@ -104,7 +104,8 @@ static struct rt_syscall_record *_Atomic rt_pending_syscalls = NULL;
RT_TASK(rt_idle, RT_STACK_MIN, 0);
/* rt_active_task must be readable from user code. */
/* rt_active_task must be readable from user code.
* Task structures themselves are privileged. */
static struct rt_task *rt_active_task = NULL;
void rt_task_yield(void)
@ -156,7 +157,7 @@ __attribute__((noreturn)) void rt_task_exit(void)
rt_syscall_push(&exit_record);
rt_syscall();
/* Should not be reached. */
// Should not be reached.
rt_trap();
}
@ -195,14 +196,12 @@ static void *sched(bool yield)
struct rt_task *next_task = next_ready_task();
if (!next_task)
{
/*
* Note, if a task other than the idle task is running, then the ready
/* Note, if a task other than the idle task is running, then the ready
* list will never be empty, because if the idle task is not running,
* then it is ready. This also means that the active task's state
* doesn't need to be checked or adjusted here, because it will always
* be RUNNING. For active tasks other than idle, the state can be
* anything at this point.
*/
* be running. For active tasks other than idle, the state can be
* anything at this point. */
rt_logf("sched: no new tasks to run, continuing %s\n", rt_task_name());
return NULL;
}
@ -261,9 +260,6 @@ static void *sched(bool yield)
return rt_active_task->ctx;
}
/*
* These globals may only be manipulated in the system call handler.
*/
RT_MPU_PRIV_BSS(rt_woken_tick)
static unsigned long rt_woken_tick;
@ -305,11 +301,11 @@ static void wake_mutex_waiter(struct rt_mutex *mutex)
{
if (rt_list_is_empty(&mutex->wait_list))
{
/* If the mutex has no waiters, there's nothing to do. */
// If the mutex has no waiters, there's nothing to do.
return;
}
/* Attempt to acquire the mutex on behalf of the first waiter. */
// Attempt to acquire the mutex on behalf of the first waiter.
struct rt_task *const task =
task_from_list(rt_list_front(&mutex->wait_list));
@ -343,7 +339,7 @@ static void wake_mutex_waiter(struct rt_mutex *mutex)
* whether the task's effective priority changed. */
static bool task_donate(struct rt_task *task)
{
/* Recalculate the task's priority starting from its base priority. */
// Recalculate the task's priority starting from its base priority.
unsigned int priority = task->base_priority;
/* If the task is holding any donating mutexes, donate the highest priority
@ -362,7 +358,7 @@ static bool task_donate(struct rt_task *task)
if (priority == task->priority)
{
/* The task priority didn't change; nothing else to do. */
// The task priority didn't change; nothing else to do.
return false;
}
@ -398,7 +394,7 @@ static void mutex_donate(struct rt_mutex *mutex)
~RT_MUTEX_WAITER_MASK;
if (holder == 0)
{
/* If the mutex is not held then no donation is needed. */
// If the mutex is not held then no donation is needed.
return;
}
@ -406,12 +402,12 @@ static void mutex_donate(struct rt_mutex *mutex)
if (!rt_list_is_empty(&mutex->wait_list))
{
/* Re-sort the mutex in the holder's mutex list. */
// Re-sort the mutex in the holder's mutex list.
rt_list_remove(&mutex->list);
insert_mutex_by_priority(&task->mutex_list, mutex);
}
/* Update the holder's priority. If it didn't change, we're done. */
// Update the holder's priority. If it didn't change, we're done.
if (!task_donate(task))
{
return;
@ -435,10 +431,10 @@ static void tick_syscall(void)
break;
}
/* Check if the task is blocked on a timed operation. */
// Check if the task is blocked on a timed operation.
if (!rt_list_is_empty(&task->list))
{
/* Unblock the task. */
// Unblock the task.
rt_list_remove(&task->list);
if (task->blocking_mutex != NULL)
{
@ -480,7 +476,7 @@ static void tick_syscall(void)
rt_woken_tick += ticks_to_advance;
}
/* Unprivileged tasks need to read the tick count. */
// Unprivileged tasks need to read the tick count.
static rt_atomic_ulong rt_tick;
RT_MPU_PRIV_BSS(rt_tick_pending)
@ -524,7 +520,7 @@ void rt_syscall_push(struct rt_syscall_record *record)
void rt_task_cycle_pause(void)
{
#if RT_TASK_CYCLE_ENABLE
/* TODO: Make this safe to call from any interrupt. */
// TODO: Make this safe to call from any interrupt.
const uint32_t task_cycles = rt_cycle() - rt_active_task->start_cycle;
rt_active_task->total_cycles += task_cycles;
#endif
@ -541,10 +537,8 @@ void *rt_syscall_run(void)
{
rt_task_cycle_pause();
/*
* Take all elements on the pending syscall stack at once. Syscalls added
* after this step will be on a new stack.
*/
/* Take all elements on the pending syscall stack at once. Syscalls added
* after this step will be on a new stack. */
struct rt_syscall_record *record =
rt_atomic_exchange(&rt_pending_syscalls, NULL, RT_ATOMIC_ACQUIRE);
bool yield = record == NULL;