// rt/arch/arm/arm.c

#include <rt/assert.h>
#include <rt/context.h>
#include <rt/cycle.h>
#include <rt/idle.h>
#include <rt/interrupt.h>
#include <rt/log.h>
#include <rt/mpu.h>
#include <rt/stack.h>
#include <rt/syscall.h>
#include <rt/task.h>
#include <rt/trap.h>
#define PROFILE_R (__ARM_ARCH_PROFILE == 'R')
#define PROFILE_M (__ARM_ARCH_PROFILE == 'M')
#define BIG_ENDIAN (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#define V6M ((__ARM_ARCH == 6) && PROFILE_M)
#define V8M ((__ARM_ARCH == 8) && PROFILE_M)
#ifdef __ARM_FP
#define FPU 1
#else
#define FPU 0
#endif
#if PROFILE_R
#include "r/coprocessor.h"
#include "r/mode.h"
#include <vic.h>
#elif PROFILE_M
#include "m/exc_return.h"
#endif
#include <stdbool.h>
#include <stdint.h>
/* Per-task saved register state, laid out at the top of the task's stack.
 * The r0-r3, r12, lr, pc, psr group comes last; the callee-saved r4-r11
 * (and any profile-specific state) precede it. */
struct context
{
#if PROFILE_M && RT_MPU_ENABLE
    /* In M-profile, thread-mode privilege is part of the control register. In
     * A/R-profile, it's part of the processor mode (system vs. user), which is
     * a field of the CPSR. */
    uint32_t control;
#endif
#if V8M
    /* v8-M stack limit register value for this task's stack. */
    void *psplim;
#endif
#if PROFILE_R && FPU
    /* Per-task co-processor access control; starts with the FPU disabled so
     * it can be enabled lazily (see rt_lazy_enable_fp). */
    uint32_t cpacr;
#endif
    // Callee-saved general-purpose registers.
    uint32_t r4, r5, r6, r7, r8, r9, r10, r11;
#if PROFILE_M && FPU
    /* Only use a per-task exception return value if floating-point is enabled,
     * because otherwise the exception return value is always the same. This
     * is the lr value on exception entry, so place it after r4-r11 so it can
     * be saved/restored along with those registers. */
    uint32_t exc_return;
#endif
    // Caller-saved registers plus lr, pc, and the program status register.
    uint32_t r0, r1, r2, r3, r12, lr, pc, psr;
};
#if PROFILE_R
#define PSR_THUMB (UINT32_C(1) << 5)
#define PSR_E (UINT32_C(1) << 9)
#define FPEXC_EN (UINT32_C(1) << 30)
#elif PROFILE_M
#define CONTROL_NPRIV (UINT32_C(1) << 0)
#define CONTROL_SPSEL (UINT32_C(1) << 1)
#define PSR_THUMB (UINT32_C(1) << 24)
#define DWT_LAR (*(volatile uint32_t *)0xE0001FB0)
#define DWT_LAR_UNLOCK UINT32_C(0xC5ACCE55)
#define DEMCR (*(volatile unsigned *)0xE000EDFCU)
#define DEMCR_TRCENA (UINT32_C(1) << 24)
#define DWT_CTRL (*(volatile uint32_t *)0xE0001000U)
#define DWT_CTRL_CYCCNTENA (UINT32_C(1) << 0)
#define DWT_CYCCNT (*(volatile uint32_t *)0xE0001004U)
#endif // PROFILE
/* Build a task's initial register frame at the top of its stack and return
 * a pointer to it. fn_addr is the address of the code the task will start
 * executing; on A/R-profile its low bit selects Thumb state. */
static struct context *context_init(void *stack, size_t stack_size,
                                    uintptr_t fn_addr)
{
    // The stack grows down, so the initial frame sits at the stack's top.
    void *const sp = (char *)stack + stack_size;
    struct context *ctx = sp;
    --ctx;
#if V8M
    // Set the stack limit to the stack's base to catch overflow.
    ctx->psplim = stack;
#endif
#if PROFILE_R
    /* Tasks run in system mode. Select the data endianness to match the
     * build, and Thumb state if the entry address has its low bit set. */
    ctx->psr = MODE_SYS | (BIG_ENDIAN ? PSR_E : 0) |
               (((fn_addr & 1) != 0) ? PSR_THUMB : 0);
#if FPU
    /* Start with co-processor access disabled; the FPU is enabled lazily on
     * first use (see rt_lazy_enable_fp). */
    ctx->cpacr = 0;
#endif
#elif PROFILE_M
    (void)fn_addr;
    // M-profile only executes Thumb, so the T bit must always be set.
    ctx->psr = PSR_THUMB;
#if RT_MPU_ENABLE
    /* Tasks start privileged. The SPSEL bit is RAZ/WI in handler mode where
     * context switches occur. The exception return value specifies which stack
     * pointer is used when returning to thread mode. */
    ctx->control = 0;
#endif
#if FPU
    ctx->exc_return = (uint32_t)TASK_INITIAL_EXC_RETURN;
#endif
#endif // PROFILE
    return ctx;
}
__attribute__((noreturn)) void rt_task_entry(void (*fn)(void));
__attribute__((noreturn)) void rt_task_entry_arg(uintptr_t arg,
void (*fn)(uintptr_t));
/* Create the initial context for a task whose entry function takes no
 * argument. The task starts at rt_task_entry with fn in r0. Returns the
 * context pointer to use as the task's saved stack pointer. */
void *rt_context_init(void (*fn)(void), void *stack, size_t stack_size)
{
    struct context *const frame =
        context_init(stack, stack_size, (uintptr_t)fn);
    frame->r0 = (uint32_t)fn;
    frame->pc = (uint32_t)rt_task_entry;
    return frame;
}
/* Create the initial context for a task whose entry function takes a
 * uintptr_t argument. The task starts at rt_task_entry_arg with the
 * argument in r0 and fn in r1. Returns the context pointer to use as the
 * task's saved stack pointer. */
void *rt_context_init_arg(void (*fn)(uintptr_t), uintptr_t arg, void *stack,
                          size_t stack_size)
{
    struct context *const frame =
        context_init(stack, stack_size, (uintptr_t)fn);
    frame->r0 = arg;
    frame->r1 = (uint32_t)fn;
    frame->pc = (uint32_t)rt_task_entry_arg;
    return frame;
}
/* Default idle loop: give up privilege, then wait for interrupts forever.
 * Weak so applications can supply their own idle implementation. */
__attribute__((noreturn, weak)) void rt_idle(void)
{
    rt_task_drop_privilege();
    while (1)
    {
        // Sleep until the next interrupt.
        __asm__("wfi");
    }
}
/* One-time initialization of the CPU cycle counter read by rt_cycle. */
void rt_cycle_init(void)
{
#if PROFILE_R
    // Enable counters and reset the cycle counter.
    pmcr_oreq(PMCR_E | PMCR_C);
    // Enable the cycle counter.
    pmcntenset_oreq(PMCNTEN_C);
#elif PROFILE_M && (__ARM_ARCH >= 7)
    /* Enable the DWT cycle counter: unlock the lock access register, enable
     * trace via DEMCR, then enable CYCCNT itself. */
    DWT_LAR = DWT_LAR_UNLOCK;
    DEMCR |= DEMCR_TRCENA;
    DWT_CTRL |= DWT_CTRL_CYCCNTENA;
#endif // PROFILE
}
/* Default trap handler: spin on a breakpoint so an attached debugger stops
 * here. Weak so applications can override it. */
__attribute__((noreturn, weak)) void rt_trap(void)
{
    while (1)
    {
        __asm__("bkpt");
    }
}
#if PROFILE_R
// Read the current processor mode field from the CPSR.
static inline uint32_t cpsr_mode(void)
{
    uint32_t cpsr;
    __asm__ __volatile__("mrs %0, cpsr" : "=r"(cpsr));
    return cpsr & MODE_MASK;
}

// Read the pre-exception processor mode from the banked SPSR.
static inline uint32_t spsr_mode(void)
{
    uint32_t spsr;
    __asm__ __volatile__("mrs %0, spsr" : "=r"(spsr));
    return spsr & MODE_MASK;
}

/* True if the given mode is an exception mode rather than one of the modes
 * tasks run in (user or system). */
static inline bool mode_is_interrupt(uint32_t mode)
{
    /*
     * NOTE: this assumes that nested interrupts don't use system mode.
     * Interrupt nesting should use supervisor mode, which doesn't require each
     * task stack to accommodate interrupts.
     */
    return (mode != MODE_USR) && (mode != MODE_SYS);
}
#endif
/* Return true when called from interrupt/exception context. */
bool rt_interrupt_is_active(void)
{
#if PROFILE_R
    return mode_is_interrupt(cpsr_mode());
#elif PROFILE_M
    // The IPSR holds the active exception number; zero means thread mode.
    uint32_t ipsr;
    __asm__ __volatile__("mrs %0, ipsr" : "=r"(ipsr));
    return ipsr != 0;
#endif // PROFILE
}
/* Make a synchronous system call via the supervisor call instruction. The
 * "memory" clobber keeps memory accesses ordered around the call. */
void rt_syscall(void)
{
    __asm__("svc 0" ::: "memory");
}
/* Pend a system call to be handled once interrupt priorities allow. */
void rt_syscall_pend(void)
{
#if PROFILE_R
    vic_syscall_pend();
#elif PROFILE_M
// On M-profile, pend the PendSV exception through the ICSR register.
#define ICSR (*(volatile uint32_t *)0xE000ED04UL)
#define PENDSVSET (UINT32_C(1) << 28)
    ICSR = PENDSVSET;
#endif // PROFILE
}
/* Holds the most recent assertion failure message so a debugger can inspect
 * it; volatile so the store before trapping isn't optimized away. */
static const char *volatile rt_assert_msg;

/* Default assertion handler: record the message and trap when the condition
 * is false. Weak so applications can override it. */
__attribute__((weak)) void rt_assert(bool condition, const char *msg)
{
    if (condition)
    {
        return;
    }
    rt_assert_msg = msg;
    rt_trap();
}
/* Read the current CPU cycle count. Returns 0 when cycle counting is
 * disabled at configuration time or unsupported by the architecture. */
uint32_t rt_cycle(void)
{
#if RT_CYCLE_ENABLE
#if PROFILE_R
    return pmccntr();
#elif PROFILE_M && (__ARM_ARCH >= 7)
    return DWT_CYCCNT;
#else
    // v6-M has no DWT cycle counter.
    return 0;
#endif // PROFILE
#else  // RT_CYCLE_ENABLE
    return 0;
#endif
}
#if PROFILE_R && FPU
/* Lazily enable the FPU for the current task in response to an undefined
 * instruction trap. Returns true if the FPU was enabled here (so the
 * trapping instruction can be retried), false if the trap was not caused
 * by a disabled FPU and must be handled another way. */
bool rt_lazy_enable_fp(void)
{
    /* If the undefined instruction was in an interrupt or the FPU is already
     * enabled, there's nothing to do. */
    if (mode_is_interrupt(spsr_mode()) ||
        ((cpacr() & CPACR_FPU_ENABLE) == CPACR_FPU_ENABLE))
    {
        return false;
    }
    // Enable the FPU and initialize the floating point state.
    cpacr_oreq(CPACR_FPU_ENABLE);
    __asm__("vmsr fpexc, %0" : : "r"(FPEXC_EN));
    __asm__("vmsr fpscr, %0" : : "r"(0));
    // Zero each double-precision register d0-d15 individually.
    __asm__("vmov d0, %0, %0" : : "r"(0));
    __asm__("vmov d1, %0, %0" : : "r"(0));
    __asm__("vmov d2, %0, %0" : : "r"(0));
    __asm__("vmov d3, %0, %0" : : "r"(0));
    __asm__("vmov d4, %0, %0" : : "r"(0));
    __asm__("vmov d5, %0, %0" : : "r"(0));
    __asm__("vmov d6, %0, %0" : : "r"(0));
    __asm__("vmov d7, %0, %0" : : "r"(0));
    __asm__("vmov d8, %0, %0" : : "r"(0));
    __asm__("vmov d9, %0, %0" : : "r"(0));
    __asm__("vmov d10, %0, %0" : : "r"(0));
    __asm__("vmov d11, %0, %0" : : "r"(0));
    __asm__("vmov d12, %0, %0" : : "r"(0));
    __asm__("vmov d13, %0, %0" : : "r"(0));
    __asm__("vmov d14, %0, %0" : : "r"(0));
    __asm__("vmov d15, %0, %0" : : "r"(0));
    return true;
}
#endif
/* Make the calling task unprivileged. */
void rt_task_drop_privilege(void)
{
    /* Use a dsb to ensure any pending writes from the privileged state
     * complete. */
#if PROFILE_R
    // Switch the processor to user mode.
    __asm__("dsb; cps %0" : : "i"(MODE_USR) : "memory");
#elif PROFILE_M && (__ARM_ARCH >= 7) && RT_MPU_ENABLE
    /*
     * We only track per-task privilege in M-profile if the MPU is enabled.
     * Also, dropping privilege on v6-m prevents the use of atomic operations,
     * which require interrupt masking on that architecture.
     */
    // Set the nPRIV bit in CONTROL; the isb makes the change take effect
    // before subsequent instructions execute.
    uint32_t control;
    __asm__ __volatile__("mrs %0, control" : "=r"(control));
    __asm__("dsb; msr control, %0; isb"
            :
            : "r"(control | CONTROL_NPRIV)
            : "memory");
#endif
}
#if V6M
#include "m/atomic-v6.c"
#endif