author     Michael Sevakis <jethead71@rockbox.org>    2010-06-10 17:31:45 +0000
committer  Michael Sevakis <jethead71@rockbox.org>    2010-06-10 17:31:45 +0000
commit     05ca8978c4fe965a619f016d79aaf6955767abf9 (patch)
tree       606a19c322864fa823fda7c0a6daf998f76417e3
parent     863891ce9aef50fde13cf3df897aca144a2c570a (diff)
Clean unused stuff out of thread.h and config.h and reorganize thread-pp.c to simplify the preprocessor blocks.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@26743 a1c6a512-1295-4272-9138-f99709370657
-rw-r--r--  firmware/export/config.h         |  23
-rw-r--r--  firmware/export/thread.h         | 126
-rw-r--r--  firmware/target/arm/thread-pp.c  | 526
3 files changed, 284 insertions(+), 391 deletions(-)
diff --git a/firmware/export/config.h b/firmware/export/config.h
index 2039aa5..5947ca1 100644
--- a/firmware/export/config.h
+++ b/firmware/export/config.h
@@ -793,11 +793,6 @@ Lyre prototype 1 */
#define FORCE_SINGLE_CORE
#endif
-/* Core locking types - specifies type of atomic operation */
-#define CORELOCK_NONE 0
-#define SW_CORELOCK 1 /* Mutual exclusion provided by a software algorithm
- and not a special semaphore instruction */
-
#if defined(CPU_PP)
#define IDLE_STACK_SIZE 0x80
#define IDLE_STACK_WORDS 0x20
@@ -811,6 +806,7 @@ Lyre prototype 1 */
#if !defined(FORCE_SINGLE_CORE)
#define NUM_CORES 2
+#define HAVE_CORELOCK_OBJECT
#define CURRENT_CORE current_core()
/* Attributes for core-shared data in DRAM where IRAM is better used for other
* purposes. */
@@ -821,9 +817,7 @@ Lyre prototype 1 */
#define IF_COP_VOID(...) __VA_ARGS__
#define IF_COP_CORE(core) core
-#define CONFIG_CORELOCK SW_CORELOCK /* SWP(B) is broken */
-
-#endif /* !defined(BOOTLOADER) && CONFIG_CPU != PP5002 */
+#endif /* !defined(FORCE_SINGLE_CORE) */
#endif /* CPU_PP */
@@ -832,18 +826,6 @@ Lyre prototype 1 */
#define NOCACHEDATA_ATTR __attribute__((section(".ncdata"),nocommon))
#endif
-#ifndef CONFIG_CORELOCK
-#define CONFIG_CORELOCK CORELOCK_NONE
-#endif
-
-#if CONFIG_CORELOCK == SW_CORELOCK
-#define IF_SWCL(...) __VA_ARGS__
-#define IFN_SWCL(...)
-#else
-#define IF_SWCL(...)
-#define IFN_SWCL(...) __VA_ARGS__
-#endif /* CONFIG_CORELOCK == */
-
#ifndef NUM_CORES
/* Default to single core */
#define NUM_CORES 1
@@ -855,7 +837,6 @@ Lyre prototype 1 */
#define NOCACHEBSS_ATTR
#define NOCACHEDATA_ATTR
#endif
-#define CONFIG_CORELOCK CORELOCK_NONE
#define IF_COP(...)
#define IF_COP_VOID(...) void
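Net effect of the config.h hunks: the old value-style option (CONFIG_CORELOCK
compared against CORELOCK_NONE/SW_CORELOCK, plus the IF_SWCL/IFN_SWCL helpers)
gives way to a single presence-test macro that only multi-core PP builds
define. A minimal sketch of the two test styles, using a hypothetical client
declaration:

/* Before: every configuration had to define CONFIG_CORELOCK, if only
 * to CORELOCK_NONE, for this comparison to compile at all. */
#if CONFIG_CORELOCK == SW_CORELOCK
static struct corelock shared_cl;
#endif

/* After: a plain feature test; configurations without corelocks need
 * no default boilerplate. */
#ifdef HAVE_CORELOCK_OBJECT
static struct corelock shared_cl;
#endif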
diff --git a/firmware/export/thread.h b/firmware/export/thread.h
index a26b596..8912283 100644
--- a/firmware/export/thread.h
+++ b/firmware/export/thread.h
@@ -109,6 +109,23 @@ struct regs
uint32_t lr; /* 36 - r14 (lr) */
uint32_t start; /* 40 - Thread start address, or NULL when started */
};
+
+#ifdef CPU_PP
+#ifdef HAVE_CORELOCK_OBJECT
+/* No reliable atomic instruction available - use Peterson's algorithm */
+struct corelock
+{
+ volatile unsigned char myl[NUM_CORES];
+ volatile unsigned char turn;
+} __attribute__((packed));
+
+/* Too big to inline everywhere */
+void corelock_init(struct corelock *cl);
+void corelock_lock(struct corelock *cl);
+int corelock_try_lock(struct corelock *cl);
+void corelock_unlock(struct corelock *cl);
+#endif /* HAVE_CORELOCK_OBJECT */
+#endif /* CPU_PP */
#elif defined(CPU_MIPS)
struct regs
{
@@ -162,26 +179,13 @@ struct thread_list
struct thread_entry *next; /* Next thread in a list */
};
-/* Small objects for core-wise mutual exclusion */
-#if CONFIG_CORELOCK == SW_CORELOCK
-/* No reliable atomic instruction available - use Peterson's algorithm */
-struct corelock
-{
- volatile unsigned char myl[NUM_CORES];
- volatile unsigned char turn;
-} __attribute__((packed));
-
-void corelock_init(struct corelock *cl);
-void corelock_lock(struct corelock *cl);
-int corelock_try_lock(struct corelock *cl);
-void corelock_unlock(struct corelock *cl);
-#else
+#ifndef HAVE_CORELOCK_OBJECT
/* No atomic corelock op needed or just none defined */
#define corelock_init(cl)
#define corelock_lock(cl)
#define corelock_try_lock(cl)
#define corelock_unlock(cl)
-#endif /* core locking selection */
+#endif /* HAVE_CORELOCK_OBJECT */
#ifdef HAVE_PRIORITY_SCHEDULING
struct blocker
@@ -341,98 +345,6 @@ struct core_entry
#define IFN_PRIO(...) __VA_ARGS__
#endif
-/* Macros generate better code than an inline function in this case */
-#if defined (CPU_ARM)
-/* atomic */
-#if CONFIG_CORELOCK == SW_CORELOCK
-#define test_and_set(a, v, cl) \
- xchg8((a), (v), (cl))
-/* atomic */
-#define xchg8(a, v, cl) \
-({ uint32_t o; \
- corelock_lock(cl); \
- o = *(uint8_t *)(a); \
- *(uint8_t *)(a) = (v); \
- corelock_unlock(cl); \
- o; })
-#define xchg32(a, v, cl) \
-({ uint32_t o; \
- corelock_lock(cl); \
- o = *(uint32_t *)(a); \
- *(uint32_t *)(a) = (v); \
- corelock_unlock(cl); \
- o; })
-#define xchgptr(a, v, cl) \
-({ typeof (*(a)) o; \
- corelock_lock(cl); \
- o = *(a); \
- *(a) = (v); \
- corelock_unlock(cl); \
- o; })
-#endif /* locking selection */
-#elif defined (CPU_COLDFIRE)
-/* atomic */
-/* one branch will be optimized away if v is a constant expression */
-#define test_and_set(a, v, ...) \
-({ uint32_t o = 0; \
- if (v) { \
- asm volatile ( \
- "bset.b #0, (%0)" \
- : : "a"((uint8_t*)(a)) \
- : "cc"); \
- } else { \
- asm volatile ( \
- "bclr.b #0, (%0)" \
- : : "a"((uint8_t*)(a)) \
- : "cc"); \
- } \
- asm volatile ("sne.b %0" \
- : "+d"(o)); \
- o; })
-#elif CONFIG_CPU == SH7034
-/* atomic */
-#define test_and_set(a, v, ...) \
-({ uint32_t o; \
- asm volatile ( \
- "tas.b @%2 \n" \
- "mov #-1, %0 \n" \
- "negc %0, %0 \n" \
- : "=r"(o) \
- "M"((uint32_t)(v)), /* Value of v must be 1 */ \
- "r"((uint8_t *)(a))); \
- o; })
-#endif /* CONFIG_CPU == */
-
-/* defaults for no asm version */
-#ifndef test_and_set
-/* not atomic */
-#define test_and_set(a, v, ...) \
-({ uint32_t o = *(uint8_t *)(a); \
- *(uint8_t *)(a) = (v); \
- o; })
-#endif /* test_and_set */
-#ifndef xchg8
-/* not atomic */
-#define xchg8(a, v, ...) \
-({ uint32_t o = *(uint8_t *)(a); \
- *(uint8_t *)(a) = (v); \
- o; })
-#endif /* xchg8 */
-#ifndef xchg32
-/* not atomic */
-#define xchg32(a, v, ...) \
-({ uint32_t o = *(uint32_t *)(a); \
- *(uint32_t *)(a) = (v); \
- o; })
-#endif /* xchg32 */
-#ifndef xchgptr
-/* not atomic */
-#define xchgptr(a, v, ...) \
-({ typeof (*(a)) o = *(a); \
- *(a) = (v); \
- o; })
-#endif /* xchgptr */
-
void core_idle(void);
void core_wake(IF_COP_VOID(unsigned int core));
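The corelock declared above is a two-core Peterson lock: myl[] holds each
core's claim and turn breaks ties. For comparison, a textbook sketch of the
same algorithm in illustrative C with 0/1 flags (the Rockbox version stores
the complementary processor-ID bitmasks noted in thread-pp.c; in-order memory
access is assumed, as on the PP's ARM cores):

volatile int interested[2];   /* one claim flag per core */
volatile int turn;            /* loser of a tie waits */

void peterson_lock(int self)
{
    int other = 1 - self;
    interested[self] = 1;               /* announce intent to enter */
    turn = other;                       /* concede priority to the other side */
    while (interested[other] && turn == other)
        ;                               /* spin only while contended */
}

void peterson_unlock(int self)
{
    interested[self] = 0;               /* drop the claim */
}

Client code looks the same on every target; a hypothetical caller (queue_cl
and queue_len are illustrative names, not from the patch):

static struct corelock queue_cl;        /* shared between CPU and COP */
static int queue_len;

void queue_count_one(void)
{
    corelock_lock(&queue_cl);           /* spins until this core owns it */
    queue_len++;                        /* the other core is excluded here */
    corelock_unlock(&queue_cl);
}

On single-core builds the stub macros above compile the locking away and only
the increment remains.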
diff --git a/firmware/target/arm/thread-pp.c b/firmware/target/arm/thread-pp.c
index 8dfbd64..335f1f3 100644
--- a/firmware/target/arm/thread-pp.c
+++ b/firmware/target/arm/thread-pp.c
@@ -26,7 +26,21 @@
#define IF_NO_SKIP_YIELD(...) __VA_ARGS__
#endif
-#if NUM_CORES > 1
+#if NUM_CORES == 1
+/* Single-core variants for FORCE_SINGLE_CORE */
+static inline void core_sleep(void)
+{
+ sleep_core(CURRENT_CORE);
+ enable_irq();
+}
+
+/* Shared single-core build debugging version */
+void core_wake(void)
+{
+ /* No wakey - core already wakey (because this is it) */
+}
+#else /* NUM_CORES > 1 */
+/** Model-generic PP dual-core code **/
extern uintptr_t cpu_idlestackbegin[];
extern uintptr_t cpu_idlestackend[];
extern uintptr_t cop_idlestackbegin[];
@@ -37,23 +51,7 @@ static uintptr_t * const idle_stacks[NUM_CORES] =
[COP] = cop_idlestackbegin
};
-#if CONFIG_CPU == PP5002
-/* Bytes to emulate the PP502x mailbox bits */
-struct core_semaphores
-{
- volatile uint8_t intend_wake; /* 00h */
- volatile uint8_t stay_awake; /* 01h */
- volatile uint8_t intend_sleep; /* 02h */
- volatile uint8_t unused; /* 03h */
-};
-
-static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
-#endif /* CONFIG_CPU == PP5002 */
-
-#endif /* NUM_CORES */
-
-#if CONFIG_CORELOCK == SW_CORELOCK
-/* Software core locks using Peterson's mutual exclusion algorithm */
+/* Core locks using Peterson's mutual exclusion algorithm */
/*---------------------------------------------------------------------------
* Initialize the corelock structure.
@@ -69,8 +67,7 @@ void corelock_init(struct corelock *cl)
* Wait for the corelock to become free and acquire it when it does.
*---------------------------------------------------------------------------
*/
-void corelock_lock(struct corelock *cl) __attribute__((naked));
-void corelock_lock(struct corelock *cl)
+void __attribute__((naked)) corelock_lock(struct corelock *cl)
{
/* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
asm volatile (
@@ -96,8 +93,7 @@ void corelock_lock(struct corelock *cl)
* Try to acquire the corelock. If free, caller gets it, otherwise return 0.
*---------------------------------------------------------------------------
*/
-int corelock_try_lock(struct corelock *cl) __attribute__((naked));
-int corelock_try_lock(struct corelock *cl)
+int __attribute__((naked)) corelock_try_lock(struct corelock *cl)
{
/* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
asm volatile (
@@ -125,8 +121,7 @@ int corelock_try_lock(struct corelock *cl)
* Release ownership of the corelock
*---------------------------------------------------------------------------
*/
-void corelock_unlock(struct corelock *cl) __attribute__((naked));
-void corelock_unlock(struct corelock *cl)
+void __attribute__((naked)) corelock_unlock(struct corelock *cl)
{
asm volatile (
"mov r1, %0 \n" /* r1 = PROCESSOR_ID */
@@ -138,11 +133,9 @@ void corelock_unlock(struct corelock *cl)
);
(void)cl;
}
+
#else /* C versions for reference */
-/*---------------------------------------------------------------------------
- * Wait for the corelock to become free and acquire it when it does.
- *---------------------------------------------------------------------------
- */
+
void corelock_lock(struct corelock *cl)
{
const unsigned int core = CURRENT_CORE;
@@ -158,10 +151,6 @@ void corelock_lock(struct corelock *cl)
}
}
-/*---------------------------------------------------------------------------
- * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
- *---------------------------------------------------------------------------
- */
int corelock_try_lock(struct corelock *cl)
{
const unsigned int core = CURRENT_CORE;
@@ -179,85 +168,141 @@ int corelock_try_lock(struct corelock *cl)
return 0;
}
-/*---------------------------------------------------------------------------
- * Release ownership of the corelock
- *---------------------------------------------------------------------------
- */
void corelock_unlock(struct corelock *cl)
{
cl->myl[CURRENT_CORE] = 0;
}
#endif /* ASM / C selection */
-#endif /* CONFIG_CORELOCK == SW_CORELOCK */
-
/*---------------------------------------------------------------------------
- * Put core in a power-saving state if waking list wasn't repopulated and if
- * no other core requested a wakeup for it to perform a task.
+ * Do any device-specific inits for the threads and synchronize the kernel
+ * initializations.
*---------------------------------------------------------------------------
*/
-#ifdef CPU_PP502x
-#if NUM_CORES == 1
-static inline void core_sleep(void)
+static void INIT_ATTR core_thread_init(unsigned int core)
{
- sleep_core(CURRENT_CORE);
- enable_irq();
+ if (core == CPU)
+ {
+ /* Wake up coprocessor and let it initialize kernel and threads */
+#ifdef CPU_PP502x
+ MBX_MSG_CLR = 0x3f;
+#endif
+ wake_core(COP);
+ /* Sleep until COP has finished */
+ sleep_core(CPU);
+ }
+ else
+ {
+ /* Wake the CPU and return */
+ wake_core(CPU);
+ }
}
-#else
-static inline void core_sleep(unsigned int core)
+
+/*---------------------------------------------------------------------------
+ * Switches to a stack that always resides in the Rockbox core.
+ *
+ * Needed when a thread suicides on a core other than the main CPU since the
+ * stack used when idling is the stack of the last thread to run. This stack
+ * may not reside in the core firmware in which case the core will continue
+ * to use a stack from an unloaded module until another thread runs on it.
+ *---------------------------------------------------------------------------
+ */
+static inline void switch_to_idle_stack(const unsigned int core)
{
-#if 1
asm volatile (
- "mov r0, #4 \n" /* r0 = 0x4 << core */
- "mov r0, r0, lsl %[c] \n"
- "str r0, [%[mbx], #4] \n" /* signal intent to sleep */
- "ldr r1, [%[mbx], #0] \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
- "tst r1, r0, lsl #2 \n"
- "moveq r1, #0x80000000 \n" /* Then sleep */
- "streq r1, [%[ctl], %[c], lsl #2] \n"
- "moveq r1, #0 \n" /* Clear control reg */
- "streq r1, [%[ctl], %[c], lsl #2] \n"
- "orr r1, r0, r0, lsl #2 \n" /* Signal intent to wake - clear wake flag */
- "str r1, [%[mbx], #8] \n"
- "1: \n" /* Wait for wake procedure to finish */
- "ldr r1, [%[mbx], #0] \n"
- "tst r1, r0, lsr #2 \n"
- "bne 1b \n"
- :
- : [ctl]"r"(&CPU_CTL), [mbx]"r"(MBX_BASE), [c]"r"(core)
- : "r0", "r1");
-#else /* C version for reference */
- /* Signal intent to sleep */
- MBX_MSG_SET = 0x4 << core;
+ "str sp, [%0] \n" /* save original stack pointer on idle stack */
+ "mov sp, %0 \n" /* switch stacks */
+ : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
+ (void)core;
+}
- /* Something waking or other processor intends to wake us? */
- if ((MBX_MSG_STAT & (0x10 << core)) == 0)
- {
- sleep_core(core);
- wake_core(core);
- }
+/*---------------------------------------------------------------------------
+ * Perform core switch steps that need to take place inside switch_thread.
+ *
+ * These steps must take place before changing the processor and after
+ * having entered switch_thread since switch_thread may not do a normal return
+ * because the stack being used for anything the compiler saved will not belong
+ * to the thread's destination core and it may have been recycled for other
+ * purposes by the time a normal context load has taken place. switch_thread
+ * will also clobber anything stashed in the thread's context or stored in the
+ * nonvolatile registers if it is saved there before the call since the
+ * compiler's order of operations cannot be known for certain.
+ */
+static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
+{
+ /* Flush our data to ram */
+ cpucache_flush();
+ /* Stash thread in r4 slot */
+ thread->context.r[0] = (uint32_t)thread;
+ /* Stash restart address in r5 slot */
+ thread->context.r[1] = thread->context.start;
+ /* Save sp in context.sp while still running on old core */
+ thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
+}
- /* Signal wake - clear wake flag */
- MBX_MSG_CLR = 0x14 << core;
+/*---------------------------------------------------------------------------
+ * Machine-specific helper function for switching the processor a thread is
+ * running on. Basically, the thread suicides on the departing core and is
+ * reborn on the destination. Were it not for gcc's ill-behavior regarding
+ * naked functions written in C where it actually clobbers non-volatile
+ * registers before the intended prologue code, this would all be much
+ * simpler. Generic setup is done in switch_core itself.
+ */
- /* Wait for other processor to finish wake procedure */
- while (MBX_MSG_STAT & (0x1 << core));
-#endif /* ASM/C selection */
- enable_irq();
-}
-#endif /* NUM_CORES */
-#elif CONFIG_CPU == PP5002
-#if NUM_CORES == 1
-static inline void core_sleep(void)
+/*---------------------------------------------------------------------------
+ * This actually performs the core switch.
+ */
+static void __attribute__((naked))
+ switch_thread_core(unsigned int core, struct thread_entry *thread)
{
- sleep_core(CURRENT_CORE);
- enable_irq();
+ /* Pure asm for this because compiler behavior isn't sufficiently predictable.
+ * Stack access also isn't permitted until restoring the original stack and
+ * context. */
+ asm volatile (
+ "stmfd sp!, { r4-r11, lr } \n" /* Stack all non-volatile context on current core */
+ "ldr r2, =idle_stacks \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
+ "ldr r2, [r2, r0, lsl #2] \n"
+ "add r2, r2, %0*4 \n"
+ "stmfd r2!, { sp } \n" /* save original stack pointer on idle stack */
+ "mov sp, r2 \n" /* switch stacks */
+ "adr r2, 1f \n" /* r2 = new core restart address */
+ "str r2, [r1, #40] \n" /* thread->context.start = r2 */
+ "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
+ "1: \n"
+ "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
+ "mov r1, #0 \n" /* Clear start address */
+ "str r1, [r0, #40] \n"
+ "ldr r0, =cpucache_invalidate \n" /* Invalidate new core's cache */
+ "mov lr, pc \n"
+ "bx r0 \n"
+ "ldmfd sp!, { r4-r11, pc } \n" /* Restore non-volatile context to new core and return */
+ : : "i"(IDLE_STACK_WORDS)
+ );
+ (void)core; (void)thread;
}
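The hard-coded offsets in the asm above map onto struct regs from thread.h; a
sketch of the assumed ARM layout, pieced together from the offset comments in
the header and the "r4 slot"/"r5 slot" comments in core_switch_blk_op:

/* Assumed layout behind the [r0, #32] / [r1, #40] accesses: */
struct regs_sketch {
    uint32_t r[8];    /*  0..28: r4-r11; r[0] is the "r4 slot", r[1] "r5" */
    uint32_t sp;      /*  32: "ldr sp, [r0, #32]" reloads the original sp */
    uint32_t lr;      /*  36: per the offset comment in thread.h */
    uint32_t start;   /*  40: "str r2, [r1, #40]" sets the restart address */
};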
-#else
-/* PP5002 has no mailboxes - emulate using bytes */
+
+/** PP-model-specific dual-core code **/
+
+#if CONFIG_CPU == PP5002
+/* PP5002 has no mailboxes - use bytes to emulate the PP502x mailbox bits */
+struct core_semaphores
+{
+ volatile uint8_t intend_wake; /* 00h */
+ volatile uint8_t stay_awake; /* 01h */
+ volatile uint8_t intend_sleep; /* 02h */
+ volatile uint8_t unused; /* 03h */
+};
+
+static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
+
+#if 1 /* Select ASM */
+/*---------------------------------------------------------------------------
+ * Put core in a power-saving state if waking list wasn't repopulated and if
+ * no other core requested a wakeup for it to perform a task.
+ *---------------------------------------------------------------------------
+ */
static inline void core_sleep(unsigned int core)
{
-#if 1
asm volatile (
"mov r0, #1 \n" /* Signal intent to sleep */
"strb r0, [%[sem], #2] \n"
@@ -288,7 +333,50 @@ static inline void core_sleep(unsigned int core)
[ctl]"r"(&CPU_CTL)
: "r0"
);
+ enable_irq();
+}
+
+/*---------------------------------------------------------------------------
+ * Wake another processor core that is sleeping or prevent it from doing so
+ * if it was already destined. FIQ, IRQ should be disabled before calling.
+ *---------------------------------------------------------------------------
+ */
+void core_wake(unsigned int othercore)
+{
+ /* avoid r0 since that contains othercore */
+ asm volatile (
+ "mrs r3, cpsr \n" /* Disable IRQ */
+ "orr r1, r3, #0x80 \n"
+ "msr cpsr_c, r1 \n"
+ "mov r1, #1 \n" /* Signal intent to wake other core */
+ "orr r1, r1, r1, lsl #8 \n" /* and set stay_awake */
+ "strh r1, [%[sem], #0] \n"
+ "mov r2, #0x8000 \n"
+ "1: \n" /* If it intends to sleep, let it first */
+ "ldrb r1, [%[sem], #2] \n" /* intend_sleep != 0 ? */
+ "cmp r1, #1 \n"
+ "ldr r1, [%[st]] \n" /* && not sleeping ? */
+ "tsteq r1, r2, lsr %[oc] \n"
+ "beq 1b \n" /* Wait for sleep or wake */
+ "tst r1, r2, lsr %[oc] \n"
+ "ldrne r2, =0xcf004054 \n" /* If sleeping, wake it */
+ "movne r1, #0xce \n"
+ "strne r1, [r2, %[oc], lsl #2] \n"
+ "mov r1, #0 \n" /* Done with wake procedure */
+ "strb r1, [%[sem], #0] \n"
+ "msr cpsr_c, r3 \n" /* Restore IRQ */
+ :
+ : [sem]"r"(&core_semaphores[othercore]),
+ [st]"r"(&PROC_STAT),
+ [oc]"r"(othercore)
+ : "r1", "r2", "r3"
+ );
+}
+
#else /* C version for reference */
+
+static inline void core_sleep(unsigned int core)
+{
/* Signal intent to sleep */
core_semaphores[core].intend_sleep = 1;
@@ -306,27 +394,71 @@ static inline void core_sleep(unsigned int core)
while (core_semaphores[core].intend_wake != 0);
/* Enable IRQ */
-#endif /* ASM/C selection */
enable_irq();
}
-#endif /* NUM_CORES */
-#endif /* PP CPU type */
+void core_wake(unsigned int othercore)
+{
+ /* Disable interrupts - avoid reentrancy from the tick */
+ int oldlevel = disable_irq_save();
+
+ /* Signal intent to wake other processor - set stay awake */
+ core_semaphores[othercore].intend_wake = 1;
+ core_semaphores[othercore].stay_awake = 1;
+
+ /* If it intends to sleep, wait until it does or aborts */
+ while (core_semaphores[othercore].intend_sleep != 0 &&
+ (PROC_STAT & PROC_SLEEPING(othercore)) == 0);
+
+ /* If sleeping, wake it up */
+ if (PROC_STAT & PROC_SLEEPING(othercore))
+ wake_core(othercore);
+
+ /* Done with wake procedure */
+ core_semaphores[othercore].intend_wake = 0;
+ restore_irq(oldlevel);
+}
+#endif /* ASM/C selection */
+
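Because the asm and C versions of this handshake end up in different hunks, an
illustrative timeline of the byte-semaphore protocol may help (the stay_awake
test in the middle of core_sleep lies outside the visible context and is
inferred from the field names and the core_wake comment):

/*  sleeper: core_sleep(core)          waker: core_wake(othercore)
 *  ---------------------------        -----------------------------------
 *  intend_sleep = 1                   intend_wake = 1, stay_awake = 1
 *  stay_awake set? skip the sleep     wait until sleeper sleeps or aborts
 *  sleep_core(core)                   if asleep, wake it via 0xcf004054
 *  ...woken...                        intend_wake = 0
 *  wait while intend_wake != 0
 *  enable_irq()
 */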
+#elif defined (CPU_PP502x)
+
+#if 1 /* Select ASM */
/*---------------------------------------------------------------------------
- * Wake another processor core that is sleeping or prevent it from doing so
- * if it was already destined. FIQ, IRQ should be disabled before calling.
+ * Put core in a power-saving state if waking list wasn't repopulated and if
+ * no other core requested a wakeup for it to perform a task.
*---------------------------------------------------------------------------
*/
-#if NUM_CORES == 1
-/* Shared single-core build debugging version */
-void core_wake(void)
+static inline void core_sleep(unsigned int core)
{
- /* No wakey - core already wakey */
+ asm volatile (
+ "mov r0, #4 \n" /* r0 = 0x4 << core */
+ "mov r0, r0, lsl %[c] \n"
+ "str r0, [%[mbx], #4] \n" /* signal intent to sleep */
+ "ldr r1, [%[mbx], #0] \n" /* && !(MBX_MSG_STAT & (0x10<<core)) ? */
+ "tst r1, r0, lsl #2 \n"
+ "moveq r1, #0x80000000 \n" /* Then sleep */
+ "streq r1, [%[ctl], %[c], lsl #2] \n"
+ "moveq r1, #0 \n" /* Clear control reg */
+ "streq r1, [%[ctl], %[c], lsl #2] \n"
+ "orr r1, r0, r0, lsl #2 \n" /* Signal intent to wake - clear wake flag */
+ "str r1, [%[mbx], #8] \n"
+ "1: \n" /* Wait for wake procedure to finish */
+ "ldr r1, [%[mbx], #0] \n"
+ "tst r1, r0, lsr #2 \n"
+ "bne 1b \n"
+ :
+ : [ctl]"r"(&CPU_CTL), [mbx]"r"(MBX_BASE), [c]"r"(core)
+ : "r0", "r1");
+ enable_irq();
}
-#elif defined (CPU_PP502x)
+
+/*---------------------------------------------------------------------------
+ * Wake another processor core that is sleeping or prevent it from doing so
+ * if it was already destined. FIQ, IRQ should be disabled before calling.
+ *---------------------------------------------------------------------------
+ */
void core_wake(unsigned int othercore)
{
-#if 1
/* avoid r0 since that contains othercore */
asm volatile (
"mrs r3, cpsr \n" /* Disable IRQ */
@@ -352,190 +484,58 @@ void core_wake(unsigned int othercore)
: [ctl]"r"(&PROC_CTL(CPU)), [mbx]"r"(MBX_BASE),
[oc]"r"(othercore)
: "r1", "r2", "r3");
+}
+
#else /* C version for reference */
- /* Disable interrupts - avoid reentrancy from the tick */
- int oldlevel = disable_irq_save();
- /* Signal intent to wake other processor - set stay awake */
- MBX_MSG_SET = 0x11 << othercore;
+static inline void core_sleep(unsigned int core)
+{
+ /* Signal intent to sleep */
+ MBX_MSG_SET = 0x4 << core;
- /* If it intends to sleep, wait until it does or aborts */
- while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
- (PROC_CTL(othercore) & PROC_SLEEP) == 0);
+ /* Something waking or other processor intends to wake us? */
+ if ((MBX_MSG_STAT & (0x10 << core)) == 0)
+ {
+ sleep_core(core);
+ wake_core(core);
+ }
- /* If sleeping, wake it up */
- if (PROC_CTL(othercore) & PROC_SLEEP)
- PROC_CTL(othercore) = 0;
+ /* Signal wake - clear wake flag */
+ MBX_MSG_CLR = 0x14 << core;
- /* Done with wake procedure */
- MBX_MSG_CLR = 0x1 << othercore;
- restore_irq(oldlevel);
-#endif /* ASM/C selection */
+ /* Wait for other processor to finish wake procedure */
+ while (MBX_MSG_STAT & (0x1 << core));
+ enable_irq();
}
-#elif CONFIG_CPU == PP5002
-/* PP5002 has no mailboxes - emulate using bytes */
+
void core_wake(unsigned int othercore)
{
-#if 1
- /* avoid r0 since that contains othercore */
- asm volatile (
- "mrs r3, cpsr \n" /* Disable IRQ */
- "orr r1, r3, #0x80 \n"
- "msr cpsr_c, r1 \n"
- "mov r1, #1 \n" /* Signal intent to wake other core */
- "orr r1, r1, r1, lsl #8 \n" /* and set stay_awake */
- "strh r1, [%[sem], #0] \n"
- "mov r2, #0x8000 \n"
- "1: \n" /* If it intends to sleep, let it first */
- "ldrb r1, [%[sem], #2] \n" /* intend_sleep != 0 ? */
- "cmp r1, #1 \n"
- "ldr r1, [%[st]] \n" /* && not sleeping ? */
- "tsteq r1, r2, lsr %[oc] \n"
- "beq 1b \n" /* Wait for sleep or wake */
- "tst r1, r2, lsr %[oc] \n"
- "ldrne r2, =0xcf004054 \n" /* If sleeping, wake it */
- "movne r1, #0xce \n"
- "strne r1, [r2, %[oc], lsl #2] \n"
- "mov r1, #0 \n" /* Done with wake procedure */
- "strb r1, [%[sem], #0] \n"
- "msr cpsr_c, r3 \n" /* Restore IRQ */
- :
- : [sem]"r"(&core_semaphores[othercore]),
- [st]"r"(&PROC_STAT),
- [oc]"r"(othercore)
- : "r1", "r2", "r3"
- );
-#else /* C version for reference */
/* Disable interrupts - avoid reentrancy from the tick */
int oldlevel = disable_irq_save();
/* Signal intent to wake other processor - set stay awake */
- core_semaphores[othercore].intend_wake = 1;
- core_semaphores[othercore].stay_awake = 1;
+ MBX_MSG_SET = 0x11 << othercore;
/* If it intends to sleep, wait until it does or aborts */
- while (core_semaphores[othercore].intend_sleep != 0 &&
- (PROC_STAT & PROC_SLEEPING(othercore)) == 0);
+ while ((MBX_MSG_STAT & (0x4 << othercore)) != 0 &&
+ (PROC_CTL(othercore) & PROC_SLEEP) == 0);
/* If sleeping, wake it up */
- if (PROC_STAT & PROC_SLEEPING(othercore))
- wake_core(othercore);
+ if (PROC_CTL(othercore) & PROC_SLEEP)
+ PROC_CTL(othercore) = 0;
/* Done with wake procedure */
- core_semaphores[othercore].intend_wake = 0;
+ MBX_MSG_CLR = 0x1 << othercore;
restore_irq(oldlevel);
-#endif /* ASM/C selection */
-}
-#endif /* CPU type */
-
-#if NUM_CORES > 1
-/*---------------------------------------------------------------------------
- * Switches to a stack that always resides in the Rockbox core.
- *
- * Needed when a thread suicides on a core other than the main CPU since the
- * stack used when idling is the stack of the last thread to run. This stack
- * may not reside in the core firmware in which case the core will continue
- * to use a stack from an unloaded module until another thread runs on it.
- *---------------------------------------------------------------------------
- */
-static inline void switch_to_idle_stack(const unsigned int core)
-{
- asm volatile (
- "str sp, [%0] \n" /* save original stack pointer on idle stack */
- "mov sp, %0 \n" /* switch stacks */
- : : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
- (void)core;
-}
-
-/*---------------------------------------------------------------------------
- * Perform core switch steps that need to take place inside switch_thread.
- *
- * These steps must take place before changing the processor and after
- * having entered switch_thread since switch_thread may not do a normal return
- * because the stack being used for anything the compiler saved will not belong
- * to the thread's destination core and it may have been recycled for other
- * purposes by the time a normal context load has taken place. switch_thread
- * will also clobber anything stashed in the thread's context or stored in the
- * nonvolatile registers if it is saved there before the call since the
- * compiler's order of operations cannot be known for certain.
- */
-static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
-{
- /* Flush our data to ram */
- cpucache_flush();
- /* Stash thread in r4 slot */
- thread->context.r[0] = (uint32_t)thread;
- /* Stash restart address in r5 slot */
- thread->context.r[1] = thread->context.start;
- /* Save sp in context.sp while still running on old core */
- thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
}
+#endif /* ASM/C selection */
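The PP502x variant drives the same protocol through the mailbox registers; the
per-core bit assignments below are read off the C reference version above, not
stated in the patch itself:

/*  0x01 << core : wake procedure in progress (set by the waker, spun on
 *                 by the sleeper after waking, cleared by the waker)
 *  0x04 << core : this core intends to sleep (set by the sleeper)
 *  0x10 << core : another core wants this one awake; vetoes the sleep
 *                 (set by the waker, cleared by the sleeper)
 *  Hence MBX_MSG_SET = 0x11 << othercore in core_wake, and
 *  MBX_MSG_CLR = 0x14 << core when the sleeper finishes waking. */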
-/*---------------------------------------------------------------------------
- * Machine-specific helper function for switching the processor a thread is
- * running on. Basically, the thread suicides on the departing core and is
- * reborn on the destination. Were it not for gcc's ill-behavior regarding
- * naked functions written in C where it actually clobbers non-volatile
- * registers before the intended prologue code, this would all be much
- * simpler. Generic setup is done in switch_core itself.
- */
+#endif /* CPU_PPxxxx */
-/*---------------------------------------------------------------------------
- * This actually performs the core switch.
- */
-static void __attribute__((naked))
- switch_thread_core(unsigned int core, struct thread_entry *thread)
+/* Keep constant pool in range of inline ASM */
+static void __attribute__((naked, used)) dump_ltorg(void)
{
- /* Pure asm for this because compiler behavior isn't sufficiently predictable.
- * Stack access also isn't permitted until restoring the original stack and
- * context. */
- asm volatile (
- "stmfd sp!, { r4-r11, lr } \n" /* Stack all non-volatile context on current core */
- "ldr r2, =idle_stacks \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
- "ldr r2, [r2, r0, lsl #2] \n"
- "add r2, r2, %0*4 \n"
- "stmfd r2!, { sp } \n" /* save original stack pointer on idle stack */
- "mov sp, r2 \n" /* switch stacks */
- "adr r2, 1f \n" /* r2 = new core restart address */
- "str r2, [r1, #40] \n" /* thread->context.start = r2 */
- "ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
- "1: \n"
- "ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
- "mov r1, #0 \n" /* Clear start address */
- "str r1, [r0, #40] \n"
- "ldr r0, =cpucache_invalidate \n" /* Invalidate new core's cache */
- "mov lr, pc \n"
- "bx r0 \n"
- "ldmfd sp!, { r4-r11, pc } \n" /* Restore non-volatile context to new core and return */
- ".ltorg \n" /* Dump constant pool */
- : : "i"(IDLE_STACK_WORDS)
- );
- (void)core; (void)thread;
+ asm volatile (".ltorg");
}
-/*---------------------------------------------------------------------------
- * Do any device-specific inits for the threads and synchronize the kernel
- * initializations.
- *---------------------------------------------------------------------------
- */
-static void core_thread_init(unsigned int core) INIT_ATTR;
-static void core_thread_init(unsigned int core)
-{
- if (core == CPU)
- {
- /* Wake up coprocessor and let it initialize kernel and threads */
-#ifdef CPU_PP502x
- MBX_MSG_CLR = 0x3f;
-#endif
- wake_core(COP);
- /* Sleep until COP has finished */
- sleep_core(CPU);
- }
- else
- {
- /* Wake the CPU and return */
- wake_core(CPU);
- }
-}
#endif /* NUM_CORES */
-
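A closing note on dump_ltorg() above: loads of the form "ldr rN, =symbol" make
the assembler park the 32-bit address in a literal pool and fetch it with a
pc-relative load, which on ARM reaches only about 4KB. The empty naked, used
function exists solely so ".ltorg" flushes the pool near the asm that needs it,
replacing the ".ltorg" previously embedded in switch_thread_core itself.
Roughly what the assembler emits (illustrative, not from the patch):

    ldr     r0, =cpucache_invalidate    @ as written in switch_thread_core...
    @ ...is assembled into a pc-relative load from the nearest pool:
    ldr     r0, .Lpool_entry
    ...
.Lpool_entry:                           @ placed at the next .ltorg
    .word   cpucache_invalidate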