For multiprocessor targets, implement the thread_exit routine so that we don't need to rely on the compiler's good graces for stack switching to be reliable. It only needs a few asm instructions.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@26906 a1c6a512-1295-4272-9138-f99709370657
This commit is contained in:
Michael Sevakis 2010-06-18 03:10:18 +00:00
parent b812465bff
commit d9c9fe305c
2 changed files with 29 additions and 21 deletions

View file

@ -199,7 +199,9 @@ static void INIT_ATTR core_thread_init(unsigned int core)
}
/*---------------------------------------------------------------------------
* Switches to a stack that always resides in the Rockbox core.
* Switches to a stack that always resides in the Rockbox core then calls
* the final exit routine to actually finish removing the thread from the
* scheduler.
*
* Needed when a thread suicides on a core other than the main CPU since the
* stack used when idling is the stack of the last thread to run. This stack
@ -207,13 +209,24 @@ static void INIT_ATTR core_thread_init(unsigned int core)
* to use a stack from an unloaded module until another thread runs on it.
*---------------------------------------------------------------------------
*/
static inline void switch_to_idle_stack(const unsigned int core)
static inline void __attribute__((noreturn,always_inline))
thread_final_exit(struct thread_entry *current)
{
asm volatile (
"str sp, [%0] \n" /* save original stack pointer on idle stack */
"mov sp, %0 \n" /* switch stacks */
: : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
(void)core;
"cmp %1, #0 \n" /* CPU? */
"ldrne r0, =cpucache_flush \n" /* No? write back data */
"movne lr, pc \n"
"bxne r0 \n"
"mov r0, %0 \n" /* copy thread parameter */
"mov sp, %2 \n" /* switch to idle stack */
"bl thread_final_exit_do \n" /* finish removal */
: : "r"(current),
"r"(current->core),
"r"(&idle_stacks[current->core][IDLE_STACK_WORDS])
: "r0", "r1", "r2", "r3", "ip", "lr"); /* Because of flush call,
force inputs out
of scratch regs */
while (1);
}
/*---------------------------------------------------------------------------

View file

@ -157,8 +157,8 @@ static inline void load_context(const void* addr)
__attribute__((always_inline));
#if NUM_CORES > 1
static void thread_final_exit(struct thread_entry *current)
__attribute__((noinline, noreturn));
static void thread_final_exit_do(struct thread_entry *current)
__attribute__((noinline, noreturn, used));
#else
static inline void thread_final_exit(struct thread_entry *current)
__attribute__((always_inline, noreturn));
@ -1675,22 +1675,16 @@ void thread_wait(unsigned int thread_id)
/* This is done to foil optimizations that may require the current stack,
* such as optimizing subexpressions that put variables on the stack that
* get used after switching stacks. */
static void thread_final_exit(struct thread_entry *current)
{
#if NUM_CORES > 1
cpucache_flush();
/* Switch to the idle stack if not on the main core (where "main"
* runs) - we can hope gcc doesn't need the old stack beyond this
* point. */
if (current->core != CPU)
{
switch_to_idle_stack(current->core);
}
/* Called by ASM stub */
static void thread_final_exit_do(struct thread_entry *current)
#else
/* No special procedure is required before calling */
static inline void thread_final_exit(struct thread_entry *current)
#endif
{
/* At this point, this thread isn't using resources allocated for
* execution except the slot itself. */
#endif /* NUM_CORES */
/* Signal this thread */
thread_queue_wake(&current->queue);
@ -1746,6 +1740,7 @@ void thread_exit(void)
new_thread_id(current->id, current);
current->name = NULL;
/* Do final cleanup and remove the thread */
thread_final_exit(current);
}