For multiprocessor targets, do the thread_exit routine such that we don't need to rely on the compiler's good graces to have stack switching be reliable. Only needs a few asm instructions.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@26906 a1c6a512-1295-4272-9138-f99709370657
This commit is contained in:
Michael Sevakis 2010-06-18 03:10:18 +00:00
parent b812465bff
commit d9c9fe305c
2 changed files with 29 additions and 21 deletions

View file

@@ -199,7 +199,9 @@ static void INIT_ATTR core_thread_init(unsigned int core)
} }
/*--------------------------------------------------------------------------- /*---------------------------------------------------------------------------
* Switches to a stack that always resides in the Rockbox core. * Switches to a stack that always resides in the Rockbox core then calls
* the final exit routine to actually finish removing the thread from the
* scheduler.
* *
* Needed when a thread suicides on a core other than the main CPU since the * Needed when a thread suicides on a core other than the main CPU since the
* stack used when idling is the stack of the last thread to run. This stack * stack used when idling is the stack of the last thread to run. This stack
@@ -207,13 +209,24 @@ static void INIT_ATTR core_thread_init(unsigned int core)
* to use a stack from an unloaded module until another thread runs on it. * to use a stack from an unloaded module until another thread runs on it.
*--------------------------------------------------------------------------- *---------------------------------------------------------------------------
*/ */
static inline void switch_to_idle_stack(const unsigned int core) static inline void __attribute__((noreturn,always_inline))
thread_final_exit(struct thread_entry *current)
{ {
asm volatile ( asm volatile (
"str sp, [%0] \n" /* save original stack pointer on idle stack */ "cmp %1, #0 \n" /* CPU? */
"mov sp, %0 \n" /* switch stacks */ "ldrne r0, =cpucache_flush \n" /* No? write back data */
: : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1])); "movne lr, pc \n"
(void)core; "bxne r0 \n"
"mov r0, %0 \n" /* copy thread parameter */
"mov sp, %2 \n" /* switch to idle stack */
"bl thread_final_exit_do \n" /* finish removal */
: : "r"(current),
"r"(current->core),
"r"(&idle_stacks[current->core][IDLE_STACK_WORDS])
: "r0", "r1", "r2", "r3", "ip", "lr"); /* Because of flush call,
force inputs out
of scratch regs */
while (1);
} }
/*--------------------------------------------------------------------------- /*---------------------------------------------------------------------------

View file

@@ -157,8 +157,8 @@ static inline void load_context(const void* addr)
__attribute__((always_inline)); __attribute__((always_inline));
#if NUM_CORES > 1 #if NUM_CORES > 1
static void thread_final_exit(struct thread_entry *current) static void thread_final_exit_do(struct thread_entry *current)
__attribute__((noinline, noreturn)); __attribute__((noinline, noreturn, used));
#else #else
static inline void thread_final_exit(struct thread_entry *current) static inline void thread_final_exit(struct thread_entry *current)
__attribute__((always_inline, noreturn)); __attribute__((always_inline, noreturn));
@@ -1675,22 +1675,16 @@ void thread_wait(unsigned int thread_id)
/* This is done to foil optimizations that may require the current stack, /* This is done to foil optimizations that may require the current stack,
* such as optimizing subexpressions that put variables on the stack that * such as optimizing subexpressions that put variables on the stack that
* get used after switching stacks. */ * get used after switching stacks. */
static void thread_final_exit(struct thread_entry *current)
{
#if NUM_CORES > 1 #if NUM_CORES > 1
cpucache_flush(); /* Called by ASM stub */
static void thread_final_exit_do(struct thread_entry *current)
/* Switch to the idle stack if not on the main core (where "main" #else
* runs) - we can hope gcc doesn't need the old stack beyond this /* No special procedure is required before calling */
* point. */ static inline void thread_final_exit(struct thread_entry *current)
if (current->core != CPU) #endif
{ {
switch_to_idle_stack(current->core);
}
/* At this point, this thread isn't using resources allocated for /* At this point, this thread isn't using resources allocated for
* execution except the slot itself. */ * execution except the slot itself. */
#endif /* NUM_CORES */
/* Signal this thread */ /* Signal this thread */
thread_queue_wake(&current->queue); thread_queue_wake(&current->queue);
@@ -1746,6 +1740,7 @@ void thread_exit(void)
new_thread_id(current->id, current); new_thread_id(current->id, current);
current->name = NULL; current->name = NULL;
/* Do final cleanup and remove the thread */
thread_final_exit(current); thread_final_exit(current);
} }