/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    <  | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                      \/            \/     \/    \/           \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"
#include <stdbool.h>
#include "thread.h"
#include "panic.h"
#include "system.h"
#include "kernel.h"
#include "cpu.h"

#ifdef CPU_COLDFIRE
struct regs
{
    unsigned int macsr;  /* EMAC status register */
    unsigned int d[6];   /* d2-d7 */
    unsigned int a[5];   /* a2-a6 */
    void         *sp;    /* Stack pointer (a7) */
    void         *start; /* Thread start address, or NULL when started */
};
#elif CONFIG_CPU == SH7034
struct regs
{
    unsigned int r[7];   /* Registers r8 thru r14 */
    void         *sp;    /* Stack pointer (r15) */
    void         *pr;    /* Procedure register */
    void         *start; /* Thread start address, or NULL when started */
};
#elif defined(CPU_ARM)
struct regs
{
    unsigned int r[8];   /* Registers r4-r11 */
    void         *sp;    /* Stack pointer (r13) */
    unsigned int lr;     /* r14 (lr) */
    void         *start; /* Thread start address, or NULL when started */
};
#elif CONFIG_CPU == TCC730
struct regs
{
    void *sp;    /* Stack pointer (a15) */
    void *start; /* Thread start address */
    int started; /* 0 when not started */
};
#endif
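
/* Layout note (derived from the structs above, assuming 4-byte ints and
   pointers): the context-switch asm further down addresses the 'start'
   field by its byte offset -- 40 on ARM (r[8] + sp + lr = 10 words) and
   52 on Coldfire (macsr + d[6] + a[5] + sp = 13 words). If fields are
   added or reordered, those hard-coded offsets must be kept in sync. */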

#define DEADBEEF ((unsigned int)0xdeadbeef)
/* Cast to the machine int type, whose size could be < 4. */

int num_threads[NUM_CORES];
static volatile int num_sleepers[NUM_CORES];
static int current_thread[NUM_CORES];
static struct regs thread_contexts[NUM_CORES][MAXTHREADS] IBSS_ATTR;
const char *thread_name[NUM_CORES][MAXTHREADS];
void *thread_stack[NUM_CORES][MAXTHREADS];
int thread_stack_size[NUM_CORES][MAXTHREADS];
static const char main_thread_name[] = "main";

extern int stackbegin[];
extern int stackend[];

#ifdef CPU_PP
#ifndef BOOTLOADER
extern int cop_stackbegin[];
extern int cop_stackend[];
#else
/* The coprocessor stack is not set up in the bootloader code, but the
   threading is. No threads are run on the coprocessor, so set up a dummy
   stack for it. */
int *cop_stackbegin = stackbegin;
int *cop_stackend = stackend;
#endif
#endif

void switch_thread(void) ICODE_ATTR;
static inline void store_context(void* addr) __attribute__ ((always_inline));
static inline void load_context(const void* addr) __attribute__ ((always_inline));

#ifdef RB_PROFILE
#include <profile.h>
void profile_thread(void) {
    profstart(current_thread);
}
#endif

#if defined(CPU_ARM)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile(
        "stmia %0, { r4-r11, sp, lr }\n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile(
        "ldmia %0, { r4-r11, sp, lr }\n" /* load regs r4 to r14 from context */
        "ldr   r0, [%0, #40]         \n" /* load start pointer */
        "mov   r1, #0                \n"
        "cmp   r0, r1                \n" /* check for NULL */
        "strne r1, [%0, #40]         \n" /* if not NULL, clear the start address... */
        "movne pc, r0                \n" /* ...and jump to it to start the thread */
        : : "r" (addr) : "r0", "r1"
    );
}

#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "move.l  %%macsr,%%d0                  \n"
        "movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "movem.l (%0),%%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
        "move.l  %%d0,%%macsr                  \n"
        "move.l  (52,%0),%%d0                  \n" /* Get start address */
        "beq.b   .running                      \n" /* NULL -> already running */
        "clr.l   (52,%0)                       \n" /* Clear start address.. */
        "move.l  %%d0,%0                       \n"
        "jmp     (%0)                          \n" /* ..and start the thread */
    ".running:                                 \n"
        : : "a" (addr) : "d0" /* only! */
    );
}

#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void store_context(void* addr)
{
    asm volatile (
        "add     #36,%0   \n"
        "sts.l   pr, @-%0 \n"
        "mov.l   r15,@-%0 \n"
        "mov.l   r14,@-%0 \n"
        "mov.l   r13,@-%0 \n"
        "mov.l   r12,@-%0 \n"
        "mov.l   r11,@-%0 \n"
        "mov.l   r10,@-%0 \n"
        "mov.l   r9, @-%0 \n"
        "mov.l   r8, @-%0 \n"
        : : "r" (addr)
    );
}

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
static inline void load_context(const void* addr)
{
    asm volatile (
        "mov.l   @%0+,r8  \n"
        "mov.l   @%0+,r9  \n"
        "mov.l   @%0+,r10 \n"
        "mov.l   @%0+,r11 \n"
        "mov.l   @%0+,r12 \n"
        "mov.l   @%0+,r13 \n"
        "mov.l   @%0+,r14 \n"
        "mov.l   @%0+,r15 \n"
        "lds.l   @%0+,pr  \n"
        "mov.l   @%0,r0   \n" /* Get start address */
        "tst     r0,r0    \n"
        "bt      .running \n" /* NULL -> already running */
        "lds     r0,pr    \n"
        "mov     #0,r0    \n"
        "rts              \n" /* Start the thread */
        "mov.l   r0,@%0   \n" /* Clear start address (runs in the rts delay slot) */
        ".running:        \n"
        : : "r" (addr) : "r0" /* only! */
    );
}

#elif CONFIG_CPU == TCC730
/*---------------------------------------------------------------------------
 * Store non-volatile context.
 *---------------------------------------------------------------------------
 */
#define store_context(addr)    \
    __asm__ volatile (         \
        "push r0,r1\n\t"       \
        "push r2,r3\n\t"       \
        "push r4,r5\n\t"       \
        "push r6,r7\n\t"       \
        "push a8,a9\n\t"       \
        "push a10,a11\n\t"     \
        "push a12,a13\n\t"     \
        "push a14\n\t"         \
        "ldw @[%0+0], a15\n\t" : : "a" (addr) );

/*---------------------------------------------------------------------------
 * Load non-volatile context.
 *---------------------------------------------------------------------------
 */
#define load_context(addr)                   \
{                                            \
    if (!(addr)->started) {                  \
        (addr)->started = 1;                 \
        __asm__ volatile (                   \
            "ldw a15, @[%0+0]\n\t"           \
            "ldw a14, @[%0+4]\n\t"           \
            "jmp a14\n\t" : : "a" (addr)     \
        );                                   \
    } else                                   \
        __asm__ volatile (                   \
            "ldw a15, @[%0+0]\n\t"           \
            "pop a14\n\t"                    \
            "pop a13,a12\n\t"                \
            "pop a11,a10\n\t"                \
            "pop a9,a8\n\t"                  \
            "pop r7,r6\n\t"                  \
            "pop r5,r4\n\t"                  \
            "pop r3,r2\n\t"                  \
            "pop r1,r0\n\t" : : "a" (addr)   \
        );                                   \
}

#endif
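
/* Note on the primitives above (follows from how they are used below): only
   the callee-saved ("non-volatile") registers are stored and reloaded,
   because switch_thread() is reached by an ordinary function call, so the
   compiler already treats the remaining registers as clobbered across it. */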

/*---------------------------------------------------------------------------
 * Switch thread in round-robin fashion.
 *---------------------------------------------------------------------------
 */
void switch_thread(void)
{
#ifdef RB_PROFILE
    profile_thread_stopped(current_thread);
#endif
    int current;
    unsigned int *stackptr;

#ifdef SIMULATOR
    /* Do nothing */
#else
    while (num_sleepers[CURRENT_CORE] == num_threads[CURRENT_CORE])
    {
        /* Enter sleep mode, woken up on interrupt */
#ifdef CPU_COLDFIRE
        asm volatile ("stop #0x2000");
#elif CONFIG_CPU == SH7034
        and_b(0x7F, &SBYCR);
        asm volatile ("sleep");
#elif CONFIG_CPU == PP5020
        /* This should sleep the CPU. It appears to wake by itself on
           interrupts */
        CPU_CTL = 0x80000000;
#elif CONFIG_CPU == TCC730
        /* Sleep mode is triggered by the SYS instruction on CalmRISC16.
         * Unfortunately, the manual doesn't specify which argument to use.
         __asm__ volatile ("sys #0x0f");
         0x1f seems to trigger a reset;
         0x0f is the only other argument used by Archos.
        */
#elif CONFIG_CPU == S3C2440
        CLKCON |= 2;
#endif
    }
#endif
    current = current_thread[CURRENT_CORE];
    store_context(&thread_contexts[CURRENT_CORE][current]);

#if CONFIG_CPU != TCC730
    /* Check if the current thread's stack has overflowed */
    stackptr = thread_stack[CURRENT_CORE][current];
    if(stackptr[0] != DEADBEEF)
        panicf("Stkov %s", thread_name[CURRENT_CORE][current]);
#endif

    if (++current >= num_threads[CURRENT_CORE])
        current = 0;

    current_thread[CURRENT_CORE] = current;
    load_context(&thread_contexts[CURRENT_CORE][current]);
#ifdef RB_PROFILE
    profile_thread_started(current_thread);
#endif
}

void sleep_thread(void)
{
    ++num_sleepers[CURRENT_CORE];
    switch_thread();
}

void wake_up_thread(void)
{
    num_sleepers[CURRENT_CORE] = 0;
}
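
/* Usage sketch (illustrative only, not part of this file): a thread that
 * idles until another thread or an interrupt handler calls wake_up_thread().
 * sleep_thread() counts the caller as sleeping and yields; switch_thread()
 * is a plain cooperative yield. The helpers work_pending() and
 * process_work() are hypothetical.
 *
 *   static void demo_loop(void)
 *   {
 *       while (1)
 *       {
 *           while (!work_pending())
 *               sleep_thread();
 *           process_work();
 *           switch_thread();
 *       }
 *   }
 */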

/*---------------------------------------------------------------------------
 * Create thread on the current core.
 * Return ID if context area could be allocated, else -1.
 *---------------------------------------------------------------------------
 */
int create_thread(void (*function)(void), void* stack, int stack_size,
                  const char *name)
{
    return create_thread_on_core(CURRENT_CORE, function, stack, stack_size,
                                 name);
}
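
/* Usage sketch (illustrative only, not part of this file): creating a thread
 * with a statically allocated stack. The entry function, stack size and name
 * below are hypothetical. Note that the entry function must never return:
 * the scheduler jumps to it rather than calling it.
 *
 *   static int demo_stack[0x400 / sizeof(int)];
 *
 *   static void demo_thread(void)
 *   {
 *       while (1)
 *           sleep_thread();
 *   }
 *
 *   int id = create_thread(demo_thread, demo_stack, sizeof(demo_stack),
 *                          "demo");
 *   if (id < 0)
 *       panicf("Could not create demo thread");
 */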

/*---------------------------------------------------------------------------
 * Create thread on a specific core.
 * Return ID if context area could be allocated, else -1.
 *---------------------------------------------------------------------------
 */
int create_thread_on_core(unsigned int core, void (*function)(void),
                          void* stack, int stack_size, const char *name)
{
    unsigned int i;
    unsigned int stacklen;
    unsigned int *stackptr;
    struct regs *regs;

    if (num_threads[core] >= MAXTHREADS)
        return -1;

    /* Munge the stack to make it easy to spot stack overflows */
    stacklen = stack_size / sizeof(int);
    stackptr = stack;
    for(i = 0;i < stacklen;i++)
    {
        stackptr[i] = DEADBEEF;
    }

    /* Store interesting information */
    thread_name[core][num_threads[core]] = name;
    thread_stack[core][num_threads[core]] = stack;
    thread_stack_size[core][num_threads[core]] = stack_size;
    regs = &thread_contexts[core][num_threads[core]];
#if defined(CPU_COLDFIRE) || (CONFIG_CPU == SH7034) || defined(CPU_ARM)
    /* Align stack to an even 32 bit boundary */
    regs->sp = (void*)(((unsigned int)stack + stack_size) & ~3);
#elif CONFIG_CPU == TCC730
    /* Align stack on word boundary */
    regs->sp = (void*)(((unsigned long)stack + stack_size - 2) & ~1);
    regs->started = 0;
#endif
    regs->start = (void*)function;

    wake_up_thread();
    return num_threads[core]++; /* return the current ID, e.g. for remove_thread() */
}

/*---------------------------------------------------------------------------
 * Remove a thread on the current core from the scheduler.
 * Parameter is the ID as returned from create_thread().
 *---------------------------------------------------------------------------
 */
void remove_thread(int threadnum)
{
    remove_thread_on_core(CURRENT_CORE, threadnum);
}

/*---------------------------------------------------------------------------
 * Remove a thread on the specified core from the scheduler.
 * Parameters are the core and the ID as returned from create_thread().
 *---------------------------------------------------------------------------
 */
void remove_thread_on_core(unsigned int core, int threadnum)
{
    int i;

    if(threadnum >= num_threads[core])
        return;

    num_threads[core]--;
    for (i=threadnum; i<num_threads[core]; i++)
    {   /* move all entries which are behind */
        thread_name[core][i] = thread_name[core][i+1];
        thread_stack[core][i] = thread_stack[core][i+1];
        thread_stack_size[core][i] = thread_stack_size[core][i+1];
        thread_contexts[core][i] = thread_contexts[core][i+1];
    }

    if (current_thread[core] == threadnum) /* deleting the current one? */
        current_thread[core] = num_threads[core]; /* set beyond last, avoid store harm */
    else if (current_thread[core] > threadnum) /* within the moved positions? */
        current_thread[core]--; /* adjust it, point to same context again */
}
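
/* Note (follows from the copy loop above): removing a thread shifts every
   later thread down one slot, so IDs previously returned by create_thread()
   for higher-numbered threads no longer refer to the same thread. */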

void init_threads(void)
{
    unsigned int core = CURRENT_CORE;

    num_threads[core] = 1; /* We have 1 thread to begin with */
    current_thread[core] = 0; /* The current thread is number 0 */
    thread_name[core][0] = main_thread_name;
    /* In multiple core setups, each core has a different stack. There is
       probably a much better way to do this. */
    if(core == CPU)
    {
        thread_stack[CPU][0] = stackbegin;
        thread_stack_size[CPU][0] = (int)stackend - (int)stackbegin;
    } else {
#if NUM_CORES > 1 /* This code path will not be run on single core targets */
        thread_stack[COP][0] = cop_stackbegin;
        thread_stack_size[COP][0] = (int)cop_stackend - (int)cop_stackbegin;
#endif
    }
#if CONFIG_CPU == TCC730
    thread_contexts[core][0].started = 1;
#else
    thread_contexts[core][0].start = 0; /* thread 0 already running */
#endif
    num_sleepers[core] = 0;
}

int thread_stack_usage(int threadnum)
{
    return thread_stack_usage_on_core(CURRENT_CORE, threadnum);
}

int thread_stack_usage_on_core(unsigned int core, int threadnum)
{
    unsigned int i;
    unsigned int *stackptr = thread_stack[core][threadnum];

    if(threadnum >= num_threads[core])
        return -1;

    for(i = 0;i < thread_stack_size[core][threadnum]/sizeof(int);i++)
    {
        if(stackptr[i] != DEADBEEF)
            break;
    }

    return ((thread_stack_size[core][threadnum] - i * sizeof(int)) * 100) /
           thread_stack_size[core][threadnum];
}
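
/* Worked example (assumed numbers): with a 1024-byte stack in which the
   lowest 256 bytes (64 words) still hold the DEADBEEF fill pattern, the
   loop above stops at i = 64 and the function reports
   (1024 - 64 * 4) * 100 / 1024 = 75 (percent of the stack ever used). */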