Commit a subset of the dual-core changes covering cache handling, stacks, firmware startup and thread startup. Tested on e200, H10-20GB, iPod Color and 5.5G. Thread function return is implemented for all targets. Some changes to plugins will follow shortly.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@14879 a1c6a512-1295-4272-9138-f99709370657
Michael Sevakis 2007-09-28 10:20:02 +00:00
parent edbf5d81f5
commit 7914e90738
23 changed files with 684 additions and 547 deletions

View file

@@ -157,6 +157,10 @@ struct codec_api ci = {
/* new stuff at the end, sort into place next time
the API gets incompatible */
#ifdef CACHE_FUNCTIONS_AS_CALL
flush_icache,
invalidate_icache,
#endif
};
void codec_get_full_path(char *path, const char *codec_root_fn)

View file

@@ -80,7 +80,7 @@
#define CODEC_ENC_MAGIC 0x52454E43 /* RENC */
/* increase this every time the api struct changes */
#define CODEC_API_VERSION 18
#define CODEC_API_VERSION 19
/* update this to latest version if a change to the api struct breaks
backwards compatibility (and please take the opportunity to sort in any
@@ -230,6 +230,10 @@ struct codec_api {
/* new stuff at the end, sort into place next time
the API gets incompatible */
#ifdef CACHE_FUNCTIONS_AS_CALL
void (*flush_icache)(void);
void (*invalidate_icache)(void);
#endif
};
/* codec header */
@@ -286,4 +290,22 @@ int codec_load_file(const char* codec, struct codec_api *api);
/* defined by the codec */
enum codec_status codec_start(struct codec_api* rockbox);
#ifndef CACHE_FUNCTION_WRAPPERS
#ifdef CACHE_FUNCTIONS_AS_CALL
#define CACHE_FUNCTION_WRAPPERS(api) \
void flush_icache(void) \
{ \
(api)->flush_icache(); \
} \
void invalidate_icache(void) \
{ \
(api)->invalidate_icache(); \
}
#else
#define CACHE_FUNCTION_WRAPPERS(api)
#endif /* CACHE_FUNCTIONS_AS_CALL */
#endif /* CACHE_FUNCTION_WRAPPERS */
#endif
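For reference: with CACHE_FUNCTIONS_AS_CALL defined, a codec that invokes CACHE_FUNCTION_WRAPPERS(ci) (as codec.c does below) ends up compiling the following two forwarding functions, where ci is the codec's pointer to the core-provided struct codec_api. This is a hand-expansion of the macro for illustration, not additional source:

    /* Expansion of CACHE_FUNCTION_WRAPPERS(ci) */
    void flush_icache(void)
    {
        (ci)->flush_icache();        /* forwards to the core's routine */
    }

    void invalidate_icache(void)
    {
        (ci)->invalidate_icache();
    }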

View file

@@ -32,6 +32,8 @@ extern unsigned char plugin_end_addr[];
extern enum codec_status codec_main(void);
CACHE_FUNCTION_WRAPPERS(ci);
enum codec_status codec_start(struct codec_api *api)
{
#ifndef SIMULATOR

View file

@@ -180,7 +180,6 @@ static bool dbg_list(struct action_callback_info *info)
/*---------------------------------------------------*/
extern struct thread_entry threads[MAXTHREADS];
static char thread_status_char(int status)
{
switch (status)
@@ -193,42 +192,48 @@ static char thread_status_char(int status)
return '?';
}
#if NUM_CORES > 1
#define IF_COP2(...) __VA_ARGS__
#else
#define IF_COP2(...)
#endif
static char* threads_getname(int selected_item, void * data, char *buffer)
{
(void)data;
char name[32];
struct thread_entry *thread = NULL;
int status, usage;
unsigned status;
int usage;
#if NUM_CORES > 1
if (selected_item < (int)NUM_CORES)
{
usage = idle_stack_usage(selected_item);
snprintf(buffer, MAX_PATH, "Idle (%d): %2d%%", selected_item, usage);
return buffer;
}
selected_item -= NUM_CORES;
#endif
thread = &threads[selected_item];
status = thread_get_status(thread);
if (thread->name == NULL)
{
snprintf(buffer, MAX_PATH, "%2d: ---", selected_item);
return buffer;
}
thread_get_name(name, 32, thread);
usage = thread_stack_usage(thread);
status = thread_get_status(thread);
#ifdef HAVE_PRIORITY_SCHEDULING
snprintf(buffer, MAX_PATH, "%2d: " IF_COP2("(%d) ") "%c%c %d %2d%% %s",
snprintf(buffer, MAX_PATH,
"%2d: " IF_COP("(%d) ") "%c%c " IF_PRIO("%d ") "%2d%% %s",
selected_item,
IF_COP2(thread->core,)
IF_COP(thread->core,)
(status == STATE_RUNNING) ? '*' : ' ',
thread_status_char(status),
thread->priority,
usage, thread->name);
#else
snprintf(buffer, MAX_PATH, "%2d: " IF_COP2("(%d) ") "%c%c %2d%% %s",
selected_item,
IF_COP2(thread->core,)
(status == STATE_RUNNING) ? '*' : ' ',
thread_status_char(status),
usage, thread->name);
#endif
IF_PRIO(thread->priority,)
usage, name);
return buffer;
}
static int dbg_threads_action_callback(int action, struct action_callback_info *info)
@@ -236,11 +241,16 @@ static int dbg_threads_action_callback(int action, struct action_callback_info *
#ifdef ROCKBOX_HAS_LOGF
if (action == ACTION_STD_OK)
{
struct thread_entry *thread = &threads[gui_synclist_get_sel_pos(info->lists)];
if (thread->name != NULL)
remove_thread(thread);
}
int selpos = gui_synclist_get_sel_pos(info->lists);
#if NUM_CORES > 1
if (selpos >= NUM_CORES)
remove_thread(&threads[selpos - NUM_CORES]);
#else
remove_thread(&threads[selpos]);
#endif
}
gui_synclist_hide_selection_marker(info->lists, false);
#endif /* ROCKBOX_HAS_LOGF */
gui_synclist_draw(info->lists);
return action;
}
@@ -248,8 +258,12 @@ static int dbg_threads_action_callback(int action, struct action_callback_info *
static bool dbg_os(void)
{
struct action_callback_info info;
info.title = IF_COP2("Core and ") "Stack usage:";
info.title = IF_COP("Core and ") "Stack usage:";
#if NUM_CORES == 1
info.count = MAXTHREADS;
#else
info.count = MAXTHREADS+NUM_CORES;
#endif
info.selection_size = 1;
info.action_callback = dbg_threads_action_callback;
info.dbg_getname = threads_getname;

View file

@@ -334,9 +334,7 @@ static void init(void)
/* if nobody initialized ATA before, I consider this a cold start */
bool coldstart = (PACR2 & 0x4000) != 0; /* starting from Flash */
#endif
#ifdef CPU_PP
COP_CTL = PROC_WAKE;
#endif
system_init();
kernel_init();
@@ -591,25 +589,19 @@ void cop_main(void)
so it should not be assumed that the coprocessor is usable even on
platforms which support it.
A kernel thread runs on the coprocessor which waits for other threads to be
added, and gracefully handles RoLo */
A kernel thread is initially set up on the coprocessor and immediately
destroyed for purposes of continuity. The cop sits idle until at least
one thread exists on it. */
#if CONFIG_CPU == PP5002
/* 3G doesn't have Rolo or dual core support yet */
#if NUM_CORES > 1
system_init();
kernel_init();
/* This should never be reached */
#endif
while(1) {
COP_CTL = PROC_SLEEP;
}
#else
extern volatile unsigned char cpu_message;
system_init();
kernel_init();
while(cpu_message != COP_REBOOT) {
sleep(HZ);
}
rolo_restart_cop();
#endif /* PP5002 */
}
#endif /* CPU_PP */
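Pieced together from the interleaved fragments above, the new cop_main() plausibly reads as follows (a reconstruction for readability, not verbatim source; init_threads(), called from kernel_init(), parks the COP on its idle stack, so the dual-core path never returns here):

    void cop_main(void)
    {
    #if NUM_CORES > 1
        system_init();
        kernel_init();
        /* This should never be reached */
    #endif

    #if CONFIG_CPU == PP5002
        /* 3G doesn't have Rolo or dual core support yet */
        while (1) {
            COP_CTL = PROC_SLEEP;
        }
    #endif /* PP5002 */
    }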

View file

@@ -509,6 +509,11 @@ static const struct plugin_api rockbox_api = {
#if defined(TOSHIBA_GIGABEAT_F) || defined(SANSA_E200)
lcd_yuv_set_options,
#endif
#ifdef CACHE_FUNCTIONS_AS_CALL
flush_icache,
invalidate_icache,
#endif
};
int plugin_load(const char* plugin, void* parameter)

View file

@@ -112,7 +112,7 @@
#define PLUGIN_MAGIC 0x526F634B /* RocK */
/* increase this every time the api struct changes */
#define PLUGIN_API_VERSION 77
#define PLUGIN_API_VERSION 78
/* update this to latest version if a change to the api struct breaks
backwards compatibility (and please take the opportunity to sort in any
@@ -627,6 +627,11 @@ struct plugin_api {
#if defined(TOSHIBA_GIGABEAT_F) || defined(SANSA_E200)
void (*lcd_yuv_set_options)(unsigned options);
#endif
#ifdef CACHE_FUNCTIONS_AS_CALL
void (*flush_icache)(void);
void (*invalidate_icache)(void);
#endif
};
/* plugin header */
@@ -710,4 +715,22 @@ enum plugin_status plugin_start(struct plugin_api* rockbox, void* parameter)
return (api)->memcmp(s1, s2, n); \
}
#ifndef CACHE_FUNCTION_WRAPPERS
#ifdef CACHE_FUNCTIONS_AS_CALL
#define CACHE_FUNCTION_WRAPPERS(api) \
void flush_icache(void) \
{ \
(api)->flush_icache(); \
} \
void invalidate_icache(void) \
{ \
(api)->invalidate_icache(); \
}
#else
#define CACHE_FUNCTION_WRAPPERS(api)
#endif /* CACHE_FUNCTIONS_AS_CALL */
#endif /* CACHE_FUNCTION_WRAPPERS */
#endif

View file

@@ -197,6 +197,18 @@ SECTIONS
} > IRAM
#ifdef CPU_PP
#if NUM_CORES > 1
.idle_stacks :
{
*(.idle_stacks)
cpu_idlestackbegin = .;
. += 0x0080;
cpu_idlestackend = .;
cop_idlestackbegin = .;
. += 0x0080;
cop_idlestackend = .;
} > IRAM
#else
.cop_stack :
{
*(.cop_stack)
@@ -205,6 +217,7 @@ SECTIONS
cop_stackend = .;
} > IRAM
#endif
#endif
#else
/* TRICK ALERT! We want 0x2000 bytes of stack, but we set the section

View file

@@ -611,7 +611,6 @@ static void backlight_tick(void)
void backlight_init(void)
{
queue_init(&backlight_queue, true);
queue_set_irq_safe(&backlight_queue, true);
#ifndef SIMULATOR
if (__backlight_init())

View file

@@ -356,10 +356,6 @@ void button_init(void)
queue_init(&button_queue, true);
/* Enable less protection which would kill IRQ handler. Writing queue is
* no longer core-wise thread safe. */
queue_set_irq_safe(&button_queue, true);
button_read();
lastbtn = button_read();
tick_add_task(button_tick);

View file

@@ -372,10 +372,10 @@
#define NOCACHEBSS_ATTR IBSS_ATTR
#define NOCACHEDATA_ATTR IDATA_ATTR
#define IF_COP(empty, x, y) , x, y
#define IF_COP(...) __VA_ARGS__
/* Defines for inter-core messaging */
#define COP_REBOOT 1
#define IDLE_STACK_SIZE 0x80
#define IDLE_STACK_WORDS 0x20
#else
#define NUM_CORES 1
@@ -383,7 +383,7 @@
#define NOCACHEBSS_ATTR
#define NOCACHEDATA_ATTR
#define IF_COP(empty, x, y)
#define IF_COP(...)
#endif /* Processor specific */
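The switch from the fixed-arity IF_COP(empty, x, y) to a variadic macro is what lets the debug menu above splice a format-string fragment and its matching argument in or out together. A small stand-alone demonstration (hypothetical test program, not part of the commit):

    #include <stdio.h>

    #define NUM_CORES 2                 /* set to 1 to see the other expansion */

    #if NUM_CORES > 1
    #define IF_COP(...) __VA_ARGS__
    #else
    #define IF_COP(...)
    #endif

    int main(void)
    {
        char buf[64];
        int id = 3, core = 1;
        /* Mirrors the threads_getname() call: the "(%d)" specifier and the
           core argument appear only when IF_COP passes its arguments through */
        snprintf(buf, sizeof(buf), "%2d: " IF_COP("(%d) ") "run",
                 id IF_COP(, core));
        puts(buf);  /* dual-core: " 3: (1) run"; single-core: " 3: run" */
        return 0;
    }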

View file

@@ -94,9 +94,6 @@ struct event_queue
struct thread_entry *thread;
unsigned int read;
unsigned int write;
#if NUM_CORES > 1
bool irq_safe;
#endif
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
struct queue_sender_list *send;
#endif
@@ -153,11 +150,6 @@ void timeout_register(struct timeout *tmo, timeout_cb_type callback,
void timeout_cancel(struct timeout *tmo);
extern void queue_init(struct event_queue *q, bool register_queue);
#if NUM_CORES > 1
extern void queue_set_irq_safe(struct event_queue *q, bool state);
#else
#define queue_set_irq_safe(q,state)
#endif
extern void queue_delete(struct event_queue *q);
extern void queue_wait(struct event_queue *q, struct event *ev);
extern void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks);

View file

@@ -118,18 +118,18 @@
#define DEV_RS (*(volatile unsigned long *)(0x60006004))
#define DEV_EN (*(volatile unsigned long *)(0x6000600c))
#define DEV_SYSTEM 0x4
#define DEV_SER0 0x40
#define DEV_SER1 0x80
#define DEV_I2S 0x800
#define DEV_I2C 0x1000
#define DEV_ATA 0x4000
#define DEV_OPTO 0x10000
#define DEV_PIEZO 0x10000
#define DEV_USB 0x400000
#define DEV_FIREWIRE 0x800000
#define DEV_IDE0 0x2000000
#define DEV_LCD 0x4000000
#define DEV_SYSTEM 0x00000004
#define DEV_SER0 0x00000040
#define DEV_SER1 0x00000080
#define DEV_I2S 0x00000800
#define DEV_I2C 0x00001000
#define DEV_ATA 0x00004000
#define DEV_OPTO 0x00010000
#define DEV_PIEZO 0x00010000
#define DEV_USB 0x00400000
#define DEV_FIREWIRE 0x00800000
#define DEV_IDE0 0x02000000
#define DEV_LCD 0x04000000
/* clock control */
#define CLOCK_SOURCE (*(volatile unsigned long *)(0x60006020))
@@ -174,6 +174,7 @@
#define CACHE_DISABLE 0
#define CACHE_ENABLE 1
#define CACHE_RUN 2
#define CACHE_INIT 4
/* GPIO Ports */

View file

@@ -50,32 +50,32 @@
#ifndef SIMULATOR
/* Need to keep structures inside the header file because debug_menu
* needs them. */
# ifdef CPU_COLDFIRE
#ifdef CPU_COLDFIRE
struct regs
{
unsigned int macsr; /* EMAC status register */
unsigned int d[6]; /* d2-d7 */
unsigned int a[5]; /* a2-a6 */
void *sp; /* Stack pointer (a7) */
void *start; /* Thread start address, or NULL when started */
};
# elif CONFIG_CPU == SH7034
unsigned int macsr; /* 0 - EMAC status register */
unsigned int d[6]; /* 4-24 - d2-d7 */
unsigned int a[5]; /* 28-44 - a2-a6 */
void *sp; /* 48 - Stack pointer (a7) */
void *start; /* 52 - Thread start address, or NULL when started */
} __attribute__((packed));
#elif CONFIG_CPU == SH7034
struct regs
{
unsigned int r[7]; /* Registers r8 thru r14 */
void *sp; /* Stack pointer (r15) */
void *pr; /* Procedure register */
void *start; /* Thread start address, or NULL when started */
};
# elif defined(CPU_ARM)
unsigned int r[7]; /* 0-24 - Registers r8 thru r14 */
void *sp; /* 28 - Stack pointer (r15) */
void *pr; /* 32 - Procedure register */
void *start; /* 36 - Thread start address, or NULL when started */
} __attribute__((packed));
#elif defined(CPU_ARM)
struct regs
{
unsigned int r[8]; /* Registers r4-r11 */
void *sp; /* Stack pointer (r13) */
unsigned int lr; /* r14 (lr) */
void *start; /* Thread start address, or NULL when started */
};
# endif
unsigned int r[8]; /* 0-28 - Registers r4-r11 */
void *sp; /* 32 - Stack pointer (r13) */
unsigned int lr; /* 36 - r14 (lr) */
void *start; /* 40 - Thread start address, or NULL when started */
} __attribute__((packed));
#endif /* CONFIG_CPU */
#else
struct regs
{
@@ -140,9 +140,9 @@ struct core_entry {
};
#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(empty, type) , type
#define IF_PRIO(...) __VA_ARGS__
#else
#define IF_PRIO(empty, type)
#define IF_PRIO(...)
#endif
/* PortalPlayer chips have 2 cores, therefore need atomic mutexes
@@ -197,14 +197,6 @@ struct core_entry {
})
#endif
#if NUM_CORES > 1
inline void lock_cores(void);
inline void unlock_cores(void);
#else
#define lock_cores(...)
#define unlock_cores(...)
#endif
struct thread_entry*
create_thread(void (*function)(void), void* stack, int stack_size,
const char *name IF_PRIO(, int priority)
@@ -239,7 +231,12 @@ void priority_yield(void);
struct thread_entry * thread_get_current(void);
void init_threads(void);
int thread_stack_usage(const struct thread_entry *thread);
#if NUM_CORES > 1
int idle_stack_usage(unsigned int core);
#endif
int thread_get_status(const struct thread_entry *thread);
void thread_get_name(char *buffer, int size,
struct thread_entry *thread);
#ifdef RB_PROFILE
void profile_thread(void);
#endif
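The byte offsets added to the struct regs comments are load-bearing: the new start_thread code in thread.c (below) addresses the structure from assembly, e.g. "ldr sp, [r0, #32]" and "ldr r4, [r0, #40]" on ARM. A compile-time guard along these lines would catch layout drift; it is a hypothetical addition, not in the commit, and assumes the target's 32-bit pointers:

    #include <stddef.h>

    struct regs_arm                    /* mirror of the CPU_ARM layout above */
    {
        unsigned int r[8];  /*  0-28 - Registers r4-r11 */
        void *sp;           /* 32 - Stack pointer (r13) */
        unsigned int lr;    /* 36 - r14 (lr) */
        void *start;        /* 40 - Thread start address */
    } __attribute__((packed));

    /* Fails to compile if the asm offsets no longer match the struct */
    typedef char check_sp[offsetof(struct regs_arm, sp) == 32 ? 1 : -1];
    typedef char check_start[offsetof(struct regs_arm, start) == 40 ? 1 : -1];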

View file

@@ -46,16 +46,21 @@ void queue_wait(struct event_queue *q, struct event *ev) ICODE_ATTR;
void kernel_init(void)
{
/* Init the threading API */
init_threads();
if(CURRENT_CORE == CPU)
#if NUM_CORES > 1
if (CURRENT_CORE == COP)
{
memset(tick_funcs, 0, sizeof(tick_funcs));
num_queues = 0;
memset(all_queues, 0, sizeof(all_queues));
/* This enables the interrupt but it won't be active until
the timer is actually started and interrupts are unmasked */
tick_start(1000/HZ);
}
#endif
init_threads();
/* No processor other than the CPU will proceed here */
memset(tick_funcs, 0, sizeof(tick_funcs));
num_queues = 0;
memset(all_queues, 0, sizeof(all_queues));
tick_start(1000/HZ);
}
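Reassembling the add/remove fragments, kernel_init() now plausibly reads as follows (a reconstruction, not verbatim source): the COP pre-arms its tick interrupt, both cores then enter init_threads(), and only the CPU returns to set up tick tasks and queues.

    void kernel_init(void)
    {
        /* Init the threading API */
    #if NUM_CORES > 1
        if (CURRENT_CORE == COP)
        {
            /* This enables the interrupt but it won't be active until
               the timer is actually started and interrupts are unmasked */
            tick_start(1000/HZ);
        }
    #endif

        init_threads();

        /* No processor other than the CPU will proceed here */
        memset(tick_funcs, 0, sizeof(tick_funcs));
        num_queues = 0;
        memset(all_queues, 0, sizeof(all_queues));
        tick_start(1000/HZ);
    }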
@@ -172,9 +177,6 @@ void queue_init(struct event_queue *q, bool register_queue)
q->read = 0;
q->write = 0;
q->thread = NULL;
#if NUM_CORES > 1
q->irq_safe = false;
#endif
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
q->send = NULL; /* No message sending by default */
#endif
@@ -186,29 +188,12 @@ void queue_init(struct event_queue *q, bool register_queue)
}
}
#if NUM_CORES > 1
/**
* If IRQ mode is enabled, some core-wise locking mechanisms are disabled
* causing accessing queue to be no longer thread safe from the other core.
* However, that locking mechanism would also kill IRQ handlers.
*
* @param q struct of an event_queue
* @param state enable/disable IRQ mode
* @default state disabled
*/
void queue_set_irq_safe(struct event_queue *q, bool state)
{
q->irq_safe = state;
}
#endif
void queue_delete(struct event_queue *q)
{
int i;
bool found = false;
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
lock_cores();
/* Release threads waiting on queue */
wakeup_thread(&q->thread);
@@ -241,7 +226,6 @@ void queue_delete(struct event_queue *q)
num_queues--;
}
unlock_cores();
set_irq_level(oldlevel);
}
@@ -251,13 +235,11 @@ void queue_wait(struct event_queue *q, struct event *ev)
unsigned int rd;
oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
lock_cores();
if (q->read == q->write)
{
set_irq_level_and_block_thread(&q->thread, oldlevel);
oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
lock_cores();
}
rd = q->read++ & QUEUE_LENGTH_MASK;
@@ -271,20 +253,17 @@ void queue_wait(struct event_queue *q, struct event *ev)
}
#endif
unlock_cores();
set_irq_level(oldlevel);
}
void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
{
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
lock_cores();
if (q->read == q->write && ticks > 0)
{
set_irq_level_and_block_thread_w_tmo(&q->thread, ticks, oldlevel);
oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
lock_cores();
}
if (q->read != q->write)
@@ -305,7 +284,6 @@ void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
ev->id = SYS_TIMEOUT;
}
unlock_cores();
set_irq_level(oldlevel);
}
@@ -314,11 +292,6 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
unsigned int wr;
#if NUM_CORES > 1
if (!q->irq_safe)
lock_cores();
#endif
wr = q->write++ & QUEUE_LENGTH_MASK;
q->events[wr].id = id;
@@ -338,10 +311,6 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
#endif
wakeup_thread_irq_safe(&q->thread);
#if NUM_CORES > 1
if (!q->irq_safe)
unlock_cores();
#endif
set_irq_level(oldlevel);
}
@@ -355,8 +324,6 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
unsigned int wr;
lock_cores();
wr = q->write++ & QUEUE_LENGTH_MASK;
q->events[wr].id = id;
@@ -379,7 +346,6 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
/* Function as queue_post if sending is not enabled */
wakeup_thread(&q->thread);
unlock_cores();
set_irq_level(oldlevel);
return 0;
@@ -396,43 +362,23 @@ bool queue_in_queue_send(struct event_queue *q)
/* Replies with retval to any dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
lock_cores();
/* No IRQ lock here since IRQs cannot change this */
if(q->send && q->send->curr_sender)
{
queue_release_sender(&q->send->curr_sender, retval);
}
unlock_cores();
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
bool queue_empty(const struct event_queue* q)
{
bool is_empty;
#if NUM_CORES > 1
if (!q->irq_safe)
lock_cores();
#endif
is_empty = ( q->read == q->write );
#if NUM_CORES > 1
if (!q->irq_safe)
unlock_cores();
#endif
return is_empty;
return ( q->read == q->write );
}
void queue_clear(struct event_queue* q)
{
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
#if NUM_CORES > 1
if (!q->irq_safe)
lock_cores();
#endif
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Release all thread waiting in the queue for a reply -
dequeued sent message will be handled by owning thread */
@@ -442,11 +388,6 @@ void queue_clear(struct event_queue* q)
q->read = 0;
q->write = 0;
#if NUM_CORES > 1
if (!q->irq_safe)
unlock_cores();
#endif
set_irq_level(oldlevel);
}
@@ -454,11 +395,6 @@ void queue_remove_from_head(struct event_queue *q, long id)
{
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
#if NUM_CORES > 1
if (!q->irq_safe)
lock_cores();
#endif
while(q->read != q->write)
{
unsigned int rd = q->read & QUEUE_LENGTH_MASK;
@@ -483,11 +419,6 @@ void queue_remove_from_head(struct event_queue *q, long id)
q->read++;
}
#if NUM_CORES > 1
if (!q->irq_safe)
unlock_cores();
#endif
set_irq_level(oldlevel);
}
@@ -499,24 +430,7 @@ void queue_remove_from_head(struct event_queue *q, long id)
*/
int queue_count(const struct event_queue *q)
{
int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
int result;
#if NUM_CORES > 1
if (!q->irq_safe)
lock_cores();
#endif
result = q->write - q->read;
#if NUM_CORES > 1
if (!q->irq_safe)
unlock_cores();
#endif
set_irq_level(oldlevel);
return result;
return q->write - q->read;
}
int queue_broadcast(long id, intptr_t data)
@@ -644,22 +558,22 @@ void TIMER1(void)
int i;
TIMER1_VAL; /* Read value to ack IRQ */
/* Run through the list of tick tasks (using main core) */
if (CURRENT_CORE == CPU)
{
for (i = 0;i < MAX_NUM_TICK_TASKS;i++)
{
if (tick_funcs[i])
{
tick_funcs[i]();
}
}
current_tick++;
/* Run through the list of tick tasks (using main core -
COP does not dispatch ticks to this subroutine) */
for (i = 0;i < MAX_NUM_TICK_TASKS;i++)
{
if (tick_funcs[i])
{
tick_funcs[i]();
}
}
current_tick++;
}
#endif
/* Must be the last function called in kernel/thread initialization */
void tick_start(unsigned int interval_in_ms)
{
#ifndef BOOTLOADER
@@ -922,14 +836,10 @@ void mutex_lock(struct mutex *m)
void mutex_unlock(struct mutex *m)
{
lock_cores();
if (m->thread == NULL)
m->locked = 0;
else
wakeup_thread(&m->thread);
unlock_cores();
}
void spinlock_lock(struct mutex *m)

View file

@@ -428,7 +428,6 @@ unsigned long pcm_rec_sample_rate(void)
void pcm_rec_init(void)
{
queue_init(&pcmrec_queue, true);
queue_set_irq_safe(&pcmrec_queue, true);
queue_enable_queue_send(&pcmrec_queue, &pcmrec_queue_send);
pcmrec_thread_p =
create_thread(pcmrec_thread, pcmrec_stack, sizeof(pcmrec_stack),

View file

@@ -20,6 +20,7 @@
#include "config.h"
#include "lcd.h"
#include "lcd-remote.h"
#include "thread.h"
#include "kernel.h"
#include "sprintf.h"
#include "button.h"
@@ -52,10 +53,24 @@
volatile unsigned char IDATA_ATTR cpu_message = 0;
volatile unsigned char IDATA_ATTR cpu_reply = 0;
#if NUM_CORES > 1
extern int cop_idlestackbegin[];
#endif
void rolo_restart_cop(void) ICODE_ATTR;
void rolo_restart_cop(void)
{
if (CURRENT_CORE == CPU)
{
/* There should be free thread slots aplenty */
create_thread(rolo_restart_cop, cop_idlestackbegin, IDLE_STACK_SIZE,
"rolo COP" IF_PRIO(, PRIORITY_REALTIME)
IF_COP(, COP, false));
return;
}
COP_INT_CLR = -1;
/* Invalidate cache */
invalidate_icache();
@@ -63,14 +78,14 @@ void rolo_restart_cop(void)
CACHE_CTL = CACHE_DISABLE;
/* Tell the main core that we're ready to reload */
cpu_reply = 2;
cpu_reply = 1;
/* Wait while RoLo loads the image into SDRAM */
/* TODO: Accept checksum failure gracefully */
while(cpu_message == 1) {}
while(cpu_message != 1);
/* Acknowledge the CPU and then reload */
cpu_reply = 1;
cpu_reply = 2;
asm volatile(
"mov r0, #0x10000000 \n"
@@ -127,9 +142,7 @@ void rolo_restart(const unsigned char* source, unsigned char* dest,
: : "a"(dest)
);
#elif defined(CPU_PP502x)
/* Tell the COP that we've finished loading and started rebooting */
cpu_message = 0;
CPU_INT_CLR = -1;
/* Flush cache */
flush_icache();
@@ -141,8 +154,11 @@ void rolo_restart(const unsigned char* source, unsigned char* dest,
for (i=0;i<8;i++)
memmapregs[i]=0;
/* Tell the COP it's safe to continue rebooting */
cpu_message = 1;
/* Wait for the COP to tell us it is rebooting */
while(cpu_reply != 1) {}
while(cpu_reply != 2);
asm volatile(
"mov r0, #0x10000000 \n"
@@ -211,11 +227,11 @@ int rolo_load(const char* filename)
#endif
#ifdef CPU_PP
cpu_message = COP_REBOOT;
COP_CTL = PROC_WAKE;
lcd_puts(0, 2, "Waiting for coprocessor...");
lcd_update();
while(cpu_reply != 2) {}
rolo_restart_cop();
/* Wait for COP to be in safe code */
while(cpu_reply != 1);
lcd_puts(0, 2, " ");
lcd_update();
#endif
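The cpu_message/cpu_reply handshake these edits rearrange can be modeled on a host with two threads spinning on the shared flags, mirroring the firmware's busy-wait style (a hypothetical demonstration; names follow the diff, and plain volatile spinning is acceptable for a sketch but is not portable synchronization):

    #include <pthread.h>
    #include <stdio.h>

    static volatile unsigned char cpu_message = 0;
    static volatile unsigned char cpu_reply = 0;

    static void *cop_main_model(void *arg)  /* the "rolo COP" thread */
    {
        (void)arg;
        /* ...mask interrupts, flush and disable the cache... */
        cpu_reply = 1;                  /* tell the CPU we're in safe code */
        while (cpu_message != 1) {}     /* wait while RoLo loads the image */
        cpu_reply = 2;                  /* acknowledge, then reload */
        puts("COP: jumping to new image");
        return NULL;
    }

    int main(void)
    {
        pthread_t cop;
        pthread_create(&cop, NULL, cop_main_model, NULL);
        while (cpu_reply != 1) {}       /* rolo_load: wait for COP */
        puts("CPU: loading image into SDRAM");
        cpu_message = 1;                /* rolo_restart: COP may continue */
        while (cpu_reply != 2) {}       /* wait for COP reboot ack */
        puts("CPU: jumping to new image");
        pthread_join(cop, NULL);
        return 0;
    }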

View file

@@ -179,6 +179,7 @@ static const char* const uiename[] = {
/* Unexpected Interrupt or Exception handler. Currently only deals with
exceptions, but will deal with interrupts later.
*/
void UIE(unsigned int pc, unsigned int num) __attribute__((noreturn));
void UIE(unsigned int pc, unsigned int num)
{
char str[32];
@@ -188,7 +189,8 @@ void UIE(unsigned int pc, unsigned int num)
lcd_setfont(FONT_SYSFIXED);
#endif
lcd_puts(0, 0, uiename[num]);
snprintf(str, sizeof(str), "at %08x", pc);
snprintf(str, sizeof(str), "at %08x" IF_COP(" (%d)"), pc
IF_COP(, CURRENT_CORE));
lcd_puts(0, 1, str);
lcd_update();

View file

@@ -34,6 +34,7 @@ start:
#if CONFIG_CPU == PP5002
.equ PROC_ID, 0xc4000000
.equ CPU_ICLR, 0xcf001028
.equ CPU_CTRL, 0xcf004054
.equ COP_ICLR, 0xcf001038
.equ COP_CTRL, 0xcf004058
.equ COP_STATUS, 0xcf004050
@@ -44,6 +45,8 @@ start:
#else
.equ PROC_ID, 0x60000000
.equ CPU_ICLR, 0x60004028
.equ CPU_CTRL, 0x60007000
.equ CPU_STATUS, 0x60007000
.equ COP_ICLR, 0x60004038
.equ COP_CTRL, 0x60007004
.equ COP_STATUS, 0x60007004
@@ -57,15 +60,16 @@ start:
msr cpsr_c, #0xd3 /* enter supervisor mode, disable IRQ/FIQ */
b pad_skip
.space 60*4 /* (more than enough) space for exception vectors and mi4 magic */
.space 64*4 /* (more than enough) space for exception vectors and mi4 magic */
pad_skip:
#if defined(SANSA_E200) || defined(SANSA_C200)
/* On the Sansa, copying the vectors fails if the cache is initialised */
ldr r1, =CACHE_CTRL
mov r2, #0x0
str r2, [r1]
#endif
/* Find out which processor we are - r0 should be preserved for the
* duration of the init to avoid constant reloading of the processor ID.
* For each stage, CPU proceeds first, then COP.
*/
ldr r0, =PROC_ID
ldrb r0, [r0]
/* We need to remap memory from wherever SDRAM is mapped natively, to
base address 0, so we can put our exception vectors there. We don't
want to do this remapping while executing from SDRAM, so we copy the
@@ -73,70 +77,95 @@ pad_skip:
code is compiled for address 0, but is currently executing at either
0x28000000 or 0x10000000, depending on chipset version. Do not use any
absolute addresses until remapping has been done. */
ldr r1, =0x40000000
ldr r2, =remap_start
ldr r3, =remap_end
and r5, pc, #0xff000000 /* adjust for execute address */
orr r2, r2, r5
orr r3, r3, r5
/* Cores are stepped through the init in turn: CPU then COP. The remap
stage is completed by each core in turn, and then the COP waits for the
CPU to finish initializing its kernel, at which point the CPU wakes the COP
and waits for the COP to finish. This ensures no threading activity
starts until it is safe. */
cmp r0, #0x55
/* mask all interrupt sources before setting anything up */
ldreq r2, =CPU_ICLR
ldrne r2, =COP_ICLR
mvn r1, #0
str r1, [r2]
/* put us (co-processor) to sleep and wait for CPU to remap */
ldrne r2, =COP_CTRL
movne r1, #SLEEP
strne r1, [r2]
/* wait for co-processor to sleep then CPU can begin its remapping */
ldreq r2, =COP_STATUS
1:
ldreq r1, [r2]
tsteq r1, #SLEEPING
beq 1b
#ifdef CPU_PP502x
/* disable cache and local interrupt vectors - it is really not desirable
to have them enabled here */
ldr r2, =CACHE_CTRL
mov r1, #0
str r1, [r2]
#endif
mov r2, #0x40000000
ldr r3, =remap_start
ldr r4, =remap_end
and r6, pc, #0xff000000 /* adjust for execute address */
orr r3, r3, r6
orr r4, r4, r6
/* copy the code to 0x40000000 */
1:
ldr r4, [r2], #4
str r4, [r1], #4
cmp r2, r3
ble 1b
ldr r5, [r3], #4
str r5, [r2], #4
cmp r3, r4
blo 1b
ldr r3, =0x3f84 /* r3 and r1 values here are magic, don't touch */
orr r3, r3, r5 /* adjust for execute address */
ldr r2, =0xf000f014
ldr r4, =0x3f84 /* r4 and r2 values here are magic, don't touch */
orr r4, r4, r6 /* adjust for execute address */
ldr r3, =0xf000f014
#if MEM > 32
mov r1, #0x7400 /* r1 appears to indicate how much memory (not in
mov r2, #0x7400 /* r2 appears to indicate how much memory (not in
bytes) is remapped */
#else
mov r1, #0x3a00
mov r2, #0x3a00
#endif
ldr r0, =0xf000f010
ldr r1, =0xf000f010
mov pc, #0x40000000
remap_start:
str r1, [r0]
str r3, [r2]
ldr r0, L_post_remap
mov pc, r0
L_post_remap: .word remap_end
str r2, [r1]
str r4, [r3]
ldr r1, L_post_remap
mov pc, r1
L_post_remap:
.word remap_end
remap_end:
/* After doing the remapping, send the COP to sleep.
On wakeup it will go to cop_init */
/* Find out which processor we are */
ldr r0, =PROC_ID
ldrb r0, [r0]
cmp r0, #0x55
/* Mask all interrupt sources before setting up modes */
ldreq r0, =CPU_ICLR
ldrne r0, =COP_ICLR
mvn r1, #1
str r1, [r0]
/* put us (co-processor) to sleep */
ldrne r4, =COP_CTRL
ldr r4, =COP_CTRL
/* Wakeup co-processor to let it do remappings */
moveq r3, #WAKE
/* Sleep us (co-processor) and wait for CPU to do kernel initialization */
movne r3, #SLEEP
strne r3, [r4]
str r3, [r4]
/* Jump to co-processor init */
ldrne pc, =cop_init
cpu_init:
/* Wait for COP to be sleeping */
/* Wait for COP to go to sleep before proceeding */
ldr r4, =COP_STATUS
1:
ldr r3, [r4]
tst r3, #SLEEPING
beq 1b
/* Copy exception handler code to address 0 */
ldr r2, =_vectorsstart
ldr r3, =_vectorsend
@@ -174,17 +203,28 @@ cpu_init:
cmp r3, r2
strhi r4, [r2], #4
bhi 1b
/* Set up some stack and munge it with 0xdeadbeef */
ldr sp, =stackend
mov r3, sp
ldr r2, =stackbegin
/* Load stack munge value */
ldr r4, =0xdeadbeef
/* Set up some stack and munge it with 0xdeadbeef */
ldr r2, =stackbegin
ldr sp, =stackend
1:
cmp sp, r2
strhi r4, [r2], #4
bhi 1b
#if NUM_CORES > 1
/* Set up idle stack and munge it with 0xdeadbeef */
ldr r2, =cpu_idlestackbegin
ldr r3, =cpu_idlestackend
1:
cmp r3, r2
strhi r4, [r2], #4
bhi 1b
#endif
/* Set up stack for IRQ mode */
msr cpsr_c, #0x92 /* IRQ disabled, FIQ enabled */
ldr sp, =irq_stack
@@ -203,34 +243,41 @@ cpu_init:
msr cpsr_c, #0xdb /* IRQ/FIQ disabled */
ldr sp, =irq_stack
/* Switch to supervisor mode */
/* Switch back to supervisor mode */
msr cpsr_c, #0xd3
ldr sp, =stackend
/* Delay waking the COP until thread initialization is complete, unless dual-core
support is not enabled, in which case the cop_main function does not perform
any kernel or thread initialization. It's just a trivial sleep loop. */
#if NUM_CORES == 1
ldr r4, =COP_CTRL
mov r3, #WAKE
str r3, [r4]
#endif
bl main
/* main() should never return */
cop_init:
#if CONFIG_CPU != PP5002
/* COP: Invalidate cache */
ldr r0, =0xf000f044
ldr r1, [r0]
orr r1, r1, #0x6
str r1, [r0]
ldr r0, =CACHE_CTRL
#if NUM_CORES > 1
/* Wait for CPU to go to sleep at the end of its kernel init */
ldr r4, =CPU_STATUS
1:
ldr r1, [r0]
tst r1, #0x8000
bne 1b
#endif
ldr r3, [r4]
tst r3, #SLEEPING
beq 1b
/* Setup stack for COP */
ldr sp, =cop_stackend
mov r3, sp
/* Set up idle stack for COP and munge it with 0xdeadbeef */
ldr r2, =cop_idlestackbegin
ldr sp, =cop_idlestackend
#else
/* Setup stack for COP and munge it with 0xdeadbeef */
ldr r2, =cop_stackbegin
ldr sp, =cop_stackend
#endif
ldr r4, =0xdeadbeef
2:
cmp r3, r2
cmp sp, r2
strhi r4, [r2], #4
bhi 2b
@@ -247,13 +294,12 @@ cop_init:
msr cpsr_c, #0xdb /* IRQ/FIQ disabled */
ldr sp, =cop_irq_stack
/* Switch to supervisor mode */
/* Switch back to supervisor mode */
msr cpsr_c, #0xd3
ldr sp, =cop_stackend
/* Run cop_main() in apps/main.c */
bl cop_main
/* Exception handlers. Will be copied to address 0 after memory remapping */
.section .vectors,"aw"
ldr pc, [pc, #24]
@@ -300,7 +346,6 @@ undef_instr_handler:
software_int_handler:
reserved_handler:
movs pc, lr
prefetch_abort_handler:
sub r0, lr, #4
mov r1, #1
@@ -324,6 +369,9 @@ UIE:
b UIE
#endif
/* Align stacks to cache line boundary */
.balign 16
/* 256 words of IRQ stack */
.space 256*4
irq_stack:
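Both the new idle stacks and the regular stacks are filled ("munged") with 0xdeadbeef at startup; thread_stack_usage() and the new idle_stack_usage() later scan upward for the first overwritten word to report a high-water mark. A host-side illustration of that watermark arithmetic (hypothetical demo; constants taken from config.h above):

    #include <stdio.h>

    #define DEADBEEF 0xdeadbeefUL
    #define IDLE_STACK_WORDS 0x20       /* IDLE_STACK_SIZE 0x80 / 4 */

    static unsigned long stack[IDLE_STACK_WORDS];

    static int stack_usage(const unsigned long *stackptr, int words)
    {
        int i;
        /* The stack grows down, so the first clobbered word from the
           bottom marks the deepest use so far */
        for (i = 0; i < words; i++)
            if (stackptr[i] != DEADBEEF)
                return ((words - i) * 100) / words;
        return 0;
    }

    int main(void)
    {
        int i;
        for (i = 0; i < IDLE_STACK_WORDS; i++)
            stack[i] = DEADBEEF;        /* boot-time munge */
        for (i = IDLE_STACK_WORDS - 8; i < IDLE_STACK_WORDS; i++)
            stack[i] = 0;               /* pretend 8 words were used */
        printf("idle stack usage: %d%%\n",
               stack_usage(stack, IDLE_STACK_WORDS));  /* prints 25% */
        return 0;
    }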

View file

@@ -70,9 +70,7 @@ void irq(void)
}
#endif
} else {
if (COP_INT_STAT & TIMER1_MASK)
TIMER1();
else if (COP_INT_STAT & TIMER2_MASK)
if (COP_INT_STAT & TIMER2_MASK)
TIMER2();
}
}
@@ -85,25 +83,49 @@ void irq(void)
to extend the functions to do alternate cache configurations. */
#ifndef BOOTLOADER
static void ipod_init_cache(void)
void flush_icache(void) ICODE_ATTR;
void flush_icache(void)
{
if (CACHE_CTL & CACHE_ENABLE)
{
outl(inl(0xf000f044) | 0x2, 0xf000f044);
while ((CACHE_CTL & 0x8000) != 0);
}
}
void invalidate_icache(void) ICODE_ATTR;
void invalidate_icache(void)
{
if (CACHE_CTL & CACHE_ENABLE)
{
unsigned i;
outl(inl(0xf000f044) | 0x6, 0xf000f044);
while ((CACHE_CTL & 0x8000) != 0);
for (i = 0x10000000; i < 0x10002000; i += 16)
inb(i);
}
}
static void init_cache(void)
{
/* Initialising the cache in the iPod bootloader prevents Rockbox from starting */
unsigned i;
/* cache init mode? */
CACHE_CTL = CACHE_INIT;
CACHE_CTL |= CACHE_INIT;
/* PP5002 has 8KB cache */
for (i = 0xf0004000; i < 0xf0006000; i += 16) {
outl(0x0, i);
}
/* what's this do? */
outl(inl(0x60006044) | (CURRENT_CORE == CPU ? 0x10 : 0x20),
0x60006044);
outl(0x0, 0xf000f040);
outl(0x3fc0, 0xf000f044);
outl(0xc00, 0xf000f040);
outl(0xfc0, 0xf000f044);
/* enable cache */
CACHE_CTL = CACHE_ENABLE;
CACHE_CTL |= CACHE_INIT | CACHE_ENABLE | CACHE_RUN;
/* fill cache from physical address - do we have a better candidate for
an 8KB unchanging memory range? */
for (i = 0x10000000; i < 0x10002000; i += 16)
inb(i);
}
@@ -206,6 +228,12 @@ void system_init(void)
outl(0xffffffff, 0x60006008);
DEV_RS = 0;
outl(0x00000000, 0x60006008);
#elif defined (IRIVER_H10)
DEV_RS = 0x3ffffef8;
outl(0xffffffff, 0x60006008);
outl(inl(0x70000024) | 0xc0, 0x70000024);
DEV_RS = 0;
outl(0x00000000, 0x60006008);
#endif
/* Remap the flash ROM from 0x00000000 to 0x20000000. */
MMAP3_LOGICAL = 0x20000000 | 0x3a00;
@@ -248,8 +276,8 @@ void system_init(void)
pp_set_cpu_frequency(CPUFREQ_MAX);
#endif
}
ipod_init_cache();
init_cache();
#endif /* BOOTLOADER */
}

View file

@@ -70,29 +70,20 @@ static inline unsigned int current_core(void)
);
return core;
}
#else
unsigned int current_core(void);
#endif
#if CONFIG_CPU != PP5002
#define CACHE_FUNCTIONS_AS_CALL
#define HAVE_INVALIDATE_ICACHE
static inline void invalidate_icache(void)
{
outl(inl(0xf000f044) | 0x6, 0xf000f044);
while ((CACHE_CTL & 0x8000) != 0);
}
void invalidate_icache(void);
#define HAVE_FLUSH_ICACHE
static inline void flush_icache(void)
{
outl(inl(0xf000f044) | 0x2, 0xf000f044);
while ((CACHE_CTL & 0x8000) != 0);
}
void flush_icache(void);
#endif /* CONFIG_CPU */
#else
unsigned int current_core(void);
#endif /* CPU_PP502x */
#endif
#endif /* CPU_PP */
#endif /* SYSTEM_TARGET_H */

View file

@@ -45,59 +45,13 @@ static int boosted_threads IBSS_ATTR;
#endif
/* Define to enable additional checks for blocking violations etc. */
#define THREAD_EXTRA_CHECKS
#define THREAD_EXTRA_CHECKS 0
static const char main_thread_name[] = "main";
extern int stackbegin[];
extern int stackend[];
#ifdef CPU_PP
#ifndef BOOTLOADER
extern int cop_stackbegin[];
extern int cop_stackend[];
#else
/* The coprocessor stack is not set up in the bootloader code, but the threading
* is. No threads are run on the coprocessor, so set up some dummy stack */
int *cop_stackbegin = stackbegin;
int *cop_stackend = stackend;
#endif
#endif
#if NUM_CORES > 1
#if 0
static long cores_locked IBSS_ATTR;
#define LOCK(...) do { } while (test_and_set(&cores_locked, 1))
#define UNLOCK(...) cores_locked = 0
#endif
/* #warning "Core locking mechanism should be fixed on H10/4G!" */
inline void lock_cores(void)
{
#if 0
if (!cores[CURRENT_CORE].lock_issued)
{
LOCK();
cores[CURRENT_CORE].lock_issued = true;
}
#endif
}
inline void unlock_cores(void)
{
#if 0
if (cores[CURRENT_CORE].lock_issued)
{
cores[CURRENT_CORE].lock_issued = false;
UNLOCK();
}
#endif
}
#endif
/* Conserve IRAM
static void add_to_list(struct thread_entry **list,
struct thread_entry *thread) ICODE_ATTR;
@@ -114,6 +68,32 @@ static inline void load_context(const void* addr)
static inline void core_sleep(void) __attribute__((always_inline));
#if defined(CPU_ARM)
/*---------------------------------------------------------------------------
* Start the thread running and terminate it if it returns
*---------------------------------------------------------------------------
*/
static void start_thread(void) __attribute__((naked,used));
static void start_thread(void)
{
/* r0 = context */
asm volatile (
"ldr sp, [r0, #32] \n" /* Load initial sp */
"ldr r4, [r0, #40] \n" /* start in r4 since it's non-volatile */
"mov r1, #0 \n" /* Mark thread as running */
"str r1, [r0, #40] \n"
#if NUM_CORES > 1
"ldr r0, =invalidate_icache \n" /* Invalidate this core's cache. */
"mov lr, pc \n" /* This could be the first entry into */
"bx r0 \n" /* plugin or codec code for this core. */
#endif
"mov lr, pc \n" /* Call thread function */
"bx r4 \n"
"mov r0, #0 \n" /* remove_thread(NULL) */
"ldr pc, =remove_thread \n"
".ltorg \n" /* Dump constant pool */
); /* No clobber list - new thread doesn't care */
}
/*---------------------------------------------------------------------------
* Store non-volatile context.
*---------------------------------------------------------------------------
@@ -121,73 +101,83 @@ static inline void core_sleep(void) __attribute__((always_inline));
static inline void store_context(void* addr)
{
asm volatile(
"stmia %0, { r4-r11, sp, lr }\n"
"stmia %0, { r4-r11, sp, lr } \n"
: : "r" (addr)
);
}
/*---------------------------------------------------------------------------
* Load non-volatile context.
*---------------------------------------------------------------------------
*/
static void start_thread(void (*thread_func)(void), const void* addr) __attribute__((naked,used));
static void start_thread(void (*thread_func)(void), const void* addr)
{
/* r0 = thread_func, r1 = addr */
#if NUM_CORES > 1 && CONFIG_CPU != PP5002
asm volatile (
"mov r2, #0 \n"
"str r2, [r1, #40] \n"
"ldr r1, =0xf000f044 \n" /* invalidate this core's cache */
"ldr r2, [r1] \n"
"orr r2, r2, #6 \n"
"str r2, [r1] \n"
"ldr r1, =0x6000c000 \n"
"1: \n"
"ldr r2, [r1] \n"
"tst r2, #0x8000 \n"
"bne 1b \n"
"mov pc, r0 \n"
: : : "r1", "r2"
);
#else
asm volatile (
"mov r2, #0 \n"
"str r2, [r1, #40] \n"
"mov pc, r0 \n"
: : : "r1", "r2"
);
#endif
(void)thread_func;
(void)addr;
}
/* For startup, place context pointer in r4 slot, start_thread pointer in r5
* slot, and thread function pointer in context.start. See load_context for
* what happens when thread is initially going to run. */
#define THREAD_STARTUP_INIT(core, thread, function) \
({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
(thread)->context.r[1] = (unsigned int)start_thread, \
(thread)->context.start = (void *)function; })
static inline void load_context(const void* addr)
{
asm volatile(
"ldmia %0, { r4-r11, sp, lr } \n" /* load regs r4 to r14 from context */
"ldr r0, [%0, #40] \n" /* load start pointer */
"cmp r0, #0 \n" /* check for NULL */
"movne r1, %0 \n" /* if not already running, jump to start */
"ldrne pc, =start_thread \n"
: : "r" (addr) : "r0", "r1"
"ldr r0, [%0, #40] \n" /* Load start pointer */
"cmp r0, #0 \n" /* Check for NULL */
"ldmneia %0, { r0, pc } \n" /* If not already running, jump to start */
"ldmia %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
: : "r" (addr) : "r0" /* only! */
);
}
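The pattern shared by all three ports in this commit: context.start doubles as a "not yet started" flag. load_context tests it; when non-NULL it dispatches to start_thread, which clears the slot, calls the thread's function, and falls through into remove_thread(NULL) if that function ever returns - the "thread function return" behavior named in the commit message. A plain-C model of the dispatch (illustrative only; the real versions are the asm above, and remove_thread_model stands in for remove_thread):

    struct context_model { void *sp; void (*start)(void); };

    extern void remove_thread_model(void *thread);

    static void load_context_model(struct context_model *c)
    {
        if (c->start != NULL)           /* "cmp r0, #0" on the start slot */
        {
            void (*fn)(void) = c->start;
            c->start = NULL;            /* mark the thread as running */
            fn();                       /* run the thread's function... */
            remove_thread_model(NULL);  /* ...and reap it if it returns */
        }
        /* else: restore r4-r11/sp/lr and resume where the thread left off */
    }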
#if defined (CPU_PP)
#if NUM_CORES > 1
extern int cpu_idlestackbegin[];
extern int cpu_idlestackend[];
extern int cop_idlestackbegin[];
extern int cop_idlestackend[];
static int * const idle_stacks[NUM_CORES] NOCACHEDATA_ATTR =
{
[CPU] = cpu_idlestackbegin,
[COP] = cop_idlestackbegin
};
#else /* NUM_CORES == 1 */
#ifndef BOOTLOADER
extern int cop_stackbegin[];
extern int cop_stackend[];
#else
/* The coprocessor stack is not set up in the bootloader code, but the threading
* is. No threads are run on the coprocessor, so set up some dummy stack */
int *cop_stackbegin = stackbegin;
int *cop_stackend = stackend;
#endif /* BOOTLOADER */
#endif /* NUM_CORES */
static inline void core_sleep(void)
{
unlock_cores();
/* This should sleep the CPU. It appears to wake by itself on
interrupts */
if (CURRENT_CORE == CPU)
CPU_CTL = PROC_SLEEP;
else
COP_CTL = PROC_SLEEP;
lock_cores();
}
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
* Switches to a stack that always resides in the Rockbox core.
*
* Needed when a thread suicides on a core other than the main CPU since the
* stack used when idling is the stack of the last thread to run. This stack
* may not reside in the core in which case the core will continue to use a
* stack from an unloaded module until another thread runs on it.
*---------------------------------------------------------------------------
*/
static inline void switch_to_idle_stack(const unsigned int core)
{
asm volatile (
"str sp, [%0] \n" /* save original stack pointer on idle stack */
"mov sp, %0 \n" /* switch stacks */
: : "r"(&idle_stacks[core][IDLE_STACK_WORDS-1]));
}
#endif /* NUM_CORES */
#elif CONFIG_CPU == S3C2440
static inline void core_sleep(void)
{
@@ -204,6 +194,42 @@ static inline void core_sleep(void)
#endif
#elif defined(CPU_COLDFIRE)
/*---------------------------------------------------------------------------
* Start the thread running and terminate it if it returns
*---------------------------------------------------------------------------
*/
void start_thread(void); /* Provide C access to ASM label */
static void __start_thread(void) __attribute__((used));
static void __start_thread(void)
{
/* a0=macsr, a1=context */
asm volatile (
"start_thread: \n" /* Start here - no naked attribute */
"move.l %a0, %macsr \n" /* Set initial mac status reg */
"lea.l 48(%a1), %a1 \n"
"move.l (%a1)+, %sp \n" /* Set initial stack */
"move.l (%a1), %a2 \n" /* Fetch thread function pointer */
"clr.l (%a1) \n" /* Mark thread running */
"jsr (%a2) \n" /* Call thread function */
"clr.l -(%sp) \n" /* remove_thread(NULL) */
"jsr remove_thread \n"
);
}
/* Set EMAC unit to fractional mode with saturation for each new thread,
* since that's what'll be the most useful for most things which the dsp
* will do. Codecs should still initialize their preferred modes
* explicitly. Context pointer is placed in d2 slot and start_thread
* pointer in d3 slot. thread function pointer is placed in context.start.
* See load_context for what happens when thread is initially going to
* run.
*/
#define THREAD_STARTUP_INIT(core, thread, function) \
({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE, \
(thread)->context.d[0] = (unsigned int)&(thread)->context, \
(thread)->context.d[1] = (unsigned int)start_thread, \
(thread)->context.start = (void *)(function); })
/*---------------------------------------------------------------------------
* Store non-volatile context.
*---------------------------------------------------------------------------
@@ -211,8 +237,8 @@ static inline void core_sleep(void)
static inline void store_context(void* addr)
{
asm volatile (
"move.l %%macsr,%%d0 \n"
"movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
"move.l %%macsr,%%d0 \n"
"movem.l %%d0/%%d2-%%d7/%%a2-%%a7,(%0) \n"
: : "a" (addr) : "d0" /* only! */
);
}
@@ -224,14 +250,13 @@ static inline void store_context(void* addr)
static inline void load_context(const void* addr)
{
asm volatile (
"movem.l (%0),%%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
"move.l %%d0,%%macsr \n"
"move.l (52,%0),%%d0 \n" /* Get start address */
"beq.b 1f \n" /* NULL -> already running */
"clr.l (52,%0) \n" /* Clear start address.. */
"move.l %%d0,%0 \n"
"jmp (%0) \n" /* ..and start the thread */
"1: \n"
"move.l 52(%0), %%d0 \n" /* Get start address */
"beq.b 1f \n" /* NULL -> already running */
"movem.l (%0), %%a0-%%a2 \n" /* a0=macsr, a1=context, a2=start_thread */
"jmp (%%a2) \n" /* Start the thread */
"1: \n"
"movem.l (%0), %%d0/%%d2-%%d7/%%a2-%%a7 \n" /* Load context */
"move.l %%d0, %%macsr \n"
: : "a" (addr) : "d0" /* only! */
);
}
@@ -249,6 +274,37 @@ static inline void core_sleep(void)
({ (thread)->context.macsr = EMAC_FRACTIONAL | EMAC_SATURATE; })
#elif CONFIG_CPU == SH7034
/*---------------------------------------------------------------------------
* Start the thread running and terminate it if it returns
*---------------------------------------------------------------------------
*/
void start_thread(void); /* Provide C access to ASM label */
static void __start_thread(void) __attribute__((used));
static void __start_thread(void)
{
/* r8 = context */
asm volatile (
"_start_thread: \n" /* Start here - no naked attribute */
"mov.l @(4, r8), r0 \n" /* Fetch thread function pointer */
"mov.l @(28, r8), r15 \n" /* Set initial sp */
"mov #0, r1 \n" /* Start the thread */
"jsr @r0 \n"
"mov.l r1, @(36, r8) \n" /* Clear start address */
"mov.l 1f, r0 \n" /* remove_thread(NULL) */
"jmp @r0 \n"
"mov #0, r4 \n"
"1: \n"
".long _remove_thread \n"
);
}
/* Place context pointer in r8 slot, function pointer in r9 slot, and
* start_thread pointer in context.start */
#define THREAD_STARTUP_INIT(core, thread, function) \
({ (thread)->context.r[0] = (unsigned int)&(thread)->context, \
(thread)->context.r[1] = (unsigned int)(function), \
(thread)->context.start = (void*)start_thread; })
/*---------------------------------------------------------------------------
* Store non-volatile context.
*---------------------------------------------------------------------------
@@ -256,8 +312,8 @@ static inline void core_sleep(void)
static inline void store_context(void* addr)
{
asm volatile (
"add #36,%0 \n"
"sts.l pr, @-%0 \n"
"add #36, %0 \n" /* Start at last reg. By the time routine */
"sts.l pr, @-%0 \n" /* is done, %0 will have the original value */
"mov.l r15,@-%0 \n"
"mov.l r14,@-%0 \n"
"mov.l r13,@-%0 \n"
@@ -277,23 +333,20 @@ static inline void store_context(void* addr)
static inline void load_context(const void* addr)
{
asm volatile (
"mov.l @%0+,r8 \n"
"mov.l @%0+,r9 \n"
"mov.l @%0+,r10 \n"
"mov.l @%0+,r11 \n"
"mov.l @%0+,r12 \n"
"mov.l @%0+,r13 \n"
"mov.l @%0+,r14 \n"
"mov.l @%0+,r15 \n"
"lds.l @%0+,pr \n"
"mov.l @%0,r0 \n" /* Get start address */
"tst r0,r0 \n"
"bt .running \n" /* NULL -> already running */
"lds r0,pr \n"
"mov #0,r0 \n"
"rts \n" /* Start the thread */
"mov.l r0,@%0 \n" /* Clear start address */
".running: \n"
"mov.l @(36, %0), r0 \n" /* Get start address */
"tst r0, r0 \n"
"bt .running \n" /* NULL -> already running */
"jmp @r0 \n" /* r8 = context */
".running: \n"
"mov.l @%0+, r8 \n" /* Executes in delay slot and outside it */
"mov.l @%0+, r9 \n"
"mov.l @%0+, r10 \n"
"mov.l @%0+, r11 \n"
"mov.l @%0+, r12 \n"
"mov.l @%0+, r13 \n"
"mov.l @%0+, r14 \n"
"mov.l @%0+, r15 \n"
"lds.l @%0+, pr \n"
: : "r" (addr) : "r0" /* only! */
);
}
@@ -311,38 +364,36 @@ static inline void core_sleep(void)
#define THREAD_CPU_INIT(core, thread)
#endif
#ifdef THREAD_EXTRA_CHECKS
static void thread_panicf_format_name(char *buffer, struct thread_entry *thread)
#if THREAD_EXTRA_CHECKS
static void thread_panicf(const char *msg, struct thread_entry *thread)
{
*buffer = '\0';
if (thread)
{
/* Display thread name if one or ID if none */
const char *fmt = thread->name ? " %s" : " %08lX";
intptr_t name = thread->name ?
(intptr_t)thread->name : (intptr_t)thread;
snprintf(buffer, 16, fmt, name);
}
#if NUM_CORES > 1
const unsigned int core = thread->core;
#endif
static char name[32];
thread_get_name(name, 32, thread);
panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
}
static void thread_panicf(const char *msg,
struct thread_entry *thread1, struct thread_entry *thread2)
static void thread_stkov(struct thread_entry *thread)
{
static char thread1_name[16], thread2_name[16];
thread_panicf_format_name(thread1_name, thread1);
thread_panicf_format_name(thread2_name, thread2);
panicf ("%s%s%s", msg, thread1_name, thread2_name);
thread_panicf("Stkov", thread);
}
#define THREAD_PANICF(msg, thread) \
thread_panicf(msg, thread)
#define THREAD_ASSERT(exp, msg, thread) \
({ if (!({ exp; })) thread_panicf((msg), (thread)); })
#else
static void thread_stkov(void)
static void thread_stkov(struct thread_entry *thread)
{
/* Display thread name if one or ID if none */
struct thread_entry *current = cores[CURRENT_CORE].running;
const char *fmt = current->name ? "%s %s" : "%s %08lX";
intptr_t name = current->name ?
(intptr_t)current->name : (intptr_t)current;
panicf(fmt, "Stkov", name);
#if NUM_CORES > 1
const unsigned int core = thread->core;
#endif
static char name[32];
thread_get_name(name, 32, thread);
panicf("Stkov %s" IF_COP(" (%d)"), name IF_COP(, core));
}
#define THREAD_PANICF(msg, thread)
#define THREAD_ASSERT(exp, msg, thread)
#endif /* THREAD_EXTRA_CHECKS */
static void add_to_list(struct thread_entry **list, struct thread_entry *thread)
@@ -564,8 +615,6 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
/* Do nothing */
#else
lock_cores();
/* Begin task switching by saving our current context so that we can
* restore the state of the current thread later to the point prior
* to this call. */
@@ -576,11 +625,7 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
/* Check if the current thread stack is overflown */
stackptr = cores[core].running->stack;
if(stackptr[0] != DEADBEEF)
#ifdef THREAD_EXTRA_CHECKS
thread_panicf("Stkov", cores[core].running, NULL);
#else
thread_stkov();
#endif
thread_stkov(cores[core].running);
/* Rearrange thread lists as needed */
change_thread_state(blocked_list);
@@ -627,7 +672,6 @@ void switch_thread(bool save_context, struct thread_entry **blocked_list)
#endif
#endif
unlock_cores();
/* And finally give control to the next thread. */
load_context(&cores[core].running->context);
@@ -641,8 +685,6 @@ void sleep_thread(int ticks)
{
struct thread_entry *current;
lock_cores();
current = cores[CURRENT_CORE].running;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
@@ -668,8 +710,6 @@ void block_thread(struct thread_entry **list)
{
struct thread_entry *current;
lock_cores();
/* Get the entry for the current running thread. */
current = cores[CURRENT_CORE].running;
@@ -680,11 +720,9 @@ void block_thread(struct thread_entry **list)
unsigned long boost_flag = STATE_IS_BOOSTED(current->statearg);
#endif
#ifdef THREAD_EXTRA_CHECKS
/* We are not allowed to mix blocking types in one queue. */
if (*list && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO)
thread_panicf("Blocking violation B->*T", current, *list);
#endif
THREAD_ASSERT(*list != NULL && GET_STATE((*list)->statearg) == STATE_BLOCKED_W_TMO,
"Blocking violation B->*T", current);
/* Set the state to blocked and ask the scheduler to switch tasks,
* this takes us off of the run queue until we are explicitly woken */
@@ -707,7 +745,6 @@ void block_thread_w_tmo(struct thread_entry **list, int timeout)
/* Get the entry for the current running thread. */
current = cores[CURRENT_CORE].running;
lock_cores();
#ifdef HAVE_SCHEDULER_BOOSTCTRL
/* A block with a timeout is a sleep situation, whatever we are waiting
* for _may or may not_ happen, regardless of boost state, (user input
@@ -722,12 +759,9 @@ void block_thread_w_tmo(struct thread_entry **list, int timeout)
}
#endif
#ifdef THREAD_EXTRA_CHECKS
/* We can store only one thread to the "list" if thread is used
* in other list (such as core's list for sleeping tasks). */
if (*list)
thread_panicf("Blocking violation T->*B", current, NULL);
#endif
THREAD_ASSERT(*list == NULL, "Blocking violation T->*B", current);
/* Set the state to blocked with the specified timeout */
SET_STATE(current->statearg, STATE_BLOCKED_W_TMO, current_tick + timeout);
@@ -836,7 +870,6 @@ struct thread_entry*
unsigned int stacklen;
unsigned int *stackptr;
int slot;
struct regs *regs;
struct thread_entry *thread;
/*****
@@ -862,12 +895,9 @@ struct thread_entry*
}
#endif
lock_cores();
slot = find_empty_thread_slot();
if (slot < 0)
{
unlock_cores();
return NULL;
}
@@ -899,17 +929,13 @@ struct thread_entry*
flush_icache();
#endif
regs = &thread->context;
/* Align stack to an even 32 bit boundary */
regs->sp = (void*)(((unsigned int)stack + stack_size) & ~3);
regs->start = (void*)function;
thread->context.sp = (void*)(((unsigned int)stack + stack_size) & ~3);
/* Load the thread's context structure with needed startup information */
THREAD_STARTUP_INIT(core, thread, function);
/* Do any CPU specific inits after initializing common items
to have access to valid data */
THREAD_CPU_INIT(core, thread);
add_to_list(&cores[core].running, thread);
unlock_cores();
return thread;
#if NUM_CORES == 1
@@ -920,8 +946,6 @@ struct thread_entry*
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void)
{
lock_cores();
if (!STATE_IS_BOOSTED(cores[CURRENT_CORE].running->statearg))
{
SET_BOOST_STATE(cores[CURRENT_CORE].running->statearg);
@@ -931,8 +955,6 @@ void trigger_cpu_boost(void)
}
boosted_threads++;
}
unlock_cores();
}
#endif
@@ -943,10 +965,10 @@ void trigger_cpu_boost(void)
*/
void remove_thread(struct thread_entry *thread)
{
lock_cores();
const unsigned int core = CURRENT_CORE;
if (thread == NULL)
thread = cores[CURRENT_CORE].running;
thread = cores[core].running;
/* Free the entry by removing thread name. */
thread->name = NULL;
@@ -957,16 +979,26 @@ void remove_thread(struct thread_entry *thread)
if (thread == cores[IF_COP2(thread->core)].running)
{
remove_from_list(&cores[IF_COP2(thread->core)].running, thread);
#if NUM_CORES > 1
/* Switch to the idle stack if not on the main core (where "main"
* runs) */
if (core != CPU)
{
switch_to_idle_stack(core);
}
flush_icache();
#endif
switch_thread(false, NULL);
return ;
/* This should never and must never be reached - if it is, the
* state is corrupted */
THREAD_PANICF("remove_thread->K:*R", thread);
}
if (thread == cores[IF_COP2(thread->core)].sleeping)
remove_from_list(&cores[IF_COP2(thread->core)].sleeping, thread);
else
remove_from_list(NULL, thread);
unlock_cores();
}
#ifdef HAVE_PRIORITY_SCHEDULING
@@ -974,14 +1006,12 @@ int thread_set_priority(struct thread_entry *thread, int priority)
{
int old_priority;
lock_cores();
if (thread == NULL)
thread = cores[CURRENT_CORE].running;
old_priority = thread->priority;
thread->priority = priority;
cores[IF_COP2(thread->core)].highest_priority = 100;
unlock_cores();
return old_priority;
}
@@ -1013,15 +1043,7 @@ void init_threads(void)
const unsigned int core = CURRENT_CORE;
int slot;
/* Let main CPU initialize first. */
#if NUM_CORES > 1
if (core != CPU)
{
while (!cores[CPU].kernel_running) ;
}
#endif
lock_cores();
/* CPU will initialize first and then sleep */
slot = find_empty_thread_slot();
cores[core].sleeping = NULL;
@@ -1041,9 +1063,6 @@ void init_threads(void)
threads[slot].priority = PRIORITY_USER_INTERFACE;
threads[slot].priority_x = 0;
cores[core].highest_priority = 100;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
boosted_threads = 0;
#endif
add_to_list(&cores[core].running, &threads[slot]);
@@ -1051,21 +1070,34 @@ void init_threads(void)
* probably a much better way to do this. */
if (core == CPU)
{
#ifdef HAVE_SCHEDULER_BOOSTCTRL
boosted_threads = 0;
#endif
threads[slot].stack = stackbegin;
threads[slot].stack_size = (int)stackend - (int)stackbegin;
}
#if NUM_CORES > 1 /* This code path will not be run on single core targets */
/* Mark CPU initialized */
cores[CPU].kernel_running = true;
/* TODO: HAL interface for this */
/* Wake up coprocessor and let it initialize kernel and threads */
COP_CTL = PROC_WAKE;
/* Sleep until finished */
CPU_CTL = PROC_SLEEP;
}
else
{
threads[slot].stack = cop_stackbegin;
threads[slot].stack_size =
(int)cop_stackend - (int)cop_stackbegin;
}
cores[core].kernel_running = true;
/* Initial stack is the COP idle stack */
threads[slot].stack = cop_idlestackbegin;
threads[slot].stack_size = IDLE_STACK_SIZE;
/* Mark COP initialized */
cores[COP].kernel_running = true;
/* Get COP safely primed inside switch_thread where it will remain
* until a thread actually exists on it */
CPU_CTL = PROC_WAKE;
set_irq_level(0);
remove_thread(NULL);
#endif
unlock_cores();
}
}
int thread_stack_usage(const struct thread_entry *thread)
@@ -1083,7 +1115,59 @@ int thread_stack_usage(const struct thread_entry *thread)
thread->stack_size;
}
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
* Returns the maximum percentage of the core's idle stack ever used during
* runtime.
*---------------------------------------------------------------------------
*/
int idle_stack_usage(unsigned int core)
{
unsigned int *stackptr = idle_stacks[core];
int i, usage = 0;
for (i = 0; i < IDLE_STACK_WORDS; i++)
{
if (stackptr[i] != DEADBEEF)
{
usage = ((IDLE_STACK_WORDS - i) * 100) / IDLE_STACK_WORDS;
break;
}
}
return usage;
}
#endif
int thread_get_status(const struct thread_entry *thread)
{
return GET_STATE(thread->statearg);
}
/*---------------------------------------------------------------------------
* Fills in the buffer with the specified thread's name. If the name is NULL,
* empty, or the thread is in destruct state a formatted ID is written
* instead.
*---------------------------------------------------------------------------
*/
void thread_get_name(char *buffer, int size,
struct thread_entry *thread)
{
if (size <= 0)
return;
*buffer = '\0';
if (thread)
{
/* Display thread name if one or ID if none */
const char *name = thread->name;
const char *fmt = "%s";
if (name == NULL || *name == '\0')
{
name = (const char *)thread;
fmt = "%08lX";
}
snprintf(buffer, size, fmt, name);
}
}

View file

@@ -306,7 +306,6 @@ void usb_init(void)
#ifndef BOOTLOADER
queue_init(&usb_queue, true);
queue_set_irq_safe(&usb_queue, true);
create_thread(usb_thread, usb_stack, sizeof(usb_stack),
usb_thread_name IF_PRIO(, PRIORITY_SYSTEM)