Finally, full multicore support for PortalPlayer 502x targets, with an eye towards the possibility of other types. All SVN targets get the low-lag code to speed up blocking operations. Most files are modified here simply due to a rename needed to support a real event object, and a parameter change to create_thread. Some use is made of the new features, but full integration is left for later. Work will continue on reducing size on sensitive targets and simplifying things where possible. Any PP target having problems with SWP can easily be switched to software corelocks with a single #define change in config.h, though only PP5020 has shown an issue, and it seems to work without any difficulties.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@15134 a1c6a512-1295-4272-9138-f99709370657
This commit is contained in:
parent a3fbbc9fa7
commit a9b2fb5ee3
44 changed files with 3863 additions and 1144 deletions
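For reference, the "one #define change" mentioned above is a switch of the corelock type this commit introduces in firmware/export/config.h; a minimal sketch (which target section it lands in is hypothetical):

/* In the affected PP target's section of config.h: use the software
 * corelock algorithm instead of the SWP-based exchange lock. */
#define CONFIG_CORELOCK SW_CORELOCK   /* instead of CORELOCK_SWAP */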
@@ -125,6 +125,8 @@ static char* dbg_listmessage_getname(int item, void * data, char *buffer)
#endif

struct action_callback_info;
#define DBGLIST_SHOW_SELECTION 0x1

struct action_callback_info
{
char *title;
@@ -137,6 +139,7 @@ struct action_callback_info
};

static char* dbg_menu_getname(int item, void * data, char *buffer);
static char* threads_getname(int selected_item, void * data, char *buffer);
static bool dbg_list(struct action_callback_info *info)
{
struct gui_synclist lists;
@@ -149,8 +152,7 @@ static bool dbg_list(struct action_callback_info *info)
gui_synclist_set_title(&lists, info->title, NOICON);
gui_synclist_set_icon_callback(&lists, NULL);
gui_synclist_set_nb_items(&lists, info->count*info->selection_size);
if (info->dbg_getname != dbg_menu_getname)
gui_synclist_hide_selection_marker(&lists, true);
gui_synclist_hide_selection_marker(&lists, true);

if (info->action_callback)
info->action_callback(ACTION_REDRAW, info);
@@ -179,17 +181,28 @@ static bool dbg_list(struct action_callback_info *info)
/*---------------------------------------------------*/
extern struct thread_entry threads[MAXTHREADS];

static char thread_status_char(int status)
static char thread_status_char(unsigned status)
{
switch (status)
static const char thread_status_chars[THREAD_NUM_STATES+1] =
{
case STATE_RUNNING : return 'R';
case STATE_BLOCKED : return 'B';
case STATE_SLEEPING : return 'S';
case STATE_BLOCKED_W_TMO: return 'T';
}
[0 ... THREAD_NUM_STATES] = '?',
[STATE_RUNNING] = 'R',
[STATE_BLOCKED] = 'B',
[STATE_SLEEPING] = 'S',
[STATE_BLOCKED_W_TMO] = 'T',
[STATE_FROZEN] = 'F',
[STATE_KILLED] = 'K',
};

return '?';
#if NUM_CORES > 1
if (status == STATE_BUSY) /* Not a state index */
return '.';
#endif

if (status > THREAD_NUM_STATES)
status = THREAD_NUM_STATES;

return thread_status_chars[status];
}

static char* threads_getname(int selected_item, void * data, char *buffer)
@@ -214,7 +227,7 @@ static char* threads_getname(int selected_item, void * data, char *buffer)
thread = &threads[selected_item];
status = thread_get_status(thread);

if (thread->name == NULL)
if (status == STATE_KILLED)
{
snprintf(buffer, MAX_PATH, "%2d: ---", selected_item);
return buffer;
@@ -222,7 +235,6 @@ static char* threads_getname(int selected_item, void * data, char *buffer)

thread_get_name(name, 32, thread);
usage = thread_stack_usage(thread);
status = thread_get_status(thread);

snprintf(buffer, MAX_PATH,
"%2d: " IF_COP("(%d) ") "%c%c " IF_PRIO("%d ") "%2d%% %s",
@@ -2329,6 +2341,7 @@ static const struct the_menu_item menuitems[] = {
};
static int menu_action_callback(int btn, struct action_callback_info *info)
{
gui_synclist_hide_selection_marker(info->lists, false);
if (btn == ACTION_STD_OK)
{
menuitems[gui_synclist_get_sel_pos(info->lists)].function();

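The rewritten thread_status_char above trades the switch statement for a lookup table built with designated initializers, including GCC's [first ... last] range extension to set a default. A standalone sketch of the idiom (the enum names here are invented for illustration):

#include <stdio.h>

enum { S_KILLED, S_RUNNING, S_BLOCKED, S_NUM };

/* Every slot defaults to '?' via the range initializer (a GCC
 * extension); named designators then override the known states. */
static const char status_chars[S_NUM + 1] =
{
    [0 ... S_NUM] = '?',
    [S_RUNNING]   = 'R',
    [S_BLOCKED]   = 'B',
    [S_KILLED]    = 'K',
};

int main(void)
{
    unsigned status = S_RUNNING;
    if (status > S_NUM)      /* clamp unknown values to the '?' slot */
        status = S_NUM;
    printf("%c\n", status_chars[status]);
    return 0;
}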
@@ -290,8 +290,8 @@ static void set_current_codec(int codec_idx);
static void set_filebuf_watermark(int seconds);

/* Audio thread */
static struct event_queue audio_queue;
static struct queue_sender_list audio_queue_sender_list;
static struct event_queue audio_queue NOCACHEBSS_ATTR;
static struct queue_sender_list audio_queue_sender_list NOCACHEBSS_ATTR;
static long audio_stack[(DEFAULT_STACK_SIZE + 0x1000)/sizeof(long)];
static const char audio_thread_name[] = "audio";
@@ -340,9 +340,10 @@ static unsigned char *dram_buf = NULL;
automatically swaps in the other and the swap when unlocking should not
happen if the parity is even.
*/
static bool swap_codec_parity = false; /* true=odd, false=even */
/* Mutex to control which codec (normal/voice) is running */
static struct mutex mutex_codecthread NOCACHEBSS_ATTR;
static bool swap_codec_parity NOCACHEBSS_ATTR = false; /* true=odd, false=even */
/* Locking to control which codec (normal/voice) is running */
static struct semaphore sem_codecthread NOCACHEBSS_ATTR;
static struct event event_codecthread NOCACHEBSS_ATTR;

/* Voice state */
static volatile bool voice_thread_start = false; /* Triggers voice playback (A/V) */
@@ -424,8 +425,7 @@ static void wait_for_voice_swap_in(void)
if (NULL == iram_buf)
return;

while (current_codec != CODEC_IDX_VOICE)
yield();
event_wait(&event_codecthread, STATE_NONSIGNALED);
#endif /* PLAYBACK_VOICE */
}

@@ -924,21 +924,21 @@ static void swap_codec(void)
}

/* Release my semaphore */
mutex_unlock(&mutex_codecthread);
semaphore_release(&sem_codecthread);
logf("unlocked: %d", my_codec);

/* Loop until the other codec has locked and run */
do {
/* Release my semaphore and force a task switch. */
yield();
} while (my_codec == current_codec);
/* Wait for other codec */
event_wait(&event_codecthread,
(my_codec == CODEC_IDX_AUDIO) ? STATE_NONSIGNALED : STATE_SIGNALED);

/* Wait for other codec to unlock */
mutex_lock(&mutex_codecthread);
logf("waiting for lock: %d", my_codec);
semaphore_wait(&sem_codecthread);

/* Take control */
logf("waiting for lock: %d", my_codec);
set_current_codec(my_codec);
event_set_state(&event_codecthread,
(my_codec == CODEC_IDX_AUDIO) ? STATE_SIGNALED : STATE_NONSIGNALED);

/* Reload our IRAM and DRAM */
memswap128(iram_buf, CODEC_IRAM_ORIGIN, CODEC_IRAM_SIZE);
@@ -1161,7 +1161,7 @@ static bool voice_on_voice_stop(bool aborting, size_t *realsize)

static void* voice_request_buffer_callback(size_t *realsize, size_t reqsize)
{
struct event ev;
struct queue_event ev;

if (ci_voice.new_track)
{
@@ -1332,7 +1332,8 @@ static void voice_thread(void)
{
logf("Loading voice codec");
voice_codec_loaded = true;
mutex_lock(&mutex_codecthread);
semaphore_wait(&sem_codecthread);
event_set_state(&event_codecthread, false);
set_current_codec(CODEC_IDX_VOICE);
dsp_configure(DSP_RESET, 0);
voice_remaining = 0;
@@ -1344,9 +1345,8 @@

logf("Voice codec finished");
voice_codec_loaded = false;
mutex_unlock(&mutex_codecthread);
voice_thread_p = NULL;
remove_thread(NULL);
semaphore_release(&sem_codecthread);
} /* voice_thread */

#endif /* PLAYBACK_VOICE */
@@ -1968,7 +1968,7 @@ static bool codec_request_next_track_callback(void)

static void codec_thread(void)
{
struct event ev;
struct queue_event ev;
int status;
size_t wrap;

@@ -1988,13 +1988,14 @@ static void codec_thread(void)
LOGFQUEUE("codec > voice Q_AUDIO_PLAY");
queue_post(&voice_queue, Q_AUDIO_PLAY, 0);
}
mutex_lock(&mutex_codecthread);
semaphore_wait(&sem_codecthread);
event_set_state(&event_codecthread, true);
#endif
set_current_codec(CODEC_IDX_AUDIO);
ci.stop_codec = false;
status = codec_load_file((const char *)ev.data, &ci);
#ifdef PLAYBACK_VOICE
mutex_unlock(&mutex_codecthread);
semaphore_release(&sem_codecthread);
#endif
break;

@@ -2019,7 +2020,8 @@ static void codec_thread(void)
LOGFQUEUE("codec > voice Q_AUDIO_PLAY");
queue_post(&voice_queue, Q_AUDIO_PLAY, 0);
}
mutex_lock(&mutex_codecthread);
semaphore_wait(&sem_codecthread);
event_set_state(&event_codecthread, true);
#endif
set_current_codec(CODEC_IDX_AUDIO);
ci.stop_codec = false;
@@ -2027,7 +2029,7 @@ static void codec_thread(void)
status = codec_load_ram(CUR_TI->codecbuf, CUR_TI->codecsize,
&filebuf[0], wrap, &ci);
#ifdef PLAYBACK_VOICE
mutex_unlock(&mutex_codecthread);
semaphore_release(&sem_codecthread);
#endif
break;

@@ -2041,14 +2043,15 @@ static void codec_thread(void)
LOGFQUEUE("codec > voice Q_ENCODER_RECORD");
queue_post(&voice_queue, Q_ENCODER_RECORD, 0);
}
mutex_lock(&mutex_codecthread);
semaphore_wait(&sem_codecthread);
event_set_state(&event_codecthread, true);
#endif
logf("loading encoder");
set_current_codec(CODEC_IDX_AUDIO);
ci.stop_encoder = false;
status = codec_load_file((const char *)ev.data, &ci);
#ifdef PLAYBACK_VOICE
mutex_unlock(&mutex_codecthread);
semaphore_release(&sem_codecthread);
#endif
logf("encoder stopped");
break;
@@ -3594,13 +3597,13 @@ static bool ata_fillbuffer_callback(void)

static void audio_thread(void)
{
struct event ev;
struct queue_event ev;

pcm_postinit();

#ifdef PLAYBACK_VOICE
/* Unlock mutex that init stage locks before creating this thread */
mutex_unlock(&mutex_codecthread);
/* Unlock semaphore that init stage locks before creating this thread */
semaphore_release(&sem_codecthread);

/* Buffers must be set up by now - should panic - really */
if (buffer_state != BUFFER_STATE_INITIALIZED)
@@ -3764,7 +3767,9 @@ void audio_init(void)
#ifdef PLAYBACK_VOICE
static bool voicetagtrue = true;
static struct mp3entry id3_voice;
struct thread_entry *voice_thread_p = NULL;
#endif
struct thread_entry *audio_thread_p;

/* Can never do this twice */
if (audio_is_initialized)
@@ -3779,11 +3784,11 @@ void audio_init(void)
to send messages. Thread creation will be delayed however so nothing
starts running until ready if something yields such as talk_init. */
#ifdef PLAYBACK_VOICE
mutex_init(&mutex_codecthread);
/* Take ownership of lock to prevent playback of anything before audio
hardware is initialized - audio thread unlocks it after final init
stage */
mutex_lock(&mutex_codecthread);
semaphore_init(&sem_codecthread, 1, 0);
event_init(&event_codecthread, EVENT_MANUAL | STATE_SIGNALED);
#endif
queue_init(&audio_queue, true);
queue_enable_queue_send(&audio_queue, &audio_queue_sender_list);
@@ -3842,16 +3847,16 @@ void audio_init(void)
talk first */
talk_init();

/* Create the threads late now that we shouldn't be yielding again before
returning */
codec_thread_p = create_thread(
codec_thread, codec_stack, sizeof(codec_stack),
CREATE_THREAD_FROZEN,
codec_thread_name IF_PRIO(, PRIORITY_PLAYBACK)
IF_COP(, CPU, true));
IF_COP(, CPU));

create_thread(audio_thread, audio_stack, sizeof(audio_stack),
audio_thread_p = create_thread(audio_thread, audio_stack,
sizeof(audio_stack), CREATE_THREAD_FROZEN,
audio_thread_name IF_PRIO(, PRIORITY_BUFFERING)
IF_COP(, CPU, false));
IF_COP(, CPU));

#ifdef PLAYBACK_VOICE
/* TODO: Change this around when various speech codecs can be used */
@@ -3859,9 +3864,10 @@ void audio_init(void)
{
logf("Starting voice codec");
queue_init(&voice_queue, true);
create_thread(voice_thread, voice_stack,
sizeof(voice_stack), voice_thread_name
IF_PRIO(, PRIORITY_PLAYBACK) IF_COP(, CPU, false));
voice_thread_p = create_thread(voice_thread, voice_stack,
sizeof(voice_stack), CREATE_THREAD_FROZEN,
voice_thread_name
IF_PRIO(, PRIORITY_PLAYBACK) IF_COP(, CPU));
}
#endif

@@ -3881,5 +3887,13 @@ void audio_init(void)
#ifndef HAVE_FLASH_STORAGE
audio_set_buffer_margin(global_settings.buffer_margin);
#endif

/* it's safe to let the threads run now */
thread_thaw(codec_thread_p);
#ifdef PLAYBACK_VOICE
if (voice_thread_p)
thread_thaw(voice_thread_p);
#endif
thread_thaw(audio_thread_p);
} /* audio_init */

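The swap_codec() hunk above replaces a mutex plus a yield() polling loop with a semaphore for ownership and a manual-reset event tracking which codec currently runs. A condensed, illustrative sketch of the handoff (calls as introduced by this commit; error handling and the IRAM/DRAM swapping are omitted):

/* Condensed illustration of the swap_codec() handoff. */
static struct semaphore sem_codecthread;   /* owns the codec "slot" */
static struct event event_codecthread;     /* SIGNALED = audio side */

static void handoff(int my_codec)
{
    semaphore_release(&sem_codecthread);   /* let the other codec in */

    /* Block until the event shows the other side is running, instead
     * of spinning on yield() as the old code did. */
    event_wait(&event_codecthread,
               (my_codec == CODEC_IDX_AUDIO) ? STATE_NONSIGNALED
                                             : STATE_SIGNALED);

    semaphore_wait(&sem_codecthread);      /* take the slot back */
    set_current_codec(my_codec);
    event_set_state(&event_codecthread,
                    (my_codec == CODEC_IDX_AUDIO) ? STATE_SIGNALED
                                                  : STATE_NONSIGNALED);
}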
@@ -1163,7 +1163,7 @@ static int compare(const void* p1, const void* p2)
*/
static void playlist_thread(void)
{
struct event ev;
struct queue_event ev;
bool dirty_pointers = false;
static char tmp[MAX_PATH+1];

@@ -1889,8 +1889,8 @@ void playlist_init(void)
memset(playlist->filenames, 0,
playlist->max_playlist_size * sizeof(int));
create_thread(playlist_thread, playlist_stack, sizeof(playlist_stack),
playlist_thread_name IF_PRIO(, PRIORITY_BACKGROUND)
IF_COP(, CPU, false));
0, playlist_thread_name IF_PRIO(, PRIORITY_BACKGROUND)
IF_COP(, CPU));
queue_init(&playlist_queue, true);
#endif
}

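This hunk shows the tree-wide create_thread signature change in miniature: a flags argument is inserted and the old per-core fallback bool is dropped. Side by side (taken from the diff above; CREATE_THREAD_FROZEN is the flag playback.c uses to start a thread suspended until thread_thaw()):

/* Before: no flags, trailing bool chose "fall back to other core". */
create_thread(playlist_thread, playlist_stack, sizeof(playlist_stack),
              playlist_thread_name IF_PRIO(, PRIORITY_BACKGROUND)
              IF_COP(, CPU, false));

/* After: flags word (0, or e.g. CREATE_THREAD_FROZEN to create the
 * thread suspended until thread_thaw() releases it). */
create_thread(playlist_thread, playlist_stack, sizeof(playlist_stack),
              0, playlist_thread_name IF_PRIO(, PRIORITY_BACKGROUND)
              IF_COP(, CPU));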
@@ -141,6 +141,7 @@ static const struct plugin_api rockbox_api = {
/* remote lcd */
lcd_remote_set_contrast,
lcd_remote_clear_display,
lcd_remote_setmargins,
lcd_remote_puts,
lcd_remote_puts_scroll,
lcd_remote_stop_scroll,
@@ -182,6 +183,9 @@ static const struct plugin_api rockbox_api = {

#if defined(HAVE_LCD_COLOR)
lcd_yuv_blit,
#endif
#if defined(TOSHIBA_GIGABEAT_F) || defined(SANSA_E200)
lcd_yuv_set_options,
#endif
/* list */
gui_synclist_init,
@@ -202,6 +206,7 @@ static const struct plugin_api rockbox_api = {
button_get_w_tmo,
button_status,
button_clear_queue,
button_queue_count,
#ifdef HAS_BUTTON_HOLD
button_hold,
#endif
@@ -463,6 +468,13 @@ static const struct plugin_api rockbox_api = {
#endif
&global_settings,
&global_status,
talk_disable_menus,
talk_enable_menus,
#if CONFIG_CODEC == SWCODEC
codec_load_file,
get_codec_filename,
get_metadata,
#endif
mp3info,
count_mp3_frames,
create_xing_header,
@@ -491,6 +503,11 @@ static const struct plugin_api rockbox_api = {
detect_original_firmware,
detect_flashed_ramimage,
detect_flashed_romimage,
#endif
led,
#ifdef CACHE_FUNCTIONS_AS_CALL
flush_icache,
invalidate_icache,
#endif
/* new stuff at the end, sort into place next time
the API gets incompatible */
@@ -499,27 +516,6 @@ static const struct plugin_api rockbox_api = {
spinlock_init,
spinlock_lock,
spinlock_unlock,

codec_load_file,
get_codec_filename,
get_metadata,
#endif
led,

#if defined(TOSHIBA_GIGABEAT_F) || defined(SANSA_E200)
lcd_yuv_set_options,
#endif

#ifdef CACHE_FUNCTIONS_AS_CALL
flush_icache,
invalidate_icache,
#endif
talk_disable_menus,
talk_enable_menus,

button_queue_count,
#ifdef HAVE_REMOTE_LCD
lcd_remote_setmargins,
#endif
};

@@ -666,7 +662,11 @@ int plugin_load(const char* plugin, void* parameter)
#endif
lcd_remote_setmargins(rxm, rym);
lcd_remote_clear_display();

lcd_remote_update();

#endif

if (pfn_tsr_exit == NULL)

@@ -112,12 +112,12 @@
#define PLUGIN_MAGIC 0x526F634B /* RocK */

/* increase this every time the api struct changes */
#define PLUGIN_API_VERSION 82
#define PLUGIN_API_VERSION 83

/* update this to latest version if a change to the api struct breaks
backwards compatibility (and please take the opportunity to sort in any
new function which are "waiting" at the end of the function table) */
#define PLUGIN_MIN_API_VERSION 82
#define PLUGIN_MIN_API_VERSION 83

/* plugin return codes */
enum plugin_status {
@@ -219,6 +219,7 @@ struct plugin_api {
/* remote lcd */
void (*lcd_remote_set_contrast)(int x);
void (*lcd_remote_clear_display)(void);
void (*lcd_remote_setmargins)(int x, int y);
void (*lcd_remote_puts)(int x, int y, const unsigned char *string);
void (*lcd_remote_lcd_puts_scroll)(int x, int y, const unsigned char* string);
void (*lcd_remote_lcd_stop_scroll)(void);
@@ -265,6 +266,10 @@ struct plugin_api {
int x, int y, int width, int height);
#endif

#if defined(TOSHIBA_GIGABEAT_F) || defined(SANSA_E200)
void (*lcd_yuv_set_options)(unsigned options);
#endif

/* list */
void (*gui_synclist_init)(struct gui_synclist * lists,
list_get_name callback_get_item_name,void * data,
@@ -288,6 +293,7 @@ struct plugin_api {
long (*button_get_w_tmo)(int ticks);
int (*button_status)(void);
void (*button_clear_queue)(void);
int (*button_queue_count)(void);
#ifdef HAS_BUTTON_HOLD
bool (*button_hold)(void);
#endif
@@ -334,9 +340,10 @@ struct plugin_api {
long (*default_event_handler_ex)(long event, void (*callback)(void *), void *parameter);
struct thread_entry* threads;
struct thread_entry* (*create_thread)(void (*function)(void), void* stack,
int stack_size, const char *name
int stack_size, unsigned flags,
const char *name
IF_PRIO(, int priority)
IF_COP(, unsigned int core, bool fallback));
IF_COP(, unsigned int core));
void (*remove_thread)(struct thread_entry *thread);
void (*reset_poweroff_timer)(void);
#ifndef SIMULATOR
@@ -359,7 +366,7 @@ struct plugin_api {
void (*queue_init)(struct event_queue *q, bool register_queue);
void (*queue_delete)(struct event_queue *q);
void (*queue_post)(struct event_queue *q, long id, intptr_t data);
void (*queue_wait_w_tmo)(struct event_queue *q, struct event *ev,
void (*queue_wait_w_tmo)(struct event_queue *q, struct queue_event *ev,
int ticks);
void (*usb_acknowledge)(long id);
#ifdef RB_PROFILE
@@ -572,6 +579,13 @@ struct plugin_api {
#endif
struct user_settings* global_settings;
struct system_status *global_status;
void (*talk_disable_menus)(void);
void (*talk_enable_menus)(void);
#if CONFIG_CODEC == SWCODEC
int (*codec_load_file)(const char* codec, struct codec_api *api);
const char *(*get_codec_filename)(int cod_spec);
bool (*get_metadata)(struct mp3entry* id3, int fd, const char* trackname);
#endif
bool (*mp3info)(struct mp3entry *entry, const char *filename);
int (*count_mp3_frames)(int fd, int startpos, int filesize,
void (*progressfunc)(int));
@@ -609,35 +623,21 @@ struct plugin_api {
bool (*detect_flashed_ramimage)(void);
bool (*detect_flashed_romimage)(void);
#endif
/* new stuff at the end, sort into place next time
the API gets incompatible */

#if (CONFIG_CODEC == SWCODEC)
void (*spinlock_init)(struct mutex *m);
void (*spinlock_lock)(struct mutex *m);
void (*spinlock_unlock)(struct mutex *m);

int (*codec_load_file)(const char* codec, struct codec_api *api);
const char *(*get_codec_filename)(int cod_spec);
bool (*get_metadata)(struct mp3entry* id3, int fd, const char* trackname);
#endif
void (*led)(bool on);

#if defined(TOSHIBA_GIGABEAT_F) || defined(SANSA_E200)
void (*lcd_yuv_set_options)(unsigned options);
#endif

#ifdef CACHE_FUNCTIONS_AS_CALL
void (*flush_icache)(void);
void (*invalidate_icache)(void);
#endif

void (*talk_disable_menus)(void);
void (*talk_enable_menus)(void);
/* new stuff at the end, sort into place next time
the API gets incompatible */

int (*button_queue_count)(void);
#ifdef HAVE_REMOTE_LCD
void (*lcd_remote_setmargins)(int x, int y);
#if (CONFIG_CODEC == SWCODEC)
void (*spinlock_init)(struct spinlock *l IF_COP(, unsigned int flags));
void (*spinlock_lock)(struct spinlock *l);
void (*spinlock_unlock)(struct spinlock *l);
#endif
};

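The IF_COP()/IF_PRIO() wrappers seen in these prototypes are variadic macros that compile their arguments in or out per build; a sketch of the mechanism (IF_COP as defined in this commit's config.h hunk below, IF_PRIO assumed analogous under HAVE_PRIORITY_SCHEDULING):

/* On a dual-core build the arguments survive... */
#if NUM_CORES > 1
#define IF_COP(...) __VA_ARGS__
#else
/* ...on single-core builds they vanish, leading comma included. */
#define IF_COP(...)
#endif

/* So one prototype serves both builds: */
struct thread_entry* create_thread(void (*function)(void), void* stack,
                                   int stack_size, unsigned flags,
                                   const char *name
                                   IF_PRIO(, int priority)
                                   IF_COP(, unsigned int core));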
@@ -1167,9 +1167,9 @@ int main(void* parameter)

rb->memset(&gTread, 0, sizeof(gTread));
gTread.foreground = true;
rb->create_thread(thread, stack, stacksize, "CDC"
rb->create_thread(thread, stack, stacksize, 0, "CDC"
IF_PRIO(, PRIORITY_BACKGROUND)
IF_COP(, CPU, false));
IF_COP(, CPU));

#ifdef DEBUG
do

@@ -215,7 +215,7 @@ void thread(void)
#endif
long sleep_time = 5 * HZ;

struct event ev;
struct queue_event ev;

buffelements = sizeof(bat)/sizeof(struct batt_info);

@@ -500,9 +500,9 @@ int main(void)

rb->queue_init(&thread_q, true); /* put the thread's queue in the bcast list */
if(rb->create_thread(thread, thread_stack,
sizeof(thread_stack), "Battery Benchmark"
sizeof(thread_stack), 0, "Battery Benchmark"
IF_PRIO(, PRIORITY_BACKGROUND)
IF_COP(, CPU, false)) == NULL)
IF_COP(, CPU)) == NULL)
{
rb->splash(HZ, "Cannot create thread!");
return PLUGIN_ERROR;

@@ -189,11 +189,11 @@ typedef struct
{
struct thread_entry *thread; /* Stream's thread */
int status; /* Current stream status */
struct event ev; /* Event sent to steam */
struct queue_event ev; /* Event sent to steam */
int have_msg; /* 1=event pending */
int replied; /* 1=replied to last event */
int reply; /* reply value */
struct mutex msg_lock; /* serialization for event senders */
struct spinlock msg_lock; /* serialization for event senders */
uint8_t* curr_packet; /* Current stream packet beginning */
uint8_t* curr_packet_end; /* Current stream packet end */

@@ -256,7 +256,7 @@ static void str_wait_msg(Stream *str)

/* Returns a message waiting or blocks until one is available - removes the
event */
static void str_get_msg(Stream *str, struct event *ev)
static void str_get_msg(Stream *str, struct queue_event *ev)
{
str_wait_msg(str);
ev->id = str->ev.id;
@@ -266,7 +266,7 @@ static void str_get_msg(Stream *str, struct event *ev)

/* Peeks at the current message without blocking, returns the data but
does not remove the event */
static bool str_look_msg(Stream *str, struct event *ev)
static bool str_look_msg(Stream *str, struct queue_event *ev)
{
if (!str_have_msg(str))
return false;
@@ -345,9 +345,9 @@ static size_t file_remaining IBSS_ATTR;

#if NUM_CORES > 1
/* Some stream variables are shared between cores */
struct mutex stream_lock IBSS_ATTR;
struct spinlock stream_lock IBSS_ATTR;
static inline void init_stream_lock(void)
{ rb->spinlock_init(&stream_lock); }
{ rb->spinlock_init(&stream_lock, SPINLOCK_TASK_SWITCH); }
static inline void lock_stream(void)
{ rb->spinlock_lock(&stream_lock); }
static inline void unlock_stream(void)
@@ -1050,7 +1050,7 @@ static int button_loop(void)

if (str_have_msg(&audio_str))
{
struct event ev;
struct queue_event ev;
str_get_msg(&audio_str, &ev);

if (ev.id == STREAM_QUIT)
@@ -1375,7 +1375,7 @@ static void audio_thread(void)
{
if (str_have_msg(&audio_str))
{
struct event ev;
struct queue_event ev;
str_look_msg(&audio_str, &ev);

if (ev.id == STREAM_QUIT)
@@ -1498,7 +1498,7 @@ static uint32_t video_stack[VIDEO_STACKSIZE / sizeof(uint32_t)] IBSS_ATTR;

static void video_thread(void)
{
struct event ev;
struct queue_event ev;
const mpeg2_info_t * info;
mpeg2_state_t state;
char str[80];
@@ -1929,9 +1929,8 @@ void display_thumb(int in_file)
video_str.status = STREAM_PLAYING;

if ((video_str.thread = rb->create_thread(video_thread,
(uint8_t*)video_stack,VIDEO_STACKSIZE,"mpgvideo"
IF_PRIO(,PRIORITY_PLAYBACK)
IF_COP(, COP, true))) == NULL)
(uint8_t*)video_stack,VIDEO_STACKSIZE, 0,"mpgvideo"
IF_PRIO(,PRIORITY_PLAYBACK) IF_COP(, COP))) == NULL)
{
rb->splash(HZ, "Cannot create video thread!");
}
@@ -2354,8 +2353,8 @@ enum plugin_status plugin_start(struct plugin_api* api, void* parameter)
initialize_stream( &video_str, disk_buf_start, disk_buf_len, 0xe0 );
initialize_stream( &audio_str, disk_buf_start, disk_buf_len, 0xc0 );

rb->spinlock_init(&audio_str.msg_lock);
rb->spinlock_init(&video_str.msg_lock);
rb->spinlock_init(&audio_str.msg_lock IF_COP(, SPINLOCK_TASK_SWITCH));
rb->spinlock_init(&video_str.msg_lock IF_COP(, SPINLOCK_TASK_SWITCH));

audio_str.status = STREAM_BUFFERING;
video_str.status = STREAM_PLAYING;
@@ -2372,14 +2371,14 @@ enum plugin_status plugin_start(struct plugin_api* api, void* parameter)

/* We put the video thread on the second processor for multi-core targets. */
if ((video_str.thread = rb->create_thread(video_thread,
(uint8_t*)video_stack,VIDEO_STACKSIZE,"mpgvideo" IF_PRIO(,PRIORITY_PLAYBACK)
IF_COP(, COP, true))) == NULL)
(uint8_t*)video_stack, VIDEO_STACKSIZE, 0,
"mpgvideo" IF_PRIO(,PRIORITY_PLAYBACK) IF_COP(, COP))) == NULL)
{
rb->splash(HZ, "Cannot create video thread!");
}
else if ((audio_str.thread = rb->create_thread(audio_thread,
(uint8_t*)audio_stack,AUDIO_STACKSIZE,"mpgaudio" IF_PRIO(,PRIORITY_PLAYBACK)
IF_COP(, CPU, false))) == NULL)
(uint8_t*)audio_stack,AUDIO_STACKSIZE, 0,"mpgaudio"
IF_PRIO(,PRIORITY_PLAYBACK) IF_COP(, CPU))) == NULL)
{
rb->splash(HZ, "Cannot create audio thread!");
}

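mpegplayer's locks become true struct spinlock objects whose waiting behavior is chosen at init time. A hedged sketch of the two modes using the flags this commit adds (semantics inferred from the task_switch field in the kernel.h hunk later in this diff):

/* Lock held very briefly across cores: spin without yielding. */
struct spinlock msg_lock;
spinlock_init(&msg_lock IF_COP(, SPINLOCK_NO_TASK_SWITCH));

/* Lock that may be held longer: allow task switches while waiting,
 * so the owner gets CPU time in which to release it. */
struct spinlock stream_lock;
spinlock_init(&stream_lock IF_COP(, SPINLOCK_TASK_SWITCH));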
@@ -584,8 +584,8 @@ static enum plugin_status test_track(char* filename)
codec_playing = true;

if ((codecthread_id = rb->create_thread(codec_thread,
(uint8_t*)codec_stack, codec_stack_size, "testcodec" IF_PRIO(,PRIORITY_PLAYBACK)
IF_COP(, CPU, false))) == NULL)
(uint8_t*)codec_stack, codec_stack_size, 0, "testcodec"
IF_PRIO(,PRIORITY_PLAYBACK) IF_COP(, CPU))) == NULL)
{
log_text("Cannot create codec thread!",true);
goto exit;

@@ -153,7 +153,7 @@ void usb_screen(void)
#ifdef HAVE_MMC
int mmc_remove_request(void)
{
struct event ev;
struct queue_event ev;
int i;
FOR_NB_SCREENS(i)
screens[i].clear_display();

@@ -88,7 +88,7 @@

#ifndef __PCTOOL__
/* Tag Cache thread. */
static struct event_queue tagcache_queue;
static struct event_queue tagcache_queue NOCACHEBSS_ATTR;
static long tagcache_stack[(DEFAULT_STACK_SIZE + 0x4000)/sizeof(long)];
static const char tagcache_thread_name[] = "tagcache";
#endif
@@ -152,7 +152,7 @@ struct tagcache_command_entry {
static struct tagcache_command_entry command_queue[TAGCACHE_COMMAND_QUEUE_LENGTH];
static volatile int command_queue_widx = 0;
static volatile int command_queue_ridx = 0;
static struct mutex command_queue_mutex;
static struct mutex command_queue_mutex NOCACHEBSS_ATTR;
/* Timestamp of the last added event, so we can wait a bit before committing the
* whole queue at once. */
static long command_queue_timestamp = 0;
@@ -3377,7 +3377,7 @@ static bool delete_entry(long idx_id)
*/
static bool check_event_queue(void)
{
struct event ev;
struct queue_event ev;

queue_wait_w_tmo(&tagcache_queue, &ev, 0);
switch (ev.id)
@@ -3972,7 +3972,7 @@ void tagcache_unload_ramcache(void)
#ifndef __PCTOOL__
static void tagcache_thread(void)
{
struct event ev;
struct queue_event ev;
bool check_done = false;

/* If the previous cache build/update was interrupted, commit
@@ -4176,9 +4176,9 @@ void tagcache_init(void)
mutex_init(&command_queue_mutex);
queue_init(&tagcache_queue, true);
create_thread(tagcache_thread, tagcache_stack,
sizeof(tagcache_stack), tagcache_thread_name
sizeof(tagcache_stack), 0, tagcache_thread_name
IF_PRIO(, PRIORITY_BACKGROUND)
IF_COP(, CPU, false));
IF_COP(, CPU));
#else
tc_stat.initialized = true;
allocate_tempbuf();

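tagcache's queue and mutex gain NOCACHEBSS_ATTR, the pattern used throughout this commit for kernel objects shared across cores. Per the config.h hunk later in this diff, the attribute places the object in IRAM on dual-core PP builds and expands to nothing on single-core ones; a sketch:

/* Dual-core PP: NOCACHEBSS_ATTR == IBSS_ATTR, so CPU and COP see a
 * coherent copy in IRAM; single-core: the attribute is empty. */
static struct event_queue tagcache_queue NOCACHEBSS_ATTR;
static struct mutex command_queue_mutex NOCACHEBSS_ATTR;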
@@ -95,7 +95,7 @@ const signed char backlight_timeout_value[19] =
static void backlight_thread(void);
static long backlight_stack[DEFAULT_STACK_SIZE/sizeof(long)];
static const char backlight_thread_name[] = "backlight";
static struct event_queue backlight_queue;
static struct event_queue backlight_queue NOCACHEBSS_ATTR;

static int backlight_timer;
static int backlight_timeout;
@@ -465,7 +465,7 @@ static void remote_backlight_update_state(void)

void backlight_thread(void)
{
struct event ev;
struct queue_event ev;
bool locked = false;

while(1)
@@ -627,9 +627,9 @@ void backlight_init(void)
* status if necessary. */

create_thread(backlight_thread, backlight_stack,
sizeof(backlight_stack), backlight_thread_name
sizeof(backlight_stack), 0, backlight_thread_name
IF_PRIO(, PRIORITY_SYSTEM)
IF_COP(, CPU, false));
IF_COP(, CPU));
tick_add_task(backlight_tick);
}

@@ -62,7 +62,7 @@ static unsigned long reserve_used = 0;
static unsigned int cache_build_ticks = 0;
static char dircache_cur_path[MAX_PATH*2];

static struct event_queue dircache_queue;
static struct event_queue dircache_queue NOCACHEBSS_ATTR;
static long dircache_stack[(DEFAULT_STACK_SIZE + 0x900)/sizeof(long)];
static const char dircache_thread_name[] = "dircache";

@@ -147,7 +147,7 @@ static struct travel_data dir_recursion[MAX_SCAN_DEPTH];
*/
static bool check_event_queue(void)
{
struct event ev;
struct queue_event ev;

queue_wait_w_tmo(&dircache_queue, &ev, 0);
switch (ev.id)
@@ -598,7 +598,7 @@ static int dircache_do_rebuild(void)
*/
static void dircache_thread(void)
{
struct event ev;
struct queue_event ev;

while (1)
{
@@ -701,8 +701,9 @@ void dircache_init(void)

queue_init(&dircache_queue, true);
create_thread(dircache_thread, dircache_stack,
sizeof(dircache_stack), dircache_thread_name IF_PRIO(, PRIORITY_BACKGROUND)
IF_COP(, CPU, false));
sizeof(dircache_stack), 0, dircache_thread_name
IF_PRIO(, PRIORITY_BACKGROUND)
IF_COP(, CPU));
}

/**

@@ -66,7 +66,7 @@
#define ATA_POWER_OFF_TIMEOUT 2*HZ
#endif

static struct mutex ata_mtx;
static struct spinlock ata_spinlock NOCACHEBSS_ATTR;
int ata_device; /* device 0 (master) or 1 (slave) */

int ata_spinup_time = 0;
@@ -83,7 +83,7 @@ static bool lba48 = false; /* set for 48 bit addressing */
#endif
static long ata_stack[(DEFAULT_STACK_SIZE*3)/sizeof(long)];
static const char ata_thread_name[] = "ata";
static struct event_queue ata_queue;
static struct event_queue ata_queue NOCACHEBSS_ATTR;
static bool initialized = false;

static long last_user_activity = -1;
@@ -234,7 +234,7 @@ int ata_read_sectors(IF_MV2(int drive,)
#ifdef HAVE_MULTIVOLUME
(void)drive; /* unused for now */
#endif
spinlock_lock(&ata_mtx);
spinlock_lock(&ata_spinlock);
#endif

last_disk_activity = current_tick;
@@ -246,14 +246,14 @@ int ata_read_sectors(IF_MV2(int drive,)
spinup = true;
if (poweroff) {
if (ata_power_on()) {
spinlock_unlock(&ata_mtx);
spinlock_unlock(&ata_spinlock);
ata_led(false);
return -1;
}
}
else {
if (perform_soft_reset()) {
spinlock_unlock(&ata_mtx);
spinlock_unlock(&ata_spinlock);
ata_led(false);
return -1;
}
@@ -265,7 +265,7 @@ int ata_read_sectors(IF_MV2(int drive,)
SET_REG(ATA_SELECT, ata_device);
if (!wait_for_rdy())
{
spinlock_unlock(&ata_mtx);
spinlock_unlock(&ata_spinlock);
ata_led(false);
return -2;
}
@@ -376,7 +376,7 @@ int ata_read_sectors(IF_MV2(int drive,)
ata_led(false);

#ifndef MAX_PHYS_SECTOR_SIZE
spinlock_unlock(&ata_mtx);
spinlock_unlock(&ata_spinlock);
#endif

return ret;
@@ -442,7 +442,7 @@ int ata_write_sectors(IF_MV2(int drive,)
#ifdef HAVE_MULTIVOLUME
(void)drive; /* unused for now */
#endif
spinlock_lock(&ata_mtx);
spinlock_lock(&ata_spinlock);
#endif

last_disk_activity = current_tick;
@@ -454,14 +454,14 @@ int ata_write_sectors(IF_MV2(int drive,)
spinup = true;
if (poweroff) {
if (ata_power_on()) {
spinlock_unlock(&ata_mtx);
spinlock_unlock(&ata_spinlock);
ata_led(false);
return -1;
}
}
else {
if (perform_soft_reset()) {
spinlock_unlock(&ata_mtx);
spinlock_unlock(&ata_spinlock);
ata_led(false);
return -1;
}
@@ -471,7 +471,7 @@ int ata_write_sectors(IF_MV2(int drive,)
SET_REG(ATA_SELECT, ata_device);
if (!wait_for_rdy())
{
spinlock_unlock(&ata_mtx);
spinlock_unlock(&ata_spinlock);
ata_led(false);
return -2;
}
@@ -534,7 +534,7 @@ int ata_write_sectors(IF_MV2(int drive,)
ata_led(false);

#ifndef MAX_PHYS_SECTOR_SIZE
spinlock_unlock(&ata_mtx);
spinlock_unlock(&ata_spinlock);
#endif

return ret;
@@ -580,7 +580,7 @@ int ata_read_sectors(IF_MV2(int drive,)
#ifdef HAVE_MULTIVOLUME
(void)drive; /* unused for now */
#endif
spinlock_lock(&ata_mtx);
spinlock_lock(&ata_spinlock);

offset = start & (phys_sector_mult - 1);

@@ -630,7 +630,7 @@ int ata_read_sectors(IF_MV2(int drive,)
}

error:
spinlock_unlock(&ata_mtx);
spinlock_unlock(&ata_spinlock);

return rc;
}
@@ -646,7 +646,7 @@ int ata_write_sectors(IF_MV2(int drive,)
#ifdef HAVE_MULTIVOLUME
(void)drive; /* unused for now */
#endif
spinlock_lock(&ata_mtx);
spinlock_lock(&ata_spinlock);

offset = start & (phys_sector_mult - 1);

@@ -707,7 +707,7 @@ int ata_write_sectors(IF_MV2(int drive,)
}

error:
spinlock_unlock(&ata_mtx);
spinlock_unlock(&ata_spinlock);

return rc;
}
@@ -767,13 +767,13 @@ static int ata_perform_sleep(void)
{
int ret = 0;

spinlock_lock(&ata_mtx);
spinlock_lock(&ata_spinlock);

SET_REG(ATA_SELECT, ata_device);

if(!wait_for_rdy()) {
DEBUGF("ata_perform_sleep() - not RDY\n");
spinlock_unlock(&ata_mtx);
spinlock_unlock(&ata_spinlock);
return -1;
}

@@ -786,7 +786,7 @@ static int ata_perform_sleep(void)
}

sleeping = true;
spinlock_unlock(&ata_mtx);
spinlock_unlock(&ata_spinlock);
return ret;
}

@@ -797,7 +797,7 @@ void ata_sleep(void)

void ata_sleepnow(void)
{
if (!spinup && !sleeping && !ata_mtx.locked && initialized)
if (!spinup && !sleeping && !ata_spinlock.locked && initialized)
{
call_ata_idle_notifys(false);
ata_perform_sleep();
@@ -812,14 +812,14 @@ void ata_spin(void)
static void ata_thread(void)
{
static long last_sleep = 0;
struct event ev;
struct queue_event ev;
static long last_seen_mtx_unlock = 0;

while (1) {
while ( queue_empty( &ata_queue ) ) {
if (!spinup && !sleeping)
{
if (!ata_mtx.locked)
if (!ata_spinlock.locked)
{
if (!last_seen_mtx_unlock)
last_seen_mtx_unlock = current_tick;
@@ -844,9 +844,9 @@ static void ata_thread(void)
if ( !spinup && sleeping && !poweroff &&
TIME_AFTER( current_tick, last_sleep + ATA_POWER_OFF_TIMEOUT ))
{
spinlock_lock(&ata_mtx);
spinlock_lock(&ata_spinlock);
ide_power_enable(false);
spinlock_unlock(&ata_mtx);
spinlock_unlock(&ata_spinlock);
poweroff = true;
}
#endif
@@ -858,11 +858,11 @@ static void ata_thread(void)
#ifndef USB_NONE
case SYS_USB_CONNECTED:
if (poweroff) {
spinlock_lock(&ata_mtx);
spinlock_lock(&ata_spinlock);
ata_led(true);
ata_power_on();
ata_led(false);
spinlock_unlock(&ata_mtx);
spinlock_unlock(&ata_spinlock);
}

/* Tell the USB thread that we are safe */
@@ -936,11 +936,11 @@ int ata_soft_reset(void)
{
int ret;

spinlock_lock(&ata_mtx);
spinlock_lock(&ata_spinlock);

ret = perform_soft_reset();

spinlock_unlock(&ata_mtx);
spinlock_unlock(&ata_spinlock);
return ret;
}

@@ -1131,7 +1131,7 @@ int ata_init(void)
bool coldstart = ata_is_coldstart();
/* must be called before ata_device_init() */

spinlock_init(&ata_mtx);
spinlock_init(&ata_spinlock IF_COP(, SPINLOCK_TASK_SWITCH));

ata_led(false);
ata_device_init();
@@ -1205,9 +1205,9 @@ int ata_init(void)

last_disk_activity = current_tick;
create_thread(ata_thread, ata_stack,
sizeof(ata_stack), ata_thread_name
sizeof(ata_stack), 0, ata_thread_name
IF_PRIO(, PRIORITY_SYSTEM)
IF_COP(, CPU, false));
IF_COP(, CPU));
initialized = true;

}

@@ -959,7 +959,7 @@ void ata_spin(void)

static void mmc_thread(void)
{
struct event ev;
struct queue_event ev;
bool idle_notified = false;

while (1) {
@@ -1153,8 +1153,9 @@ int ata_init(void)

queue_init(&mmc_queue, true);
create_thread(mmc_thread, mmc_stack,
sizeof(mmc_stack), mmc_thread_name IF_PRIO(, PRIORITY_SYSTEM)
IF_COP(, CPU, false));
sizeof(mmc_stack), 0, mmc_thread_name
IF_PRIO(, PRIORITY_SYSTEM)
IF_COP(, CPU));
tick_add_task(mmc_tick);
initialized = true;
}

@@ -46,7 +46,7 @@
#define MAX_EVENT_AGE HZ
#endif

struct event_queue button_queue;
struct event_queue button_queue NOCACHEBSS_ATTR;

static long lastbtn; /* Last valid button status */
static long last_read; /* Last button status, for debouncing/filtering */
@@ -300,7 +300,7 @@ int button_queue_count( void )

long button_get(bool block)
{
struct event ev;
struct queue_event ev;
int pending_count = queue_count(&button_queue);

#ifdef HAVE_ADJUSTABLE_CPU_FREQ
@@ -330,7 +330,7 @@ long button_get(bool block)

long button_get_w_tmo(int ticks)
{
struct event ev;
struct queue_event ev;

#ifdef HAVE_ADJUSTABLE_CPU_FREQ
/* Be sure to keep boosted state. */

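button.c shows why the rename matters: struct event (the queue message) becomes struct queue_event, freeing the event name for the new waitable signal object. An illustrative pairing of the two types (APIs from this commit's kernel.h; the scenario itself is hypothetical):

/* Message carried through an event_queue (was "struct event"). */
struct queue_event ev;
queue_wait_w_tmo(&button_queue, &ev, ticks);
if (ev.id == SYS_USB_CONNECTED)
    usb_acknowledge(SYS_USB_CONNECTED_ACK);

/* The name "struct event" now means a waitable signal object. */
static struct event drive_ready;
event_init(&drive_ready, EVENT_MANUAL | STATE_NONSIGNALED);
event_set_state(&drive_ready, STATE_SIGNALED);   /* from one thread */
event_wait(&drive_ready, STATE_SIGNALED);        /* from another */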
@@ -197,7 +197,7 @@ struct fat_cache_entry

static char fat_cache_sectors[FAT_CACHE_SIZE][SECTOR_SIZE];
static struct fat_cache_entry fat_cache[FAT_CACHE_SIZE];
static struct mutex cache_mutex;
static struct mutex cache_mutex NOCACHEBSS_ATTR;

static long cluster2sec(IF_MV2(struct bpb* fat_bpb,) long cluster)
{

@@ -282,9 +282,13 @@
#define HAVE_EXTENDED_MESSAGING_AND_NAME
#endif

#if (CONFIG_CODEC == SWCODEC) && !defined(SIMULATOR) && !defined(BOOTLOADER)
#if (CONFIG_CODEC == SWCODEC) && !defined(BOOTLOADER)
#ifndef SIMULATOR
#define HAVE_PRIORITY_SCHEDULING
#define HAVE_SCHEDULER_BOOSTCTRL
#endif /* SIMULATOR */
#define HAVE_SEMAPHORE_OBJECTS
#define HAVE_EVENT_OBJECTS
#endif

/* define for all cpus from SH family */
@@ -363,31 +367,70 @@
#define IRAM_LCDFRAMEBUFFER
#endif

/* Change this if you want to build a single-core firmware for a multicore
* target for debugging */
#if defined(BOOTLOADER)
#define FORCE_SINGLE_CORE
#endif

/* Core locking types - specifies type of atomic operation */
#define CORELOCK_NONE 0
#define SW_CORELOCK 1 /* Mutual exclusion provided by a software algorithm
and not a special semaphore instruction */
#define CORELOCK_SWAP 2 /* A swap (exchange) instruction */

/* Dual core support - not yet working on the 1G/2G and 3G iPod */
#if defined(CPU_PP)
#define IDLE_STACK_SIZE 0x80
#define IDLE_STACK_WORDS 0x20

#if !defined(BOOTLOADER) && CONFIG_CPU != PP5002
#if !defined(FORCE_SINGLE_CORE) && CONFIG_CPU != PP5002

#define NUM_CORES 2
#define CURRENT_CORE current_core()
/* Hopefully at some point we will learn how to mark areas of main memory as
* not to be cached. Until then, use IRAM for variables shared across cores */
/* Use IRAM for variables shared across cores - large memory buffers should
* use UNCACHED_ADDR(a) and be appropriately aligned and padded */
#define NOCACHEBSS_ATTR IBSS_ATTR
#define NOCACHEDATA_ATTR IDATA_ATTR

#define IF_COP(...) __VA_ARGS__
#define IF_COP(...) __VA_ARGS__
#define IF_COP_VOID(...) __VA_ARGS__
#define IF_COP_CORE(core) core

#if CONFIG_CPU == PP5020
#define CONFIG_CORELOCK SW_CORELOCK /* SWP(B) is broken */
#else
#define CONFIG_CORELOCK CORELOCK_SWAP
#endif

#endif /* !defined(BOOTLOADER) && CONFIG_CPU != PP5002 */

#endif /* CPU_PP */

#ifndef CONFIG_CORELOCK
#define CONFIG_CORELOCK CORELOCK_NONE
#endif

#if CONFIG_CORELOCK == SW_CORELOCK
#define IF_SWCL(...) __VA_ARGS__
#define IFN_SWCL(...)
#else
#define IF_SWCL(...)
#define IFN_SWCL(...) __VA_ARGS__
#endif /* CONFIG_CORELOCK == */

#ifndef NUM_CORES
/* Default to single core */
#define NUM_CORES 1
#define CURRENT_CORE CPU
#define NOCACHEBSS_ATTR
#define NOCACHEDATA_ATTR
#define CONFIG_CORELOCK CORELOCK_NONE

#define IF_COP(...)
#define IF_COP_VOID(...) void
#define IF_COP_CORE(core) CURRENT_CORE

#endif /* NUM_CORES */

#endif /* __CONFIG_H__ */

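A related debugging switch in the same block: defining FORCE_SINGLE_CORE builds a single-core firmware for a dual-core target (bootloader builds already do this). A hedged sketch:

/* Define before the NUM_CORES logic above (it is already set for
 * BOOTLOADER builds) to debug with the coprocessor disabled: */
#define FORCE_SINGLE_CORE
/* NUM_CORES then defaults to 1, CONFIG_CORELOCK to CORELOCK_NONE,
 * and IF_COP(...) compiles to nothing. */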
@@ -45,6 +45,10 @@

/* TODO: Fully implement i2c driver */

/* To be used by drivers that need to do multiple i2c operations
atomically */
extern struct spinlock i2c_spin;

void i2c_init(void);
int i2c_readbyte(unsigned int dev_addr, int addr);
int pp_i2c_send(unsigned int addr, int data0, int data1);

@@ -23,6 +23,8 @@
#include <inttypes.h>
#include "config.h"

#include "thread.h"

/* wrap-safe macros for tick comparison */
#define TIME_AFTER(a,b) ((long)(b) - (long)(a) < 0)
#define TIME_BEFORE(a,b) TIME_AFTER(b,a)
@@ -31,6 +33,7 @@

#define MAX_NUM_TICK_TASKS 8

#define MAX_NUM_QUEUES 32
#define QUEUE_LENGTH 16 /* MUST be a power of 2 */
#define QUEUE_LENGTH_MASK (QUEUE_LENGTH - 1)

@@ -72,7 +75,7 @@
#define SYS_SCREENDUMP MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 0)
#define SYS_CAR_ADAPTER_RESUME MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 1)

struct event
struct queue_event
{
long id;
intptr_t data;
@@ -91,21 +94,67 @@ struct queue_sender_list

struct event_queue
{
struct event events[QUEUE_LENGTH];
struct thread_entry *thread;
unsigned int read;
unsigned int write;
struct thread_queue queue; /* Waiter list */
struct queue_event events[QUEUE_LENGTH]; /* list of events */
unsigned int read; /* head of queue */
unsigned int write; /* tail of queue */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
struct queue_sender_list *send;
struct queue_sender_list *send; /* list of threads waiting for
reply to an event */
#endif
#if NUM_CORES > 1
struct corelock cl; /* inter-core sync */
#endif
};

struct mutex
{
uint32_t locked;
struct thread_entry *thread;
struct thread_entry *queue; /* Waiter list */
#if CONFIG_CORELOCK == SW_CORELOCK
struct corelock cl; /* inter-core sync */
#endif
struct thread_entry *thread; /* thread that owns lock */
int count; /* lock owner recursion count */
unsigned char locked; /* locked semaphore */
};

struct spinlock
{
#if NUM_CORES > 1
struct corelock cl; /* inter-core sync */
#endif
struct thread_entry *thread; /* lock owner */
int count; /* lock owner recursion count */
unsigned char locked; /* is locked if nonzero */
#if NUM_CORES > 1
unsigned char task_switch; /* can task switch? */
#endif
};

#ifdef HAVE_SEMAPHORE_OBJECTS
struct semaphore
{
struct thread_entry *queue; /* Waiter list */
#if CONFIG_CORELOCK == SW_CORELOCK
struct corelock cl; /* inter-core sync */
#endif
int count; /* # of waits remaining before unsignaled */
int max; /* maximum # of waits to remain signaled */
};
#endif

#ifdef HAVE_EVENT_OBJECTS
struct event
{
struct thread_entry *queues[2]; /* waiters for each state */
#if CONFIG_CORELOCK == SW_CORELOCK
struct corelock cl; /* inter-core sync */
#endif
unsigned char automatic; /* event performs auto-reset */
unsigned char state; /* state: 1 = signaled */
};
#endif

/* global tick variable */
#if defined(CPU_PP) && defined(BOOTLOADER)
/* We don't enable interrupts in the iPod bootloader, so we need to fake
@@ -127,6 +176,7 @@ extern void yield(void);
extern void sleep(int ticks);
int tick_add_task(void (*f)(void));
int tick_remove_task(void (*f)(void));
extern void tick_start(unsigned int interval_in_ms);

struct timeout;

@@ -150,10 +200,17 @@ void timeout_register(struct timeout *tmo, timeout_cb_type callback,
int ticks, intptr_t data);
void timeout_cancel(struct timeout *tmo);

#define STATE_NONSIGNALED 0
#define STATE_SIGNALED 1

#define WAIT_TIMEDOUT (-1)
#define WAIT_SUCCEEDED 1

extern void queue_init(struct event_queue *q, bool register_queue);
extern void queue_delete(struct event_queue *q);
extern void queue_wait(struct event_queue *q, struct event *ev);
extern void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks);
extern void queue_wait(struct event_queue *q, struct queue_event *ev);
extern void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev,
int ticks);
extern void queue_post(struct event_queue *q, long id, intptr_t data);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
extern void queue_enable_queue_send(struct event_queue *q, struct queue_sender_list *send);
@@ -168,14 +225,26 @@ extern int queue_count(const struct event_queue *q);
extern int queue_broadcast(long id, intptr_t data);

extern void mutex_init(struct mutex *m);
static inline void spinlock_init(struct mutex *m)
{ mutex_init(m); } /* Same thing for now */
extern void mutex_lock(struct mutex *m);
extern void mutex_unlock(struct mutex *m);
extern void spinlock_lock(struct mutex *m);
extern void spinlock_unlock(struct mutex *m);
extern void tick_start(unsigned int interval_in_ms);

#define SPINLOCK_TASK_SWITCH 0x10
#define SPINLOCK_NO_TASK_SWITCH 0x00
extern void spinlock_init(struct spinlock *l IF_COP(, unsigned int flags));
extern void spinlock_lock(struct spinlock *l);
extern void spinlock_unlock(struct spinlock *l);
extern int spinlock_lock_w_tmo(struct spinlock *l, int ticks);
#ifdef HAVE_SEMAPHORE_OBJECTS
extern void semaphore_init(struct semaphore *s, int max, int start);
extern void semaphore_wait(struct semaphore *s);
extern void semaphore_release(struct semaphore *s);
#endif /* HAVE_SEMAPHORE_OBJECTS */
#ifdef HAVE_EVENT_OBJECTS
#define EVENT_AUTOMATIC 0x10
#define EVENT_MANUAL 0x00
extern void event_init(struct event *e, unsigned int flags);
extern void event_wait(struct event *e, unsigned int for_state);
extern void event_set_state(struct event *e, unsigned int state);
#endif /* HAVE_EVENT_OBJECTS */
#define IS_SYSEVENT(ev) ((ev & SYS_EVENT) == SYS_EVENT)

#endif
#endif /* _KERNEL_H_ */

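A hedged usage sketch tying the new kernel.h objects together (APIs as declared above; the producer/consumer scenario is invented for illustration):

#include "kernel.h"

static struct semaphore data_sem;   /* counts claimable items */
static struct event buf_ready;      /* gates consumers until filled */

void producer_init(void)
{
    /* (s, max, start): at most 1 outstanding, none available yet */
    semaphore_init(&data_sem, 1, 0);
    /* manual-reset event, initially nonsignaled */
    event_init(&buf_ready, EVENT_MANUAL | STATE_NONSIGNALED);
}

void producer(void)
{
    /* ... fill the buffer ... */
    event_set_state(&buf_ready, STATE_SIGNALED);
    semaphore_release(&data_sem);            /* wake one waiter */
}

void consumer(void)
{
    event_wait(&buf_ready, STATE_SIGNALED);  /* wait for the fill */
    semaphore_wait(&data_sem);               /* claim the item */
    /* ... consume ... */
}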
@@ -139,6 +139,8 @@
#define CPU_CTL (*(volatile unsigned char *)(0xcf004054))
#define COP_CTL (*(volatile unsigned char *)(0xcf004058))

#define PROC_CTL(core) ((&CPU_CTL)[(core)*4])

#define PROC_SLEEP 0xca
#define PROC_WAKE 0xce

@@ -34,11 +34,15 @@
/* Each processor has two mailboxes it can write to and two which
it can read from. We define the first to be for sending messages
and the second for replying to messages */
#define CPU_MESSAGE (*(volatile unsigned long *)(0x60001000))
#define COP_MESSAGE (*(volatile unsigned long *)(0x60001004))
#define CPU_REPLY (*(volatile unsigned long *)(0x60001008))
#define COP_REPLY (*(volatile unsigned long *)(0x6000100c))
#define MBOX_CONTROL (*(volatile unsigned long *)(0x60001010))
#define CPU_MESSAGE (*(volatile unsigned long *)(0x60001000))
#define COP_MESSAGE (*(volatile unsigned long *)(0x60001004))
#define CPU_REPLY (*(volatile unsigned long *)(0x60001008))
#define COP_REPLY (*(volatile unsigned long *)(0x6000100c))
#define MBOX_CONTROL (*(volatile unsigned long *)(0x60001010))

/* Simple convenient array-like access */
#define PROC_MESSAGE(core) ((&CPU_MESSAGE)[core])
#define PROC_REPLY(core) ((&CPU_REPLY)[core])

/* Interrupts */
#define CPU_INT_STAT (*(volatile unsigned long*)(0x60004000))
@@ -142,6 +146,7 @@
/* Processors Control */
#define CPU_CTL (*(volatile unsigned long *)(0x60007000))
#define COP_CTL (*(volatile unsigned long *)(0x60007004))
#define PROC_CTL(core) ((&CPU_CTL)[core])

#define PROC_SLEEP 0x80000000
#define PROC_WAIT 0x40000000

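The PROC_CTL()/PROC_MESSAGE() macros added here index consecutive memory-mapped registers as if they formed an array; a short sketch of why the pointer arithmetic works (addresses from this header; the usage line is illustrative):

/* On PP5020, CPU_CTL sits at 0x60007000 and COP_CTL at 0x60007004:
 * consecutive 32-bit registers. Taking &CPU_CTL yields a pointer to
 * volatile unsigned long, so (&CPU_CTL)[1] dereferences
 * 0x60007000 + 1*4 == COP_CTL. */
#define PROC_CTL(core)  ((&CPU_CTL)[core])

PROC_CTL(CURRENT_CORE) = PROC_SLEEP;   /* e.g. put this core to sleep */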
@@ -45,6 +45,10 @@ bool detect_original_firmware(void);
#endif

#ifdef HAVE_ADJUSTABLE_CPU_FREQ
#if NUM_CORES > 1
extern struct spinlock boostctrl_spin;
#endif
void cpu_boost_init(void);
#define FREQ cpu_frequency
void set_cpu_frequency(long frequency);
#ifdef CPU_BOOST_LOGGING

@ -21,6 +21,7 @@
|
|||
|
||||
#include "config.h"
|
||||
#include <inttypes.h>
|
||||
#include <stddef.h>
|
||||
#include <stdbool.h>
|
||||
|
||||
/* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
|
||||
|
@ -31,13 +32,15 @@
|
|||
* can change it own priority to REALTIME to override user interface and
|
||||
* prevent playback skipping.
|
||||
*/
|
||||
#define HIGHEST_PRIORITY 1 /* The highest possible thread priority */
|
||||
#define LOWEST_PRIORITY 100 /* The lowest possible thread priority */
|
||||
#define PRIORITY_REALTIME 1
|
||||
#define PRIORITY_USER_INTERFACE 4 /* The main thread */
|
||||
#define PRIORITY_RECORDING 4 /* Recording thread */
|
||||
#define PRIORITY_PLAYBACK 4 /* or REALTIME when needed */
|
||||
#define PRIORITY_BUFFERING 4 /* Codec buffering thread */
|
||||
#define PRIORITY_SYSTEM 6 /* All other firmware threads */
|
||||
#define PRIORITY_BACKGROUND 8 /* Normal application threads */
|
||||
#define PRIORITY_USER_INTERFACE 4 /* The main thread */
|
||||
#define PRIORITY_RECORDING 4 /* Recording thread */
|
||||
#define PRIORITY_PLAYBACK 4 /* or REALTIME when needed */
|
||||
#define PRIORITY_BUFFERING 4 /* Codec buffering thread */
|
||||
#define PRIORITY_SYSTEM 6 /* All other firmware threads */
|
||||
#define PRIORITY_BACKGROUND 8 /* Normal application threads */
|
||||
|
||||
#if CONFIG_CODEC == SWCODEC
|
||||
#define MAXTHREADS 16
|
||||
|
@ -47,6 +50,46 @@
|
|||
|
||||
#define DEFAULT_STACK_SIZE 0x400 /* Bytes */
|
||||
|
||||

/**
 * "Busy" values that can be swapped into a variable to indicate
 * that the variable or object pointed to is in use by another processor
 * core. When accessed, the busy value is swapped-in while the current
 * value is atomically returned. If the swap returns the busy value,
 * the processor should retry the operation until some other value is
 * returned. When modification is finished, the new value should be
 * written which unlocks it and updates it atomically.
 *
 * Procedure:
 * while ((curr_value = swap(&variable, BUSY_VALUE)) == BUSY_VALUE);
 *
 * Modify/examine object at mem location or variable. Create "new_value"
 * as suitable.
 *
 * variable = new_value or curr_value;
 *
 * To check a value for busy and perform an operation if not:
 * curr_value = swap(&variable, BUSY_VALUE);
 *
 * if (curr_value != BUSY_VALUE)
 * {
 *     Modify/examine object at mem location or variable. Create "new_value"
 *     as suitable.
 *     variable = new_value or curr_value;
 * }
 * else
 * {
 *     Do nothing - already busy
 * }
 *
 * Only ever restore when an actual value is returned or else it could leave
 * the variable locked permanently if another processor unlocked in the
 * meantime. The next access attempt would deadlock for all processors since
 * an abandoned busy status would be left behind.
 */
#define STATE_BUSYuptr ((void*)UINTPTR_MAX)
#define STATE_BUSYu8   UINT8_MAX
#define STATE_BUSYi    INT_MIN
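
For illustration, a minimal sketch of the busy-swap protocol described in the
comment above, assuming a hypothetical atomic swap32() with the same semantics
as the xchg32 macro defined further down in this header (not part of the
commit itself):

static inline int atomic_add_int(volatile int *v, int amount)
{
    int old;
    /* spin while another core holds the busy value in the variable */
    while ((old = swap32(v, STATE_BUSYi)) == STATE_BUSYi);
    /* writing the real value back both updates and unlocks it */
    *v = old + amount;
    return old;
}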

#ifndef SIMULATOR
/* Need to keep structures inside the header file because debug_menu
 * needs them. */

@ -58,7 +101,7 @@ struct regs

    unsigned int a[5]; /* 28-44 - a2-a6 */
    void *sp;          /* 48 - Stack pointer (a7) */
    void *start;       /* 52 - Thread start address, or NULL when started */
} __attribute__((packed));
};
#elif CONFIG_CPU == SH7034
struct regs
{

@ -66,7 +109,7 @@ struct regs

    void *sp;    /* 28 - Stack pointer (r15) */
    void *pr;    /* 32 - Procedure register */
    void *start; /* 36 - Thread start address, or NULL when started */
} __attribute__((packed));
};
#elif defined(CPU_ARM)
struct regs
{

@ -74,7 +117,7 @@ struct regs

    void *sp;        /* 32 - Stack pointer (r13) */
    unsigned int lr; /* 36 - r14 (lr) */
    void *start;     /* 40 - Thread start address, or NULL when started */
} __attribute__((packed));
};
#endif /* CONFIG_CPU */
#else
struct regs

@ -85,58 +128,206 @@ struct regs

};
#endif /* !SIMULATOR */
#define STATE_RUNNING       0x00000000
#define STATE_BLOCKED       0x20000000
#define STATE_SLEEPING      0x40000000
#define STATE_BLOCKED_W_TMO 0x60000000

#define THREAD_STATE_MASK   0x60000000
#define STATE_ARG_MASK      0x1FFFFFFF

#define GET_STATE_ARG(state)     (state & STATE_ARG_MASK)
#define GET_STATE(state)         (state & THREAD_STATE_MASK)
#define SET_STATE(var,state,arg) (var = (state | ((arg) & STATE_ARG_MASK)))
#define CLEAR_STATE_ARG(var)     (var &= ~STATE_ARG_MASK)

#define STATE_BOOSTED 0x80000000
#define STATE_IS_BOOSTED(var) (var & STATE_BOOSTED)
#define SET_BOOST_STATE(var)  (var |= STATE_BOOSTED)

struct thread_entry {
    struct regs context;
    const char *name;
    void *stack;
    unsigned long statearg;
    unsigned short stack_size;
# if NUM_CORES > 1
    unsigned char core; /* Core to which the thread belongs. */
# endif
#ifdef HAVE_PRIORITY_SCHEDULING
    unsigned char priority;
    unsigned char priority_x;
    long last_run;
#endif
    struct thread_entry *next, *prev;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    intptr_t retval;
/* NOTE: The use of the word "queue" may also refer to a linked list of
   threads being maintained that are normally dealt with in FIFO order
   and not necessarily a kernel event_queue */
enum
{
    /* States without a timeout must be first */
    STATE_KILLED = 0,    /* Thread is killed (default) */
    STATE_RUNNING,       /* Thread is currently running */
    STATE_BLOCKED,       /* Thread is indefinitely blocked on a queue */
    /* These states involve adding the thread to the tmo list */
    STATE_SLEEPING,      /* Thread is sleeping with a timeout */
    STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
    /* Miscellaneous states */
    STATE_FROZEN,        /* Thread is suspended and will not run until
                            thread_thaw is called with its ID */
    THREAD_NUM_STATES,
    TIMEOUT_STATE_FIRST = STATE_SLEEPING,
#if NUM_CORES > 1
    STATE_BUSY = STATE_BUSYu8, /* Thread slot is being examined */
#endif
};

struct core_entry {
    struct thread_entry *running;
    struct thread_entry *sleeping;
    struct thread_entry *waking;
    struct thread_entry **wakeup_list;
#ifdef HAVE_PRIORITY_SCHEDULING
    long highest_priority;
#endif
#if NUM_CORES > 1
    volatile bool lock_issued;
    volatile bool kernel_running;
#define THREAD_DESTRUCT ((const char *)0x84905617)
#endif

/* Link information for lists thread is in */
struct thread_entry; /* forward */
struct thread_list
{
    struct thread_entry *prev; /* Previous thread in a list */
    struct thread_entry *next; /* Next thread in a list */
};

/* Small objects for core-wise mutual exclusion */
#if CONFIG_CORELOCK == SW_CORELOCK
/* No reliable atomic instruction available - use Peterson's algorithm */
struct corelock
{
    volatile unsigned char myl[NUM_CORES];
    volatile unsigned char turn;
} __attribute__((packed));

void corelock_init(struct corelock *cl);
void corelock_lock(struct corelock *cl);
int corelock_try_lock(struct corelock *cl);
void corelock_unlock(struct corelock *cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
/* Use native atomic swap/exchange instruction */
struct corelock
{
    unsigned char locked;
} __attribute__((packed));

#define corelock_init(cl) \
    ({ (cl)->locked = 0; })
#define corelock_lock(cl) \
    ({ while (test_and_set(&(cl)->locked, 1)); })
#define corelock_try_lock(cl) \
    ({ test_and_set(&(cl)->locked, 1) ? 0 : 1; })
#define corelock_unlock(cl) \
    ({ (cl)->locked = 0; })
#else
/* No atomic corelock op needed or just none defined */
#define corelock_init(cl)
#define corelock_lock(cl)
#define corelock_try_lock(cl)
#define corelock_unlock(cl)
#endif /* core locking selection */
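
As a reference for the SW_CORELOCK case, this is roughly what a two-core
Peterson lock over the struct above could look like in plain C; a sketch only,
assuming NUM_CORES == 2 and CURRENT_CORE in {0,1}, not the committed
implementation:

void corelock_lock(struct corelock *cl)
{
    const unsigned int core = CURRENT_CORE;
    const unsigned int other = 1 - core;

    cl->myl[core] = 1; /* declare interest */
    cl->turn = other;  /* give the other core the first move */

    /* wait until the other core is uninterested or defers */
    while (cl->myl[other] != 0 && cl->turn == other);
}

void corelock_unlock(struct corelock *cl)
{
    cl->myl[CURRENT_CORE] = 0; /* retract interest, releasing the lock */
}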

struct thread_queue
{
    struct thread_entry *queue; /* list of threads waiting -
                                   _must_ be first member */
#if CONFIG_CORELOCK == SW_CORELOCK
    struct corelock cl;         /* lock for atomic list operations */
#endif
};

/* Information kept in each thread slot
 * members are arranged according to size - largest first - in order
 * to ensure both alignment and packing at the same time.
 */
struct thread_entry
{
    struct regs context;      /* Register context at switch -
                                 _must_ be first member */
    void *stack;              /* Pointer to top of stack */
    const char *name;         /* Thread name */
    long tmo_tick;            /* Tick when thread should be woken from
                                 timeout */
    struct thread_list l;     /* Links for blocked/waking/running -
                                 circular linkage in both directions */
    struct thread_list tmo;   /* Links for timeout list -
                                 Self-pointer-terminated in reverse direction,
                                 NULL-terminated in forward direction */
    struct thread_queue *bqp; /* Pointer to list variable in kernel
                                 object where thread is blocked - used
                                 for implicit unblock and explicit wake */
#if CONFIG_CORELOCK == SW_CORELOCK
    struct thread_entry **bqnlp; /* Pointer to list variable in kernel
                                    object where thread is blocked - non-locked
                                    operations will be used */
#endif
    struct thread_entry *queue; /* List of threads waiting for thread to be
                                   removed */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    intptr_t retval;          /* Return value from a blocked operation */
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    long last_run;            /* Last tick when started */
#endif
    unsigned short stack_size; /* Size of stack in bytes */
#ifdef HAVE_PRIORITY_SCHEDULING
    unsigned char priority;   /* Current priority */
    unsigned char priority_x; /* Inherited priority - right now just a
                                 runtime guarantee flag */
#endif
    unsigned char state;      /* Thread slot state (STATE_*) */
#if NUM_CORES > 1
    unsigned char core;       /* The core to which thread belongs */
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    unsigned char boosted;    /* CPU frequency boost flag */
#endif
#if CONFIG_CORELOCK == SW_CORELOCK
    struct corelock cl;       /* Corelock to lock thread slot */
#endif
};

#if NUM_CORES > 1
/* Operations to be performed just before stopping a thread and starting
   a new one if specified before calling switch_thread */
#define TBOP_UNLOCK_LIST     0x01 /* Set a pointer variable address var_ptrp */
#if CONFIG_CORELOCK == CORELOCK_SWAP
#define TBOP_SET_VARi        0x02 /* Set an int at address var_ip */
#define TBOP_SET_VARu8       0x03 /* Set an unsigned char at address var_u8p */
#define TBOP_VAR_TYPE_MASK   0x03 /* Mask for variable type */
#endif /* CONFIG_CORELOCK */
#define TBOP_UNLOCK_CORELOCK 0x04
#define TBOP_UNLOCK_THREAD   0x08 /* Unlock a thread's slot */
#define TBOP_UNLOCK_CURRENT  0x10 /* Unlock the current thread's slot */
#define TBOP_IRQ_LEVEL       0x20 /* Set a new irq level */
#define TBOP_SWITCH_CORE     0x40 /* Call the core switch preparation routine */

struct thread_blk_ops
{
    int irq_level; /* new IRQ level to set */
#if CONFIG_CORELOCK != SW_CORELOCK
    union
    {
        int var_iv;      /* int variable value to set */
        uint8_t var_u8v; /* unsigned char value to set */
        struct thread_entry *list_v; /* list pointer queue value to set */
    };
#endif
    union
    {
#if CONFIG_CORELOCK != SW_CORELOCK
        int *var_ip;      /* pointer to int variable */
        uint8_t *var_u8p; /* pointer to unsigned char variable */
#endif
        struct thread_queue *list_p; /* pointer to list variable */
    };
#if CONFIG_CORELOCK == SW_CORELOCK
    struct corelock *cl_p;       /* corelock to unlock */
    struct thread_entry *thread; /* thread to unlock */
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    unsigned char state;         /* new thread state (performs unlock) */
#endif /* SOFTWARE_CORELOCK */
    unsigned char flags;         /* TBOP_* flags */
};
#endif /* NUM_CORES > 1 */

/* Information kept for each core
 * Members are arranged for the same reason as in thread_entry
 */
struct core_entry
{
    /* "Active" lists - the core is constantly active on these; they are
       never locked and interrupts do not access them */
    struct thread_entry *running; /* threads that are running */
    struct thread_entry *timeout; /* threads that are on a timeout before
                                     running again */
    /* "Shared" lists - cores interact in a synchronized manner - access
       is locked between cores and interrupts */
    struct thread_queue waking;   /* intermediate locked list that holds
                                     threads the other core should wake up
                                     on the next task switch */
    long next_tmo_check;          /* soonest time to check tmo threads */
#if NUM_CORES > 1
    struct thread_blk_ops blk_ops; /* operations to perform when
                                      blocking a thread */
#else
#define STAY_IRQ_LEVEL (-1)
    int irq_level;                /* sets the irq level to irq_level */
#endif /* NUM_CORES */
#ifdef HAVE_PRIORITY_SCHEDULING
    unsigned char highest_priority;
#endif
    long last_tick;
    int switch_to_irq_level;
#define STAY_IRQ_LEVEL -1
};

#ifdef HAVE_PRIORITY_SCHEDULING

@ -145,82 +336,210 @@ struct core_entry {

#define IF_PRIO(...)
#endif

/* PortalPlayer chips have 2 cores, therefore need atomic mutexes
 * Just use it for ARM, Coldfire and whatever else well...why not?
 */

/* Macros generate better code than an inline function in this case */
#if (defined (CPU_PP) || defined (CPU_ARM)) && CONFIG_CPU != PP5020
#define test_and_set(x_, v_) \
({ \
    uint32_t old; \
    asm volatile ( \
        "swpb %[old], %[v], [%[x]] \r\n" \
        : [old]"=r"(old) \
        : [v]"r"((uint32_t)v_), [x]"r"((uint32_t *)x_) \
    ); \
    old; \
})
#elif defined (CPU_COLDFIRE)
#define test_and_set(x_, v_) \
({ \
    uint8_t old; \
    asm volatile ( \
        "bset.l %[v], (%[x]) \r\n" \
        "sne.b  %[old]       \r\n" \
        : [old]"=d,d"(old) \
        : [v]"i,d"((uint32_t)v_), [x]"a,a"((uint32_t *)x_) \
    ); \
    old; \
})
#elif CONFIG_CPU == SH7034
#define test_and_set(x_, v_) \
({ \
    uint32_t old; \
    asm volatile ( \
        "tas.b @%[x]         \r\n" \
        "mov   #-1, %[old]   \r\n" \
        "negc  %[old], %[old] \r\n" \
        : [old]"=r"(old) \
        : [v]"M"((uint32_t)v_), /* Value of v_ must be 1 */ \
          [x]"r"((uint8_t *)x_) \
    ); \
    old; \
})
#if (defined (CPU_PP) || defined (CPU_ARM))
/* atomic */
#ifdef SOFTWARE_CORELOCK
#define test_and_set(a, v, cl) \
    xchg8((a), (v), (cl))
/* atomic */
#define xchg8(a, v, cl) \
({ uint32_t o; \
   corelock_lock(cl); \
   o = *(uint8_t *)(a); \
   *(uint8_t *)(a) = (v); \
   corelock_unlock(cl); \
   o; })
#define xchg32(a, v, cl) \
({ uint32_t o; \
   corelock_lock(cl); \
   o = *(uint32_t *)(a); \
   *(uint32_t *)(a) = (v); \
   corelock_unlock(cl); \
   o; })
#define xchgptr(a, v, cl) \
({ typeof (*(a)) o; \
   corelock_lock(cl); \
   o = *(a); \
   *(a) = (v); \
   corelock_unlock(cl); \
   o; })
#else
/* default for no asm version */
#define test_and_set(x_, v_) \
({ \
    uint32_t old = *(uint32_t *)x_; \
    *(uint32_t *)x_ = v_; \
    old; \
})
#endif
/* atomic */
#define test_and_set(a, v, ...) \
    xchg8((a), (v))
#define xchg8(a, v, ...) \
({ uint32_t o; \
   asm volatile( \
       "swpb %0, %1, [%2]" \
       : "=r"(o) \
       : "r"(v), \
         "r"((uint8_t*)(a))); \
   o; })
/* atomic */
#define xchg32(a, v, ...) \
({ uint32_t o; \
   asm volatile( \
       "swp %0, %1, [%2]" \
       : "=r"(o) \
       : "r"((uint32_t)(v)), \
         "r"((uint32_t*)(a))); \
   o; })
/* atomic */
#define xchgptr(a, v, ...) \
({ typeof (*(a)) o; \
   asm volatile( \
       "swp %0, %1, [%2]" \
       : "=r"(o) \
       : "r"(v), "r"(a)); \
   o; })
#endif /* SOFTWARE_CORELOCK */
#elif defined (CPU_COLDFIRE)
/* atomic */
/* one branch will be optimized away if v is a constant expression */
#define test_and_set(a, v, ...) \
({ uint32_t o = 0; \
   if (v) { \
       asm volatile ( \
           "bset.b #0, (%0)" \
           : : "a"((uint8_t*)(a)) \
           : "cc"); \
   } else { \
       asm volatile ( \
           "bclr.b #0, (%0)" \
           : : "a"((uint8_t*)(a)) \
           : "cc"); \
   } \
   asm volatile ("sne.b %0" \
                 : "+d"(o)); \
   o; })
#elif CONFIG_CPU == SH7034
/* atomic */
#define test_and_set(a, v, ...) \
({ uint32_t o; \
   asm volatile ( \
       "tas.b @%2    \n" \
       "mov   #-1, %0 \n" \
       "negc  %0, %0  \n" \
       : "=r"(o) \
       : "M"((uint32_t)(v)), /* Value of v must be 1 */ \
         "r"((uint8_t *)(a))); \
   o; })
#endif /* CONFIG_CPU == */

/* defaults for no asm version */
#ifndef test_and_set
/* not atomic */
#define test_and_set(a, v, ...) \
({ uint32_t o = *(uint8_t *)(a); \
   *(uint8_t *)(a) = (v); \
   o; })
#endif /* test_and_set */
#ifndef xchg8
/* not atomic */
#define xchg8(a, v, ...) \
({ uint32_t o = *(uint8_t *)(a); \
   *(uint8_t *)(a) = (v); \
   o; })
#endif /* xchg8 */
#ifndef xchg32
/* not atomic */
#define xchg32(a, v, ...) \
({ uint32_t o = *(uint32_t *)(a); \
   *(uint32_t *)(a) = (v); \
   o; })
#endif /* xchg32 */
#ifndef xchgptr
/* not atomic */
#define xchgptr(a, v, ...) \
({ typeof (*(a)) o = *(a); \
   *(a) = (v); \
   o; })
#endif /* xchgptr */
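
A quick illustration of what test_and_set() buys its callers - the simplest
possible spin lock built on it. The names example_flag/example_lock/
example_unlock are hypothetical, and this is only meaningful on builds where
test_and_set() is truly atomic:

static volatile uint8_t example_flag = 0;

static void example_lock(void)
{
    /* swap in 1; a returned 0 means this thread took the lock */
    while (test_and_set(&example_flag, 1) != 0)
        yield(); /* let other threads run until the holder clears it */
}

static void example_unlock(void)
{
    example_flag = 0; /* a plain store releases the lock */
}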

void core_idle(void);
void core_wake(IF_COP_VOID(unsigned int core));

#define CREATE_THREAD_FROZEN 0x00000001 /* Thread is frozen at create time */
struct thread_entry*
    create_thread(void (*function)(void), void* stack, int stack_size,
                  const char *name IF_PRIO(, int priority)
                  IF_COP(, unsigned int core, bool fallback));
                  unsigned flags, const char *name
                  IF_PRIO(, int priority)
                  IF_COP(, unsigned int core));

#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void);
#else
#define trigger_cpu_boost()
#endif

void thread_thaw(struct thread_entry *thread);
void thread_wait(struct thread_entry *thread);
void remove_thread(struct thread_entry *thread);
void switch_thread(bool save_context, struct thread_entry **blocked_list);
void switch_thread(struct thread_entry *old);
void sleep_thread(int ticks);
void block_thread(struct thread_entry **thread);
void block_thread_w_tmo(struct thread_entry **thread, int timeout);
void set_irq_level_and_block_thread(struct thread_entry **thread, int level);
void set_irq_level_and_block_thread_w_tmo(struct thread_entry **list,
                                          int timeout, int level);
void wakeup_thread(struct thread_entry **thread);
void wakeup_thread_irq_safe(struct thread_entry **thread);

/**
 * Setup to allow using thread queues as locked or non-locked without speed
 * sacrifices in both core locking types.
 *
 * The blocking/waking functions inline two different versions of the real
 * function into the stubs when a software or other separate core locking
 * mechanism is employed.
 *
 * When a simple test-and-set or similar instruction is available, locking
 * has no cost and so one version is used and the internal worker is called
 * directly.
 *
 * CORELOCK_NONE is treated the same as when an atomic instruction can be
 * used.
 */

/* Blocks the current thread on a thread queue */
#if CONFIG_CORELOCK == SW_CORELOCK
void block_thread(struct thread_queue *tq);
void block_thread_no_listlock(struct thread_entry **list);
#else
void _block_thread(struct thread_queue *tq);
static inline void block_thread(struct thread_queue *tq)
    { _block_thread(tq); }
static inline void block_thread_no_listlock(struct thread_entry **list)
    { _block_thread((struct thread_queue *)list); }
#endif /* CONFIG_CORELOCK */

/* Blocks the current thread on a thread queue for a max amount of time
 * There is no "_no_listlock" version because timeout blocks without sync on
 * the blocking queues is not permitted since either core could access the
 * list at any time to do an implicit wake. In other words, objects with
 * timeout support require lockable queues. */
void block_thread_w_tmo(struct thread_queue *tq, int timeout);

/* Wakes up the thread at the head of the queue */
#define THREAD_WAKEUP_NONE    ((struct thread_entry *)NULL)
#define THREAD_WAKEUP_MISSING ((struct thread_entry *)(NULL+1))
#if CONFIG_CORELOCK == SW_CORELOCK
struct thread_entry * wakeup_thread(struct thread_queue *tq);
struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list);
#else
struct thread_entry * _wakeup_thread(struct thread_queue *list);
static inline struct thread_entry * wakeup_thread(struct thread_queue *tq)
    { return _wakeup_thread(tq); }
static inline struct thread_entry * wakeup_thread_no_listlock(struct thread_entry **list)
    { return _wakeup_thread((struct thread_queue *)list); }
#endif /* CONFIG_CORELOCK */

/* Initialize a thread_queue object. */
static inline void thread_queue_init(struct thread_queue *tq)
    { tq->queue = NULL; IF_SWCL(corelock_init(&tq->cl);) }
/* A convenience function for waking an entire queue of threads. */
static inline void thread_queue_wake(struct thread_queue *tq)
    { while (wakeup_thread(tq) != NULL); }
/* The no-listlock version of thread_queue_wake() */
static inline void thread_queue_wake_no_listlock(struct thread_entry **list)
    { while (wakeup_thread_no_listlock(list) != NULL); }

#ifdef HAVE_PRIORITY_SCHEDULING
int thread_set_priority(struct thread_entry *thread, int priority);
int thread_get_priority(struct thread_entry *thread);
/* Yield that guarantees thread execution once per round regardless of
   thread's scheduler priority - basically a transient realtime boost
   without altering the scheduler's thread precedence. */

@ -228,17 +547,20 @@ void priority_yield(void);

#else
#define priority_yield yield
#endif /* HAVE_PRIORITY_SCHEDULING */
#if NUM_CORES > 1
unsigned int switch_core(unsigned int new_core);
#endif
struct thread_entry * thread_get_current(void);
void init_threads(void);
int thread_stack_usage(const struct thread_entry *thread);
#if NUM_CORES > 1
int idle_stack_usage(unsigned int core);
#endif
int thread_get_status(const struct thread_entry *thread);
unsigned thread_get_status(const struct thread_entry *thread);
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread);
#ifdef RB_PROFILE
void profile_thread(void);
#endif

#endif
#endif /* THREAD_H */

@ -28,15 +28,37 @@

#include "avic-imx31.h"
#endif

/* Make this nonzero to enable more elaborate checks on objects */
#ifdef DEBUG
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif

#if (!defined(CPU_PP) && (CONFIG_CPU != IMX31L)) || !defined(BOOTLOADER)
volatile long current_tick NOCACHEDATA_ATTR = 0;
#endif

void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

extern struct core_entry cores[NUM_CORES];

/* This array holds all queues that are initiated. It is used for broadcast. */
static struct event_queue *all_queues[32] NOCACHEBSS_ATTR;
static int num_queues NOCACHEBSS_ATTR;
static struct
{
    int count;
    struct event_queue *queues[MAX_NUM_QUEUES];
#if NUM_CORES > 1
    struct corelock cl;
#endif
} all_queues NOCACHEBSS_ATTR;

/****************************************************************************
 * Standard kernel stuff

@ -52,8 +74,8 @@ void kernel_init(void)

    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        num_queues = 0;
        memset(all_queues, 0, sizeof(all_queues));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
    }
}

@ -77,7 +99,7 @@ void sleep(int ticks)

#elif defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
    while (TIME_BEFORE(USEC_TIMER, stop))
        switch_thread(true,NULL);
        switch_thread(NULL);
#else
    sleep_thread(ticks);
#endif

@ -88,7 +110,7 @@ void yield(void)

#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022) || CONFIG_CPU == IMX31L) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread(true, NULL);
    switch_thread(NULL);
#endif
}

@ -104,7 +126,7 @@ static void queue_fetch_sender(struct queue_sender_list *send,

{
    struct thread_entry **spp = &send->senders[i];

    if (*spp)
    if(*spp)
    {
        send->curr_sender = *spp;
        *spp = NULL;

@ -124,18 +146,16 @@ static void queue_release_sender(struct thread_entry **sender,

                                 intptr_t retval)
{
    (*sender)->retval = retval;
    wakeup_thread_irq_safe(sender);
#if 0
    wakeup_thread_no_listlock(sender);
    /* This should _never_ happen - there must never be multiple
       threads in this list and it is a corrupt state */
    if (*sender != NULL)
        panicf("Queue: send slot ovf");
#endif
    KERNEL_ASSERT(*sender == NULL, "queue->send slot ovf: %08X", (int)*sender);
}

/* Releases any waiting threads that are queued with queue_send -
 * reply with 0.
 * Disable IRQs before calling since it uses queue_release_sender.
 * Disable IRQs and lock before calling since it uses
 * queue_release_sender.
 */
static void queue_release_all_senders(struct event_queue *q)
{

@ -156,79 +176,114 @@ static void queue_release_all_senders(struct event_queue *q)

}

/* Enables queue_send on the specified queue - caller allocates the extra
   data structure */
   data structure. Only queues which are taken to be owned by a thread should
   enable this. Public waiting is not permitted. */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send)
{
    q->send = send;
    memset(send, 0, sizeof(struct queue_sender_list));
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    q->send = NULL;
    if(send != NULL)
    {
        memset(send, 0, sizeof(*send));
        q->send = send;
    }

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    thread_queue_init(&q->queue);
    q->read = 0;
    q->write = 0;
    q->thread = NULL;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
#endif

    if(register_queue)
    {
        if(all_queues.count >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }
        /* Add it to the all_queues array */
        all_queues[num_queues++] = q;
        all_queues.queues[all_queues.count++] = q;
        corelock_unlock(&all_queues.cl);
    }

    set_irq_level(oldlevel);
}

/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel;
    int i;
    bool found = false;

    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Release threads waiting on queue */
    wakeup_thread(&q->thread);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release waiting threads and reply to any dequeued message
       waiting for one. */
    queue_release_all_senders(q);
    queue_reply(q, 0);
#endif

    /* Find the queue to be deleted */
    for(i = 0;i < num_queues;i++)
    for(i = 0;i < all_queues.count;i++)
    {
        if(all_queues[i] == q)
        if(all_queues.queues[i] == q)
        {
            found = true;
            /* Move the following queues up in the list */
            all_queues.count--;

            for(;i < all_queues.count;i++)
            {
                all_queues.queues[i] = all_queues.queues[i+1];
            }

            break;
        }
    }

    if(found)
    {
        /* Move the following queues up in the list */
        for(;i < num_queues-1;i++)
        {
            all_queues[i] = all_queues[i+1];
        }

        num_queues--;
    }

    corelock_unlock(&all_queues.cl);

    /* Release threads waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release waiting threads for reply and reply to any dequeued
       message waiting for one. */
    queue_release_all_senders(q);
    queue_reply(q, 0);
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
void queue_wait(struct event_queue *q, struct event *ev)
/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->curr_sender)

@ -240,8 +295,28 @@ void queue_wait(struct event_queue *q, struct event *ev)

    if (q->read == q->write)
    {
        set_irq_level_and_block_thread(&q->thread, oldlevel);
        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        do
        {
#if CONFIG_CORELOCK == CORELOCK_NONE
            cores[CURRENT_CORE].irq_level = oldlevel;
#elif CONFIG_CORELOCK == SW_CORELOCK
            const unsigned int core = CURRENT_CORE;
            cores[core].blk_ops.irq_level = oldlevel;
            cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK | TBOP_IRQ_LEVEL;
            cores[core].blk_ops.cl_p = &q->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
            const unsigned int core = CURRENT_CORE;
            cores[core].blk_ops.irq_level = oldlevel;
            cores[core].blk_ops.flags = TBOP_SET_VARu8 | TBOP_IRQ_LEVEL;
            cores[core].blk_ops.var_u8p = &q->cl.locked;
            cores[core].blk_ops.var_u8v = 0;
#endif /* CONFIG_CORELOCK */
            block_thread(&q->queue);
            oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
            corelock_lock(&q->cl);
        }
        /* A message that woke us could now be gone */
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;

@ -254,13 +329,17 @@ void queue_wait(struct event_queue *q, struct event *ev)

        queue_fetch_sender(q->send, rd);
    }
#endif

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}

void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int oldlevel;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if (q->send && q->send->curr_sender)

@ -269,13 +348,30 @@ void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)

        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write && ticks > 0)
    {
        set_irq_level_and_block_thread_w_tmo(&q->thread, ticks, oldlevel);
#if CONFIG_CORELOCK == CORELOCK_NONE
        cores[CURRENT_CORE].irq_level = oldlevel;
#elif CONFIG_CORELOCK == SW_CORELOCK
        const unsigned int core = CURRENT_CORE;
        cores[core].blk_ops.irq_level = oldlevel;
        cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK | TBOP_IRQ_LEVEL;
        cores[core].blk_ops.cl_p = &q->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        const unsigned int core = CURRENT_CORE;
        cores[core].blk_ops.irq_level = oldlevel;
        cores[core].blk_ops.flags = TBOP_SET_VARu8 | TBOP_IRQ_LEVEL;
        cores[core].blk_ops.var_u8p = &q->cl.locked;
        cores[core].blk_ops.var_u8v = 0;
#endif
        block_thread_w_tmo(&q->queue, ticks);
        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        corelock_lock(&q->cl);
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;

@ -293,15 +389,19 @@ void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)

    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}

void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int oldlevel;
    unsigned int wr;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id = id;

@ -320,20 +420,24 @@ void queue_post(struct event_queue *q, long id, intptr_t data)

    }
#endif

    wakeup_thread_irq_safe(&q->thread);
    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* No wakeup_thread_irq_safe here because IRQ handlers are not allowed
   use of this function - we only aim to protect the queue integrity by
   turning them off. */
/* IRQ handlers are not allowed use of this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int oldlevel;
    unsigned int wr;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id = id;

@ -341,21 +445,38 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)

    if(q->send)
    {
        const unsigned int core = CURRENT_CORE;
        struct thread_entry **spp = &q->send->senders[wr];

        if (*spp)
        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        wakeup_thread(&q->thread);
        set_irq_level_and_block_thread(spp, oldlevel);
        return thread_get_current()->retval;
        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

#if CONFIG_CORELOCK == CORELOCK_NONE
        cores[core].irq_level = oldlevel;
#elif CONFIG_CORELOCK == SW_CORELOCK
        cores[core].blk_ops.irq_level = oldlevel;
        cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK | TBOP_IRQ_LEVEL;
        cores[core].blk_ops.cl_p = &q->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        cores[core].blk_ops.irq_level = oldlevel;
        cores[core].blk_ops.flags = TBOP_SET_VARu8 | TBOP_IRQ_LEVEL;
        cores[core].blk_ops.var_u8p = &q->cl.locked;
        cores[core].blk_ops.var_u8v = 0;
#endif
        block_thread_no_listlock(spp);
        return cores[core].running->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->thread);
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);

    return 0;
@ -365,21 +486,52 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)

/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    return q->send && q->send->curr_sender;
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
#endif

    return in_send;
}
#endif

/* Replies with retval to any dequeued message sent with queue_send */
/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    /* No IRQ lock here since IRQs cannot change this */
    if(q->send && q->send->curr_sender)
    {
        queue_release_sender(&q->send->curr_sender, retval);
#if NUM_CORES > 1
        int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        corelock_lock(&q->cl);
        /* Double-check locking */
        if(q->send && q->send->curr_sender)
        {
#endif

            queue_release_sender(&q->send->curr_sender, retval);

#if NUM_CORES > 1
        }
        corelock_unlock(&q->cl);
        set_irq_level(oldlevel);
#endif
    }
}
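
To make the send/reply contract concrete, here is a hypothetical owner/client
pairing for the synchronous path; MY_Q_COMPUTE, my_queue and do_work are
illustrative names only, and the queue is assumed to have been set up by the
owning thread with queue_init(&my_queue, true) and
queue_enable_queue_send(&my_queue, &my_queue_send):

static void owner_thread(void)
{
    struct queue_event ev;
    while (1)
    {
        queue_wait(&my_queue, &ev); /* dequeues and records the sender */
        if (ev.id == MY_Q_COMPUTE)
            queue_reply(&my_queue, do_work(ev.data)); /* wakes the sender */
    }
}

/* On any other thread - blocks until the owner replies: */
intptr_t result = queue_send(&my_queue, MY_Q_COMPUTE, (intptr_t)arg);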
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );

@ -387,23 +539,30 @@ bool queue_empty(const struct event_queue* q)

void queue_clear(struct event_queue* q)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int oldlevel;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release all thread waiting in the queue for a reply -
    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    int oldlevel;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {

@ -428,7 +587,8 @@ void queue_remove_from_head(struct event_queue *q, long id)

#endif
        q->read++;
    }

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}

@ -446,13 +606,23 @@ int queue_count(const struct event_queue *q)

int queue_broadcast(long id, intptr_t data)
{
    int i;

#if NUM_CORES > 1
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&all_queues.cl);
#endif

    for(i = 0;i < num_queues;i++)
    for(i = 0;i < all_queues.count;i++)
    {
        queue_post(all_queues[i], id, data);
        queue_post(all_queues.queues[i], id, data);
    }

#if NUM_CORES > 1
    corelock_unlock(&all_queues.cl);
    set_irq_level(oldlevel);
#endif

    return num_queues;
    return i;
}

/****************************************************************************

@ -567,6 +737,7 @@ void TIMER1(void)

{
    int i;

    /* Run through the list of tick tasks (using main core) */
    TIMER1_VAL; /* Read value to ack IRQ */

    /* Run through the list of tick tasks using main CPU core -

@ -580,24 +751,8 @@

    }

#if NUM_CORES > 1
#ifdef CPU_PP502x
    {
        /* If COP is sleeping - give it a kick */
        /* TODO: Use a mailbox in addition to make sure it doesn't go to
         * sleep if kicked just as it's headed to rest to make sure its
         * tick checks won't be jittery. Don't bother at all if it owns no
         * threads. */
        unsigned int cop_ctl;

        cop_ctl = COP_CTL;
        if (cop_ctl & PROC_SLEEP)
        {
            COP_CTL = cop_ctl & ~PROC_SLEEP;
        }
    }
#else
    /* TODO: PP5002 */
#endif
    /* Pulse the COP */
    core_wake(COP);
#endif /* NUM_CORES */

    current_tick++;

@ -837,49 +992,391 @@ void timeout_register(struct timeout *tmo, timeout_cb_type callback,

#endif /* INCLUDE_TIMEOUT_API */

#ifndef SIMULATOR
/*
 * Simulator versions in uisimulator/SIMVER/
 */

/****************************************************************************
 * Simple mutex functions
 * Simple mutex functions ;)
 ****************************************************************************/
void mutex_init(struct mutex *m)
{
    m->locked = false;
    m->queue = NULL;
    m->thread = NULL;
    m->count = 0;
    m->locked = 0;
#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_init(&m->cl);
#endif
}

void mutex_lock(struct mutex *m)
{
    if (test_and_set(&m->locked, 1))
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *const thread = cores[core].running;

    if(thread == m->thread)
    {
        /* Wait until the lock is open... */
        block_thread(&m->thread);
        m->count++;
        return;
    }

    /* Repeat some stuff here or else all the variation is too difficult to
       read */
#if CONFIG_CORELOCK == CORELOCK_SWAP
    /* peek at lock until it's no longer busy */
    unsigned int locked;
    while ((locked = xchg8(&m->locked, STATE_BUSYu8)) == STATE_BUSYu8);
    if(locked == 0)
    {
        m->thread = thread;
        m->locked = 1;
        return;
    }

    /* Block until the lock is open... */
    cores[core].blk_ops.flags = TBOP_SET_VARu8;
    cores[core].blk_ops.var_u8p = &m->locked;
    cores[core].blk_ops.var_u8v = 1;
#else
    corelock_lock(&m->cl);
    if (m->locked == 0)
    {
        m->locked = 1;
        m->thread = thread;
        corelock_unlock(&m->cl);
        return;
    }

    /* Block until the lock is open... */
#if CONFIG_CORELOCK == SW_CORELOCK
    cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
    cores[core].blk_ops.cl_p = &m->cl;
#endif
#endif /* CONFIG_CORELOCK */

    block_thread_no_listlock(&m->queue);
}

void mutex_unlock(struct mutex *m)
{
    if (m->thread == NULL)
        m->locked = 0;
    else
        wakeup_thread(&m->thread);
}
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(m->thread == cores[CURRENT_CORE].running,
                  "mutex_unlock->wrong thread (recurse)");

void spinlock_lock(struct mutex *m)
{
    while (test_and_set(&m->locked, 1))
    if(m->count > 0)
    {
        /* wait until the lock is open... */
        switch_thread(true, NULL);
        /* this thread still owns lock */
        m->count--;
        return;
    }

#if CONFIG_CORELOCK == SW_CORELOCK
    /* lock out other cores */
    corelock_lock(&m->cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    /* wait for peeker to move on */
    while (xchg8(&m->locked, STATE_BUSYu8) == STATE_BUSYu8);
#endif

    /* transfer to next queued thread if any */
    m->thread = wakeup_thread_no_listlock(&m->queue);

    if(m->thread == NULL)
    {
        m->locked = 0; /* release lock */
#if CONFIG_CORELOCK == SW_CORELOCK
        corelock_unlock(&m->cl);
#endif
    }
    else /* another thread is waiting - remain locked */
    {
#if CONFIG_CORELOCK == SW_CORELOCK
        corelock_unlock(&m->cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        m->locked = 1;
#endif
    }
}

void spinlock_unlock(struct mutex *m)
/****************************************************************************
 * Simpl-er mutex functions ;)
 ****************************************************************************/
void spinlock_init(struct spinlock *l IF_COP(, unsigned int flags))
{
    m->locked = 0;
    l->locked = 0;
    l->thread = NULL;
    l->count = 0;
#if NUM_CORES > 1
    l->task_switch = flags & SPINLOCK_TASK_SWITCH;
    corelock_init(&l->cl);
#endif
}

#endif /* ndef SIMULATOR */
void spinlock_lock(struct spinlock *l)
{
    struct thread_entry *const thread = cores[CURRENT_CORE].running;

    if (l->thread == thread)
    {
        l->count++;
        return;
    }

#if NUM_CORES > 1
    if (l->task_switch != 0)
#endif
    {
        /* Let other threads run until the lock is free */
        while(test_and_set(&l->locked, 1, &l->cl) != 0)
        {
            /* spin and switch until the lock is open... */
            switch_thread(NULL);
        }
    }
#if NUM_CORES > 1
    else
    {
        /* Use the corelock purely */
        corelock_lock(&l->cl);
    }
#endif

    l->thread = thread;
}

void spinlock_unlock(struct spinlock *l)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(l->thread == cores[CURRENT_CORE].running,
                  "spinlock_unlock->wrong thread");

    if (l->count > 0)
    {
        /* this thread still owns lock */
        l->count--;
        return;
    }

    /* clear owner */
    l->thread = NULL;

#if NUM_CORES > 1
    if (l->task_switch != 0)
#endif
    {
        /* release lock */
#if CONFIG_CORELOCK == SW_CORELOCK
        /* This must be done since our unlock could be missed by the
           test_and_set and leave the object locked permanently */
        corelock_lock(&l->cl);
#endif
        l->locked = 0;
    }

#if NUM_CORES > 1
    corelock_unlock(&l->cl);
#endif
}
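
A hypothetical usage sketch for the spinlock API above; 'example_sl' and both
example_* functions are illustrative names, and the flag follows the
spinlock_init() signature introduced in this commit:

static struct spinlock example_sl NOCACHEBSS_ATTR;

void example_init(void)
{
    /* allow other threads to run while waiting for the lock */
    spinlock_init(&example_sl IF_COP(, SPINLOCK_TASK_SWITCH));
}

void example_touch_shared(void)
{
    spinlock_lock(&example_sl);   /* recursion-safe: owner may relock */
    /* ... modify data shared between CPU and COP ... */
    spinlock_unlock(&example_sl);
}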

/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg");
    s->queue = NULL;
    s->max = max;
    s->count = start;
#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_init(&s->cl);
#endif
}

void semaphore_wait(struct semaphore *s)
{
#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
    corelock_lock(&s->cl);
    if(--s->count >= 0)
    {
        corelock_unlock(&s->cl);
        return;
    }
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    int count;
    while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi);
    if(--count >= 0)
    {
        s->count = count;
        return;
    }
#endif

    /* too many waits - block until dequeued */
#if CONFIG_CORELOCK == SW_CORELOCK
    const unsigned int core = CURRENT_CORE;
    cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
    cores[core].blk_ops.cl_p = &s->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    const unsigned int core = CURRENT_CORE;
    cores[core].blk_ops.flags = TBOP_SET_VARi;
    cores[core].blk_ops.var_ip = &s->count;
    cores[core].blk_ops.var_iv = count;
#endif
    block_thread_no_listlock(&s->queue);
}

void semaphore_release(struct semaphore *s)
{
#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
    corelock_lock(&s->cl);
    if (s->count < s->max)
    {
        if (++s->count <= 0)
        {
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    int count;
    while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi);
    if(count < s->max)
    {
        if(++count <= 0)
        {
#endif /* CONFIG_CORELOCK */

            /* there should be threads in this queue */
            KERNEL_ASSERT(s->queue.queue != NULL, "semaphore->wakeup");
            /* a thread was queued - wake it up */
            wakeup_thread_no_listlock(&s->queue);
        }
    }

#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_unlock(&s->cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    s->count = count;
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
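
A hypothetical producer/consumer pairing for the semaphore API above; the
names data_sem, producer and consumer_thread are illustrative only:

static struct semaphore data_sem;

void sem_example_init(void)
{
    semaphore_init(&data_sem, 8, 0); /* up to 8 posts, none pending */
}

void producer(void)
{
    /* ... enqueue a buffer ... */
    semaphore_release(&data_sem); /* wakes one waiter if any are blocked */
}

void consumer_thread(void)
{
    while (1)
    {
        semaphore_wait(&data_sem); /* blocks when the count drops below 0 */
        /* ... dequeue and process a buffer ... */
    }
}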

/****************************************************************************
 * Simple event functions ;)
 ****************************************************************************/
#ifdef HAVE_EVENT_OBJECTS
void event_init(struct event *e, unsigned int flags)
{
    e->queues[STATE_NONSIGNALED] = NULL;
    e->queues[STATE_SIGNALED] = NULL;
    e->state = flags & STATE_SIGNALED;
    e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_init(&e->cl);
#endif
}

void event_wait(struct event *e, unsigned int for_state)
{
    unsigned int last_state;
#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
    corelock_lock(&e->cl);
    last_state = e->state;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8);
#endif

    if(e->automatic != 0)
    {
        /* wait for false always satisfied by definition
           or if it just changed to false */
        if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
        {
            /* automatic - unsignal */
            e->state = STATE_NONSIGNALED;
#if CONFIG_CORELOCK == SW_CORELOCK
            corelock_unlock(&e->cl);
#endif
            return;
        }
        /* block until state matches */
    }
    else if(for_state == last_state)
    {
        /* the state being waited for is the current state */
#if CONFIG_CORELOCK == SW_CORELOCK
        corelock_unlock(&e->cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        e->state = last_state;
#endif
        return;
    }

    {
        /* current state does not match wait-for state */
#if CONFIG_CORELOCK == SW_CORELOCK
        const unsigned int core = CURRENT_CORE;
        cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
        cores[core].blk_ops.cl_p = &e->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        const unsigned int core = CURRENT_CORE;
        cores[core].blk_ops.flags = TBOP_SET_VARu8;
        cores[core].blk_ops.var_u8p = &e->state;
        cores[core].blk_ops.var_u8v = last_state;
#endif
        block_thread_no_listlock(&e->queues[for_state]);
    }
}

void event_set_state(struct event *e, unsigned int state)
{
    unsigned int last_state;
#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
    corelock_lock(&e->cl);
    last_state = e->state;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8);
#endif

    if(last_state == state)
    {
        /* no change */
#if CONFIG_CORELOCK == SW_CORELOCK
        corelock_unlock(&e->cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        e->state = last_state;
#endif
        return;
    }

    if(state == STATE_SIGNALED)
    {
        if(e->automatic != 0)
        {
            struct thread_entry *thread;
            /* no thread should have ever blocked for unsignaled */
            KERNEL_ASSERT(e->queues[STATE_NONSIGNALED].queue == NULL,
                          "set_event_state->queue[NS]:S");
            /* pass to next thread and keep unsignaled - "pulse" */
            thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]);
            e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED;
        }
        else
        {
            /* release all threads waiting for signaled */
            thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]);
            e->state = STATE_SIGNALED;
        }
    }
    else
    {
        /* release all threads waiting for unsignaled */

        /* no thread should have ever blocked if automatic */
        KERNEL_ASSERT(e->queues[STATE_NONSIGNALED].queue == NULL ||
                      e->automatic == 0, "set_event_state->queue[NS]:NS");

        thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]);
        e->state = STATE_NONSIGNALED;
    }

#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_unlock(&e->cl);
#endif
}
#endif /* HAVE_EVENT_OBJECTS */
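
A hypothetical use of the event object above; data_ready, waiter and signaler
are illustrative names, and the flag values are assumed to be those declared
elsewhere in this commit (STATE_NONSIGNALED taken as 0):

static struct event data_ready;

void evt_example_init(void)
{
    /* EVENT_AUTOMATIC gives "pulse" hand-off semantics, as described in
       event_set_state() */
    event_init(&data_ready, EVENT_AUTOMATIC | STATE_NONSIGNALED);
}

void waiter(void)
{
    event_wait(&data_ready, STATE_SIGNALED); /* auto-unsignals on wake */
}

void signaler(void)
{
    event_set_state(&data_ready, STATE_SIGNALED);
}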
|
||||
|
|
|
@ -1230,7 +1230,7 @@ static void mpeg_thread(void)
|
|||
{
|
||||
static int pause_tick = 0;
|
||||
static unsigned int pause_track = 0;
|
||||
struct event ev;
|
||||
struct queue_event ev;
|
||||
int len;
|
||||
int free_space_left;
|
||||
int unplayed_space_left;
|
||||
|
@ -2910,8 +2910,9 @@ void audio_init(void)
|
|||
queue_init(&mpeg_queue, true);
|
||||
#endif /* !SIMULATOR */
|
||||
create_thread(mpeg_thread, mpeg_stack,
|
||||
sizeof(mpeg_stack), mpeg_thread_name IF_PRIO(, PRIORITY_SYSTEM)
|
||||
IF_COP(, CPU, false));
|
||||
sizeof(mpeg_stack), 0, mpeg_thread_name
|
||||
IF_PRIO(, PRIORITY_SYSTEM)
|
||||
IF_COP(, CPU));
|
||||
|
||||
memset(trackdata, sizeof(trackdata), 0);
|
||||
|
||||
|
|
|
@ -213,8 +213,8 @@ enum
|
|||
|
||||
/***************************************************************************/
|
||||
|
||||
static struct event_queue pcmrec_queue;
|
||||
static struct queue_sender_list pcmrec_queue_send;
|
||||
static struct event_queue pcmrec_queue NOCACHEBSS_ATTR;
|
||||
static struct queue_sender_list pcmrec_queue_send NOCACHEBSS_ATTR;
|
||||
static long pcmrec_stack[3*DEFAULT_STACK_SIZE/sizeof(long)];
|
||||
static const char pcmrec_thread_name[] = "pcmrec";
|
||||
static struct thread_entry *pcmrec_thread_p;
|
||||
|
@ -365,8 +365,8 @@ void pcm_rec_init(void)
|
|||
queue_enable_queue_send(&pcmrec_queue, &pcmrec_queue_send);
|
||||
pcmrec_thread_p =
|
||||
create_thread(pcmrec_thread, pcmrec_stack, sizeof(pcmrec_stack),
|
||||
pcmrec_thread_name IF_PRIO(, PRIORITY_RECORDING)
|
||||
IF_COP(, CPU, false));
|
||||
0, pcmrec_thread_name IF_PRIO(, PRIORITY_RECORDING)
|
||||
IF_COP(, CPU));
|
||||
} /* pcm_rec_init */
|
||||
|
||||
/** audio_* group **/
|
||||
|
@ -1437,7 +1437,7 @@ static void pcmrec_resume(void)
|
|||
static void pcmrec_thread(void) __attribute__((noreturn));
|
||||
static void pcmrec_thread(void)
|
||||
{
|
||||
struct event ev;
|
||||
struct queue_event ev;
|
||||
|
||||
logf("thread pcmrec start");
|
||||
|
||||
|
|
|
@ -1103,9 +1103,9 @@ void powermgmt_init(void)
|
|||
{
|
||||
/* init history to 0 */
|
||||
memset(power_history, 0x00, sizeof(power_history));
|
||||
create_thread(power_thread, power_stack, sizeof(power_stack),
|
||||
create_thread(power_thread, power_stack, sizeof(power_stack), 0,
|
||||
power_thread_name IF_PRIO(, PRIORITY_SYSTEM)
|
||||
IF_COP(, CPU, false));
|
||||
IF_COP(, CPU));
|
||||
}
|
||||
|
||||
#endif /* SIMULATOR */
|
||||
|
|
|
@ -63,8 +63,8 @@ void rolo_restart_cop(void)
|
|||
{
|
||||
/* There should be free thread slots aplenty */
|
||||
create_thread(rolo_restart_cop, cop_idlestackbegin, IDLE_STACK_SIZE,
|
||||
"rolo COP" IF_PRIO(, PRIORITY_REALTIME)
|
||||
IF_COP(, COP, false));
|
||||
0, "rolo COP" IF_PRIO(, PRIORITY_REALTIME)
|
||||
IF_COP(, COP));
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@@ -46,7 +46,7 @@ struct scrollinfo lcd_scroll[LCD_SCROLLABLE_LINES];

 #ifdef HAVE_REMOTE_LCD
 struct scrollinfo lcd_remote_scroll[LCD_REMOTE_SCROLLABLE_LINES];
-struct event_queue scroll_queue;
+struct event_queue scroll_queue NOCACHEBSS_ATTR;
 #endif

 struct scroll_screen_info lcd_scroll_info =
@@ -150,7 +150,7 @@ static void sync_display_ticks(void)

 static bool scroll_process_message(int delay)
 {
-    struct event ev;
+    struct queue_event ev;

     do
     {
@@ -268,7 +268,7 @@ void scroll_init(void)
     queue_init(&scroll_queue, true);
 #endif
     create_thread(scroll_thread, scroll_stack,
-                  sizeof(scroll_stack), scroll_name
+                  sizeof(scroll_stack), 0, scroll_name
                   IF_PRIO(, PRIORITY_USER_INTERFACE)
-                  IF_COP(, CPU, false));
+                  IF_COP(, CPU));
 }
@@ -35,6 +35,13 @@ long cpu_frequency NOCACHEBSS_ATTR = CPU_FREQ;
 #ifdef HAVE_ADJUSTABLE_CPU_FREQ
 static int boost_counter NOCACHEBSS_ATTR = 0;
 static bool cpu_idle NOCACHEBSS_ATTR = false;
+#if NUM_CORES > 1
+struct spinlock boostctrl_spin NOCACHEBSS_ATTR;
+void cpu_boost_init(void)
+{
+    spinlock_init(&boostctrl_spin, SPINLOCK_NO_TASK_SWITCH);
+}
+#endif

 int get_cpu_boost_counter(void)
 {
@@ -52,25 +59,51 @@ int cpu_boost_log_getcount(void)
 }
 char * cpu_boost_log_getlog_first(void)
 {
+    char *first;
+#if NUM_CORES > 1
+    spinlock_lock(&boostctrl_spin);
+#endif
+
+    first = NULL;
+
     if (cpu_boost_calls_count)
     {
         cpu_boost_track_message = 1;
-        return cpu_boost_calls[cpu_boost_first];
+        first = cpu_boost_calls[cpu_boost_first];
     }
-    else return NULL;
+
+#if NUM_CORES > 1
+    spinlock_unlock(&boostctrl_spin);
+#endif
 }
 char * cpu_boost_log_getlog_next(void)
 {
-    int message = (cpu_boost_track_message+cpu_boost_first)%MAX_BOOST_LOG;
+    int message;
+    char *next;
+
+#if NUM_CORES > 1
+    spinlock_lock(&boostctrl_spin);
+#endif
+
+    message = (cpu_boost_track_message+cpu_boost_first)%MAX_BOOST_LOG;
+    next = NULL;
+
     if (cpu_boost_track_message < cpu_boost_calls_count)
     {
         cpu_boost_track_message++;
-        return cpu_boost_calls[message];
+        next = cpu_boost_calls[message];
     }
-    else return NULL;
+
+#if NUM_CORES > 1
+    spinlock_unlock(&boostctrl_spin);
+#endif
 }
 void cpu_boost_(bool on_off, char* location, int line)
 {
+#if NUM_CORES > 1
+    spinlock_lock(&boostctrl_spin);
+#endif
+
     if (cpu_boost_calls_count == MAX_BOOST_LOG)
     {
         cpu_boost_first = (cpu_boost_first+1)%MAX_BOOST_LOG;
@@ -88,32 +121,46 @@ void cpu_boost_(bool on_off, char* location, int line)
 #else
 void cpu_boost(bool on_off)
 {
+#if NUM_CORES > 1
+    spinlock_lock(&boostctrl_spin);
+#endif
+
 #endif /* CPU_BOOST_LOGGING */
     if(on_off)
     {
         /* Boost the frequency if not already boosted */
-        if(boost_counter++ == 0)
+        if(++boost_counter == 1)
             set_cpu_frequency(CPUFREQ_MAX);
     }
     else
     {
         /* Lower the frequency if the counter reaches 0 */
-        if(--boost_counter == 0)
+        if(--boost_counter <= 0)
         {
             if(cpu_idle)
                 set_cpu_frequency(CPUFREQ_DEFAULT);
             else
                 set_cpu_frequency(CPUFREQ_NORMAL);
         }

         /* Safety measure */
-        if(boost_counter < 0)
-            boost_counter = 0;
+        if (boost_counter < 0)
+        {
+            boost_counter = 0;
+        }
     }
-}

+#if NUM_CORES > 1
+    spinlock_unlock(&boostctrl_spin);
+#endif
+}

 void cpu_idle_mode(bool on_off)
 {
+#if NUM_CORES > 1
+    spinlock_lock(&boostctrl_spin);
+#endif
+
     cpu_idle = on_off;

     /* We need to adjust the frequency immediately if the CPU
@@ -125,6 +172,10 @@ void cpu_idle_mode(bool on_off)
     else
         set_cpu_frequency(CPUFREQ_NORMAL);
     }
+
+#if NUM_CORES > 1
+    spinlock_unlock(&boostctrl_spin);
+#endif
 }
 #endif /* HAVE_ADJUSTABLE_CPU_FREQ */
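The counting change above is subtle: `boost_counter++ == 0` tested the pre-increment value while `++boost_counter == 1` tests the post-increment value, which is equivalent for a counter starting at 0, but the new `--boost_counter <= 0` on the unboost side means an unbalanced unboost can no longer skip the frequency drop. A sketch of the resulting invariant, with hypothetical call sites:

    cpu_boost(true);    /* counter 0 -> 1: frequency raised to CPUFREQ_MAX */
    cpu_boost(true);    /* counter 1 -> 2: no hardware change */
    cpu_boost(false);   /* counter 2 -> 1: still boosted */
    cpu_boost(false);   /* counter 1 -> 0: drop to NORMAL (or DEFAULT when idle) */
    cpu_boost(false);   /* counter 0 -> -1: <= 0 still drops the frequency,
                           then the safety clamp resets the counter to 0 */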
@@ -199,6 +250,7 @@ void UIE(unsigned int pc, unsigned int num)
         /* TODO: perhaps add button handling in here when we get a polling
            driver some day.
          */
+        core_idle();
     }
 }
@@ -132,18 +132,18 @@ static int pp_i2c_send_byte(unsigned int addr, int data0)
 }

 /* Public functions */
-static struct mutex i2c_mutex;
+struct spinlock i2c_spin NOCACHEBSS_ATTR;

 int i2c_readbytes(unsigned int dev_addr, int addr, int len, unsigned char *data) {
     unsigned int temp;
     int i;
-    spinlock_lock(&i2c_mutex);
+    spinlock_lock(&i2c_spin);
     pp_i2c_send_byte(dev_addr, addr);
     for (i = 0; i < len; i++) {
         pp_i2c_read_byte(dev_addr, &temp);
         data[i] = temp;
     }
-    spinlock_unlock(&i2c_mutex);
+    spinlock_unlock(&i2c_spin);
     return i;
 }

@@ -151,10 +151,10 @@ int i2c_readbyte(unsigned int dev_addr, int addr)
 {
     int data;

-    spinlock_lock(&i2c_mutex);
+    spinlock_lock(&i2c_spin);
     pp_i2c_send_byte(dev_addr, addr);
     pp_i2c_read_byte(dev_addr, &data);
-    spinlock_unlock(&i2c_mutex);
+    spinlock_unlock(&i2c_spin);

     return data;
 }
@@ -167,9 +167,9 @@ int pp_i2c_send(unsigned int addr, int data0, int data1)
     data[0] = data0;
     data[1] = data1;

-    spinlock_lock(&i2c_mutex);
+    spinlock_lock(&i2c_spin);
     retval = pp_i2c_send_bytes(addr, 2, data);
-    spinlock_unlock(&i2c_mutex);
+    spinlock_unlock(&i2c_spin);

     return retval;
 }
@@ -221,7 +221,7 @@ void i2c_init(void)
 #endif
 #endif

-    spinlock_init(&i2c_mutex);
+    spinlock_init(&i2c_spin IF_COP(, SPINLOCK_TASK_SWITCH));

     i2c_readbyte(0x8, 0);
 }
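Note that the second argument to the new spinlock_init() appears only inside IF_COP(), so single-core builds compile it away. A sketch of the guarded-access pattern these hunks converge on, reusing identifiers from the diff; the wrapper functions themselves are hypothetical:

    struct spinlock i2c_spin NOCACHEBSS_ATTR;   /* shared, uncached across cores */

    void i2c_setup(void)
    {
        /* SPINLOCK_TASK_SWITCH lets a waiter yield instead of busy-spinning */
        spinlock_init(&i2c_spin IF_COP(, SPINLOCK_TASK_SWITCH));
    }

    int i2c_locked_read(unsigned int dev_addr, int addr)
    {
        int data;
        spinlock_lock(&i2c_spin);        /* one core/thread on the bus at a time */
        pp_i2c_send_byte(dev_addr, addr);
        pp_i2c_read_byte(dev_addr, &data);
        spinlock_unlock(&i2c_spin);
        return data;
    }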
@@ -22,7 +22,7 @@
 #include "hwcompat.h"
 #include "kernel.h"

-static struct mutex adc_mutex NOCACHEBSS_ATTR;
+static struct spinlock adc_spin NOCACHEBSS_ATTR;

 /* used in the 2nd gen ADC interrupt */
 static unsigned int_data;
@@ -33,7 +33,7 @@ unsigned short adc_scan(int channel)
     unsigned short data = 0;

     (void)channel; /* there is only one */
-    spinlock_lock(&adc_mutex);
+    spinlock_lock(&adc_spin);

     if ((IPOD_HW_REVISION >> 16) == 1)
     {
@@ -69,7 +69,7 @@ unsigned short adc_scan(int channel)

         data = int_data & 0xff;
     }
-    spinlock_unlock(&adc_mutex);
+    spinlock_unlock(&adc_spin);
     return data;
 }

@@ -100,7 +100,7 @@ void ipod_2g_adc_int(void)

 void adc_init(void)
 {
-    spinlock_init(&adc_mutex);
+    spinlock_init(&adc_spin IF_COP(, SPINLOCK_TASK_SWITCH));

     GPIOB_ENABLE |= 0x1e; /* enable B1..B4 */
@@ -21,8 +21,6 @@
 #include "i2c-pp.h"
 #include "as3514.h"

-static struct mutex adc_mutex NOCACHEBSS_ATTR;
-
 /* Read 10-bit channel data */
 unsigned short adc_read(int channel)
 {
@@ -30,7 +28,7 @@ unsigned short adc_read(int channel)

     if ((unsigned)channel < NUM_ADC_CHANNELS)
     {
-        spinlock_lock(&adc_mutex);
+        spinlock_lock(&i2c_spin);

         /* Select channel */
         if (pp_i2c_send( AS3514_I2C_ADDR, ADC_0, (channel << 4)) >= 0)
@@ -44,7 +42,7 @@ unsigned short adc_read(int channel)
         }
     }

-        spinlock_unlock(&adc_mutex);
+        spinlock_unlock(&i2c_spin);
     }

     return data;
@@ -52,5 +50,4 @@ unsigned short adc_read(int channel)

 void adc_init(void)
 {
-    spinlock_init(&adc_mutex);
 }
@@ -162,7 +162,7 @@ static struct sd_card_status sd_status[NUM_VOLUMES] =
 /* Shoot for around 75% usage */
 static long sd_stack [(DEFAULT_STACK_SIZE*2 + 0x1c0)/sizeof(long)];
 static const char sd_thread_name[] = "ata/sd";
-static struct mutex sd_mtx;
+static struct spinlock sd_spin NOCACHEBSS_ATTR;
 static struct event_queue sd_queue;

 /* Posted when card plugged status has changed */
@@ -801,7 +801,7 @@ int ata_read_sectors(IF_MV2(int drive,) unsigned long start, int incount,

     /* TODO: Add DMA support. */

-    spinlock_lock(&sd_mtx);
+    spinlock_lock(&sd_spin);

     ata_led(true);

@@ -888,7 +888,7 @@ ata_read_retry:
     while (1)
     {
         ata_led(false);
-        spinlock_unlock(&sd_mtx);
+        spinlock_unlock(&sd_spin);

         return ret;

@@ -916,7 +916,7 @@ int ata_write_sectors(IF_MV2(int drive,) unsigned long start, int count,
     const unsigned char *buf, *buf_end;
     int bank;

-    spinlock_lock(&sd_mtx);
+    spinlock_lock(&sd_spin);

     ata_led(true);

@@ -1016,7 +1016,7 @@ ata_write_retry:
     while (1)
     {
         ata_led(false);
-        spinlock_unlock(&sd_mtx);
+        spinlock_unlock(&sd_spin);

         return ret;

@@ -1034,7 +1034,7 @@ ata_write_error:
 static void sd_thread(void) __attribute__((noreturn));
 static void sd_thread(void)
 {
-    struct event ev;
+    struct queue_event ev;
     bool idle_notified = false;

     while (1)
@@ -1050,10 +1050,9 @@ static void sd_thread(void)

             /* Lock to keep us from messing with this variable while an init
                may be in progress */
-            spinlock_lock(&sd_mtx);
+            spinlock_lock(&sd_spin);
             card_info[1].initialized = 0;
             sd_status[1].retry = 0;
-            spinlock_unlock(&sd_mtx);

             /* Either unmount because the card was pulled or unmount and
                remount if already mounted since multiple messages may be
@@ -1073,6 +1072,8 @@ static void sd_thread(void)

             if (action != SDA_NONE)
                 queue_broadcast(SYS_FS_CHANGED, 0);
+
+            spinlock_unlock(&sd_spin);
             break;
             } /* SD_HOTSWAP */
 #endif /* HAVE_HOTSWAP */
@@ -1155,9 +1156,9 @@ int ata_init(void)
     {
         initialized = true;

-        spinlock_init(&sd_mtx);
+        spinlock_init(&sd_spin IF_COP(, SPINLOCK_TASK_SWITCH));

-        spinlock_lock(&sd_mtx);
+        spinlock_lock(&sd_spin);

         /* init controller */
         outl(inl(0x70000088) & ~(0x4), 0x70000088);
@@ -1181,8 +1182,8 @@ int ata_init(void)
         ret = currcard->initialized;

         queue_init(&sd_queue, true);
-        create_thread(sd_thread, sd_stack, sizeof(sd_stack),
-            sd_thread_name IF_PRIO(, PRIORITY_SYSTEM) IF_COP(, CPU, false));
+        create_thread(sd_thread, sd_stack, sizeof(sd_stack), 0,
+            sd_thread_name IF_PRIO(, PRIORITY_SYSTEM) IF_COP(, CPU));

         /* enable interupt for the mSD card */
         sleep(HZ/10);
@@ -1195,7 +1196,7 @@ int ata_init(void)
         GPIOA_INT_CLR = 0x80;
         GPIOA_INT_EN |= 0x80;
 #endif
-        spinlock_unlock(&sd_mtx);
+        spinlock_unlock(&sd_spin);
     }

     return ret;
@@ -21,10 +21,6 @@
 #include "i2s.h"
 #include "i2c-pp.h"

-#if NUM_CORES > 1
-struct mutex boostctrl_mtx NOCACHEBSS_ATTR;
-#endif
-
 #ifndef BOOTLOADER
 extern void TIMER1(void);
 extern void TIMER2(void);
@@ -129,16 +125,42 @@ static void init_cache(void)
 }

 #ifdef HAVE_ADJUSTABLE_CPU_FREQ
+void scale_suspend_core(bool suspend) ICODE_ATTR;
+void scale_suspend_core(bool suspend)
+{
+    unsigned int core = CURRENT_CORE;
+    unsigned int othercore = 1 - core;
+    static unsigned long proc_bits IBSS_ATTR;
+    static int oldstatus IBSS_ATTR;
+
+    if (suspend)
+    {
+        oldstatus = set_interrupt_status(IRQ_FIQ_DISABLED, IRQ_FIQ_STATUS);
+        proc_bits = PROC_CTL(othercore) & 0xc0000000;
+        PROC_CTL(othercore) = 0x40000000; nop;
+        PROC_CTL(core) = 0x48000003; nop;
+    }
+    else
+    {
+        PROC_CTL(core) = 0x4800001f; nop;
+        if (proc_bits == 0)
+            PROC_CTL(othercore) = 0;
+        set_interrupt_status(oldstatus, IRQ_FIQ_STATUS);
+    }
+}
+
+void set_cpu_frequency(long frequency) ICODE_ATTR;
 void set_cpu_frequency(long frequency)
 #else
 static void pp_set_cpu_frequency(long frequency)
 #endif
 {
 #if defined(HAVE_ADJUSTABLE_CPU_FREQ) && (NUM_CORES > 1)
-    /* Using mutex or spinlock isn't safe here. */
-    while (test_and_set(&boostctrl_mtx.locked, 1)) ;
+    spinlock_lock(&boostctrl_spin);
 #endif

+    scale_suspend_core(true);
+
     cpu_frequency = frequency;

     switch (frequency)
@@ -149,17 +171,20 @@ static void pp_set_cpu_frequency(long frequency)
      * have this limitation (and the post divider?) */
     case CPUFREQ_MAX:
         CLOCK_SOURCE = 0x10007772; /* source #1: 24MHz, #2, #3, #4: PLL */
-        DEV_TIMING1 = 0x00000808;
+        DEV_TIMING1 = 0x00000303;
 #if CONFIG_CPU == PP5020
         PLL_CONTROL = 0x8a020a03; /* 10/3 * 24MHz */
         PLL_STATUS = 0xd19b; /* unlock frequencies > 66MHz */
         PLL_CONTROL = 0x8a020a03; /* repeat setup */
+        scale_suspend_core(false);
         udelay(500); /* wait for relock */
 #elif (CONFIG_CPU == PP5022) || (CONFIG_CPU == PP5024)
         PLL_CONTROL = 0x8a121403; /* (20/3 * 24MHz) / 2 */
+        scale_suspend_core(false);
         udelay(250);
         while (!(PLL_STATUS & 0x80000000)); /* wait for relock */
 #endif
+        scale_suspend_core(true);
         break;

     case CPUFREQ_NORMAL:
@@ -167,18 +192,23 @@ static void pp_set_cpu_frequency(long frequency)
         DEV_TIMING1 = 0x00000303;
 #if CONFIG_CPU == PP5020
         PLL_CONTROL = 0x8a020504; /* 5/4 * 24MHz */
+        scale_suspend_core(false);
         udelay(500); /* wait for relock */
 #elif (CONFIG_CPU == PP5022) || (CONFIG_CPU == PP5024)
         PLL_CONTROL = 0x8a220501; /* (5/1 * 24MHz) / 4 */
+        scale_suspend_core(false);
         udelay(250);
         while (!(PLL_STATUS & 0x80000000)); /* wait for relock */
 #endif
+        scale_suspend_core(true);
         break;

     case CPUFREQ_SLEEP:
         CLOCK_SOURCE = 0x10002202; /* source #2: 32kHz, #1, #3, #4: 24MHz */
         PLL_CONTROL &= ~0x80000000; /* disable PLL */
+        scale_suspend_core(false);
         udelay(10000); /* let 32kHz source stabilize? */
+        scale_suspend_core(true);
         break;

     default:
@@ -186,12 +216,19 @@ static void pp_set_cpu_frequency(long frequency)
         DEV_TIMING1 = 0x00000303;
         PLL_CONTROL &= ~0x80000000; /* disable PLL */
         cpu_frequency = CPUFREQ_DEFAULT;
+        PROC_CTL(CURRENT_CORE) = 0x4800001f; nop;
         break;
     }

+    if (frequency == CPUFREQ_MAX)
+        DEV_TIMING1 = 0x00000808;
+
     CLOCK_SOURCE = (CLOCK_SOURCE & ~0xf0000000) | 0x20000000; /* select source #2 */

+    scale_suspend_core(false);
+
 #if defined(HAVE_ADJUSTABLE_CPU_FREQ) && (NUM_CORES > 1)
-    boostctrl_mtx.locked = 0;
+    spinlock_unlock(&boostctrl_spin);
 #endif
 }
 #endif /* !BOOTLOADER */
@@ -256,7 +293,7 @@ void system_init(void)

 #ifdef HAVE_ADJUSTABLE_CPU_FREQ
 #if NUM_CORES > 1
-    spinlock_init(&boostctrl_mtx);
+    cpu_boost_init();
 #endif
 #else
     pp_set_cpu_frequency(CPUFREQ_MAX);
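The ordering in set_cpu_frequency() is the point of scale_suspend_core(): the other core is parked and interrupts are masked before the PLL is reprogrammed, released only across the relock wait, then parked again to finish the switch, so neither core appears to execute from a half-configured clock tree. A condensed sketch of that bracket, with the register writes elided (this is a reading of the hunks above, not code from the commit):

    spinlock_lock(&boostctrl_spin);  /* one frequency change at a time */
    scale_suspend_core(true);        /* park the other core, mask IRQ/FIQ */

    /* ... write CLOCK_SOURCE / PLL_CONTROL for the requested frequency ... */

    scale_suspend_core(false);       /* let both cores run during... */
    udelay(500);                     /* ...the PLL relock wait */
    scale_suspend_core(true);        /* park again to finish switching */

    /* ... select the final clock source ... */

    scale_suspend_core(false);
    spinlock_unlock(&boostctrl_spin);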
@@ -46,6 +46,10 @@
 #define inw(a) (*(volatile unsigned short *) (a))
 #define outw(a,b) (*(volatile unsigned short *) (b) = (a))

+#if defined(HAVE_ADJUSTABLE_CPU_FREQ) && NUM_CORES > 1
+extern struct spinlock boostctrl_spin;
+#endif
+
 static inline void udelay(unsigned usecs)
 {
     unsigned stop = USEC_TIMER + usecs;
@@ -107,7 +111,6 @@ void flush_icache(void);

 #endif /* CPU_PP502x */

 #endif /* CPU_PP */

 #endif /* SYSTEM_TARGET_H */
@@ -708,7 +708,7 @@ int main(void)

-    create_thread(mpeg_thread, stack - 0x2000, 0x4000);
+    create_thread(mpeg_thread, stack - 0x2000, 0x4000, 0);

@@ -1004,7 +1004,7 @@ void mpeg_thread(void)
 {
-    struct event ev;
+    struct queue_event ev;
     int len;
@@ -44,7 +44,7 @@ int main(void)
     char buf[40];
     char str[32];
     int i=0;
-    struct event *ev;
+    struct queue_event *ev;

     /* Clear it all! */
     SSR1 &= ~(SCI_RDRF | SCI_ORER | SCI_PER | SCI_FER);
@@ -69,8 +69,8 @@ int main(void)

     queue_init(&main_q);

-    create_thread(t1, s1, 1024);
-    create_thread(t2, s2, 1024);
+    create_thread(t1, s1, 1024, 0);
+    create_thread(t2, s2, 1024, 0);

     while(1)
     {
firmware/thread.c: 2437 changes (diff suppressed because it is too large)
@@ -66,7 +66,7 @@ static int usb_mmc_countdown = 0;
 static long usb_stack[(DEFAULT_STACK_SIZE + 0x800)/sizeof(long)];
 static const char usb_thread_name[] = "usb";
 #endif
-static struct event_queue usb_queue;
+static struct event_queue usb_queue NOCACHEBSS_ATTR;
 static int last_usb_status;
 static bool usb_monitor_enabled;

@@ -119,7 +119,7 @@ static void usb_thread(void)
 {
     int num_acks_to_expect = -1;
     bool waiting_for_ack;
-    struct event ev;
+    struct queue_event ev;

     waiting_for_ack = false;

@@ -307,9 +307,9 @@ void usb_init(void)
 #ifndef BOOTLOADER
     queue_init(&usb_queue, true);

-    create_thread(usb_thread, usb_stack, sizeof(usb_stack),
+    create_thread(usb_thread, usb_stack, sizeof(usb_stack), 0,
                   usb_thread_name IF_PRIO(, PRIORITY_SYSTEM)
-                  IF_COP(, CPU, false));
+                  IF_COP(, CPU));

     tick_add_task(usb_tick);
 #endif
@@ -318,7 +318,7 @@ void usb_init(void)

 void usb_wait_for_disconnect(struct event_queue *q)
 {
-    struct event ev;
+    struct queue_event ev;

     /* Don't return until we get SYS_USB_DISCONNECTED */
     while(1)
@@ -334,7 +334,7 @@ void usb_wait_for_disconnect(struct event_queue *q)

 int usb_wait_for_disconnect_w_tmo(struct event_queue *q, int ticks)
 {
-    struct event ev;
+    struct queue_event ev;

     /* Don't return until we get SYS_USB_DISCONNECTED or SYS_TIMEOUT */
     while(1)
@@ -743,7 +743,7 @@ int button_queue_count( void )

 long button_get(bool block)
 {
-    struct event ev;
+    struct queue_event ev;

     if ( block || !queue_empty(&button_queue) ) {
         queue_wait(&button_queue, &ev);
@@ -755,7 +755,7 @@ long button_get(bool block)

 long button_get_w_tmo(int ticks)
 {
-    struct event ev;
+    struct queue_event ev;
     queue_wait_w_tmo(&button_queue, &ev, ticks);
     if (ev.id == SYS_TIMEOUT)
         ev.id = BUTTON_NONE;
@@ -29,7 +29,7 @@ volatile long current_tick = 0;
 static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

 /* This array holds all queues that are initiated. It is used for broadcast. */
-static struct event_queue *all_queues[32];
+static struct event_queue *all_queues[MAX_NUM_QUEUES];
 static int num_queues = 0;

 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
@@ -53,7 +53,7 @@ static void queue_release_sender(struct thread_entry **sender,
                                  intptr_t retval)
 {
     (*sender)->retval = retval;
-    wakeup_thread(sender);
+    wakeup_thread_no_listlock(sender);
     if(*sender != NULL)
     {
         fprintf(stderr, "queue->send slot ovf: %p\n", *sender);
@@ -98,14 +98,14 @@ void queue_init(struct event_queue *q, bool register_queue)
 {
     q->read = 0;
     q->write = 0;
-    q->thread = NULL;
+    thread_queue_init(&q->queue);
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     q->send = NULL; /* No message sending by default */
 #endif

     if(register_queue)
     {
-        if(num_queues >= 32)
+        if(num_queues >= MAX_NUM_QUEUES)
         {
             fprintf(stderr, "queue_init->out of queues");
             exit(-1);
@@ -142,7 +142,7 @@ void queue_delete(struct event_queue *q)
     }

     /* Release threads waiting on queue head */
-    wakeup_thread(&q->thread);
+    thread_queue_wake(&q->queue);

 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     /* Release waiting threads and reply to any dequeued message
@@ -155,7 +155,7 @@ void queue_delete(struct event_queue *q)
     q->write = 0;
 }

-void queue_wait(struct event_queue *q, struct event *ev)
+void queue_wait(struct event_queue *q, struct queue_event *ev)
 {
     unsigned int rd;

@@ -169,7 +169,11 @@ void queue_wait(struct event_queue *q, struct event *ev)

     if (q->read == q->write)
     {
-        block_thread(&q->thread);
+        do
+        {
+            block_thread(&q->queue);
+        }
+        while (q->read == q->write);
     }

     rd = q->read++ & QUEUE_LENGTH_MASK;
@@ -184,7 +188,7 @@ void queue_wait(struct event_queue *q, struct event *ev)
 #endif
 }

-void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
+void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
 {
 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     if (q->send && q->send->curr_sender)
@@ -196,7 +200,7 @@ void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)

     if (q->read == q->write && ticks > 0)
     {
-        block_thread_w_tmo(&q->thread, ticks);
+        block_thread_w_tmo(&q->queue, ticks);
     }

     if(q->read != q->write)
@@ -238,7 +242,7 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
     }
 #endif

-    wakeup_thread(&q->thread);
+    wakeup_thread(&q->queue);
 }

 /* Special thread-synced queue_post for button driver or any other preemptive sim thread */
@@ -268,9 +272,9 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
         queue_release_sender(spp, 0);
     }

-    wakeup_thread(&q->thread);
+    wakeup_thread(&q->queue);

-    block_thread(spp);
+    block_thread_no_listlock(spp);
     return thread_get_current()->retval;
 }

@@ -370,7 +374,7 @@ int queue_syncbroadcast(long id, intptr_t data)

 void yield(void)
 {
-    switch_thread(true, NULL);
+    switch_thread(NULL);
 }

 void sleep(int ticks)
@@ -431,39 +435,218 @@ int tick_remove_task(void (*f)(void))
    multitasking, but is better than nothing at all */
 void mutex_init(struct mutex *m)
 {
+    m->queue = NULL;
+    m->thread = NULL;
+    m->count = 0;
     m->locked = 0;
 }

 void mutex_lock(struct mutex *m)
 {
-    if (test_and_set(&m->locked, 1))
+    struct thread_entry *const thread = thread_get_current();
+
+    if(thread == m->thread)
     {
-        block_thread(&m->thread);
+        m->count++;
+        return;
     }
+
+    if (!test_and_set(&m->locked, 1))
+    {
+        m->thread = thread;
+        return;
+    }
+
+    block_thread_no_listlock(&m->queue);
 }

 void mutex_unlock(struct mutex *m)
 {
-    if (m->thread != NULL)
+    /* unlocker not being the owner is an unlocking violation */
+    if(m->thread != thread_get_current())
     {
-        wakeup_thread(&m->thread);
+        fprintf(stderr, "spinlock_unlock->wrong thread");
+        exit(-1);
     }
-    else
+
+    if (m->count > 0)
     {
+        /* this thread still owns lock */
+        m->count--;
+        return;
+    }
+
+    m->thread = wakeup_thread_no_listlock(&m->queue);
+
+    if (m->thread == NULL)
+    {
+        /* release lock */
         m->locked = 0;
     }
 }
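The mutex now records an owner and a recursion count, so the same thread can re-enter without deadlocking, and unlock hands ownership directly to the next waiter instead of merely clearing a flag. A usage sketch with hypothetical functions:

    static struct mutex fs_mtx;

    void fs_init(void)
    {
        mutex_init(&fs_mtx);
    }

    void fs_flush(void)
    {
        mutex_lock(&fs_mtx);    /* re-entry: owner matches, just count++ */
        /* ... */
        mutex_unlock(&fs_mtx);  /* count--; the caller still holds the lock */
    }

    void fs_write(void)
    {
        mutex_lock(&fs_mtx);    /* first acquisition by this thread */
        fs_flush();             /* safe: recursive lock, no self-deadlock */
        mutex_unlock(&fs_mtx);  /* count == 0: hand off to next waiter or release */
    }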
-void spinlock_lock(struct mutex *l)
+void spinlock_init(struct spinlock *l)
+{
+    l->locked = 0;
+    l->thread = NULL;
+    l->count = 0;
+}
+
+void spinlock_lock(struct spinlock *l)
 {
+    struct thread_entry *const thread = thread_get_current();
+
+    if (l->thread == thread)
+    {
+        l->count++;
+        return;
+    }
+
     while(test_and_set(&l->locked, 1))
     {
-        switch_thread(true, NULL);
+        switch_thread(NULL);
     }
+
+    l->thread = thread;
+}
+
+void spinlock_unlock(struct spinlock *l)
+{
+    /* unlocker not being the owner is an unlocking violation */
+    if(l->thread != thread_get_current())
+    {
+        fprintf(stderr, "spinlock_unlock->wrong thread");
+        exit(-1);
+    }
+
+    if (l->count > 0)
+    {
+        /* this thread still owns lock */
+        l->count--;
+        return;
+    }
+
+    /* clear owner */
+    l->thread = NULL;
+    l->locked = 0;
 }
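The spinlock gains the same owner/count pair, and on the simulator both lock types abort the process on an unlock by a non-owner, turning silent lock corruption into an immediate, debuggable failure. A small sketch of the semantics:

    static struct spinlock dbg_spin;

    spinlock_init(&dbg_spin);

    spinlock_lock(&dbg_spin);    /* owner = current thread, count = 0 */
    spinlock_lock(&dbg_spin);    /* same owner: count = 1, no spinning */
    spinlock_unlock(&dbg_spin);  /* count back to 0, still owned */
    spinlock_unlock(&dbg_spin);  /* owner cleared, lock released */
    /* unlocking from another thread here would print
       "spinlock_unlock->wrong thread" and exit(-1) on the sim */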
+void semaphore_init(struct semaphore *s, int max, int start)
+{
+    if(max <= 0 || start < 0 || start > max)
+    {
+        fprintf(stderr, "semaphore_init->inv arg");
+        exit(-1);
+    }
+    s->queue = NULL;
+    s->max = max;
+    s->count = start;
+}
+
+void semaphore_wait(struct semaphore *s)
+{
+    if(--s->count >= 0)
+        return;
+    block_thread_no_listlock(&s->queue);
+}
+
+void semaphore_release(struct semaphore *s)
+{
+    if(s->count < s->max)
+    {
+        if(++s->count <= 0)
+        {
+            if(s->queue == NULL)
+            {
+                /* there should be threads in this queue */
+                fprintf(stderr, "semaphore->wakeup");
+                exit(-1);
+            }
+            /* a thread was queued - wake it up */
+            wakeup_thread_no_listlock(&s->queue);
+        }
+    }
+}
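The semaphore counts down on wait and blocks below zero, so `count` doubles as "slots available" while positive and as the negated number of waiters while negative. A producer/consumer sketch with hypothetical names:

    static struct semaphore data_sem;

    void pipeline_init(void)
    {
        semaphore_init(&data_sem, 8 /* max */, 0 /* start empty */);
    }

    void producer(void)
    {
        /* ... enqueue an item ... */
        semaphore_release(&data_sem);   /* count++; wakes one waiter if <= 0 */
    }

    void consumer(void)
    {
        semaphore_wait(&data_sem);      /* count--; blocks when it goes below 0 */
        /* ... dequeue the item ... */
    }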
-void spinlock_unlock(struct mutex *l)
+void event_init(struct event *e, unsigned int flags)
 {
-    l->locked = 0;
+    e->queues[STATE_NONSIGNALED] = NULL;
+    e->queues[STATE_SIGNALED] = NULL;
+    e->state = flags & STATE_SIGNALED;
+    e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
 }
+
+void event_wait(struct event *e, unsigned int for_state)
+{
+    unsigned int last_state = e->state;
+
+    if(e->automatic != 0)
+    {
+        /* wait for false always satisfied by definition
+           or if it just changed to false */
+        if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
+        {
+            /* automatic - unsignal */
+            e->state = STATE_NONSIGNALED;
+            return;
+        }
+        /* block until state matches */
+    }
+    else if(for_state == last_state)
+    {
+        /* the state being waited for is the current state */
+        return;
+    }
+
+    /* current state does not match wait-for state */
+    block_thread_no_listlock(&e->queues[for_state]);
+}
+
+void event_set_state(struct event *e, unsigned int state)
+{
+    unsigned int last_state = e->state;
+
+    if(last_state == state)
+    {
+        /* no change */
+        return;
+    }
+
+    if(state == STATE_SIGNALED)
+    {
+        if(e->automatic != 0)
+        {
+            struct thread_entry *thread;
+
+            if(e->queues[STATE_NONSIGNALED] != NULL)
+            {
+                /* no thread should have ever blocked for nonsignaled */
+                fprintf(stderr, "set_event_state->queue[NS]:S");
+                exit(-1);
+            }
+
+            /* pass to next thread and keep unsignaled - "pulse" */
+            thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]);
+            e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED;
+        }
+        else
+        {
+            /* release all threads waiting for signaled */
+            thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]);
+            e->state = STATE_SIGNALED;
+        }
+    }
+    else
+    {
+        /* release all threads waiting for unsignaled */
+        if(e->queues[STATE_NONSIGNALED] != NULL && e->automatic != 0)
+        {
+            /* no thread should have ever blocked */
+            fprintf(stderr, "set_event_state->queue[NS]:NS");
+            exit(-1);
+        }
+
+        thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]);
+        e->state = STATE_NONSIGNALED;
+    }
+}
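With EVENT_AUTOMATIC, a signal either passes straight to one blocked thread and reverts to non-signaled (a "pulse"), or latches signaled if nobody is waiting; a manual event wakes every waiter and stays signaled. A sketch of the auto-reset case, with hypothetical thread bodies:

    static struct event buf_done;

    void dma_setup(void)
    {
        /* auto-reset; initially non-signaled (STATE_SIGNALED bit not set) */
        event_init(&buf_done, EVENT_AUTOMATIC);
    }

    void dma_complete(void)
    {
        /* wakes exactly one waiter and re-arms, or latches if none waits */
        event_set_state(&buf_done, STATE_SIGNALED);
    }

    void codec_loop(void)
    {
        event_wait(&buf_done, STATE_SIGNALED);  /* consume one pulse */
        /* ... process the completed buffer ... */
    }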
@@ -131,7 +131,7 @@ bool thread_sdl_init(void *param)
     running->stack = " ";
     running->stack_size = 8;
     running->name = "main";
-    running->statearg = STATE_RUNNING;
+    running->state = STATE_RUNNING;
     running->context.c = SDL_CreateCond();

     if (running->context.c == NULL)
@@ -154,65 +154,6 @@ bool thread_sdl_init(void *param)
     return true;
 }

-static int find_empty_thread_slot(void)
-{
-    int n;
-
-    for (n = 0; n < MAXTHREADS; n++)
-    {
-        if (threads[n].name == NULL)
-            break;
-    }
-
-    return n;
-}
-
-static void add_to_list(struct thread_entry **list,
-                        struct thread_entry *thread)
-{
-    if (*list == NULL)
-    {
-        /* Insert into unoccupied list */
-        thread->next = thread;
-        thread->prev = thread;
-        *list = thread;
-    }
-    else
-    {
-        /* Insert last */
-        thread->next = *list;
-        thread->prev = (*list)->prev;
-        thread->prev->next = thread;
-        (*list)->prev = thread;
-    }
-}
-
-static void remove_from_list(struct thread_entry **list,
-                             struct thread_entry *thread)
-{
-    if (thread == thread->next)
-    {
-        /* The only item */
-        *list = NULL;
-        return;
-    }
-
-    if (thread == *list)
-    {
-        /* List becomes next item */
-        *list = thread->next;
-    }
-
-    /* Fix links to jump over the removed entry. */
-    thread->prev->next = thread->next;
-    thread->next->prev = thread->prev;
-}
-
-struct thread_entry *thread_get_current(void)
-{
-    return running;
-}
-
 void thread_sdl_lock(void)
 {
     SDL_LockMutex(m);
@@ -223,7 +164,68 @@ void thread_sdl_unlock(void)
     SDL_UnlockMutex(m);
 }

-void switch_thread(bool save_context, struct thread_entry **blocked_list)
+static int find_empty_thread_slot(void)
+{
+    int n;
+
+    for (n = 0; n < MAXTHREADS; n++)
+    {
+        int state = threads[n].state;
+
+        if (state == STATE_KILLED)
+            break;
+    }
+
+    return n;
+}
+
+static void add_to_list_l(struct thread_entry **list,
+                          struct thread_entry *thread)
+{
+    if (*list == NULL)
+    {
+        /* Insert into unoccupied list */
+        thread->l.next = thread;
+        thread->l.prev = thread;
+        *list = thread;
+    }
+    else
+    {
+        /* Insert last */
+        thread->l.next = *list;
+        thread->l.prev = (*list)->l.prev;
+        thread->l.prev->l.next = thread;
+        (*list)->l.prev = thread;
+    }
+}
+
+static void remove_from_list_l(struct thread_entry **list,
+                               struct thread_entry *thread)
+{
+    if (thread == thread->l.next)
+    {
+        /* The only item */
+        *list = NULL;
+        return;
+    }
+
+    if (thread == *list)
+    {
+        /* List becomes next item */
+        *list = thread->l.next;
+    }
+
+    /* Fix links to jump over the removed entry. */
+    thread->l.prev->l.next = thread->l.next;
+    thread->l.next->l.prev = thread->l.prev;
+}
+
+struct thread_entry *thread_get_current(void)
+{
+    return running;
+}
+
+void switch_thread(struct thread_entry *old)
 {
     struct thread_entry *current = running;

@@ -235,7 +237,7 @@ void switch_thread(struct thread_entry *old)
     if (threads_exit)
         remove_thread(NULL);

-    (void)save_context; (void)blocked_list;
+    (void)old;
 }

 void sleep_thread(int ticks)
@@ -244,7 +246,7 @@ void sleep_thread(int ticks)
     int rem;

     current = running;
-    current->statearg = STATE_SLEEPING;
+    current->state = STATE_SLEEPING;

     rem = (SDL_GetTicks() - start_tick) % (1000/HZ);
     if (rem < 0)
@@ -267,7 +269,7 @@ void sleep_thread(int ticks)

     running = current;

-    current->statearg = STATE_RUNNING;
+    current->state = STATE_RUNNING;

     if (threads_exit)
         remove_thread(NULL);
@@ -289,10 +291,21 @@ int runthread(void *data)
     if (setjmp(*current_jmpbuf) == 0)
     {
         /* Run the thread routine */
-        current->context.start();
-        THREAD_SDL_DEBUGF("Thread Done: %d (%s)\n",
-                          current - threads, THREAD_SDL_GET_NAME(current));
-        /* Thread routine returned - suicide */
+        if (current->state == STATE_FROZEN)
+        {
+            SDL_CondWait(current->context.c, m);
+            running = current;
+        }
+
+        if (!threads_exit)
+        {
+            current->context.start();
+            THREAD_SDL_DEBUGF("Thread Done: %d (%s)\n",
+                              current - threads, THREAD_SDL_GET_NAME(current));
+            /* Thread routine returned - suicide */
+        }
+
         remove_thread(NULL);
     }
     else
@@ -306,7 +319,7 @@ int runthread(void *data)

 struct thread_entry*
     create_thread(void (*function)(void), void* stack, int stack_size,
-                  const char *name)
+                  unsigned flags, const char *name)
 {
     /** Avoid compiler warnings */
     SDL_Thread* t;
@@ -340,7 +353,8 @@ struct thread_entry*
     threads[slot].stack = stack;
     threads[slot].stack_size = stack_size;
     threads[slot].name = name;
-    threads[slot].statearg = STATE_RUNNING;
+    threads[slot].state = (flags & CREATE_THREAD_FROZEN) ?
+        STATE_FROZEN : STATE_RUNNING;
     threads[slot].context.start = function;
     threads[slot].context.t = t;
     threads[slot].context.c = cond;
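CREATE_THREAD_FROZEN lets the creator finish setting up shared state before the new thread executes a single instruction; thread_thaw() then releases it. A sketch, where worker_main, worker_stack, and the priority are hypothetical:

    static struct thread_entry *worker;

    void start_worker(void)
    {
        /* created suspended: runthread() parks it on its condition variable */
        worker = create_thread(worker_main, worker_stack, sizeof(worker_stack),
                               CREATE_THREAD_FROZEN, "worker"
                               IF_PRIO(, PRIORITY_BACKGROUND) IF_COP(, CPU));

        /* ... safely publish state the worker will read ... */

        thread_thaw(worker);   /* STATE_FROZEN -> STATE_RUNNING, signal it */
    }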
@@ -351,12 +365,13 @@ struct thread_entry*
     return &threads[slot];
 }

-void block_thread(struct thread_entry **list)
+void _block_thread(struct thread_queue *tq)
 {
     struct thread_entry *thread = running;

-    thread->statearg = STATE_BLOCKED;
-    add_to_list(list, thread);
+    thread->state = STATE_BLOCKED;
+    thread->bqp = tq;
+    add_to_list_l(&tq->queue, thread);

     SDL_CondWait(thread->context.c, m);
     running = thread;
@@ -365,44 +380,56 @@ void block_thread(struct thread_entry **list)
         remove_thread(NULL);
 }

-void block_thread_w_tmo(struct thread_entry **list, int ticks)
+void block_thread_w_tmo(struct thread_queue *tq, int ticks)
 {
     struct thread_entry *thread = running;

-    thread->statearg = STATE_BLOCKED_W_TMO;
-    add_to_list(list, thread);
+    thread->state = STATE_BLOCKED_W_TMO;
+    thread->bqp = tq;
+    add_to_list_l(&tq->queue, thread);

     SDL_CondWaitTimeout(thread->context.c, m, (1000/HZ) * ticks);
     running = thread;

-    if (thread->statearg == STATE_BLOCKED_W_TMO)
+    if (thread->state == STATE_BLOCKED_W_TMO)
     {
         /* Timed out */
-        remove_from_list(list, thread);
-        thread->statearg = STATE_RUNNING;
+        remove_from_list_l(&tq->queue, thread);
+        thread->state = STATE_RUNNING;
     }

     if (threads_exit)
         remove_thread(NULL);
 }

-void wakeup_thread(struct thread_entry **list)
+struct thread_entry * _wakeup_thread(struct thread_queue *tq)
 {
-    struct thread_entry *thread = *list;
+    struct thread_entry *thread = tq->queue;

     if (thread == NULL)
     {
-        return;
+        return NULL;
     }

-    switch (thread->statearg)
+    switch (thread->state)
     {
     case STATE_BLOCKED:
     case STATE_BLOCKED_W_TMO:
-        remove_from_list(list, thread);
-        thread->statearg = STATE_RUNNING;
+        remove_from_list_l(&tq->queue, thread);
+        thread->state = STATE_RUNNING;
         SDL_CondSignal(thread->context.c);
+        return thread;
+    default:
+        return NULL;
     }
 }

+void thread_thaw(struct thread_entry *thread)
+{
+    if (thread->state == STATE_FROZEN)
+    {
+        thread->state = STATE_RUNNING;
+        SDL_CondSignal(thread->context.c);
+    }
+}
+
@@ -434,12 +461,24 @@ void remove_thread(struct thread_entry *thread)
     thread->context.t = NULL;

     if (thread != current)
     {
+        switch (thread->state)
+        {
+        case STATE_BLOCKED:
+        case STATE_BLOCKED_W_TMO:
+            /* Remove thread from object it's waiting on */
+            remove_from_list_l(&thread->bqp->queue, thread);
+            break;
+        }
+
         SDL_CondSignal(c);
     }

     THREAD_SDL_DEBUGF("Removing thread: %d (%s)\n",
                       thread - threads, THREAD_SDL_GET_NAME(thread));

-    thread->name = NULL;
+    thread_queue_wake_no_listlock(&thread->queue);
+    thread->state = STATE_KILLED;

     SDL_DestroyCond(c);

@@ -453,15 +492,26 @@ void remove_thread(struct thread_entry *thread)
         SDL_KillThread(t);
 }

+void thread_wait(struct thread_entry *thread)
+{
+    if (thread == NULL)
+        thread = running;
+
+    if (thread->state != STATE_KILLED)
+    {
+        block_thread_no_listlock(&thread->queue);
+    }
+}
+
 int thread_stack_usage(const struct thread_entry *thread)
 {
     return 50;
+    (void)thread;
 }

-int thread_get_status(const struct thread_entry *thread)
+unsigned thread_get_status(const struct thread_entry *thread)
 {
-    return thread->statearg;
+    return thread->state;
 }

 /* Return name if one or ID if none */
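remove_thread() now wakes anything queued on the dying thread itself, which is what turns the new thread_wait() into a join: the waiter blocks on the target's own queue and is released when the target reaches STATE_KILLED. A sketch using the API added above (task and task_stack are hypothetical):

    struct thread_entry *t =
        create_thread(task, task_stack, sizeof(task_stack), 0,
                      "task" IF_PRIO(, PRIORITY_SYSTEM) IF_COP(, CPU));
    /* ... */
    thread_wait(t);   /* blocks until task returns and is marked STATE_KILLED */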