Change the thread API a bit.

* Remove the THREAD_ID_CURRENT macro in favor of a thread_self() function; this lets the thread functions be simpler (a short before/after sketch follows below).
* Add a thread_self_entry() shortcut for kernel.c.
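A minimal before/after sketch of the call-site change (illustration only, not part of the diff; thread_set_priority() and PRIORITY_PLAYBACK are taken from the hunks below):

    /* before: a sentinel id meant "the calling thread" */
    int old_prio = thread_set_priority(THREAD_ID_CURRENT, PRIORITY_PLAYBACK - 4);

    /* after: ask the scheduler for the calling thread's real id */
    int old_prio = thread_set_priority(thread_self(), PRIORITY_PLAYBACK - 4);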

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@29521 a1c6a512-1295-4272-9138-f99709370657
Thomas Martitz 2011-03-05 17:48:06 +00:00
parent 0b0f99b18e
commit cc889e9d60
13 changed files with 67 additions and 55 deletions

View file

@@ -567,7 +567,7 @@ void codec_thread_resume(void)
bool is_codec_thread(void)
{
- return thread_get_current() == codec_thread_id;
+ return thread_self() == codec_thread_id;
}
#ifdef HAVE_PRIORITY_SCHEDULING

View file

@@ -779,6 +779,7 @@ static const struct plugin_api rockbox_api = {
/* new stuff at the end, sort into place next time
the API gets incompatible */
+ thread_self,
};
int plugin_load(const char* plugin, const void* parameter)

View file

@@ -145,7 +145,7 @@ void* plugin_get_buffer(size_t *buffer_size);
#define PLUGIN_MAGIC 0x526F634B /* RocK */
/* increase this every time the api struct changes */
- #define PLUGIN_API_VERSION 200
+ #define PLUGIN_API_VERSION 201
/* update this to latest version if a change to the api struct breaks
backwards compatibility (and please take the opportunity to sort in any
@@ -909,6 +909,7 @@ struct plugin_api {
/* new stuff at the end, sort into place next time
the API gets incompatible */
+ unsigned int (*thread_self)(void);
};
/* plugin header */
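With the new API entry and the version bump above, a plugin can query its own thread id through the API table. A hypothetical plugin snippet (assumes a build against API version 201 or later; mirrors the mpegplayer hunk below):

    unsigned int my_id = rb->thread_self();
    #ifdef HAVE_PRIORITY_SCHEDULING
    int old_prio = rb->thread_set_priority(my_id, PRIORITY_PLAYBACK - 4);
    #endif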

View file

@@ -468,7 +468,7 @@ static void audio_thread(void)
struct audio_thread_data td;
#ifdef HAVE_PRIORITY_SCHEDULING
/* Up the priority since the core DSP over-yields internally */
- int old_priority = rb->thread_set_priority(THREAD_ID_CURRENT,
+ int old_priority = rb->thread_set_priority(rb->thread_self(),
PRIORITY_PLAYBACK-4);
#endif
@@ -514,7 +514,7 @@ static void audio_thread(void)
default:
{
#ifdef HAVE_PRIORITY_SCHEDULING
- rb->thread_set_priority(THREAD_ID_CURRENT, old_priority);
+ rb->thread_set_priority(rb->thread_self(), old_priority);
#endif
return;
}

View file

@@ -897,8 +897,8 @@ static void pcmrec_flush(unsigned flush_num)
priority until finished */
logf("pcmrec: boost (%s)",
num >= flood_watermark ? "num" : "time");
- prio_pcmrec = thread_set_priority(THREAD_ID_CURRENT,
- thread_get_priority(THREAD_ID_CURRENT) - 4);
+ prio_pcmrec = thread_set_priority(thread_self(),
+ thread_get_priority(thread_self()) - 4);
prio_codec = codec_thread_set_priority(
codec_thread_get_priority() - 4);
}
@@ -950,7 +950,7 @@ static void pcmrec_flush(unsigned flush_num)
{
/* return to original priorities */
logf("pcmrec: unboost priority");
- thread_set_priority(THREAD_ID_CURRENT, prio_pcmrec);
+ thread_set_priority(thread_self(), prio_pcmrec);
codec_thread_set_priority(prio_codec);
}

View file

@@ -99,8 +99,7 @@ static void ata_lock_init(struct ata_lock *l)
static void ata_lock_lock(struct ata_lock *l)
{
- struct thread_entry * const current =
- thread_id_entry(THREAD_ID_CURRENT);
+ struct thread_entry * const current = thread_self_entry();
if (current == l->thread)
{

View file

@@ -323,9 +323,6 @@ struct thread_entry
#define THREAD_ID_SLOT_MASK 0x00ff
#define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n))
- /* Specify current thread in a function taking an ID. */
- #define THREAD_ID_CURRENT ((unsigned int)-1)
#ifdef HAVE_CORELOCK_OBJECT
/* Operations to be performed just before stopping a thread and starting
a new one if specified before calling switch_thread */
@@ -445,7 +442,13 @@ int thread_get_io_priority(unsigned int thread_id);
#if NUM_CORES > 1
unsigned int switch_core(unsigned int new_core);
#endif
- unsigned int thread_get_current(void);
+ /* Return the id of the calling thread. */
+ unsigned int thread_self(void);
+ /* Return the thread_entry for the calling thread.
+  * INTERNAL: Intended for use by kernel and not for programs. */
+ struct thread_entry* thread_self_entry(void);
/* Debugging info - only! */
int thread_stack_usage(const struct thread_entry *thread);
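The two new calls split the old THREAD_ID_CURRENT duties: ordinary code deals in thread ids, while kernel code can grab the thread_entry directly. A rough sketch of the intended usage (fragments only, not part of the diff; the calls mirror the usb and kernel.c hunks below):

    /* application / driver code: ids only */
    thread_set_priority(thread_self(), PRIORITY_SYSTEM);

    /* kernel-internal code (e.g. kernel.c): the entry itself */
    struct thread_entry *current = thread_self_entry();
    current->bqp = &q->queue;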

View file

@@ -509,7 +509,7 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
#ifdef HAVE_PRIORITY_SCHEDULING
KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
- QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
+ QUEUE_GET_THREAD(q) == thread_self_entry(),
"queue_wait->wrong thread\n");
#endif
@@ -527,7 +527,7 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
if (rd != q->write) /* A waking message could disappear */
break;
- current = thread_id_entry(THREAD_ID_CURRENT);
+ current = thread_self_entry();
IF_COP( current->obj_cl = &q->cl; )
current->bqp = &q->queue;
@@ -559,7 +559,7 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
- QUEUE_GET_THREAD(q) == thread_id_entry(THREAD_ID_CURRENT),
+ QUEUE_GET_THREAD(q) == thread_self_entry(),
"queue_wait_w_tmo->wrong thread\n");
#endif
@@ -573,7 +573,7 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
wr = q->write;
if (rd == wr && ticks > 0)
{
- struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);
+ struct thread_entry *current = thread_self_entry();
IF_COP( current->obj_cl = &q->cl; )
current->bqp = &q->queue;
@@ -658,7 +658,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
struct queue_sender_list *send = q->send;
struct thread_entry **spp = &send->senders[wr];
- struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);
+ struct thread_entry *current = thread_self_entry();
if(UNLIKELY(*spp))
{
@@ -893,7 +893,7 @@ void mutex_init(struct mutex *m)
/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
- struct thread_entry *current = thread_id_entry(THREAD_ID_CURRENT);
+ struct thread_entry *current = thread_self_entry();
if(current == mutex_get_thread(m))
{
@@ -932,10 +932,10 @@ void mutex_lock(struct mutex *m)
void mutex_unlock(struct mutex *m)
{
/* unlocker not being the owner is an unlocking violation */
- KERNEL_ASSERT(mutex_get_thread(m) == thread_id_entry(THREAD_ID_CURRENT),
+ KERNEL_ASSERT(mutex_get_thread(m) == thread_self_entry(),
"mutex_unlock->wrong thread (%s != %s)\n",
mutex_get_thread(m)->name,
- thread_id_entry(THREAD_ID_CURRENT)->name);
+ thread_self_entry()->name);
if(m->recursion > 0)
{
@@ -1019,7 +1019,7 @@ int semaphore_wait(struct semaphore *s, int timeout)
else
{
/* too many waits - block until count is upped... */
- struct thread_entry * current = thread_id_entry(THREAD_ID_CURRENT);
+ struct thread_entry * current = thread_self_entry();
IF_COP( current->obj_cl = &s->cl; )
current->bqp = &s->queue;
/* return value will be OBJ_WAIT_SUCCEEDED after wait if wake was

View file

@@ -71,14 +71,14 @@ static void storage_wait_turn(IF_MD_NONVOID(int drive))
#ifndef HAVE_MULTIDRIVE
int drive=0;
#endif
- int my_prio = thread_get_io_priority(THREAD_ID_CURRENT);
+ int my_prio = thread_get_io_priority(thread_self());
int loops=my_prio;
while(storage_should_wait(drive, my_prio) && (loops--)>=0)
{
sleep(STORAGE_DELAY_UNIT);
}
- storage_last_thread[drive] = thread_get_current();
+ storage_last_thread[drive] = thread_self();
storage_last_activity[drive] = current_tick;
}
#endif

View file

@@ -197,7 +197,7 @@ static enum
static inline void charging_set_thread_priority(bool charging)
{
#ifdef HAVE_PRIORITY_SCHEDULING
- thread_set_priority(THREAD_ID_CURRENT,
+ thread_set_priority(thread_self(),
charging ? PRIORITY_REALTIME : PRIORITY_SYSTEM);
#endif
(void)charging;

View file

@@ -253,9 +253,7 @@ void * sim_thread_unlock(void)
struct thread_entry * thread_id_entry(unsigned int thread_id)
{
- return (thread_id == THREAD_ID_CURRENT) ?
- cores[CURRENT_CORE].running :
- &threads[thread_id & THREAD_ID_SLOT_MASK];
+ return &threads[thread_id & THREAD_ID_SLOT_MASK];
}
static void add_to_list_l(struct thread_entry **list,
@@ -299,11 +297,16 @@ static void remove_from_list_l(struct thread_entry **list,
thread->l.next->l.prev = thread->l.prev;
}
- unsigned int thread_get_current(void)
+ unsigned int thread_self(void)
{
return cores[CURRENT_CORE].running->id;
}
+ struct thread_entry* thread_self_entry(void)
+ {
+ return cores[CURRENT_CORE].running;
+ }
void switch_thread(void)
{
struct thread_entry *current = cores[CURRENT_CORE].running;
@@ -562,7 +565,7 @@ void remove_thread(unsigned int thread_id)
SDL_Thread *t;
SDL_sem *s;
- if (thread_id != THREAD_ID_CURRENT && thread->id != thread_id)
+ if (thread->id != thread_id)
return;
int oldlevel = disable_irq_save();
@@ -629,11 +632,11 @@ void remove_thread(unsigned int thread_id)
void thread_exit(void)
{
- remove_thread(THREAD_ID_CURRENT);
+ unsigned int id = thread_self();
+ remove_thread(id);
/* This should never and must never be reached - if it is, the
* state is corrupted */
- THREAD_PANICF("thread_exit->K:*R (ID: %d)",
- thread_id_entry(THREAD_ID_CURRENT)->id);
+ THREAD_PANICF("thread_exit->K:*R (ID: %d)", id);
while (1);
}
@@ -642,8 +645,7 @@ void thread_wait(unsigned int thread_id)
struct thread_entry *current = cores[CURRENT_CORE].running;
struct thread_entry *thread = thread_id_entry(thread_id);
- if (thread_id == THREAD_ID_CURRENT ||
- (thread->id == thread_id && thread->state != STATE_KILLED))
+ if (thread->id == thread_id && thread->state != STATE_KILLED)
{
current->bqp = &thread->queue;
block_thread(current);

View file

@@ -1517,9 +1517,27 @@ static struct thread_entry * find_empty_thread_slot(void)
*/
struct thread_entry * thread_id_entry(unsigned int thread_id)
{
- return (thread_id == THREAD_ID_CURRENT) ?
- cores[CURRENT_CORE].running :
- &threads[thread_id & THREAD_ID_SLOT_MASK];
+ return &threads[thread_id & THREAD_ID_SLOT_MASK];
}
+ /*---------------------------------------------------------------------------
+  * Return the thread id of the calling thread
+  * --------------------------------------------------------------------------
+  */
+ unsigned int thread_self(void)
+ {
+ return cores[CURRENT_CORE].running->id;
+ }
+ /*---------------------------------------------------------------------------
+  * Return the thread entry of the calling thread.
+  *
+  * INTERNAL: Intended for use by kernel and not for programs.
+  *---------------------------------------------------------------------------
+  */
+ struct thread_entry* thread_self_entry(void)
+ {
+ return cores[CURRENT_CORE].running;
+ }
/*---------------------------------------------------------------------------
@@ -1675,8 +1693,7 @@ void thread_wait(unsigned int thread_id)
corelock_lock(&thread->waiter_cl);
/* Be sure it hasn't been killed yet */
- if (thread_id == THREAD_ID_CURRENT ||
- (thread->id == thread_id && thread->state != STATE_KILLED))
+ if (thread->id == thread_id && thread->state != STATE_KILLED)
{
IF_COP( current->obj_cl = &thread->waiter_cl; )
current->bqp = &thread->queue;
@@ -1973,8 +1990,7 @@ int thread_set_priority(unsigned int thread_id, int priority)
LOCK_THREAD(thread);
/* Make sure it's not killed */
- if (thread_id == THREAD_ID_CURRENT ||
- (thread->id == thread_id && thread->state != STATE_KILLED))
+ if (thread->id == thread_id && thread->state != STATE_KILLED)
{
int old_priority = thread->priority;
@@ -2099,8 +2115,7 @@ int thread_get_priority(unsigned int thread_id)
/* Simply check without locking slot. It may or may not be valid by the
* time the function returns anyway. If all tests pass, it is the
* correct value for when it was valid. */
- if (thread_id != THREAD_ID_CURRENT &&
- (thread->id != thread_id || thread->state == STATE_KILLED))
+ if (thread->id != thread_id || thread->state == STATE_KILLED)
base_priority = -1;
return base_priority;
@@ -2143,15 +2158,6 @@ void thread_thaw(unsigned int thread_id)
restore_irq(oldlevel);
}
- /*---------------------------------------------------------------------------
-  * Return the ID of the currently executing thread.
-  *---------------------------------------------------------------------------
-  */
- unsigned int thread_get_current(void)
- {
- return cores[CURRENT_CORE].running->id;
- }
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
* Switch the processor that the currently executing thread runs on.

View file

@@ -220,7 +220,7 @@ static inline void usb_slave_mode(bool on)
{
trigger_cpu_boost();
#ifdef HAVE_PRIORITY_SCHEDULING
- thread_set_priority(THREAD_ID_CURRENT, PRIORITY_REALTIME);
+ thread_set_priority(thread_self(), PRIORITY_REALTIME);
#endif
disk_unmount_all();
usb_attach();
@@ -229,7 +229,7 @@ static inline void usb_slave_mode(bool on)
{
usb_enable(false);
#ifdef HAVE_PRIORITY_SCHEDULING
- thread_set_priority(THREAD_ID_CURRENT, PRIORITY_SYSTEM);
+ thread_set_priority(thread_self(), PRIORITY_SYSTEM);
#endif
/* Entered exclusive mode */
rc = disk_mount_all();