Base scheduler queues off linked lists and does cleanup/consolidation

Abstracts threading from itself a bit, changes the way its queues are
handled and does type hiding for that as well.

Do a lot here due to the already-required major brain surgery.

Threads may now be on a run queue and a wait queue simultaneously so
that the expired timer only has to wake the thread but not remove it
from the wait queue which simplifies the implicit wake handling.

List formats change for wait queues-- doubly-linked, not circular.
Timeout queue is now singly-linked. The run queue is still circular
as before.

Adds a better thread slot allocator that may keep the slot marked as
used regardless of the thread state. Assists in dumping special tasks
that switch_thread was tasked to perform (blocking tasks).

Deletes a lot of code yet, surprisingly, gets larger than expected.
Well, I'm not minding that for the time being -- omelettes and breaking
a few eggs and all that.

Change-Id: I0834d7bb16b2aecb2f63b58886eeda6ae4f29d59
This commit is contained in:
Michael Sevakis 2014-08-08 06:33:51 -04:00
parent eb63d8b4a2
commit 6ed00870ab
20 changed files with 1550 additions and 2057 deletions

View file

@ -151,25 +151,21 @@ static const char* threads_getname(int selected_item, void *data,
selected_item -= NUM_CORES;
#endif
const char *fmtstr = "%2d: ---";
struct thread_debug_info threadinfo;
if (thread_get_debug_info(selected_item, &threadinfo) <= 0)
if (thread_get_debug_info(selected_item, &threadinfo) > 0)
{
snprintf(buffer, buffer_len, "%2d: ---", selected_item);
return buffer;
fmtstr = "%2d:" IF_COP(" (%d)") " %s" IF_PRIO(" %d %d")
IFN_SDL(" %2d%%") " %s";
}
snprintf(buffer, buffer_len,
"%2d: " IF_COP("(%d) ") "%s " IF_PRIO("%d %d ") "%2d%% %s",
snprintf(buffer, buffer_len, fmtstr,
selected_item,
#if NUM_CORES > 1
threadinfo.core,
#endif
IF_COP(threadinfo.core,)
threadinfo.statusstr,
#ifdef HAVE_PRIORITY_SCHEDULING
threadinfo.base_priority,
threadinfo.current_priority,
#endif
threadinfo.stack_usage,
IF_PRIO(threadinfo.base_priority, threadinfo.current_priority,)
IFN_SDL(threadinfo.stack_usage,)
threadinfo.name);
return buffer;
@ -187,16 +183,9 @@ static bool dbg_os(void)
{
struct simplelist_info info;
simplelist_info_init(&info, IF_COP("Core and ") "Stack usage:",
#if NUM_CORES == 1
MAXTHREADS,
#else
MAXTHREADS+NUM_CORES,
#endif
NULL);
#ifndef ROCKBOX_HAS_LOGF
MAXTHREADS IF_COP( + NUM_CORES ), NULL);
info.hide_selection = true;
info.scroll_all = true;
#endif
info.action_callback = dbg_threads_action_callback;
info.get_name = threads_getname;
return simplelist_show_list(&info);

View file

@ -86,6 +86,8 @@ static inline void load_context(const void* addr)
);
}
#ifdef RB_PROFILE
/*---------------------------------------------------------------------------
* Call this from asm to make sure the sp is pointing to the
* correct place before the context is saved.
@ -99,3 +101,6 @@ static inline void _profile_thread_stopped(int current_thread)
:: [id] "r" (current_thread)
: "cc", "memory");
}
#define profile_thread_stopped _profile_thread_stopped
#endif /* RB_PROFILE */

View file

@ -118,15 +118,17 @@ int get_cpu_boost_counter(void);
#define ALIGN_UP(n, a) ALIGN_DOWN((n)+((a)-1),a)
/* align start and end of buffer to nearest integer multiple of a */
#define ALIGN_BUFFER(ptr,len,align) \
{\
uintptr_t tmp_ptr1 = (uintptr_t)ptr; \
uintptr_t tmp_ptr2 = tmp_ptr1 + len;\
tmp_ptr1 = ALIGN_UP(tmp_ptr1,align); \
tmp_ptr2 = ALIGN_DOWN(tmp_ptr2,align); \
len = tmp_ptr2 - tmp_ptr1; \
ptr = (typeof(ptr))tmp_ptr1; \
}
#define ALIGN_BUFFER(ptr, size, align) \
({ \
size_t __sz = (size); \
size_t __ali = (align); \
uintptr_t __a1 = (uintptr_t)(ptr); \
uintptr_t __a2 = __a1 + __sz; \
__a1 = ALIGN_UP(__a1, __ali); \
__a2 = ALIGN_DOWN(__a2, __ali); \
(ptr) = (typeof (ptr))__a1; \
(size) = __a2 > __a1 ? __a2 - __a1 : 0; \
})
#define PTR_ADD(ptr, x) ((typeof(ptr))((char*)(ptr) + (x)))
#define PTR_SUB(ptr, x) ((typeof(ptr))((char*)(ptr) - (x)))
@ -150,11 +152,16 @@ int get_cpu_boost_counter(void);
#endif
/* Get the byte offset of a type's member */
#define OFFSETOF(type, membername) ((off_t)&((type *)0)->membername)
#ifndef offsetof
#define offsetof(type, member) __builtin_offsetof(type, member)
#endif
/* Get the type pointer from one of its members */
#define TYPE_FROM_MEMBER(type, memberptr, membername) \
((type *)((intptr_t)(memberptr) - OFFSETOF(type, membername)))
/* Get the containing item of *ptr in type */
#ifndef container_of
#define container_of(ptr, type, member) ({ \
const typeof (((type *)0)->member) *__mptr = (ptr); \
(type *)((void *)(__mptr) - offsetof(type, member)); })
#endif
/* returns index of first set bit or 32 if no bits are set */
#if defined(CPU_ARM) && ARM_ARCH >= 5 && !defined(__thumb__)
@ -324,6 +331,11 @@ static inline uint32_t swaw32_hw(uint32_t value)
* for all ARM CPUs. */
#ifdef CPU_ARM
#define HAVE_CPU_CACHE_ALIGN
#define MIN_STACK_ALIGN 8
#endif
#ifndef MIN_STACK_ALIGN
#define MIN_STACK_ALIGN (sizeof (uintptr_t))
#endif
/* Calculate CACHEALIGN_SIZE from CACHEALIGN_BITS */

View file

@ -39,10 +39,9 @@
*/
struct mrsw_lock
{
int volatile count; /* rd/wr counter; >0 = reader(s), <0 = writer */
struct thread_entry *queue;
struct blocker_splay splay; /* priority inheritance info
for waiters */
int volatile count; /* counter; >0 = reader(s), <0 = writer */
struct __wait_queue queue; /* waiter list */
struct blocker_splay splay; /* priority inheritance/owner info */
uint8_t rdrecursion[MAXTHREADS]; /* per-thread reader recursion counts */
IF_COP( struct corelock cl; )
};

View file

@ -26,7 +26,7 @@
struct mutex
{
struct thread_entry *queue; /* waiter list */
struct __wait_queue queue; /* waiter list */
int recursion; /* lock owner recursion count */
struct blocker blocker; /* priority inheritance info
for waiters and owner*/

View file

@ -88,7 +88,7 @@ struct queue_sender_list
/* If non-NULL, there is a thread waiting for the corresponding event */
/* Must be statically allocated to put in non-cached ram. */
struct thread_entry *senders[QUEUE_LENGTH]; /* message->thread map */
struct thread_entry *list; /* list of senders in map */
struct __wait_queue list; /* list of senders in map */
/* Send info for last message dequeued or NULL if replied or not sent */
struct thread_entry * volatile curr_sender;
#ifdef HAVE_PRIORITY_SCHEDULING
@ -108,7 +108,7 @@ struct queue_sender_list
struct event_queue
{
struct thread_entry *queue; /* waiter list */
struct __wait_queue queue; /* waiter list */
struct queue_event events[QUEUE_LENGTH]; /* list of events */
unsigned int volatile read; /* head of queue */
unsigned int volatile write; /* tail of queue */

View file

@ -26,7 +26,7 @@
struct semaphore
{
struct thread_entry *queue; /* Waiter list */
struct __wait_queue queue; /* Waiter list */
int volatile count; /* # of waits remaining before unsignaled */
int max; /* maximum # of waits to remain signaled */
IF_COP( struct corelock cl; ) /* multiprocessor sync */

View file

@ -26,6 +26,7 @@
#include <stdbool.h>
#include "config.h"
#include "gcc_extensions.h"
#include "linked_list.h"
#include "bitarray.h"
#include "corelock.h"
@ -52,7 +53,7 @@
#define PRIORITY_REALTIME_4 4
#define PRIORITY_REALTIME 4 /* Lowest realtime range */
#define PRIORITY_BUFFERING 15 /* Codec buffering thread */
#define PRIORITY_USER_INTERFACE 16 /* The main thread */
#define PRIORITY_USER_INTERFACE 16 /* For most UI thrads */
#define PRIORITY_RECORDING 16 /* Recording thread */
#define PRIORITY_PLAYBACK 16 /* Variable between this and MAX */
#define PRIORITY_PLAYBACK_MAX 5 /* Maximum allowable playback priority */
@ -61,6 +62,7 @@
#define NUM_PRIORITIES 32
#define PRIORITY_IDLE 32 /* Priority representative of no tasks */
#define PRIORITY_MAIN_THREAD PRIORITY_USER_INTERFACE
#define IO_PRIORITY_IMMEDIATE 0
#define IO_PRIORITY_BACKGROUND 32
@ -108,6 +110,9 @@ extern unsigned sleep(unsigned ticks);
#define IFN_PRIO(...) __VA_ARGS__
#endif
#define __wait_queue lld_head
#define __wait_queue_node lld_node
/* Basic structure describing the owner of an object */
struct blocker
{
@ -168,6 +173,7 @@ int thread_get_priority(unsigned int thread_id);
void thread_set_io_priority(unsigned int thread_id, int io_priority);
int thread_get_io_priority(unsigned int thread_id);
#endif /* HAVE_IO_PRIORITY */
#if NUM_CORES > 1
unsigned int switch_core(unsigned int new_core);
#endif
@ -186,11 +192,21 @@ int core_get_debug_info(unsigned int core, struct core_debug_info *infop);
#endif /* NUM_CORES */
#ifdef HAVE_SDL_THREADS
#define IF_SDL(x...) x
#define IFN_SDL(x...)
#else
#define IF_SDL(x...)
#define IFN_SDL(x...) x
#endif
struct thread_debug_info
{
char statusstr[4];
char name[32];
#ifndef HAVE_SDL_THREADS
unsigned int stack_usage;
#endif
#if NUM_CORES > 1
unsigned int core;
#endif

View file

@ -19,7 +19,8 @@
*
****************************************************************************/
#include "kernel-internal.h"
#include "mrsw-lock.h"
#include <string.h>
#include "mrsw_lock.h"
#ifdef HAVE_PRIORITY_SCHEDULING
@ -34,13 +35,14 @@ mrsw_reader_claim(struct mrsw_lock *mrsw, struct thread_entry *current,
static FORCE_INLINE void
mrsw_reader_relinquish(struct mrsw_lock *mrsw, struct thread_entry *current,
int count, unsigned int slotnum)
struct thread_entry *first, int count,
unsigned int slotnum)
{
/* If no writer is queued or has ownership then noone is queued;
if a writer owns it, then the reader would be blocked instead.
Therefore, if the queue has threads, then the next after the
owning readers is a writer and this is not the last reader. */
if (mrsw->queue)
if (first)
corelock_lock(&mrsw->splay.cl);
threadbit_clear_bit(&mrsw->splay.mask, slotnum);
@ -61,10 +63,10 @@ mrsw_reader_relinquish(struct mrsw_lock *mrsw, struct thread_entry *current,
threadbit_popcount(&mrsw->splay.mask));
/* switch owner to sole remaining reader */
slotnum = threadbit_ffs(&mrsw->splay.mask);
mrsw->splay.blocker.thread = thread_id_entry(slotnum);
mrsw->splay.blocker.thread = __thread_slot_entry(slotnum);
}
if (mrsw->queue)
if (first)
{
priority_disinherit(current, &mrsw->splay.blocker);
corelock_unlock(&mrsw->splay.cl);
@ -72,23 +74,25 @@ mrsw_reader_relinquish(struct mrsw_lock *mrsw, struct thread_entry *current,
}
static FORCE_INLINE unsigned int
mrsw_reader_wakeup_writer(struct mrsw_lock *mrsw, unsigned int slotnum)
mrsw_reader_wakeup_writer(struct mrsw_lock *mrsw, struct thread_entry *thread,
unsigned int slotnum)
{
threadbit_clear_bit(&mrsw->splay.mask, slotnum);
return wakeup_thread(&mrsw->queue, WAKEUP_TRANSFER);
return wakeup_thread(thread, WAKEUP_TRANSFER);
}
static FORCE_INLINE unsigned int
mrsw_writer_wakeup_writer(struct mrsw_lock *mrsw)
mrsw_writer_wakeup_writer(struct mrsw_lock *mrsw, struct thread_entry *thread)
{
return wakeup_thread(&mrsw->queue, WAKEUP_TRANSFER);
return wakeup_thread(thread, WAKEUP_TRANSFER);
(void)mrsw;
}
static FORCE_INLINE unsigned int
mrsw_writer_wakeup_readers(struct mrsw_lock *mrsw)
mrsw_writer_wakeup_readers(struct mrsw_lock *mrsw, struct thread_entry *first)
{
unsigned int result = wakeup_thread(&mrsw->queue, WAKEUP_TRANSFER_MULTI);
mrsw->count = thread_self_entry()->retval;
unsigned int result = wakeup_thread(first, WAKEUP_TRANSFER_MULTI);
mrsw->count = __running_self_entry()->retval;
return result;
}
@ -97,32 +101,36 @@ mrsw_writer_wakeup_readers(struct mrsw_lock *mrsw)
#define mrsw_reader_claim(mrsw, current, count, slotnum) \
do {} while (0)
#define mrsw_reader_relinquish(mrsw, current, count, slotnum) \
#define mrsw_reader_relinquish(mrsw, current, first, count, slotnum) \
do {} while (0)
static FORCE_INLINE unsigned int
mrsw_reader_wakeup_writer(struct mrsw_lock *mrsw)
mrsw_reader_wakeup_writer(struct mrsw_lock *mrsw, struct thread_entry *thread)
{
mrsw->splay.blocker.thread = mrsw->queue;
return wakeup_thread(&mrsw->queue);
mrsw->splay.blocker.thread = thread;
return wakeup_thread(thread);
}
static FORCE_INLINE unsigned int
mrsw_writer_wakeup_writer(struct mrsw_lock *mrsw)
mrsw_writer_wakeup_writer(struct mrsw_lock *mrsw, struct thread_entry *thread)
{
mrsw->splay.blocker.thread = mrsw->queue;
return wakeup_thread(&mrsw->queue);
mrsw->splay.blocker.thread = thread;
return wakeup_thread(thread);
}
static FORCE_INLINE unsigned int
mrsw_writer_wakeup_readers(struct mrsw_lock *mrsw)
mrsw_writer_wakeup_readers(struct mrsw_lock *mrsw, struct thread_entry *first)
{
mrsw->splay.blocker.thread = NULL;
int count = 0;
int count = 1;
while (mrsw->queue && mrsw->queue->retval != 0)
while (1)
{
wakeup_thread(&mrsw->queue);
wakeup_thread(first);
if (!(first = WQ_THREAD_FIRST(&mrsw->queue)) || first->retval == 0)
break;
count++;
}
@ -138,14 +146,11 @@ mrsw_writer_wakeup_readers(struct mrsw_lock *mrsw)
void mrsw_init(struct mrsw_lock *mrsw)
{
mrsw->count = 0;
mrsw->queue = NULL;
mrsw->splay.blocker.thread = NULL;
wait_queue_init(&mrsw->queue);
blocker_splay_init(&mrsw->splay);
#ifdef HAVE_PRIORITY_SCHEDULING
mrsw->splay.blocker.priority = PRIORITY_IDLE;
threadbit_clear(&mrsw->splay.mask);
corelock_init(&mrsw->splay.cl);
memset(mrsw->rdrecursion, 0, sizeof (mrsw->rdrecursion));
#endif /* HAVE_PRIORITY_SCHEDULING */
#endif
corelock_init(&mrsw->cl);
}
@ -154,7 +159,7 @@ void mrsw_init(struct mrsw_lock *mrsw)
* access recursively. The current writer is ignored and gets access. */
void mrsw_read_acquire(struct mrsw_lock *mrsw)
{
struct thread_entry *current = thread_self_entry();
struct thread_entry *current = __running_self_entry();
if (current == mrsw->splay.blocker.thread IF_PRIO( && mrsw->count < 0 ))
return; /* Read request while holding write access; pass */
@ -178,7 +183,7 @@ void mrsw_read_acquire(struct mrsw_lock *mrsw)
int count = mrsw->count;
if (LIKELY(count >= 0 && !mrsw->queue))
if (LIKELY(count >= 0 && mrsw->queue.head == NULL))
{
/* Lock open to readers:
IFN_PRIO, mrsw->count tracks reader recursion */
@ -189,13 +194,10 @@ void mrsw_read_acquire(struct mrsw_lock *mrsw)
}
/* A writer owns it or is waiting; block... */
IF_COP( current->obj_cl = &mrsw->cl; )
IF_PRIO( current->blocker = &mrsw->splay.blocker; )
current->bqp = &mrsw->queue;
current->retval = 1; /* indicate multi-wake candidate */
disable_irq();
block_thread(current, TIMEOUT_BLOCK);
block_thread(current, TIMEOUT_BLOCK, &mrsw->queue, &mrsw->splay.blocker);
corelock_unlock(&mrsw->cl);
@ -207,7 +209,7 @@ void mrsw_read_acquire(struct mrsw_lock *mrsw)
* leave opens up access to writer threads. The current writer is ignored. */
void mrsw_read_release(struct mrsw_lock *mrsw)
{
struct thread_entry *current = thread_self_entry();
struct thread_entry *current = __running_self_entry();
if (current == mrsw->splay.blocker.thread IF_PRIO( && mrsw->count < 0 ))
return; /* Read release while holding write access; ignore */
@ -237,17 +239,18 @@ void mrsw_read_release(struct mrsw_lock *mrsw)
unsigned int result = THREAD_NONE;
const int oldlevel = disable_irq_save();
if (--count == 0 && mrsw->queue)
struct thread_entry *thread = WQ_THREAD_FIRST(&mrsw->queue);
if (--count == 0 && thread != NULL)
{
/* No readers remain and a writer is waiting */
mrsw->count = -1;
result = mrsw_reader_wakeup_writer(mrsw IF_PRIO(, slotnum));
result = mrsw_reader_wakeup_writer(mrsw, thread IF_PRIO(, slotnum));
}
else
{
/* Giving up readership; we may be the last, or not */
mrsw->count = count;
mrsw_reader_relinquish(mrsw, current, count, slotnum);
mrsw_reader_relinquish(mrsw, current, thread, count, slotnum);
}
restore_irq(oldlevel);
@ -265,7 +268,7 @@ void mrsw_read_release(struct mrsw_lock *mrsw)
* safely call recursively. */
void mrsw_write_acquire(struct mrsw_lock *mrsw)
{
struct thread_entry *current = thread_self_entry();
struct thread_entry *current = __running_self_entry();
if (current == mrsw->splay.blocker.thread)
{
@ -288,13 +291,10 @@ void mrsw_write_acquire(struct mrsw_lock *mrsw)
}
/* Readers present or a writer owns it - block... */
IF_COP( current->obj_cl = &mrsw->cl; )
IF_PRIO( current->blocker = &mrsw->splay.blocker; )
current->bqp = &mrsw->queue;
current->retval = 0; /* indicate single-wake candidate */
disable_irq();
block_thread(current, TIMEOUT_BLOCK);
block_thread(current, TIMEOUT_BLOCK, &mrsw->queue, &mrsw->splay.blocker);
corelock_unlock(&mrsw->cl);
@ -305,9 +305,9 @@ void mrsw_write_acquire(struct mrsw_lock *mrsw)
/* Release writer thread lock and open the lock to readers and writers */
void mrsw_write_release(struct mrsw_lock *mrsw)
{
KERNEL_ASSERT(thread_self_entry() == mrsw->splay.blocker.thread,
KERNEL_ASSERT(__running_self_entry() == mrsw->splay.blocker.thread,
"mrsw_write_release->wrong thread (%s != %s)\n",
thread_self_entry()->name,
__running_self_entry()->name,
mrsw->splay.blocker.thread->name);
int count = mrsw->count;
@ -323,15 +323,16 @@ void mrsw_write_release(struct mrsw_lock *mrsw)
corelock_lock(&mrsw->cl);
const int oldlevel = disable_irq_save();
if (mrsw->queue == NULL) /* 'count' becomes zero */
struct thread_entry *thread = WQ_THREAD_FIRST(&mrsw->queue);
if (thread == NULL) /* 'count' becomes zero */
{
mrsw->splay.blocker.thread = NULL;
mrsw->count = 0;
}
else if (mrsw->queue->retval == 0) /* 'count' stays -1 */
result = mrsw_writer_wakeup_writer(mrsw);
else if (thread->retval == 0) /* 'count' stays -1 */
result = mrsw_writer_wakeup_writer(mrsw, thread);
else /* 'count' becomes # of readers */
result = mrsw_writer_wakeup_readers(mrsw);
result = mrsw_writer_wakeup_readers(mrsw, thread);
restore_irq(oldlevel);
corelock_unlock(&mrsw->cl);

View file

@ -30,20 +30,19 @@
* the object is available to other threads */
void mutex_init(struct mutex *m)
{
corelock_init(&m->cl);
m->queue = NULL;
wait_queue_init(&m->queue);
m->recursion = 0;
m->blocker.thread = NULL;
blocker_init(&m->blocker);
#ifdef HAVE_PRIORITY_SCHEDULING
m->blocker.priority = PRIORITY_IDLE;
m->no_preempt = false;
#endif
corelock_init(&m->cl);
}
/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
struct thread_entry *current = thread_self_entry();
struct thread_entry *current = __running_self_entry();
if(current == m->blocker.thread)
{
@ -65,12 +64,8 @@ void mutex_lock(struct mutex *m)
}
/* block until the lock is open... */
IF_COP( current->obj_cl = &m->cl; )
IF_PRIO( current->blocker = &m->blocker; )
current->bqp = &m->queue;
disable_irq();
block_thread(current, TIMEOUT_BLOCK);
block_thread(current, TIMEOUT_BLOCK, &m->queue, &m->blocker);
corelock_unlock(&m->cl);
@ -82,10 +77,10 @@ void mutex_lock(struct mutex *m)
void mutex_unlock(struct mutex *m)
{
/* unlocker not being the owner is an unlocking violation */
KERNEL_ASSERT(m->blocker.thread == thread_self_entry(),
KERNEL_ASSERT(m->blocker.thread == __running_self_entry(),
"mutex_unlock->wrong thread (%s != %s)\n",
m->blocker.thread->name,
thread_self_entry()->name);
__running_self_entry()->name);
if(m->recursion > 0)
{
@ -98,7 +93,8 @@ void mutex_unlock(struct mutex *m)
corelock_lock(&m->cl);
/* transfer to next queued thread if any */
if(LIKELY(m->queue == NULL))
struct thread_entry *thread = WQ_THREAD_FIRST(&m->queue);
if(LIKELY(thread == NULL))
{
/* no threads waiting - open the lock */
m->blocker.thread = NULL;
@ -107,11 +103,7 @@ void mutex_unlock(struct mutex *m)
}
const int oldlevel = disable_irq_save();
/* Tranfer of owning thread is handled in the wakeup protocol
* if priorities are enabled otherwise just set it from the
* queue head. */
IFN_PRIO( m->blocker.thread = m->queue; )
unsigned int result = wakeup_thread(&m->queue, WAKEUP_TRANSFER);
unsigned int result = wakeup_thread(thread, WAKEUP_TRANSFER);
restore_irq(oldlevel);
corelock_unlock(&m->cl);

View file

@ -3,8 +3,8 @@
#include <errno.h>
#include <pthread.h>
#include "/usr/include/semaphore.h"
#include "thread-internal.h"
#include "kernel.h"
#include "thread.h"
#define NSEC_PER_SEC 1000000000L
static inline void timespec_add_ns(struct timespec *a, uint64_t ns)
@ -25,11 +25,6 @@ struct thread_init_data {
__thread struct thread_entry *_current;
struct thread_entry* thread_self_entry(void)
{
return _current;
}
unsigned int thread_self(void)
{
return (unsigned) pthread_self();
@ -70,12 +65,10 @@ static void *trampoline(void *arg)
if (data->start_frozen)
{
struct corelock thaw_lock;
struct thread_entry *queue = NULL;
corelock_init(&thaw_lock);
corelock_lock(&thaw_lock);
_current->lock = &thaw_lock;
_current->bqp = &queue;
sem_post(&data->init_sem);
block_thread_switch(_current, _current->lock);
_current->lock = NULL;
@ -97,7 +90,7 @@ void thread_thaw(unsigned int thread_id)
if (e->lock)
{
corelock_lock(e->lock);
wakeup_thread(e->bqp);
wakeup_thread(e);
corelock_unlock(e->lock);
}
/* else: no lock. must be running already */
@ -135,7 +128,7 @@ unsigned int create_thread(void (*function)(void),
data->entry = entry;
pthread_cond_init(&entry->cond, NULL);
entry->runnable = true;
entry->l = (struct thread_list) { NULL, NULL };
sem_init(&data->init_sem, 0, 0);
if (pthread_create(&retval, NULL, trampoline, data) < 0)
@ -153,58 +146,19 @@ unsigned int create_thread(void (*function)(void),
return retval;
}
static void add_to_list_l(struct thread_entry **list,
struct thread_entry *thread)
{
if (*list == NULL)
{
/* Insert into unoccupied list */
thread->l.next = thread;
thread->l.prev = thread;
*list = thread;
}
else
{
/* Insert last */
thread->l.next = *list;
thread->l.prev = (*list)->l.prev;
thread->l.prev->l.next = thread;
(*list)->l.prev = thread;
}
}
static void remove_from_list_l(struct thread_entry **list,
struct thread_entry *thread)
{
if (thread == thread->l.next)
{
/* The only item */
*list = NULL;
return;
}
if (thread == *list)
{
/* List becomes next item */
*list = thread->l.next;
}
/* Fix links to jump over the removed entry. */
thread->l.prev->l.next = thread->l.next;
thread->l.next->l.prev = thread->l.prev;
}
/* for block_thread(), _w_tmp() and wakeup_thread() t->lock must point
* to a corelock instance, and this corelock must be held by the caller */
void block_thread_switch(struct thread_entry *t, struct corelock *cl)
{
t->runnable = false;
add_to_list_l(t->bqp, t);
if (wait_queue_ptr(t))
wait_queue_register(t);
while(!t->runnable)
pthread_cond_wait(&t->cond, &cl->mutex);
}
void block_thread_switch_w_tmo(struct thread_entry *t, int timeout, struct corelock *cl)
void block_thread_switch_w_tmo(struct thread_entry *t, int timeout,
struct corelock *cl)
{
int err = 0;
struct timespec ts;
@ -213,30 +167,25 @@ void block_thread_switch_w_tmo(struct thread_entry *t, int timeout, struct corel
timespec_add_ns(&ts, timeout * (NSEC_PER_SEC/HZ));
t->runnable = false;
add_to_list_l(t->bqp, t);
wait_queue_register(t->wqp, t);
while(!t->runnable && !err)
err = pthread_cond_timedwait(&t->cond, &cl->mutex, &ts);
if (err == ETIMEDOUT)
{ /* the thread timed out and was not explicitely woken up.
* we need to do this now to mark it runnable again */
remove_from_list_l(t->bqp, t);
t->runnable = true;
if (t->wakeup_ext_cb)
t->wakeup_ext_cb(t);
/* NOTE: objects do their own removal upon timer expiration */
}
}
unsigned int wakeup_thread(struct thread_entry **list)
unsigned int wakeup_thread(struct thread_entry *t)
{
struct thread_entry *t = *list;
if (t)
{
remove_from_list_l(list, t);
if (t->wqp)
wait_queue_remove(t);
t->runnable = true;
pthread_cond_signal(&t->cond);
}
return THREAD_NONE;
return THREAD_OK;
}

View file

@ -51,7 +51,7 @@ static struct
* q->events[]: | XX | E1 | E2 | E3 | E4 | XX |
* q->send->senders[]: | NULL | T1 | T2 | NULL | T3 | NULL |
* \/ \/ \/
* q->send->list: >->|T0|<->|T1|<->|T2|<-------->|T3|<-<
* q->send->list: 0<-|T0|<->|T1|<->|T2|<-------->|T3|->0
* q->send->curr_sender: /\
*
* Thread has E0 in its own struct queue_event.
@ -65,20 +65,20 @@ static struct
* more efficent to reject the majority of cases that don't need this
* called.
*/
static void queue_release_sender(struct thread_entry * volatile * sender,
intptr_t retval)
static void queue_release_sender_inner(
struct thread_entry * volatile * sender, intptr_t retval)
{
struct thread_entry *thread = *sender;
*sender = NULL; /* Clear slot. */
#ifdef HAVE_WAKEUP_EXT_CB
thread->wakeup_ext_cb = NULL; /* Clear callback. */
#endif
thread->retval = retval; /* Assign thread-local return value. */
*thread->bqp = thread; /* Move blocking queue head to thread since
wakeup_thread wakes the first thread in
the list. */
wakeup_thread(thread->bqp, WAKEUP_RELEASE);
wakeup_thread(thread, WAKEUP_RELEASE);
}
static inline void queue_release_sender(
struct thread_entry * volatile * sender, intptr_t retval)
{
if(UNLIKELY(*sender))
queue_release_sender_inner(sender, retval);
}
/* Releases any waiting threads that are queued with queue_send -
@ -93,26 +93,11 @@ static void queue_release_all_senders(struct event_queue *q)
{
struct thread_entry **spp =
&q->send->senders[i & QUEUE_LENGTH_MASK];
if(*spp)
{
queue_release_sender(spp, 0);
}
}
}
}
#ifdef HAVE_WAKEUP_EXT_CB
/* Callback to do extra forced removal steps from sender list in addition
* to the normal blocking queue removal and priority dis-inherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
*((struct thread_entry **)thread->retval) = NULL;
thread->wakeup_ext_cb = NULL;
thread->retval = 0;
}
#endif /* HAVE_WAKEUP_EXT_CB */
/* Enables queue_send on the specified queue - caller allocates the extra
* data structure. Only queues which are taken to be owned by a thread should
* enable this however an official owner is not compulsory but must be
@ -132,11 +117,12 @@ void queue_enable_queue_send(struct event_queue *q,
if(send != NULL && q->send == NULL)
{
memset(send, 0, sizeof(*send));
wait_queue_init(&send->list);
#ifdef HAVE_PRIORITY_SCHEDULING
send->blocker.priority = PRIORITY_IDLE;
blocker_init(&send->blocker);
if(owner_id != 0)
{
send->blocker.thread = thread_id_entry(owner_id);
send->blocker.thread = __thread_id_entry(owner_id);
q->blocker_p = &send->blocker;
}
#endif
@ -154,24 +140,14 @@ static inline void queue_do_unblock_sender(struct queue_sender_list *send,
unsigned int i)
{
if(send)
{
struct thread_entry **spp = &send->senders[i];
if(UNLIKELY(*spp))
{
queue_release_sender(spp, 0);
}
}
queue_release_sender(&send->senders[i], 0);
}
/* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send)
{
if(send && send->curr_sender)
{
/* auto-reply */
if(send)
queue_release_sender(&send->curr_sender, 0);
}
}
/* Moves waiting thread's refrence from the senders array to the
@ -191,7 +167,6 @@ static inline void queue_do_fetch_sender(struct queue_sender_list *send,
/* Move thread reference from array to the next thread
that queue_reply will release */
send->curr_sender = *spp;
(*spp)->retval = (intptr_t)spp;
*spp = NULL;
}
/* else message was posted asynchronously with queue_post */
@ -205,18 +180,28 @@ static inline void queue_do_fetch_sender(struct queue_sender_list *send,
#define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
static void queue_wake_waiter_inner(struct thread_entry *thread)
{
wakeup_thread(thread, WAKEUP_DEFAULT);
}
static inline void queue_wake_waiter(struct event_queue *q)
{
struct thread_entry *thread = WQ_THREAD_FIRST(&q->queue);
if(thread != NULL)
queue_wake_waiter_inner(thread);
}
/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
int oldlevel = disable_irq_save();
if(register_queue)
{
corelock_lock(&all_queues.cl);
}
corelock_init(&q->cl);
q->queue = NULL;
wait_queue_init(&q->queue);
/* What garbage is in write is irrelevant because of the masking design-
* any other functions the empty the queue do this as well so that
* queue_count and queue_empty return sane values in the case of a
@ -261,7 +246,7 @@ void queue_delete(struct event_queue *q)
corelock_unlock(&all_queues.cl);
/* Release thread(s) waiting on queue head */
thread_queue_wake(&q->queue);
wait_queue_wake(&q->queue);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
if(q->send)
@ -293,7 +278,7 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
#ifdef HAVE_PRIORITY_SCHEDULING
KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
QUEUE_GET_THREAD(q) == thread_self_entry(),
QUEUE_GET_THREAD(q) == __running_self_entry(),
"queue_wait->wrong thread\n");
#endif
@ -307,18 +292,12 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
while(1)
{
struct thread_entry *current;
rd = q->read;
if (rd != q->write) /* A waking message could disappear */
break;
current = thread_self_entry();
IF_COP( current->obj_cl = &q->cl; )
current->bqp = &q->queue;
block_thread(current, TIMEOUT_BLOCK);
struct thread_entry *current = __running_self_entry();
block_thread(current, TIMEOUT_BLOCK, &q->queue, NULL);
corelock_unlock(&q->cl);
switch_thread();
@ -349,16 +328,9 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
int oldlevel;
unsigned int rd, wr;
/* this function works only with a positive number (or zero) of ticks */
if (ticks == TIMEOUT_BLOCK)
{
queue_wait(q, ev);
return;
}
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
QUEUE_GET_THREAD(q) == thread_self_entry(),
QUEUE_GET_THREAD(q) == __running_self_entry(),
"queue_wait_w_tmo->wrong thread\n");
#endif
@ -372,14 +344,10 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
rd = q->read;
wr = q->write;
if (rd == wr && ticks > 0)
if (rd == wr && ticks != 0)
{
struct thread_entry *current = thread_self_entry();
IF_COP( current->obj_cl = &q->cl; )
current->bqp = &q->queue;
block_thread(current, ticks);
struct thread_entry *current = __running_self_entry();
block_thread(current, ticks, &q->queue, NULL);
corelock_unlock(&q->cl);
switch_thread();
@ -389,6 +357,8 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
rd = q->read;
wr = q->write;
wait_queue_try_remove(current);
}
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
@ -436,7 +406,7 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
queue_do_unblock_sender(q->send, wr);
/* Wakeup a waiting thread if any */
wakeup_thread(&q->queue, WAKEUP_DEFAULT);
queue_wake_waiter(q);
corelock_unlock(&q->cl);
restore_irq(oldlevel);
@ -465,28 +435,17 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
struct queue_sender_list *send = q->send;
struct thread_entry **spp = &send->senders[wr];
struct thread_entry *current = thread_self_entry();
struct thread_entry *current = __running_self_entry();
if(UNLIKELY(*spp))
{
/* overflow protect - unblock any thread waiting at this index */
queue_release_sender(spp, 0);
}
/* Wakeup a waiting thread if any */
wakeup_thread(&q->queue, WAKEUP_DEFAULT);
queue_wake_waiter(q);
/* Save thread in slot, add to list and wait for reply */
*spp = current;
IF_COP( current->obj_cl = &q->cl; )
IF_PRIO( current->blocker = q->blocker_p; )
#ifdef HAVE_WAKEUP_EXT_CB
current->wakeup_ext_cb = queue_remove_sender_thread_cb;
#endif
current->retval = (intptr_t)spp;
current->bqp = &send->list;
block_thread(current, TIMEOUT_BLOCK);
block_thread(current, TIMEOUT_BLOCK, &send->list, q->blocker_p);
corelock_unlock(&q->cl);
switch_thread();
@ -495,7 +454,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
}
/* Function as queue_post if sending is not enabled */
wakeup_thread(&q->queue, WAKEUP_DEFAULT);
queue_wake_waiter(q);
corelock_unlock(&q->cl);
restore_irq(oldlevel);
@ -530,16 +489,12 @@ void queue_reply(struct event_queue *q, intptr_t retval)
{
if(q->send && q->send->curr_sender)
{
struct queue_sender_list *sender;
int oldlevel = disable_irq_save();
corelock_lock(&q->cl);
sender = q->send;
/* Double-check locking */
if(LIKELY(sender && sender->curr_sender))
queue_release_sender(&sender->curr_sender, retval);
struct queue_sender_list *send = q->send;
if(send)
queue_release_sender(&send->curr_sender, retval);
corelock_unlock(&q->cl);
restore_irq(oldlevel);

View file

@ -24,6 +24,7 @@
/****************************************************************************
* Simple semaphore functions ;)
****************************************************************************/
/* Initialize the semaphore object.
* max = maximum up count the semaphore may assume (max >= 1)
* start = initial count of semaphore (0 <= count <= max) */
@ -31,7 +32,7 @@ void semaphore_init(struct semaphore *s, int max, int start)
{
KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
"semaphore_init->inv arg\n");
s->queue = NULL;
wait_queue_init(&s->queue);
s->max = max;
s->count = start;
corelock_init(&s->cl);
@ -42,44 +43,49 @@ void semaphore_init(struct semaphore *s, int max, int start)
* safely be used in an ISR. */
int semaphore_wait(struct semaphore *s, int timeout)
{
int ret;
int oldlevel;
int count;
int ret = OBJ_WAIT_TIMEDOUT;
oldlevel = disable_irq_save();
int oldlevel = disable_irq_save();
corelock_lock(&s->cl);
count = s->count;
int count = s->count;
if(LIKELY(count > 0))
{
/* count is not zero; down it */
s->count = count - 1;
ret = OBJ_WAIT_SUCCEEDED;
}
else if(timeout == 0)
{
/* just polling it */
ret = OBJ_WAIT_TIMEDOUT;
}
else
else if(timeout != 0)
{
/* too many waits - block until count is upped... */
struct thread_entry * current = thread_self_entry();
IF_COP( current->obj_cl = &s->cl; )
current->bqp = &s->queue;
/* return value will be OBJ_WAIT_SUCCEEDED after wait if wake was
* explicit in semaphore_release */
current->retval = OBJ_WAIT_TIMEDOUT;
struct thread_entry *current = __running_self_entry();
block_thread(current, timeout);
block_thread(current, timeout, &s->queue, NULL);
corelock_unlock(&s->cl);
/* ...and turn control over to next thread */
switch_thread();
return current->retval;
/* if explicit wake indicated; do no more */
if(LIKELY(!wait_queue_ptr(current)))
return OBJ_WAIT_SUCCEEDED;
disable_irq();
corelock_lock(&s->cl);
/* see if anyone got us after the expired wait */
if(wait_queue_try_remove(current))
{
count = s->count;
if(count > 0)
{
/* down it lately */
s->count = count - 1;
ret = OBJ_WAIT_SUCCEEDED;
}
}
}
/* else just polling it */
corelock_unlock(&s->cl);
restore_irq(oldlevel);
@ -93,18 +99,17 @@ int semaphore_wait(struct semaphore *s, int timeout)
void semaphore_release(struct semaphore *s)
{
unsigned int result = THREAD_NONE;
int oldlevel;
oldlevel = disable_irq_save();
int oldlevel = disable_irq_save();
corelock_lock(&s->cl);
if(LIKELY(s->queue != NULL))
struct thread_entry *thread = WQ_THREAD_FIRST(&s->queue);
if(LIKELY(thread != NULL))
{
/* a thread was queued - wake it up and keep count at 0 */
KERNEL_ASSERT(s->count == 0,
"semaphore_release->threads queued but count=%d!\n", s->count);
s->queue->retval = OBJ_WAIT_SUCCEEDED; /* indicate explicit wake */
result = wakeup_thread(&s->queue, WAKEUP_DEFAULT);
result = wakeup_thread(thread, WAKEUP_DEFAULT);
}
else
{

View file

@ -18,39 +18,222 @@
* KIND, either express or implied.
*
****************************************************************************/
#include "thread-internal.h"
#include "kernel-internal.h"
#include "system.h"
/* Unless otherwise defined, do nothing */
#ifndef YIELD_KERNEL_HOOK
#define YIELD_KERNEL_HOOK() false
#endif
#ifndef SLEEP_KERNEL_HOOK
#define SLEEP_KERNEL_HOOK(ticks) false
#endif
const char __main_thread_name_str[] = "main";
/* Array indexing is more efficient in inlines if the elements are a native
word size (100s of bytes fewer instructions) */
#if NUM_CORES > 1
static struct core_entry __core_entries[NUM_CORES] IBSS_ATTR;
struct core_entry *__cores[NUM_CORES] IBSS_ATTR;
#else
struct core_entry __cores[NUM_CORES] IBSS_ATTR;
#endif
static struct thread_entry __thread_entries[MAXTHREADS] IBSS_ATTR;
struct thread_entry *__threads[MAXTHREADS] IBSS_ATTR;
/** Internal functions **/
/*---------------------------------------------------------------------------
* Find an empty thread slot or NULL if none found. The slot returned will
* be locked on multicore.
*---------------------------------------------------------------------------
*/
static struct threadalloc
{
threadbit_t avail;
#if NUM_CORES > 1
struct corelock cl;
#endif
} threadalloc SHAREDBSS_ATTR;
/*---------------------------------------------------------------------------
* Initialize the thread allocator
*---------------------------------------------------------------------------
*/
void thread_alloc_init(void)
{
corelock_init(&threadalloc.cl);
for (unsigned int core = 0; core < NUM_CORES; core++)
{
#if NUM_CORES > 1
struct core_entry *c = &__core_entries[core];
__cores[core] = c;
#else
struct core_entry *c = &__cores[core];
#endif
rtr_queue_init(&c->rtr);
corelock_init(&c->rtr_cl);
tmo_queue_init(&c->tmo);
c->next_tmo_check = current_tick; /* Something not in the past */
}
for (unsigned int slotnum = 0; slotnum < MAXTHREADS; slotnum++)
{
struct thread_entry *t = &__thread_entries[slotnum];
__threads[slotnum] = t;
corelock_init(&t->waiter_cl);
corelock_init(&t->slot_cl);
t->id = THREAD_ID_INIT(slotnum);
threadbit_set_bit(&threadalloc.avail, slotnum);
}
}
/*---------------------------------------------------------------------------
* Allocate a thread alot
*---------------------------------------------------------------------------
*/
struct thread_entry * thread_alloc(void)
{
struct thread_entry *thread = NULL;
corelock_lock(&threadalloc.cl);
unsigned int slotnum = threadbit_ffs(&threadalloc.avail);
if (slotnum < MAXTHREADS)
{
threadbit_clear_bit(&threadalloc.avail, slotnum);
thread = __threads[slotnum];
}
corelock_unlock(&threadalloc.cl);
return thread;
}
/*---------------------------------------------------------------------------
* Free the thread slot of 'thread'
*---------------------------------------------------------------------------
*/
void thread_free(struct thread_entry *thread)
{
corelock_lock(&threadalloc.cl);
threadbit_set_bit(&threadalloc.avail, THREAD_ID_SLOT(thread->id));
corelock_unlock(&threadalloc.cl);
}
/*---------------------------------------------------------------------------
* Assign the thread slot a new ID. Version is 0x00000100..0xffffff00.
*---------------------------------------------------------------------------
*/
void new_thread_id(struct thread_entry *thread)
{
uint32_t id = thread->id + (1u << THREAD_ID_VERSION_SHIFT);
/* If wrapped to 0, make it 1 */
if ((id & THREAD_ID_VERSION_MASK) == 0)
id |= (1u << THREAD_ID_VERSION_SHIFT);
thread->id = id;
}
/*---------------------------------------------------------------------------
* Wakeup an entire queue of threads - returns bitwise-or of return bitmask
* from each operation or THREAD_NONE of nothing was awakened. Object owning
* the queue must be locked first.
*
* INTERNAL: Intended for use by kernel objects and not for programs.
* from each operation or THREAD_NONE of nothing was awakened.
*---------------------------------------------------------------------------
*/
unsigned int thread_queue_wake(struct thread_entry **list)
unsigned int wait_queue_wake(struct __wait_queue *wqp)
{
unsigned result = THREAD_NONE;
struct thread_entry *thread;
for (;;)
{
unsigned int rc = wakeup_thread(list, WAKEUP_DEFAULT);
if (rc == THREAD_NONE)
break; /* No more threads */
result |= rc;
}
while ((thread = WQ_THREAD_FIRST(wqp)))
result |= wakeup_thread(thread, WAKEUP_DEFAULT);
return result;
}
/** Debug screen stuff **/
/** Public functions **/
#ifdef RB_PROFILE
void profile_thread(void)
{
profstart(THREAD_ID_SLOT(__running_self_entry()->id));
}
#endif
/*---------------------------------------------------------------------------
* returns the stack space used in bytes
* Return the thread id of the calling thread
* --------------------------------------------------------------------------
*/
unsigned int thread_self(void)
{
return __running_self_entry()->id;
}
/*---------------------------------------------------------------------------
* Suspends a thread's execution for at least the specified number of ticks.
*
* May result in CPU core entering wait-for-interrupt mode if no other thread
* may be scheduled.
*
* NOTE: sleep(0) sleeps until the end of the current tick
* sleep(n) that doesn't result in rescheduling:
* n <= ticks suspended < n + 1
* n to n+1 is a lower bound. Other factors may affect the actual time
* a thread is suspended before it runs again.
*---------------------------------------------------------------------------
*/
unsigned sleep(unsigned ticks)
{
/* In certain situations, certain bootloaders in particular, a normal
* threading call is inappropriate. */
if (SLEEP_KERNEL_HOOK(ticks))
return 0; /* Handled */
disable_irq();
sleep_thread(ticks);
switch_thread();
return 0;
}
/*---------------------------------------------------------------------------
* Elects another thread to run or, if no other thread may be made ready to
* run, immediately returns control back to the calling thread.
*---------------------------------------------------------------------------
*/
void yield(void)
{
/* In certain situations, certain bootloaders in particular, a normal
* threading call is inappropriate. */
if (YIELD_KERNEL_HOOK())
return; /* Handled */
switch_thread();
}
/** Debug screen stuff **/
void format_thread_name(char *buf, size_t bufsize,
const struct thread_entry *thread)
{
const char *name = thread->name;
if (!name)
name = "";
const char *fmt = *name ? "%s" : "%s%08lX";
snprintf(buf, bufsize, fmt, name, thread->id);
}
#ifndef HAVE_SDL_THREADS
/*---------------------------------------------------------------------------
* Returns the maximum percentage of the stack ever used during runtime.
*---------------------------------------------------------------------------
*/
static unsigned int stack_usage(uintptr_t *stackptr, size_t stack_size)
@ -69,13 +252,9 @@ static unsigned int stack_usage(uintptr_t *stackptr, size_t stack_size)
return usage;
}
#endif /* HAVE_SDL_THREADS */
#if NUM_CORES > 1
/*---------------------------------------------------------------------------
* Returns the maximum percentage of the core's idle stack ever used during
* runtime.
*---------------------------------------------------------------------------
*/
int core_get_debug_info(unsigned int core, struct core_debug_info *infop)
{
extern uintptr_t * const idle_stacks[NUM_CORES];
@ -105,29 +284,29 @@ int thread_get_debug_info(unsigned int thread_id,
if (!infop)
return -1;
unsigned int slot = THREAD_ID_SLOT(thread_id);
if (slot >= MAXTHREADS)
unsigned int slotnum = THREAD_ID_SLOT(thread_id);
if (slotnum >= MAXTHREADS)
return -1;
extern struct thread_entry threads[MAXTHREADS];
struct thread_entry *thread = &threads[slot];
struct thread_entry *thread = __thread_slot_entry(slotnum);
int oldlevel = disable_irq_save();
LOCK_THREAD(thread);
corelock_lock(&threadalloc.cl);
corelock_lock(&thread->slot_cl);
unsigned int state = thread->state;
if (state != STATE_KILLED)
{
const char *name = thread->name;
if (!name)
name = "";
int ret = 0;
if (threadbit_test_bit(&threadalloc.avail, slotnum) == 0)
{
bool cpu_boost = false;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
cpu_boost = thread->cpu_boost;
#endif
#ifndef HAVE_SDL_THREADS
infop->stack_usage = stack_usage(thread->stack, thread->stack_size);
#endif
#if NUM_CORES > 1
infop->core = thread->core;
#endif
@ -140,13 +319,13 @@ int thread_get_debug_info(unsigned int thread_id,
cpu_boost ? '+' : (state == STATE_RUNNING ? '*' : ' '),
status_chars[state]);
const char *fmt = *name ? "%s" : "%s%08lX";
snprintf(infop->name, sizeof (infop->name), fmt, name,
thread->id);
format_thread_name(infop->name, sizeof (infop->name), thread);
ret = 1;
}
UNLOCK_THREAD(thread);
corelock_unlock(&thread->slot_cl);
corelock_unlock(&threadalloc.cl);
restore_irq(oldlevel);
return state == STATE_KILLED ? 0 : 1;
return ret;
}

View file

@ -78,30 +78,11 @@ struct priority_distribution
#endif /* HAVE_PRIORITY_SCHEDULING */
#ifdef HAVE_CORELOCK_OBJECT
/* Operations to be performed just before stopping a thread and starting
a new one if specified before calling switch_thread */
enum
{
TBOP_CLEAR = 0, /* No operation to do */
TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
TBOP_SWITCH_CORE, /* Call the core switch preparation routine */
};
#define __rtr_queue lldc_head
#define __rtr_queue_node lldc_node
struct thread_blk_ops
{
struct corelock *cl_p; /* pointer to corelock */
unsigned char flags; /* TBOP_* flags */
};
#endif /* NUM_CORES > 1 */
/* Link information for lists thread is in */
struct thread_entry; /* forward */
struct thread_list
{
struct thread_entry *prev; /* Previous thread in a list */
struct thread_entry *next; /* Next thread in a list */
};
#define __tmo_queue ll_head
#define __tmo_queue_node ll_node
/* Information kept in each thread slot
* members are arranged according to size - largest first - in order
@ -111,44 +92,24 @@ struct thread_entry
{
struct regs context; /* Register context at switch -
_must_ be first member */
#ifndef HAVE_SDL_THREADS
uintptr_t *stack; /* Pointer to top of stack */
#endif
const char *name; /* Thread name */
long tmo_tick; /* Tick when thread should be woken from
timeout -
states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
struct thread_list l; /* Links for blocked/waking/running -
circular linkage in both directions */
struct thread_list tmo; /* Links for timeout list -
Circular in reverse direction, NULL-terminated in
forward direction -
states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
struct thread_entry **bqp; /* Pointer to list variable in kernel
object where thread is blocked - used
for implicit unblock and explicit wake
states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
#ifdef HAVE_CORELOCK_OBJECT
struct corelock *obj_cl; /* Object corelock where thead is blocked -
states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
long tmo_tick; /* Tick when thread should be woken */
struct __rtr_queue_node rtr; /* Node for run queue */
struct __tmo_queue_node tmo; /* Links for timeout list */
struct __wait_queue_node wq; /* Node for wait queue */
struct __wait_queue *volatile wqp; /* Pointer to registered wait queue */
#if NUM_CORES > 1
struct corelock waiter_cl; /* Corelock for thread_wait */
struct corelock slot_cl; /* Corelock to lock thread slot */
unsigned char core; /* The core to which thread belongs */
#endif
struct thread_entry *queue; /* List of threads waiting for thread to be
struct __wait_queue queue; /* List of threads waiting for thread to be
removed */
#ifdef HAVE_WAKEUP_EXT_CB
void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
performs special steps needed when being
forced off of an object's wait queue that
go beyond the standard wait queue removal
and priority disinheritance */
/* Only enabled when using queue_send for now */
#endif
#if defined(HAVE_SEMAPHORE_OBJECTS) || \
defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || \
NUM_CORES > 1
volatile intptr_t retval; /* Return value from a blocked operation/
misc. use */
#endif
uint32_t id; /* Current slot id */
int __errno; /* Thread error number (errno tls) */
#ifdef HAVE_PRIORITY_SCHEDULING
@ -166,7 +127,9 @@ struct thread_entry
unsigned char priority; /* Scheduled priority (higher of base or
all threads blocked by this one) */
#endif
#ifndef HAVE_SDL_THREADS
unsigned short stack_size; /* Size of stack in bytes */
#endif
unsigned char state; /* Thread slot state (STATE_*) */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
unsigned char cpu_boost; /* CPU frequency boost flag */
@ -176,29 +139,6 @@ struct thread_entry
#endif
};
/* Information kept for each core
* Members are arranged for the same reason as in thread_entry
*/
struct core_entry
{
/* "Active" lists - core is constantly active on these and are never
locked and interrupts do not access them */
struct thread_entry *running; /* threads that are running (RTR) */
struct thread_entry *timeout; /* threads that are on a timeout before
running again */
struct thread_entry *block_task; /* Task going off running list */
#ifdef HAVE_PRIORITY_SCHEDULING
struct priority_distribution rtr; /* Summary of running and ready-to-run
threads */
#endif
long next_tmo_check; /* soonest time to check tmo threads */
#ifdef HAVE_CORELOCK_OBJECT
struct thread_blk_ops blk_ops; /* operations to perform when
blocking a thread */
struct corelock rtr_cl; /* Lock for rtr list */
#endif /* NUM_CORES */
};
/* Thread ID, 32 bits = |VVVVVVVV|VVVVVVVV|VVVVVVVV|SSSSSSSS| */
#define THREAD_ID_VERSION_SHIFT 8
#define THREAD_ID_VERSION_MASK 0xffffff00
@ -206,38 +146,128 @@ struct core_entry
#define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n))
#define THREAD_ID_SLOT(id) ((id) & THREAD_ID_SLOT_MASK)
/* Thread locking */
#if NUM_CORES > 1
#define LOCK_THREAD(thread) \
({ corelock_lock(&(thread)->slot_cl); })
#define TRY_LOCK_THREAD(thread) \
({ corelock_try_lock(&(thread)->slot_cl); })
#define UNLOCK_THREAD(thread) \
({ corelock_unlock(&(thread)->slot_cl); })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
({ unsigned int _core = (thread)->core; \
cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
#else /* NUM_CORES == 1*/
#define LOCK_THREAD(thread) \
({ (void)(thread); })
#define TRY_LOCK_THREAD(thread) \
({ (void)(thread); })
#define UNLOCK_THREAD(thread) \
({ (void)(thread); })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
({ (void)(thread); })
#endif /* NUM_CORES */
#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
/* Information kept for each core
* Members are arranged for the same reason as in thread_entry
*/
struct core_entry
{
/* "Active" lists - core is constantly active on these and are never
locked and interrupts do not access them */
struct __rtr_queue rtr; /* Threads that are runnable */
struct __tmo_queue tmo; /* Threads on a bounded wait */
struct thread_entry *running; /* Currently running thread */
#ifdef HAVE_PRIORITY_SCHEDULING
struct priority_distribution rtr_dist; /* Summary of runnables */
#endif
long next_tmo_check; /* Next due timeout check */
#if NUM_CORES > 1
struct corelock rtr_cl; /* Lock for rtr list */
#endif /* NUM_CORES */
};
/* Hide a few scheduler details from itself to make allocation more flexible */
#define __main_thread_name \
({ extern const char __main_thread_name_str[]; \
__main_thread_name_str; })
static FORCE_INLINE
void * __get_main_stack(size_t *stacksize)
{
#if (CONFIG_PLATFORM & PLATFORM_NATIVE)
extern uintptr_t stackbegin[];
extern uintptr_t stackend[];
#else
extern uintptr_t *stackbegin;
extern uintptr_t *stackend;
#endif
*stacksize = (uintptr_t)stackend - (uintptr_t)stackbegin;
return stackbegin;
}
void format_thread_name(char *buf, size_t bufsize,
const struct thread_entry *thread);
static FORCE_INLINE
struct core_entry * __core_id_entry(unsigned int core)
{
#if NUM_CORES > 1
extern struct core_entry * __cores[NUM_CORES];
return __cores[core];
#else
extern struct core_entry __cores[NUM_CORES];
return &__cores[core];
#endif
}
#define __running_self_entry() \
__core_id_entry(CURRENT_CORE)->running
static FORCE_INLINE
struct thread_entry * __thread_slot_entry(unsigned int slotnum)
{
extern struct thread_entry * __threads[MAXTHREADS];
return __threads[slotnum];
}
#define __thread_id_entry(id) \
__thread_slot_entry(THREAD_ID_SLOT(id))
#define THREAD_FROM(p, member) \
container_of(p, struct thread_entry, member)
#define RTR_EMPTY(rtrp) \
({ (rtrp)->head == NULL; })
#define RTR_THREAD_FIRST(rtrp) \
({ THREAD_FROM((rtrp)->head, rtr); })
#define RTR_THREAD_NEXT(thread) \
({ THREAD_FROM((thread)->rtr.next, rtr); })
#define TMO_THREAD_FIRST(tmop) \
({ struct __tmo_queue *__tmop = (tmop); \
__tmop->head ? THREAD_FROM(__tmop->head, tmo) : NULL; })
#define TMO_THREAD_NEXT(thread) \
({ struct __tmo_queue_node *__next = (thread)->tmo.next; \
__next ? THREAD_FROM(__next, tmo) : NULL; })
#define WQ_THREAD_FIRST(wqp) \
({ struct __wait_queue *__wqp = (wqp); \
__wqp->head ? THREAD_FROM(__wqp->head, wq) : NULL; })
#define WQ_THREAD_NEXT(thread) \
({ struct __wait_queue_node *__next = (thread)->wq.next; \
__next ? THREAD_FROM(__next, wq) : NULL; })
void thread_alloc_init(void) INIT_ATTR;
struct thread_entry * thread_alloc(void);
void thread_free(struct thread_entry *thread);
void new_thread_id(struct thread_entry *thread);
/* Switch to next runnable thread */
void switch_thread(void);
/* Blocks a thread for at least the specified number of ticks (0 = wait until
* next tick) */
void sleep_thread(int ticks);
/* Blocks the current thread on a thread queue (< 0 == infinite) */
void block_thread(struct thread_entry *current, int timeout);
void block_thread_(struct thread_entry *current, int timeout);
#ifdef HAVE_PRIORITY_SCHEDULING
#define block_thread(thread, timeout, __wqp, bl) \
({ struct thread_entry *__t = (thread); \
__t->wqp = (__wqp); \
if (!__builtin_constant_p(bl) || (bl)) \
__t->blocker = (bl); \
block_thread_(__t, (timeout)); })
#else
#define block_thread(thread, timeout, __wqp, bl...) \
({ struct thread_entry *__t = (thread); \
__t->wqp = (__wqp); \
block_thread_(__t, (timeout)); })
#endif
/* Return bit flags for thread wakeup */
#define THREAD_NONE 0x0 /* No thread woken up (exclusive) */
@ -246,7 +276,7 @@ void block_thread(struct thread_entry *current, int timeout);
higher priority than current were woken) */
/* A convenience function for waking an entire queue of threads. */
unsigned int thread_queue_wake(struct thread_entry **list);
unsigned int wait_queue_wake(struct __wait_queue *wqp);
/* Wakeup a thread at the head of a list */
enum wakeup_thread_protocol
@ -257,36 +287,139 @@ enum wakeup_thread_protocol
WAKEUP_TRANSFER_MULTI,
};
unsigned int wakeup_thread_(struct thread_entry **list
unsigned int wakeup_thread_(struct thread_entry *thread
IF_PRIO(, enum wakeup_thread_protocol proto));
#ifdef HAVE_PRIORITY_SCHEDULING
#define wakeup_thread(list, proto) \
wakeup_thread_((list), (proto))
#else /* !HAVE_PRIORITY_SCHEDULING */
#define wakeup_thread(list, proto...) \
wakeup_thread_((list));
#endif /* HAVE_PRIORITY_SCHEDULING */
#ifdef HAVE_IO_PRIORITY
void thread_set_io_priority(unsigned int thread_id, int io_priority);
int thread_get_io_priority(unsigned int thread_id);
#endif /* HAVE_IO_PRIORITY */
#if NUM_CORES > 1
unsigned int switch_core(unsigned int new_core);
#define wakeup_thread(thread, proto) \
wakeup_thread_((thread), (proto))
#else
#define wakeup_thread(thread, proto...) \
wakeup_thread_((thread));
#endif
/* Return the id of the calling thread. */
unsigned int thread_self(void);
/* Return the thread_entry for the calling thread */
struct thread_entry* thread_self_entry(void);
/* Return thread entry from id */
struct thread_entry *thread_id_entry(unsigned int thread_id);
#ifdef RB_PROFILE
void profile_thread(void);
#endif
static inline void rtr_queue_init(struct __rtr_queue *rtrp)
{
lldc_init(rtrp);
}
static inline void rtr_queue_make_first(struct __rtr_queue *rtrp,
struct thread_entry *thread)
{
rtrp->head = &thread->rtr;
}
static inline void rtr_queue_add(struct __rtr_queue *rtrp,
struct thread_entry *thread)
{
lldc_insert_last(rtrp, &thread->rtr);
}
static inline void rtr_queue_remove(struct __rtr_queue *rtrp,
struct thread_entry *thread)
{
lldc_remove(rtrp, &thread->rtr);
}
#define TMO_NOT_QUEUED (NULL + 1)
static inline bool tmo_is_queued(struct thread_entry *thread)
{
return thread->tmo.next != TMO_NOT_QUEUED;
}
static inline void tmo_set_dequeued(struct thread_entry *thread)
{
thread->tmo.next = TMO_NOT_QUEUED;
}
static inline void tmo_queue_init(struct __tmo_queue *tmop)
{
ll_init(tmop);
}
static inline void tmo_queue_expire(struct __tmo_queue *tmop,
struct thread_entry *prev,
struct thread_entry *thread)
{
ll_remove_next(tmop, prev ? &prev->tmo : NULL);
tmo_set_dequeued(thread);
}
static inline void tmo_queue_remove(struct __tmo_queue *tmop,
struct thread_entry *thread)
{
if (tmo_is_queued(thread))
{
ll_remove(tmop, &thread->tmo);
tmo_set_dequeued(thread);
}
}
static inline void tmo_queue_register(struct __tmo_queue *tmop,
struct thread_entry *thread)
{
if (!tmo_is_queued(thread))
ll_insert_last(tmop, &thread->tmo);
}
static inline void wait_queue_init(struct __wait_queue *wqp)
{
lld_init(wqp);
}
static inline void wait_queue_register(struct thread_entry *thread)
{
lld_insert_last(thread->wqp, &thread->wq);
}
static inline struct __wait_queue *
wait_queue_ptr(struct thread_entry *thread)
{
return thread->wqp;
}
static inline struct __wait_queue *
wait_queue_remove(struct thread_entry *thread)
{
struct __wait_queue *wqp = thread->wqp;
thread->wqp = NULL;
lld_remove(wqp, &thread->wq);
return wqp;
}
static inline struct __wait_queue *
wait_queue_try_remove(struct thread_entry *thread)
{
struct __wait_queue *wqp = thread->wqp;
if (wqp)
{
thread->wqp = NULL;
lld_remove(wqp, &thread->wq);
}
return wqp;
}
static inline void blocker_init(struct blocker *bl)
{
bl->thread = NULL;
#ifdef HAVE_PRIORITY_SCHEDULING
bl->priority = PRIORITY_IDLE;
#endif
}
static inline void blocker_splay_init(struct blocker_splay *blsplay)
{
blocker_init(&blsplay->blocker);
#ifdef HAVE_PRIORITY_SCHEDULING
threadbit_clear(&blsplay->mask);
#endif
corelock_init(&blsplay->cl);
}
#endif /* THREAD_INTERNAL_H */

File diff suppressed because it is too large Load diff

View file

@ -1,5 +1,5 @@
#include "../thread-internal.h"
int * __errno(void)
{
return &thread_self_entry()->__errno;
return &__running_self_entry()->__errno;
}

View file

@ -125,6 +125,7 @@ SECTIONS
.idle_stacks (NOLOAD) :
{
*(.idle_stacks)
. = ALIGN(8);
#if NUM_CORES > 1
cpu_idlestackbegin = .;
. += IDLE_STACK_SIZE;

View file

@ -82,46 +82,22 @@ static void INIT_ATTR core_thread_init(unsigned int core)
* to use a stack from an unloaded module until another thread runs on it.
*---------------------------------------------------------------------------
*/
static inline void NORETURN_ATTR __attribute__((always_inline))
thread_final_exit(struct thread_entry *current)
static void __attribute__((naked, noinline, noreturn))
thread_exit_finalize(unsigned int core, struct thread_entry *current)
{
asm volatile (
"cmp %1, #0 \n" /* CPU? */
"ldr r2, =idle_stacks \n" /* switch to idle stack */
"ldr sp, [r2, r0, lsl #2] \n"
"add sp, sp, %0*4 \n"
"cmp r0, #0 \n" /* CPU? */
"mov r4, r1 \n"
"blne commit_dcache \n"
"mov r0, %0 \n" /* copy thread parameter */
"mov sp, %2 \n" /* switch to idle stack */
"bl thread_final_exit_do \n" /* finish removal */
: : "r"(current),
"r"(current->core),
"r"(&idle_stacks[current->core][IDLE_STACK_WORDS])
: "r0", "r1", "r2", "r3", "ip", "lr"); /* Because of flush call,
force inputs out
of scratch regs */
while (1);
}
"mov r0, r4 \n"
"b thread_exit_final \n"
: : "i"(IDLE_STACK_WORDS));
/*---------------------------------------------------------------------------
* Perform core switch steps that need to take place inside switch_thread.
*
* These steps must take place while before changing the processor and after
* having entered switch_thread since switch_thread may not do a normal return
* because the stack being used for anything the compiler saved will not belong
* to the thread's destination core and it may have been recycled for other
* purposes by the time a normal context load has taken place. switch_thread
* will also clobber anything stashed in the thread's context or stored in the
* nonvolatile registers if it is saved there before the call since the
* compiler's order of operations cannot be known for certain.
*/
static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
{
/* Flush our data to ram */
commit_dcache();
/* Stash thread in r4 slot */
thread->context.r[0] = (uint32_t)thread;
/* Stash restart address in r5 slot */
thread->context.r[1] = thread->context.start;
/* Save sp in context.sp while still running on old core */
thread->context.sp = idle_stacks[core][IDLE_STACK_WORDS-1];
while (1);
(void)core; (void)current;
}
/*---------------------------------------------------------------------------
@ -136,31 +112,32 @@ static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
/*---------------------------------------------------------------------------
* This actually performs the core switch.
*/
static void __attribute__((naked))
switch_thread_core(unsigned int core, struct thread_entry *thread)
static void __attribute__((naked, noinline))
switch_thread_core(unsigned int old_core, struct thread_entry *thread)
{
/* Pure asm for this because compiler behavior isn't sufficiently predictable.
* Stack access also isn't permitted until restoring the original stack and
* context. */
asm volatile (
"stmfd sp!, { r4-r11, lr } \n" /* Stack all non-volatile context on current core */
"ldr r2, =idle_stacks \n" /* r2 = &idle_stacks[core][IDLE_STACK_WORDS] */
"ldr r2, [r2, r0, lsl #2] \n"
"add r2, r2, %0*4 \n"
"stmfd r2!, { sp } \n" /* save original stack pointer on idle stack */
"mov sp, r2 \n" /* switch stacks */
"adr r2, 1f \n" /* r2 = new core restart address */
"str r2, [r1, #40] \n" /* thread->context.start = r2 */
"ldr pc, =switch_thread \n" /* r0 = thread after call - see load_context */
"1: \n"
"ldr sp, [r0, #32] \n" /* Reload original sp from context structure */
"mov r1, #0 \n" /* Clear start address */
"stmfd sp!, { r4-r5, lr } \n" /* can't use the first two ctx fields */
"add r2, r1, #8 \n"
"stmia r2, { r6-r11, sp } \n" /* save remaining context */
"adr r2, .new_core_restart \n" /* save context ptr + restart address */
"str r2, [r1, #40] \n" /* make 'start' non-null */
"stmia r1, { r1-r2 } \n"
"ldr r2, =idle_stacks \n" /* switch to idle stack on old core */
"ldr sp, [r2, r0, lsl #2] \n"
"add sp, sp, %0*4 \n"
"stmfd sp!, { r0-r1 } \n"
"bl commit_dcache \n" /* write back everything */
"ldmfd sp!, { r0-r1 } \n"
"b switch_core_final \n"
".new_core_restart: \n"
"mov r1, #0 \n" /* mark as started */
"str r1, [r0, #40] \n"
"bl commit_discard_idcache \n" /* Invalidate new core's cache */
"ldmfd sp!, { r4-r11, pc } \n" /* Restore non-volatile context to new core and return */
: : "i"(IDLE_STACK_WORDS)
);
(void)core; (void)thread;
"add r0, r0, #8 \n"
"ldmia r0, { r6-r11, sp } \n" /* restore non-volatiles and stack */
"bl commit_discard_idcache \n" /* invalidate new core's cache */
"ldmfd sp!, { r4-r5, pc } \n" /* restore remaining context */
: : "i"(IDLE_STACK_WORDS));
(void)old_core; (void)thread;
}
/** PP-model-specific dual-core code **/

View file

@ -32,13 +32,13 @@
#include "core_alloc.h"
/* Define this as 1 to show informational messages that are not errors. */
#define THREAD_SDL_DEBUGF_ENABLED 0
#define THREAD_SDL_DEBUGF_ENABLED 1
#if THREAD_SDL_DEBUGF_ENABLED
#define THREAD_SDL_DEBUGF(...) DEBUGF(__VA_ARGS__)
static char __name[32];
static char __name[sizeof (((struct thread_debug_info *)0)->name)];
#define THREAD_SDL_GET_NAME(thread) \
({ thread_get_name(__name, ARRAYLEN(__name), thread); __name; })
({ format_thread_name(__name, sizeof (__name), thread); __name; })
#else
#define THREAD_SDL_DEBUGF(...)
#define THREAD_SDL_GET_NAME(thread)
@ -47,9 +47,6 @@ static char __name[32];
#define THREAD_PANICF(str...) \
({ fprintf(stderr, str); exit(-1); })
/* Thread/core entries as in rockbox core */
static struct core_entry cores[NUM_CORES];
struct thread_entry threads[MAXTHREADS];
/* Jump buffers for graceful exit - kernel threads don't stay neatly
* in their start routines responding to messages so this is the only
* way to get them back in there so they may exit */
@ -74,7 +71,7 @@ void sim_thread_shutdown(void)
/* Tell all threads jump back to their start routines, unlock and exit
gracefully - we'll check each one in turn for it's status. Threads
_could_ terminate via remove_thread or multiple threads could exit
_could_ terminate via thread_exit or multiple threads could exit
on each unlock but that is safe. */
/* Do this before trying to acquire lock */
@ -86,7 +83,7 @@ void sim_thread_shutdown(void)
/* Signal all threads on delay or block */
for (i = 0; i < MAXTHREADS; i++)
{
struct thread_entry *thread = &threads[i];
struct thread_entry *thread = __thread_slot_entry(i);
if (thread->context.s == NULL)
continue;
SDL_SemPost(thread->context.s);
@ -95,7 +92,7 @@ void sim_thread_shutdown(void)
/* Wait for all threads to finish and cleanup old ones. */
for (i = 0; i < MAXTHREADS; i++)
{
struct thread_entry *thread = &threads[i];
struct thread_entry *thread = __thread_slot_entry(i);
SDL_Thread *t = thread->context.t;
if (t != NULL)
@ -111,11 +108,11 @@ void sim_thread_shutdown(void)
}
else
{
/* Wait on any previous thread in this location-- could be one not quite
* finished exiting but has just unlocked the mutex. If it's NULL, the
* call returns immediately.
/* Wait on any previous thread in this location-- could be one not
* quite finished exiting but has just unlocked the mutex. If it's
* NULL, the call returns immediately.
*
* See remove_thread below for more information. */
* See thread_exit below for more information. */
SDL_WaitThread(thread->context.told, NULL);
}
}
@ -126,103 +123,6 @@ void sim_thread_shutdown(void)
threads_status = THREADS_EXIT_COMMAND_DONE;
}
/* Advance a slot's thread-ID version field while keeping the given slot
 * number, so a recycled slot never hands out the same ID twice in a row. */
static void new_thread_id(unsigned int slot_num,
                          struct thread_entry *thread)
{
    /* Bump the version and drop any bits outside the version field. */
    unsigned int next = (thread->id + (1u << THREAD_ID_VERSION_SHIFT))
                            & THREAD_ID_VERSION_MASK;

    /* A version of zero is skipped on wraparound. */
    if (next == 0)
        next = 1u << THREAD_ID_VERSION_SHIFT;

    thread->id = next | (slot_num & THREAD_ID_SLOT_MASK);
}
static struct thread_entry * find_empty_thread_slot(void)
{
struct thread_entry *thread = NULL;
int n;
for (n = 0; n < MAXTHREADS; n++)
{
int state = threads[n].state;
if (state == STATE_KILLED)
{
thread = &threads[n];
break;
}
}
return thread;
}
/* Initialize SDL threading.
 *
 * Sets up the global core/thread tables, the scheduler mutex and the
 * implicit main thread in slot 0, then arms the longjmp target used by
 * thread_exit to bring the main thread back here for simulator shutdown.
 * Returns normally (via the setjmp == 0 path) to continue program startup;
 * the second return from setjmp only happens at exit time and ends in
 * sim_do_exit(), which does not return. */
void init_threads(void)
{
    /* Token stack for the main thread; real stack belongs to the host. */
    static uintptr_t main_stack[] = { DEADBEEF, 0 };
    struct thread_entry *thread;
    int n;

    memset(cores, 0, sizeof(cores));
    memset(threads, 0, sizeof(threads));

    m = SDL_CreateMutex();

    if (SDL_LockMutex(m) == -1)
    {
        fprintf(stderr, "Couldn't lock mutex\n");
        return;
    }

    /* Initialize all IDs */
    for (n = 0; n < MAXTHREADS; n++)
        threads[n].id = THREAD_ID_INIT(n);

    /* Slot 0 is reserved for the main thread - initialize it here and
       then create the SDL thread - it is possible to have a quick, early
       shutdown try to access the structure. */
    thread = &threads[0];
    thread->stack = main_stack;
    thread->stack_size = sizeof (main_stack);
    thread->name = "main";
    thread->state = STATE_RUNNING;
    thread->context.s = SDL_CreateSemaphore(0);
    thread->context.t = NULL; /* NULL for the implicit main thread */
    cores[CURRENT_CORE].running = thread;

    if (thread->context.s == NULL)
    {
        fprintf(stderr, "Failed to create main semaphore\n");
        return;
    }

    /* Tell all threads jump back to their start routines, unlock and exit
       gracefully - we'll check each one in turn for its status. Threads
       _could_ terminate via remove_thread or multiple threads could exit
       on each unlock but that is safe. */

    /* Setup jump for exit - first pass through returns 0 and we resume
       normal startup; thread_exit longjmps back here with 1 later. */
    if (setjmp(thread_jmpbufs[0]) == 0)
    {
        THREAD_SDL_DEBUGF("Main thread: %p\n", thread);
        return;
    }

    /* Only reached via longjmp from thread_exit: shutdown path. */
    SDL_UnlockMutex(m);

    /* Set to 'COMMAND_DONE' when other rockbox threads have exited. */
    while (threads_status < THREADS_EXIT_COMMAND_DONE)
        SDL_Delay(10);

    SDL_DestroyMutex(m);

    /* We're the main thread - perform exit - doesn't return. */
    sim_do_exit();
}
void sim_thread_exception_wait(void)
{
while (1)
@ -237,7 +137,7 @@ void sim_thread_exception_wait(void)
void sim_thread_lock(void *me)
{
SDL_LockMutex(m);
cores[CURRENT_CORE].running = (struct thread_entry *)me;
__running_self_entry() = (struct thread_entry *)me;
if (threads_status != THREADS_RUN)
thread_exit();
@ -245,70 +145,14 @@ void sim_thread_lock(void *me)
void * sim_thread_unlock(void)
{
struct thread_entry *current = cores[CURRENT_CORE].running;
struct thread_entry *current = __running_self_entry();
SDL_UnlockMutex(m);
return current;
}
/* Map a thread ID to its slot entry; version bits in the ID are ignored. */
struct thread_entry * thread_id_entry(unsigned int thread_id)
{
    unsigned int slot = thread_id & THREAD_ID_SLOT_MASK;
    return &threads[slot];
}
/* Append a thread to the tail of a circular doubly-linked wait list.
 * An empty list becomes a one-element ring pointing at itself. */
static void add_to_list_l(struct thread_entry **list,
                          struct thread_entry *thread)
{
    struct thread_entry *head = *list;

    if (head == NULL)
    {
        /* First entry: link the thread to itself to form the ring. */
        thread->l.next = thread;
        thread->l.prev = thread;
        *list = thread;
        return;
    }

    /* Splice in just before the head -- the tail position of the ring. */
    struct thread_entry *tail = head->l.prev;
    thread->l.next = head;
    thread->l.prev = tail;
    tail->l.next = thread;
    head->l.prev = thread;
}
/* Unlink a thread from a circular doubly-linked wait list, updating the
 * list head when the head itself (or the sole member) is removed. */
static void remove_from_list_l(struct thread_entry **list,
                               struct thread_entry *thread)
{
    struct thread_entry *next = thread->l.next;
    struct thread_entry *prev = thread->l.prev;

    if (next == thread)
    {
        /* Sole member of the ring -- the list becomes empty. */
        *list = NULL;
        return;
    }

    if (*list == thread)
        *list = next; /* Head removed: its successor becomes the head. */

    /* Make the neighbours point past the removed entry. */
    prev->l.next = next;
    next->l.prev = prev;
}
unsigned int thread_self(void)
{
return cores[CURRENT_CORE].running->id;
}
struct thread_entry* thread_self_entry(void)
{
return cores[CURRENT_CORE].running;
}
void switch_thread(void)
{
struct thread_entry *current = cores[CURRENT_CORE].running;
struct thread_entry *current = __running_self_entry();
enable_irq();
@ -346,17 +190,7 @@ void switch_thread(void)
oldlevel = disable_irq_save();
if (current->state == STATE_BLOCKED_W_TMO)
{
/* Timed out */
remove_from_list_l(current->bqp, current);
#ifdef HAVE_WAKEUP_EXT_CB
if (current->wakeup_ext_cb != NULL)
current->wakeup_ext_cb(current);
#endif
current->state = STATE_RUNNING;
}
if (result == SDL_MUTEX_TIMEDOUT)
{
@ -384,7 +218,7 @@ void switch_thread(void)
#ifdef DEBUG
core_check_valid();
#endif
cores[CURRENT_CORE].running = current;
__running_self_entry() = current;
if (threads_status != THREADS_RUN)
thread_exit();
@ -392,7 +226,7 @@ void switch_thread(void)
void sleep_thread(int ticks)
{
struct thread_entry *current = cores[CURRENT_CORE].running;
struct thread_entry *current = __running_self_entry();
int rem;
current->state = STATE_SLEEPING;
@ -404,7 +238,7 @@ void sleep_thread(int ticks)
current->tmo_tick = (1000/HZ) * ticks + ((1000/HZ)-1) - rem;
}
void block_thread(struct thread_entry *current, int ticks)
void block_thread_(struct thread_entry *current, int ticks)
{
if (ticks < 0)
current->state = STATE_BLOCKED;
@ -414,32 +248,27 @@ void block_thread(struct thread_entry *current, int ticks)
current->tmo_tick = (1000/HZ)*ticks;
}
add_to_list_l(current->bqp, current);
wait_queue_register(current);
}
unsigned int wakeup_thread_(struct thread_entry **list)
unsigned int wakeup_thread_(struct thread_entry *thread)
{
struct thread_entry *thread = *list;
if (thread != NULL)
{
switch (thread->state)
{
case STATE_BLOCKED:
case STATE_BLOCKED_W_TMO:
remove_from_list_l(list, thread);
wait_queue_remove(thread);
thread->state = STATE_RUNNING;
SDL_SemPost(thread->context.s);
return THREAD_OK;
}
}
return THREAD_NONE;
}
void thread_thaw(unsigned int thread_id)
{
struct thread_entry *thread = thread_id_entry(thread_id);
struct thread_entry *thread = __thread_id_entry(thread_id);
if (thread->id == thread_id && thread->state == STATE_FROZEN)
{
@ -450,15 +279,14 @@ void thread_thaw(unsigned int thread_id)
int runthread(void *data)
{
struct thread_entry *current;
jmp_buf *current_jmpbuf;
/* Cannot access thread variables before locking the mutex as the
data structures may not be filled-in yet. */
SDL_LockMutex(m);
cores[CURRENT_CORE].running = (struct thread_entry *)data;
current = cores[CURRENT_CORE].running;
current_jmpbuf = &thread_jmpbufs[current - threads];
struct thread_entry *current = (struct thread_entry *)data;
__running_self_entry() = current;
jmp_buf *current_jmpbuf = &thread_jmpbufs[THREAD_ID_SLOT(current->id)];
/* Setup jump for exit */
if (setjmp(*current_jmpbuf) == 0)
@ -469,14 +297,15 @@ int runthread(void *data)
SDL_UnlockMutex(m);
SDL_SemWait(current->context.s);
SDL_LockMutex(m);
cores[CURRENT_CORE].running = current;
__running_self_entry() = current;
}
if (threads_status == THREADS_RUN)
{
current->context.start();
THREAD_SDL_DEBUGF("Thread Done: %d (%s)\n",
current - threads, THREAD_SDL_GET_NAME(current));
THREAD_ID_SLOT(current->id),
THREAD_SDL_GET_NAME(current));
/* Thread routine returned - suicide */
}
@ -495,27 +324,23 @@ unsigned int create_thread(void (*function)(void),
void* stack, size_t stack_size,
unsigned flags, const char *name)
{
struct thread_entry *thread;
SDL_Thread* t;
SDL_sem *s;
THREAD_SDL_DEBUGF("Creating thread: (%s)\n", name ? name : "");
thread = find_empty_thread_slot();
struct thread_entry *thread = thread_alloc();
if (thread == NULL)
{
DEBUGF("Failed to find thread slot\n");
return 0;
}
s = SDL_CreateSemaphore(0);
SDL_sem *s = SDL_CreateSemaphore(0);
if (s == NULL)
{
DEBUGF("Failed to create semaphore\n");
return 0;
}
t = SDL_CreateThread(runthread, thread);
SDL_Thread *t = SDL_CreateThread(runthread, thread);
if (t == NULL)
{
DEBUGF("Failed to create SDL thread\n");
@ -523,12 +348,6 @@ unsigned int create_thread(void (*function)(void),
return 0;
}
unsigned int stack_words = stack_size / sizeof (uintptr_t);
for (unsigned int i = stack_words; i-- > 0;)
((uintptr_t *)stack)[i] = DEADBEEF;
thread->stack = stack;
thread->stack_size = stack_size;
thread->name = name;
thread->state = (flags & CREATE_THREAD_FROZEN) ?
STATE_FROZEN : STATE_RUNNING;
@ -536,27 +355,22 @@ unsigned int create_thread(void (*function)(void),
thread->context.t = t;
thread->context.s = s;
THREAD_SDL_DEBUGF("New Thread: %d (%s)\n",
thread - threads, THREAD_SDL_GET_NAME(thread));
THREAD_SDL_DEBUGF("New Thread: %lu (%s)\n",
(unsigned long)thread->id,
THREAD_SDL_GET_NAME(thread));
return thread->id;
(void)stack; (void)stack_size;
}
static void remove_thread(unsigned int thread_id)
void thread_exit(void)
{
struct thread_entry *current = cores[CURRENT_CORE].running;
struct thread_entry *thread = thread_id_entry(thread_id);
SDL_Thread *t;
SDL_sem *s;
if (thread->id != thread_id)
return;
struct thread_entry *current = __running_self_entry();
int oldlevel = disable_irq_save();
t = thread->context.t;
s = thread->context.s;
SDL_Thread *t = current->context.t;
SDL_sem *s = current->context.s;
/* Wait the last thread here and keep this one or SDL will leak it since
* it doesn't free its own library allocations unless a wait is performed.
@ -566,59 +380,27 @@ static void remove_thread(unsigned int thread_id)
*
* However, see more below about SDL_KillThread.
*/
SDL_WaitThread(thread->context.told, NULL);
SDL_WaitThread(current->context.told, NULL);
thread->context.t = NULL;
thread->context.s = NULL;
thread->context.told = t;
current->context.t = NULL;
current->context.s = NULL;
current->context.told = t;
if (thread != current)
{
switch (thread->state)
{
case STATE_BLOCKED:
case STATE_BLOCKED_W_TMO:
/* Remove thread from object it's waiting on */
remove_from_list_l(thread->bqp, thread);
#ifdef HAVE_WAKEUP_EXT_CB
if (thread->wakeup_ext_cb != NULL)
thread->wakeup_ext_cb(thread);
#endif
break;
}
SDL_SemPost(s);
}
THREAD_SDL_DEBUGF("Removing thread: %d (%s)\n",
thread - threads, THREAD_SDL_GET_NAME(thread));
new_thread_id(thread->id, thread);
thread->state = STATE_KILLED;
thread_queue_wake(&thread->queue);
unsigned int id = current->id;
new_thread_id(current);
current->state = STATE_KILLED;
wait_queue_wake(&current->queue);
SDL_DestroySemaphore(s);
if (thread == current)
{
/* Do a graceful exit - perform the longjmp back into the thread
function to return */
restore_irq(oldlevel);
longjmp(thread_jmpbufs[current - threads], 1);
}
/* SDL_KillThread frees the old pointer too because it uses SDL_WaitThread
* to wait for the host to remove it. */
thread->context.told = NULL;
SDL_KillThread(t);
restore_irq(oldlevel);
}
thread_free(current);
longjmp(thread_jmpbufs[THREAD_ID_SLOT(id)], 1);
void thread_exit(void)
{
unsigned int id = thread_self();
remove_thread(id);
/* This should never and must never be reached - if it is, the
* state is corrupted */
THREAD_PANICF("thread_exit->K:*R (ID: %d)", id);
@ -627,44 +409,73 @@ void thread_exit(void)
void thread_wait(unsigned int thread_id)
{
struct thread_entry *current = cores[CURRENT_CORE].running;
struct thread_entry *thread = thread_id_entry(thread_id);
struct thread_entry *current = __running_self_entry();
struct thread_entry *thread = __thread_id_entry(thread_id);
if (thread->id == thread_id && thread->state != STATE_KILLED)
{
current->bqp = &thread->queue;
block_thread(current, TIMEOUT_BLOCK);
block_thread(current, TIMEOUT_BLOCK, &thread->queue);
switch_thread();
}
}
/*---------------------------------------------------------------------------
* Suspends a thread's execution for at least the specified number of ticks.
*
* May result in CPU core entering wait-for-interrupt mode if no other thread
* may be scheduled.
*
* NOTE: sleep(0) sleeps until the end of the current tick
* sleep(n) that doesn't result in rescheduling:
* n <= ticks suspended < n + 1
* n to n+1 is a lower bound. Other factors may affect the actual time
* a thread is suspended before it runs again.
*---------------------------------------------------------------------------
*/
unsigned sleep(unsigned ticks)
/* Initialize SDL threading */
void init_threads(void)
{
disable_irq();
sleep_thread(ticks);
switch_thread();
return 0;
}
m = SDL_CreateMutex();
/*---------------------------------------------------------------------------
* Elects another thread to run or, if no other thread may be made ready to
* run, immediately returns control back to the calling thread.
*---------------------------------------------------------------------------
*/
void yield(void)
{
switch_thread();
if (SDL_LockMutex(m) == -1)
{
fprintf(stderr, "Couldn't lock mutex\n");
return;
}
thread_alloc_init();
struct thread_entry *thread = thread_alloc();
if (thread == NULL)
{
fprintf(stderr, "Main thread alloc failed\n");
return;
}
/* Slot 0 is reserved for the main thread - initialize it here and
then create the SDL thread - it is possible to have a quick, early
shutdown try to access the structure. */
thread->name = __main_thread_name;
thread->state = STATE_RUNNING;
thread->context.s = SDL_CreateSemaphore(0);
thread->context.t = NULL; /* NULL for the implicit main thread */
__running_self_entry() = thread;
if (thread->context.s == NULL)
{
fprintf(stderr, "Failed to create main semaphore\n");
return;
}
/* Tell all threads jump back to their start routines, unlock and exit
       gracefully - we'll check each one in turn for its status. Threads
_could_ terminate via thread_exit or multiple threads could exit
on each unlock but that is safe. */
/* Setup jump for exit */
if (setjmp(thread_jmpbufs[THREAD_ID_SLOT(thread->id)]) == 0)
{
THREAD_SDL_DEBUGF("Main Thread: %lu (%s)\n",
(unsigned long)thread->id,
THREAD_SDL_GET_NAME(thread));
return;
}
SDL_UnlockMutex(m);
/* Set to 'COMMAND_DONE' when other rockbox threads have exited. */
while (threads_status < THREADS_EXIT_COMMAND_DONE)
SDL_Delay(10);
SDL_DestroyMutex(m);
    /* We're the main thread - perform exit - doesn't return. */
sim_do_exit();
}