Base the scheduler queues on linked lists and do cleanup/consolidation

Abstracts the threading internals a bit, changes the way its queues are
handled, and does type hiding for them as well.

A lot gets done here because major brain surgery was already required.

Threads may now be on a run queue and a wait queue simultaneously, so an
expired timeout only has to wake the thread rather than also remove it from
the wait queue, which simplifies the implicit-wake handling.
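
To make that concrete, here is a hedged sketch of the wait side after this
change, modeled on the semaphore_wait() hunk further down. The names
object_wait and struct object are invented; block_thread(), switch_thread(),
wait_queue_ptr(), wait_queue_try_remove(), __running_self_entry() and the
OBJ_WAIT_* values come from the diff, and IRQ/corelock handling is elided:

```c
/* Sketch only: a timed wait where a timeout wakes the thread but does
 * not dequeue it; the owning object removes it afterwards. */
int object_wait(struct object *obj, int timeout)
{
    struct thread_entry *current = __running_self_entry();

    block_thread(current, timeout, &obj->queue, NULL); /* IRQ/corelock elided */
    switch_thread();

    if (!wait_queue_ptr(current))
        return OBJ_WAIT_SUCCEEDED;  /* an explicit wake already removed us */

    /* Timeout expired: we are runnable again but still linked in the wait
     * queue, so the object takes us off itself. */
    return wait_queue_try_remove(current) ? OBJ_WAIT_TIMEDOUT
                                          : OBJ_WAIT_SUCCEEDED;
}
```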

List formats change for wait queues: they are now doubly linked rather than
circular. The timeout queue is now singly linked. The run queue is still
circular, as before.
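
The diff maps these onto generic list types via defines (__wait_queue is
lld_head, __rtr_queue is lldc_head, __tmo_queue is ll_head). As a rough
picture only; the real definitions live in linked_list.h and may differ in
detail:

```c
/* Illustrative shapes of the three link types used by the scheduler. */
struct ll_node   { struct ll_node *next; };            /* singly linked: timeout queue */
struct lld_node  { struct lld_node *prev, *next; };    /* doubly linked, NULL-terminated: wait queues */
struct lldc_node { struct lldc_node *prev, *next; };   /* doubly linked, circular: run queue */

struct ll_head   { struct ll_node *head, *tail; };
struct lld_head  { struct lld_node *head, *tail; };
struct lldc_head { struct lldc_node *head; };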

Adds a better thread slot allocator that can keep a slot marked as used
regardless of the thread's state. This helps in dropping the special tasks
that switch_thread used to be asked to perform (blocking tasks).
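
A brief usage sketch of that allocator as it appears in the diff below;
thread_alloc() and thread_free() are real, but the caller shown here and the
exact cleanup timing are assumptions:

```c
/* Sketch: the slot leaves the 'avail' bitmap at thread_alloc() and only
 * returns to it at thread_free(), which can be deferred until after
 * switch_thread() has finished any final blocking work for the thread. */
static unsigned int create_thread_sketch(void)
{
    struct thread_entry *t = thread_alloc();
    if (t == NULL)
        return 0;                       /* no free slot */
    /* ... set up stack, context and name, then make it runnable ... */
    return t->id;
}

/* much later, once the thread is truly gone:
 *     thread_free(t);   -- slot becomes available again */
```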

Deletes a lot of code, yet the result is surprisingly larger than expected.
Well, I'm not minding that for the time being; omelettes, breaking a few
eggs, and all that.

Change-Id: I0834d7bb16b2aecb2f63b58886eeda6ae4f29d59
Michael Sevakis 2014-08-08 06:33:51 -04:00
parent eb63d8b4a2
commit 6ed00870ab
20 changed files with 1550 additions and 2057 deletions

View file

@ -151,25 +151,21 @@ static const char* threads_getname(int selected_item, void *data,
selected_item -= NUM_CORES; selected_item -= NUM_CORES;
#endif #endif
const char *fmtstr = "%2d: ---";
struct thread_debug_info threadinfo; struct thread_debug_info threadinfo;
if (thread_get_debug_info(selected_item, &threadinfo) <= 0) if (thread_get_debug_info(selected_item, &threadinfo) > 0)
{ {
snprintf(buffer, buffer_len, "%2d: ---", selected_item); fmtstr = "%2d:" IF_COP(" (%d)") " %s" IF_PRIO(" %d %d")
return buffer; IFN_SDL(" %2d%%") " %s";
} }
snprintf(buffer, buffer_len, snprintf(buffer, buffer_len, fmtstr,
"%2d: " IF_COP("(%d) ") "%s " IF_PRIO("%d %d ") "%2d%% %s",
selected_item, selected_item,
#if NUM_CORES > 1 IF_COP(threadinfo.core,)
threadinfo.core,
#endif
threadinfo.statusstr, threadinfo.statusstr,
#ifdef HAVE_PRIORITY_SCHEDULING IF_PRIO(threadinfo.base_priority, threadinfo.current_priority,)
threadinfo.base_priority, IFN_SDL(threadinfo.stack_usage,)
threadinfo.current_priority,
#endif
threadinfo.stack_usage,
threadinfo.name); threadinfo.name);
return buffer; return buffer;
@ -187,16 +183,9 @@ static bool dbg_os(void)
{ {
struct simplelist_info info; struct simplelist_info info;
simplelist_info_init(&info, IF_COP("Core and ") "Stack usage:", simplelist_info_init(&info, IF_COP("Core and ") "Stack usage:",
#if NUM_CORES == 1 MAXTHREADS IF_COP( + NUM_CORES ), NULL);
MAXTHREADS,
#else
MAXTHREADS+NUM_CORES,
#endif
NULL);
#ifndef ROCKBOX_HAS_LOGF
info.hide_selection = true; info.hide_selection = true;
info.scroll_all = true; info.scroll_all = true;
#endif
info.action_callback = dbg_threads_action_callback; info.action_callback = dbg_threads_action_callback;
info.get_name = threads_getname; info.get_name = threads_getname;
return simplelist_show_list(&info); return simplelist_show_list(&info);
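
For readers unfamiliar with the conditional-argument macros that let the
single snprintf() call above carry a varying argument list: they expand
either to their arguments or to nothing. A minimal model only; the real
IF_COP/IF_PRIO come from the core headers and IFN_SDL is added by this
commit in thread.h:

```c
/* Minimal model of the conditional-argument trick (not the real headers). */
#if NUM_CORES > 1
#define IF_COP(...)   __VA_ARGS__
#else
#define IF_COP(...)
#endif

/* On a single-core, non-priority, SDL build the call collapses to:
 *   snprintf(buffer, buffer_len, fmtstr, selected_item,
 *            threadinfo.statusstr, threadinfo.name);
 */
```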

View file

@ -86,6 +86,8 @@ static inline void load_context(const void* addr)
); );
} }
#ifdef RB_PROFILE
/*--------------------------------------------------------------------------- /*---------------------------------------------------------------------------
* Call this from asm to make sure the sp is pointing to the * Call this from asm to make sure the sp is pointing to the
* correct place before the context is saved. * correct place before the context is saved.
@ -99,3 +101,6 @@ static inline void _profile_thread_stopped(int current_thread)
:: [id] "r" (current_thread) :: [id] "r" (current_thread)
: "cc", "memory"); : "cc", "memory");
} }
#define profile_thread_stopped _profile_thread_stopped
#endif /* RB_PROFILE */

View file

@ -118,15 +118,17 @@ int get_cpu_boost_counter(void);
#define ALIGN_UP(n, a) ALIGN_DOWN((n)+((a)-1),a) #define ALIGN_UP(n, a) ALIGN_DOWN((n)+((a)-1),a)
/* align start and end of buffer to nearest integer multiple of a */ /* align start and end of buffer to nearest integer multiple of a */
#define ALIGN_BUFFER(ptr,len,align) \ #define ALIGN_BUFFER(ptr, size, align) \
{\ ({ \
uintptr_t tmp_ptr1 = (uintptr_t)ptr; \ size_t __sz = (size); \
uintptr_t tmp_ptr2 = tmp_ptr1 + len;\ size_t __ali = (align); \
tmp_ptr1 = ALIGN_UP(tmp_ptr1,align); \ uintptr_t __a1 = (uintptr_t)(ptr); \
tmp_ptr2 = ALIGN_DOWN(tmp_ptr2,align); \ uintptr_t __a2 = __a1 + __sz; \
len = tmp_ptr2 - tmp_ptr1; \ __a1 = ALIGN_UP(__a1, __ali); \
ptr = (typeof(ptr))tmp_ptr1; \ __a2 = ALIGN_DOWN(__a2, __ali); \
} (ptr) = (typeof (ptr))__a1; \
(size) = __a2 > __a1 ? __a2 - __a1 : 0; \
})
#define PTR_ADD(ptr, x) ((typeof(ptr))((char*)(ptr) + (x))) #define PTR_ADD(ptr, x) ((typeof(ptr))((char*)(ptr) + (x)))
#define PTR_SUB(ptr, x) ((typeof(ptr))((char*)(ptr) - (x))) #define PTR_SUB(ptr, x) ((typeof(ptr))((char*)(ptr) - (x)))
@ -150,11 +152,16 @@ int get_cpu_boost_counter(void);
#endif #endif
/* Get the byte offset of a type's member */ /* Get the byte offset of a type's member */
#define OFFSETOF(type, membername) ((off_t)&((type *)0)->membername) #ifndef offsetof
#define offsetof(type, member) __builtin_offsetof(type, member)
#endif
/* Get the type pointer from one of its members */ /* Get the containing item of *ptr in type */
#define TYPE_FROM_MEMBER(type, memberptr, membername) \ #ifndef container_of
((type *)((intptr_t)(memberptr) - OFFSETOF(type, membername))) #define container_of(ptr, type, member) ({ \
const typeof (((type *)0)->member) *__mptr = (ptr); \
(type *)((void *)(__mptr) - offsetof(type, member)); })
#endif
/* returns index of first set bit or 32 if no bits are set */ /* returns index of first set bit or 32 if no bits are set */
#if defined(CPU_ARM) && ARM_ARCH >= 5 && !defined(__thumb__) #if defined(CPU_ARM) && ARM_ARCH >= 5 && !defined(__thumb__)
@ -324,6 +331,11 @@ static inline uint32_t swaw32_hw(uint32_t value)
* for all ARM CPUs. */ * for all ARM CPUs. */
#ifdef CPU_ARM #ifdef CPU_ARM
#define HAVE_CPU_CACHE_ALIGN #define HAVE_CPU_CACHE_ALIGN
#define MIN_STACK_ALIGN 8
#endif
#ifndef MIN_STACK_ALIGN
#define MIN_STACK_ALIGN (sizeof (uintptr_t))
#endif #endif
/* Calculate CACHEALIGN_SIZE from CACHEALIGN_BITS */ /* Calculate CACHEALIGN_SIZE from CACHEALIGN_BITS */
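
A quick usage sketch of the reworked macros above. The buffer and struct
names are invented, and the behavior notes come from reading the macro
bodies in this hunk:

```c
/* Sketch: ALIGN_BUFFER now evaluates its arguments once and clamps the
 * size to zero instead of underflowing; container_of() recovers the
 * enclosing object from a pointer to one of its members. */
static void align_example(void)
{
    static char rawbuf[1000];
    void  *bufp  = rawbuf;
    size_t bufsz = sizeof(rawbuf);
    ALIGN_BUFFER(bufp, bufsz, 32);   /* bufp rounded up, bufsz rounded down */

    struct widget { int id; long payload; };
    struct widget w;
    long *pp = &w.payload;
    struct widget *wp = container_of(pp, struct widget, payload);  /* wp == &w */

    (void)bufp; (void)bufsz; (void)wp;
}
```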

View file

@ -39,10 +39,9 @@
*/ */
struct mrsw_lock struct mrsw_lock
{ {
int volatile count; /* rd/wr counter; >0 = reader(s), <0 = writer */ int volatile count; /* counter; >0 = reader(s), <0 = writer */
struct thread_entry *queue; struct __wait_queue queue; /* waiter list */
struct blocker_splay splay; /* priority inheritance info struct blocker_splay splay; /* priority inheritance/owner info */
for waiters */
uint8_t rdrecursion[MAXTHREADS]; /* per-thread reader recursion counts */ uint8_t rdrecursion[MAXTHREADS]; /* per-thread reader recursion counts */
IF_COP( struct corelock cl; ) IF_COP( struct corelock cl; )
}; };

View file

@ -26,13 +26,13 @@
struct mutex struct mutex
{ {
struct thread_entry *queue; /* waiter list */ struct __wait_queue queue; /* waiter list */
int recursion; /* lock owner recursion count */ int recursion; /* lock owner recursion count */
struct blocker blocker; /* priority inheritance info struct blocker blocker; /* priority inheritance info
for waiters and owner*/ for waiters and owner*/
IF_COP( struct corelock cl; ) /* multiprocessor sync */ IF_COP( struct corelock cl; ) /* multiprocessor sync */
#ifdef HAVE_PRIORITY_SCHEDULING #ifdef HAVE_PRIORITY_SCHEDULING
bool no_preempt; bool no_preempt;
#endif #endif
}; };

View file

@ -88,7 +88,7 @@ struct queue_sender_list
/* If non-NULL, there is a thread waiting for the corresponding event */ /* If non-NULL, there is a thread waiting for the corresponding event */
/* Must be statically allocated to put in non-cached ram. */ /* Must be statically allocated to put in non-cached ram. */
struct thread_entry *senders[QUEUE_LENGTH]; /* message->thread map */ struct thread_entry *senders[QUEUE_LENGTH]; /* message->thread map */
struct thread_entry *list; /* list of senders in map */ struct __wait_queue list; /* list of senders in map */
/* Send info for last message dequeued or NULL if replied or not sent */ /* Send info for last message dequeued or NULL if replied or not sent */
struct thread_entry * volatile curr_sender; struct thread_entry * volatile curr_sender;
#ifdef HAVE_PRIORITY_SCHEDULING #ifdef HAVE_PRIORITY_SCHEDULING
@ -108,7 +108,7 @@ struct queue_sender_list
struct event_queue struct event_queue
{ {
struct thread_entry *queue; /* waiter list */ struct __wait_queue queue; /* waiter list */
struct queue_event events[QUEUE_LENGTH]; /* list of events */ struct queue_event events[QUEUE_LENGTH]; /* list of events */
unsigned int volatile read; /* head of queue */ unsigned int volatile read; /* head of queue */
unsigned int volatile write; /* tail of queue */ unsigned int volatile write; /* tail of queue */

View file

@ -26,10 +26,10 @@
struct semaphore struct semaphore
{ {
struct thread_entry *queue; /* Waiter list */ struct __wait_queue queue; /* Waiter list */
int volatile count; /* # of waits remaining before unsignaled */ int volatile count; /* # of waits remaining before unsignaled */
int max; /* maximum # of waits to remain signaled */ int max; /* maximum # of waits to remain signaled */
IF_COP( struct corelock cl; ) /* multiprocessor sync */ IF_COP( struct corelock cl; ) /* multiprocessor sync */
}; };
extern void semaphore_init(struct semaphore *s, int max, int start); extern void semaphore_init(struct semaphore *s, int max, int start);

View file

@ -26,6 +26,7 @@
#include <stdbool.h> #include <stdbool.h>
#include "config.h" #include "config.h"
#include "gcc_extensions.h" #include "gcc_extensions.h"
#include "linked_list.h"
#include "bitarray.h" #include "bitarray.h"
#include "corelock.h" #include "corelock.h"
@ -52,7 +53,7 @@
#define PRIORITY_REALTIME_4 4 #define PRIORITY_REALTIME_4 4
#define PRIORITY_REALTIME 4 /* Lowest realtime range */ #define PRIORITY_REALTIME 4 /* Lowest realtime range */
#define PRIORITY_BUFFERING 15 /* Codec buffering thread */ #define PRIORITY_BUFFERING 15 /* Codec buffering thread */
#define PRIORITY_USER_INTERFACE 16 /* The main thread */ #define PRIORITY_USER_INTERFACE 16 /* For most UI thrads */
#define PRIORITY_RECORDING 16 /* Recording thread */ #define PRIORITY_RECORDING 16 /* Recording thread */
#define PRIORITY_PLAYBACK 16 /* Variable between this and MAX */ #define PRIORITY_PLAYBACK 16 /* Variable between this and MAX */
#define PRIORITY_PLAYBACK_MAX 5 /* Maximum allowable playback priority */ #define PRIORITY_PLAYBACK_MAX 5 /* Maximum allowable playback priority */
@ -61,6 +62,7 @@
#define NUM_PRIORITIES 32 #define NUM_PRIORITIES 32
#define PRIORITY_IDLE 32 /* Priority representative of no tasks */ #define PRIORITY_IDLE 32 /* Priority representative of no tasks */
#define PRIORITY_MAIN_THREAD PRIORITY_USER_INTERFACE
#define IO_PRIORITY_IMMEDIATE 0 #define IO_PRIORITY_IMMEDIATE 0
#define IO_PRIORITY_BACKGROUND 32 #define IO_PRIORITY_BACKGROUND 32
@ -108,6 +110,9 @@ extern unsigned sleep(unsigned ticks);
#define IFN_PRIO(...) __VA_ARGS__ #define IFN_PRIO(...) __VA_ARGS__
#endif #endif
#define __wait_queue lld_head
#define __wait_queue_node lld_node
/* Basic structure describing the owner of an object */ /* Basic structure describing the owner of an object */
struct blocker struct blocker
{ {
@ -168,6 +173,7 @@ int thread_get_priority(unsigned int thread_id);
void thread_set_io_priority(unsigned int thread_id, int io_priority); void thread_set_io_priority(unsigned int thread_id, int io_priority);
int thread_get_io_priority(unsigned int thread_id); int thread_get_io_priority(unsigned int thread_id);
#endif /* HAVE_IO_PRIORITY */ #endif /* HAVE_IO_PRIORITY */
#if NUM_CORES > 1 #if NUM_CORES > 1
unsigned int switch_core(unsigned int new_core); unsigned int switch_core(unsigned int new_core);
#endif #endif
@ -186,11 +192,21 @@ int core_get_debug_info(unsigned int core, struct core_debug_info *infop);
#endif /* NUM_CORES */ #endif /* NUM_CORES */
#ifdef HAVE_SDL_THREADS
#define IF_SDL(x...) x
#define IFN_SDL(x...)
#else
#define IF_SDL(x...)
#define IFN_SDL(x...) x
#endif
struct thread_debug_info struct thread_debug_info
{ {
char statusstr[4]; char statusstr[4];
char name[32]; char name[32];
#ifndef HAVE_SDL_THREADS
unsigned int stack_usage; unsigned int stack_usage;
#endif
#if NUM_CORES > 1 #if NUM_CORES > 1
unsigned int core; unsigned int core;
#endif #endif

View file

@ -19,7 +19,8 @@
* *
****************************************************************************/ ****************************************************************************/
#include "kernel-internal.h" #include "kernel-internal.h"
#include "mrsw-lock.h" #include <string.h>
#include "mrsw_lock.h"
#ifdef HAVE_PRIORITY_SCHEDULING #ifdef HAVE_PRIORITY_SCHEDULING
@ -34,13 +35,14 @@ mrsw_reader_claim(struct mrsw_lock *mrsw, struct thread_entry *current,
static FORCE_INLINE void static FORCE_INLINE void
mrsw_reader_relinquish(struct mrsw_lock *mrsw, struct thread_entry *current, mrsw_reader_relinquish(struct mrsw_lock *mrsw, struct thread_entry *current,
int count, unsigned int slotnum) struct thread_entry *first, int count,
unsigned int slotnum)
{ {
/* If no writer is queued or has ownership then noone is queued; /* If no writer is queued or has ownership then noone is queued;
if a writer owns it, then the reader would be blocked instead. if a writer owns it, then the reader would be blocked instead.
Therefore, if the queue has threads, then the next after the Therefore, if the queue has threads, then the next after the
owning readers is a writer and this is not the last reader. */ owning readers is a writer and this is not the last reader. */
if (mrsw->queue) if (first)
corelock_lock(&mrsw->splay.cl); corelock_lock(&mrsw->splay.cl);
threadbit_clear_bit(&mrsw->splay.mask, slotnum); threadbit_clear_bit(&mrsw->splay.mask, slotnum);
@ -61,10 +63,10 @@ mrsw_reader_relinquish(struct mrsw_lock *mrsw, struct thread_entry *current,
threadbit_popcount(&mrsw->splay.mask)); threadbit_popcount(&mrsw->splay.mask));
/* switch owner to sole remaining reader */ /* switch owner to sole remaining reader */
slotnum = threadbit_ffs(&mrsw->splay.mask); slotnum = threadbit_ffs(&mrsw->splay.mask);
mrsw->splay.blocker.thread = thread_id_entry(slotnum); mrsw->splay.blocker.thread = __thread_slot_entry(slotnum);
} }
if (mrsw->queue) if (first)
{ {
priority_disinherit(current, &mrsw->splay.blocker); priority_disinherit(current, &mrsw->splay.blocker);
corelock_unlock(&mrsw->splay.cl); corelock_unlock(&mrsw->splay.cl);
@ -72,23 +74,25 @@ mrsw_reader_relinquish(struct mrsw_lock *mrsw, struct thread_entry *current,
} }
static FORCE_INLINE unsigned int static FORCE_INLINE unsigned int
mrsw_reader_wakeup_writer(struct mrsw_lock *mrsw, unsigned int slotnum) mrsw_reader_wakeup_writer(struct mrsw_lock *mrsw, struct thread_entry *thread,
unsigned int slotnum)
{ {
threadbit_clear_bit(&mrsw->splay.mask, slotnum); threadbit_clear_bit(&mrsw->splay.mask, slotnum);
return wakeup_thread(&mrsw->queue, WAKEUP_TRANSFER); return wakeup_thread(thread, WAKEUP_TRANSFER);
} }
static FORCE_INLINE unsigned int static FORCE_INLINE unsigned int
mrsw_writer_wakeup_writer(struct mrsw_lock *mrsw) mrsw_writer_wakeup_writer(struct mrsw_lock *mrsw, struct thread_entry *thread)
{ {
return wakeup_thread(&mrsw->queue, WAKEUP_TRANSFER); return wakeup_thread(thread, WAKEUP_TRANSFER);
(void)mrsw;
} }
static FORCE_INLINE unsigned int static FORCE_INLINE unsigned int
mrsw_writer_wakeup_readers(struct mrsw_lock *mrsw) mrsw_writer_wakeup_readers(struct mrsw_lock *mrsw, struct thread_entry *first)
{ {
unsigned int result = wakeup_thread(&mrsw->queue, WAKEUP_TRANSFER_MULTI); unsigned int result = wakeup_thread(first, WAKEUP_TRANSFER_MULTI);
mrsw->count = thread_self_entry()->retval; mrsw->count = __running_self_entry()->retval;
return result; return result;
} }
@ -97,32 +101,36 @@ mrsw_writer_wakeup_readers(struct mrsw_lock *mrsw)
#define mrsw_reader_claim(mrsw, current, count, slotnum) \ #define mrsw_reader_claim(mrsw, current, count, slotnum) \
do {} while (0) do {} while (0)
#define mrsw_reader_relinquish(mrsw, current, count, slotnum) \ #define mrsw_reader_relinquish(mrsw, current, first, count, slotnum) \
do {} while (0) do {} while (0)
static FORCE_INLINE unsigned int static FORCE_INLINE unsigned int
mrsw_reader_wakeup_writer(struct mrsw_lock *mrsw) mrsw_reader_wakeup_writer(struct mrsw_lock *mrsw, struct thread_entry *thread)
{ {
mrsw->splay.blocker.thread = mrsw->queue; mrsw->splay.blocker.thread = thread;
return wakeup_thread(&mrsw->queue); return wakeup_thread(thread);
} }
static FORCE_INLINE unsigned int static FORCE_INLINE unsigned int
mrsw_writer_wakeup_writer(struct mrsw_lock *mrsw) mrsw_writer_wakeup_writer(struct mrsw_lock *mrsw, struct thread_entry *thread)
{ {
mrsw->splay.blocker.thread = mrsw->queue; mrsw->splay.blocker.thread = thread;
return wakeup_thread(&mrsw->queue); return wakeup_thread(thread);
} }
static FORCE_INLINE unsigned int static FORCE_INLINE unsigned int
mrsw_writer_wakeup_readers(struct mrsw_lock *mrsw) mrsw_writer_wakeup_readers(struct mrsw_lock *mrsw, struct thread_entry *first)
{ {
mrsw->splay.blocker.thread = NULL; mrsw->splay.blocker.thread = NULL;
int count = 0; int count = 1;
while (mrsw->queue && mrsw->queue->retval != 0) while (1)
{ {
wakeup_thread(&mrsw->queue); wakeup_thread(first);
if (!(first = WQ_THREAD_FIRST(&mrsw->queue)) || first->retval == 0)
break;
count++; count++;
} }
@ -138,14 +146,11 @@ mrsw_writer_wakeup_readers(struct mrsw_lock *mrsw)
void mrsw_init(struct mrsw_lock *mrsw) void mrsw_init(struct mrsw_lock *mrsw)
{ {
mrsw->count = 0; mrsw->count = 0;
mrsw->queue = NULL; wait_queue_init(&mrsw->queue);
mrsw->splay.blocker.thread = NULL; blocker_splay_init(&mrsw->splay);
#ifdef HAVE_PRIORITY_SCHEDULING #ifdef HAVE_PRIORITY_SCHEDULING
mrsw->splay.blocker.priority = PRIORITY_IDLE;
threadbit_clear(&mrsw->splay.mask);
corelock_init(&mrsw->splay.cl);
memset(mrsw->rdrecursion, 0, sizeof (mrsw->rdrecursion)); memset(mrsw->rdrecursion, 0, sizeof (mrsw->rdrecursion));
#endif /* HAVE_PRIORITY_SCHEDULING */ #endif
corelock_init(&mrsw->cl); corelock_init(&mrsw->cl);
} }
@ -154,7 +159,7 @@ void mrsw_init(struct mrsw_lock *mrsw)
* access recursively. The current writer is ignored and gets access. */ * access recursively. The current writer is ignored and gets access. */
void mrsw_read_acquire(struct mrsw_lock *mrsw) void mrsw_read_acquire(struct mrsw_lock *mrsw)
{ {
struct thread_entry *current = thread_self_entry(); struct thread_entry *current = __running_self_entry();
if (current == mrsw->splay.blocker.thread IF_PRIO( && mrsw->count < 0 )) if (current == mrsw->splay.blocker.thread IF_PRIO( && mrsw->count < 0 ))
return; /* Read request while holding write access; pass */ return; /* Read request while holding write access; pass */
@ -178,7 +183,7 @@ void mrsw_read_acquire(struct mrsw_lock *mrsw)
int count = mrsw->count; int count = mrsw->count;
if (LIKELY(count >= 0 && !mrsw->queue)) if (LIKELY(count >= 0 && mrsw->queue.head == NULL))
{ {
/* Lock open to readers: /* Lock open to readers:
IFN_PRIO, mrsw->count tracks reader recursion */ IFN_PRIO, mrsw->count tracks reader recursion */
@ -189,13 +194,10 @@ void mrsw_read_acquire(struct mrsw_lock *mrsw)
} }
/* A writer owns it or is waiting; block... */ /* A writer owns it or is waiting; block... */
IF_COP( current->obj_cl = &mrsw->cl; )
IF_PRIO( current->blocker = &mrsw->splay.blocker; )
current->bqp = &mrsw->queue;
current->retval = 1; /* indicate multi-wake candidate */ current->retval = 1; /* indicate multi-wake candidate */
disable_irq(); disable_irq();
block_thread(current, TIMEOUT_BLOCK); block_thread(current, TIMEOUT_BLOCK, &mrsw->queue, &mrsw->splay.blocker);
corelock_unlock(&mrsw->cl); corelock_unlock(&mrsw->cl);
@ -207,7 +209,7 @@ void mrsw_read_acquire(struct mrsw_lock *mrsw)
* leave opens up access to writer threads. The current writer is ignored. */ * leave opens up access to writer threads. The current writer is ignored. */
void mrsw_read_release(struct mrsw_lock *mrsw) void mrsw_read_release(struct mrsw_lock *mrsw)
{ {
struct thread_entry *current = thread_self_entry(); struct thread_entry *current = __running_self_entry();
if (current == mrsw->splay.blocker.thread IF_PRIO( && mrsw->count < 0 )) if (current == mrsw->splay.blocker.thread IF_PRIO( && mrsw->count < 0 ))
return; /* Read release while holding write access; ignore */ return; /* Read release while holding write access; ignore */
@ -237,17 +239,18 @@ void mrsw_read_release(struct mrsw_lock *mrsw)
unsigned int result = THREAD_NONE; unsigned int result = THREAD_NONE;
const int oldlevel = disable_irq_save(); const int oldlevel = disable_irq_save();
if (--count == 0 && mrsw->queue) struct thread_entry *thread = WQ_THREAD_FIRST(&mrsw->queue);
if (--count == 0 && thread != NULL)
{ {
/* No readers remain and a writer is waiting */ /* No readers remain and a writer is waiting */
mrsw->count = -1; mrsw->count = -1;
result = mrsw_reader_wakeup_writer(mrsw IF_PRIO(, slotnum)); result = mrsw_reader_wakeup_writer(mrsw, thread IF_PRIO(, slotnum));
} }
else else
{ {
/* Giving up readership; we may be the last, or not */ /* Giving up readership; we may be the last, or not */
mrsw->count = count; mrsw->count = count;
mrsw_reader_relinquish(mrsw, current, count, slotnum); mrsw_reader_relinquish(mrsw, current, thread, count, slotnum);
} }
restore_irq(oldlevel); restore_irq(oldlevel);
@ -265,7 +268,7 @@ void mrsw_read_release(struct mrsw_lock *mrsw)
* safely call recursively. */ * safely call recursively. */
void mrsw_write_acquire(struct mrsw_lock *mrsw) void mrsw_write_acquire(struct mrsw_lock *mrsw)
{ {
struct thread_entry *current = thread_self_entry(); struct thread_entry *current = __running_self_entry();
if (current == mrsw->splay.blocker.thread) if (current == mrsw->splay.blocker.thread)
{ {
@ -288,13 +291,10 @@ void mrsw_write_acquire(struct mrsw_lock *mrsw)
} }
/* Readers present or a writer owns it - block... */ /* Readers present or a writer owns it - block... */
IF_COP( current->obj_cl = &mrsw->cl; )
IF_PRIO( current->blocker = &mrsw->splay.blocker; )
current->bqp = &mrsw->queue;
current->retval = 0; /* indicate single-wake candidate */ current->retval = 0; /* indicate single-wake candidate */
disable_irq(); disable_irq();
block_thread(current, TIMEOUT_BLOCK); block_thread(current, TIMEOUT_BLOCK, &mrsw->queue, &mrsw->splay.blocker);
corelock_unlock(&mrsw->cl); corelock_unlock(&mrsw->cl);
@ -305,9 +305,9 @@ void mrsw_write_acquire(struct mrsw_lock *mrsw)
/* Release writer thread lock and open the lock to readers and writers */ /* Release writer thread lock and open the lock to readers and writers */
void mrsw_write_release(struct mrsw_lock *mrsw) void mrsw_write_release(struct mrsw_lock *mrsw)
{ {
KERNEL_ASSERT(thread_self_entry() == mrsw->splay.blocker.thread, KERNEL_ASSERT(__running_self_entry() == mrsw->splay.blocker.thread,
"mrsw_write_release->wrong thread (%s != %s)\n", "mrsw_write_release->wrong thread (%s != %s)\n",
thread_self_entry()->name, __running_self_entry()->name,
mrsw->splay.blocker.thread->name); mrsw->splay.blocker.thread->name);
int count = mrsw->count; int count = mrsw->count;
@ -323,15 +323,16 @@ void mrsw_write_release(struct mrsw_lock *mrsw)
corelock_lock(&mrsw->cl); corelock_lock(&mrsw->cl);
const int oldlevel = disable_irq_save(); const int oldlevel = disable_irq_save();
if (mrsw->queue == NULL) /* 'count' becomes zero */ struct thread_entry *thread = WQ_THREAD_FIRST(&mrsw->queue);
if (thread == NULL) /* 'count' becomes zero */
{ {
mrsw->splay.blocker.thread = NULL; mrsw->splay.blocker.thread = NULL;
mrsw->count = 0; mrsw->count = 0;
} }
else if (mrsw->queue->retval == 0) /* 'count' stays -1 */ else if (thread->retval == 0) /* 'count' stays -1 */
result = mrsw_writer_wakeup_writer(mrsw); result = mrsw_writer_wakeup_writer(mrsw, thread);
else /* 'count' becomes # of readers */ else /* 'count' becomes # of readers */
result = mrsw_writer_wakeup_readers(mrsw); result = mrsw_writer_wakeup_readers(mrsw, thread);
restore_irq(oldlevel); restore_irq(oldlevel);
corelock_unlock(&mrsw->cl); corelock_unlock(&mrsw->cl);

View file

@ -30,20 +30,19 @@
* the object is available to other threads */ * the object is available to other threads */
void mutex_init(struct mutex *m) void mutex_init(struct mutex *m)
{ {
corelock_init(&m->cl); wait_queue_init(&m->queue);
m->queue = NULL;
m->recursion = 0; m->recursion = 0;
m->blocker.thread = NULL; blocker_init(&m->blocker);
#ifdef HAVE_PRIORITY_SCHEDULING #ifdef HAVE_PRIORITY_SCHEDULING
m->blocker.priority = PRIORITY_IDLE;
m->no_preempt = false; m->no_preempt = false;
#endif #endif
corelock_init(&m->cl);
} }
/* Gain ownership of a mutex object or block until it becomes free */ /* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m) void mutex_lock(struct mutex *m)
{ {
struct thread_entry *current = thread_self_entry(); struct thread_entry *current = __running_self_entry();
if(current == m->blocker.thread) if(current == m->blocker.thread)
{ {
@ -65,12 +64,8 @@ void mutex_lock(struct mutex *m)
} }
/* block until the lock is open... */ /* block until the lock is open... */
IF_COP( current->obj_cl = &m->cl; )
IF_PRIO( current->blocker = &m->blocker; )
current->bqp = &m->queue;
disable_irq(); disable_irq();
block_thread(current, TIMEOUT_BLOCK); block_thread(current, TIMEOUT_BLOCK, &m->queue, &m->blocker);
corelock_unlock(&m->cl); corelock_unlock(&m->cl);
@ -82,10 +77,10 @@ void mutex_lock(struct mutex *m)
void mutex_unlock(struct mutex *m) void mutex_unlock(struct mutex *m)
{ {
/* unlocker not being the owner is an unlocking violation */ /* unlocker not being the owner is an unlocking violation */
KERNEL_ASSERT(m->blocker.thread == thread_self_entry(), KERNEL_ASSERT(m->blocker.thread == __running_self_entry(),
"mutex_unlock->wrong thread (%s != %s)\n", "mutex_unlock->wrong thread (%s != %s)\n",
m->blocker.thread->name, m->blocker.thread->name,
thread_self_entry()->name); __running_self_entry()->name);
if(m->recursion > 0) if(m->recursion > 0)
{ {
@ -98,7 +93,8 @@ void mutex_unlock(struct mutex *m)
corelock_lock(&m->cl); corelock_lock(&m->cl);
/* transfer to next queued thread if any */ /* transfer to next queued thread if any */
if(LIKELY(m->queue == NULL)) struct thread_entry *thread = WQ_THREAD_FIRST(&m->queue);
if(LIKELY(thread == NULL))
{ {
/* no threads waiting - open the lock */ /* no threads waiting - open the lock */
m->blocker.thread = NULL; m->blocker.thread = NULL;
@ -107,11 +103,7 @@ void mutex_unlock(struct mutex *m)
} }
const int oldlevel = disable_irq_save(); const int oldlevel = disable_irq_save();
/* Tranfer of owning thread is handled in the wakeup protocol unsigned int result = wakeup_thread(thread, WAKEUP_TRANSFER);
* if priorities are enabled otherwise just set it from the
* queue head. */
IFN_PRIO( m->blocker.thread = m->queue; )
unsigned int result = wakeup_thread(&m->queue, WAKEUP_TRANSFER);
restore_irq(oldlevel); restore_irq(oldlevel);
corelock_unlock(&m->cl); corelock_unlock(&m->cl);

View file

@ -3,8 +3,8 @@
#include <errno.h> #include <errno.h>
#include <pthread.h> #include <pthread.h>
#include "/usr/include/semaphore.h" #include "/usr/include/semaphore.h"
#include "thread-internal.h"
#include "kernel.h" #include "kernel.h"
#include "thread.h"
#define NSEC_PER_SEC 1000000000L #define NSEC_PER_SEC 1000000000L
static inline void timespec_add_ns(struct timespec *a, uint64_t ns) static inline void timespec_add_ns(struct timespec *a, uint64_t ns)
@ -25,11 +25,6 @@ struct thread_init_data {
__thread struct thread_entry *_current; __thread struct thread_entry *_current;
struct thread_entry* thread_self_entry(void)
{
return _current;
}
unsigned int thread_self(void) unsigned int thread_self(void)
{ {
return (unsigned) pthread_self(); return (unsigned) pthread_self();
@ -70,12 +65,10 @@ static void *trampoline(void *arg)
if (data->start_frozen) if (data->start_frozen)
{ {
struct corelock thaw_lock; struct corelock thaw_lock;
struct thread_entry *queue = NULL;
corelock_init(&thaw_lock); corelock_init(&thaw_lock);
corelock_lock(&thaw_lock); corelock_lock(&thaw_lock);
_current->lock = &thaw_lock; _current->lock = &thaw_lock;
_current->bqp = &queue;
sem_post(&data->init_sem); sem_post(&data->init_sem);
block_thread_switch(_current, _current->lock); block_thread_switch(_current, _current->lock);
_current->lock = NULL; _current->lock = NULL;
@ -97,7 +90,7 @@ void thread_thaw(unsigned int thread_id)
if (e->lock) if (e->lock)
{ {
corelock_lock(e->lock); corelock_lock(e->lock);
wakeup_thread(e->bqp); wakeup_thread(e);
corelock_unlock(e->lock); corelock_unlock(e->lock);
} }
/* else: no lock. must be running already */ /* else: no lock. must be running already */
@ -135,7 +128,7 @@ unsigned int create_thread(void (*function)(void),
data->entry = entry; data->entry = entry;
pthread_cond_init(&entry->cond, NULL); pthread_cond_init(&entry->cond, NULL);
entry->runnable = true; entry->runnable = true;
entry->l = (struct thread_list) { NULL, NULL };
sem_init(&data->init_sem, 0, 0); sem_init(&data->init_sem, 0, 0);
if (pthread_create(&retval, NULL, trampoline, data) < 0) if (pthread_create(&retval, NULL, trampoline, data) < 0)
@ -153,58 +146,19 @@ unsigned int create_thread(void (*function)(void),
return retval; return retval;
} }
static void add_to_list_l(struct thread_entry **list,
struct thread_entry *thread)
{
if (*list == NULL)
{
/* Insert into unoccupied list */
thread->l.next = thread;
thread->l.prev = thread;
*list = thread;
}
else
{
/* Insert last */
thread->l.next = *list;
thread->l.prev = (*list)->l.prev;
thread->l.prev->l.next = thread;
(*list)->l.prev = thread;
}
}
static void remove_from_list_l(struct thread_entry **list,
struct thread_entry *thread)
{
if (thread == thread->l.next)
{
/* The only item */
*list = NULL;
return;
}
if (thread == *list)
{
/* List becomes next item */
*list = thread->l.next;
}
/* Fix links to jump over the removed entry. */
thread->l.prev->l.next = thread->l.next;
thread->l.next->l.prev = thread->l.prev;
}
/* for block_thread(), _w_tmp() and wakeup_thread() t->lock must point /* for block_thread(), _w_tmp() and wakeup_thread() t->lock must point
* to a corelock instance, and this corelock must be held by the caller */ * to a corelock instance, and this corelock must be held by the caller */
void block_thread_switch(struct thread_entry *t, struct corelock *cl) void block_thread_switch(struct thread_entry *t, struct corelock *cl)
{ {
t->runnable = false; t->runnable = false;
add_to_list_l(t->bqp, t); if (wait_queue_ptr(t))
wait_queue_register(t);
while(!t->runnable) while(!t->runnable)
pthread_cond_wait(&t->cond, &cl->mutex); pthread_cond_wait(&t->cond, &cl->mutex);
} }
void block_thread_switch_w_tmo(struct thread_entry *t, int timeout, struct corelock *cl) void block_thread_switch_w_tmo(struct thread_entry *t, int timeout,
struct corelock *cl)
{ {
int err = 0; int err = 0;
struct timespec ts; struct timespec ts;
@ -213,30 +167,25 @@ void block_thread_switch_w_tmo(struct thread_entry *t, int timeout, struct corel
timespec_add_ns(&ts, timeout * (NSEC_PER_SEC/HZ)); timespec_add_ns(&ts, timeout * (NSEC_PER_SEC/HZ));
t->runnable = false; t->runnable = false;
add_to_list_l(t->bqp, t); wait_queue_register(t->wqp, t);
while(!t->runnable && !err) while(!t->runnable && !err)
err = pthread_cond_timedwait(&t->cond, &cl->mutex, &ts); err = pthread_cond_timedwait(&t->cond, &cl->mutex, &ts);
if (err == ETIMEDOUT) if (err == ETIMEDOUT)
{ /* the thread timed out and was not explicitely woken up. { /* the thread timed out and was not explicitely woken up.
* we need to do this now to mark it runnable again */ * we need to do this now to mark it runnable again */
remove_from_list_l(t->bqp, t);
t->runnable = true; t->runnable = true;
if (t->wakeup_ext_cb) /* NOTE: objects do their own removal upon timer expiration */
t->wakeup_ext_cb(t);
} }
} }
unsigned int wakeup_thread(struct thread_entry **list) unsigned int wakeup_thread(struct thread_entry *t)
{ {
struct thread_entry *t = *list; if (t->wqp)
if (t) wait_queue_remove(t);
{ t->runnable = true;
remove_from_list_l(list, t); pthread_cond_signal(&t->cond);
t->runnable = true; return THREAD_OK;
pthread_cond_signal(&t->cond);
}
return THREAD_NONE;
} }

View file

@ -51,7 +51,7 @@ static struct
* q->events[]: | XX | E1 | E2 | E3 | E4 | XX | * q->events[]: | XX | E1 | E2 | E3 | E4 | XX |
* q->send->senders[]: | NULL | T1 | T2 | NULL | T3 | NULL | * q->send->senders[]: | NULL | T1 | T2 | NULL | T3 | NULL |
* \/ \/ \/ * \/ \/ \/
* q->send->list: >->|T0|<->|T1|<->|T2|<-------->|T3|<-< * q->send->list: 0<-|T0|<->|T1|<->|T2|<-------->|T3|->0
* q->send->curr_sender: /\ * q->send->curr_sender: /\
* *
* Thread has E0 in its own struct queue_event. * Thread has E0 in its own struct queue_event.
@ -65,20 +65,20 @@ static struct
* more efficent to reject the majority of cases that don't need this * more efficent to reject the majority of cases that don't need this
* called. * called.
*/ */
static void queue_release_sender(struct thread_entry * volatile * sender, static void queue_release_sender_inner(
intptr_t retval) struct thread_entry * volatile * sender, intptr_t retval)
{ {
struct thread_entry *thread = *sender; struct thread_entry *thread = *sender;
*sender = NULL; /* Clear slot. */ *sender = NULL; /* Clear slot. */
#ifdef HAVE_WAKEUP_EXT_CB
thread->wakeup_ext_cb = NULL; /* Clear callback. */
#endif
thread->retval = retval; /* Assign thread-local return value. */ thread->retval = retval; /* Assign thread-local return value. */
*thread->bqp = thread; /* Move blocking queue head to thread since wakeup_thread(thread, WAKEUP_RELEASE);
wakeup_thread wakes the first thread in }
the list. */
wakeup_thread(thread->bqp, WAKEUP_RELEASE); static inline void queue_release_sender(
struct thread_entry * volatile * sender, intptr_t retval)
{
if(UNLIKELY(*sender))
queue_release_sender_inner(sender, retval);
} }
/* Releases any waiting threads that are queued with queue_send - /* Releases any waiting threads that are queued with queue_send -
@ -93,26 +93,11 @@ static void queue_release_all_senders(struct event_queue *q)
{ {
struct thread_entry **spp = struct thread_entry **spp =
&q->send->senders[i & QUEUE_LENGTH_MASK]; &q->send->senders[i & QUEUE_LENGTH_MASK];
queue_release_sender(spp, 0);
if(*spp)
{
queue_release_sender(spp, 0);
}
} }
} }
} }
#ifdef HAVE_WAKEUP_EXT_CB
/* Callback to do extra forced removal steps from sender list in addition
* to the normal blocking queue removal and priority dis-inherit */
static void queue_remove_sender_thread_cb(struct thread_entry *thread)
{
*((struct thread_entry **)thread->retval) = NULL;
thread->wakeup_ext_cb = NULL;
thread->retval = 0;
}
#endif /* HAVE_WAKEUP_EXT_CB */
/* Enables queue_send on the specified queue - caller allocates the extra /* Enables queue_send on the specified queue - caller allocates the extra
* data structure. Only queues which are taken to be owned by a thread should * data structure. Only queues which are taken to be owned by a thread should
* enable this however an official owner is not compulsory but must be * enable this however an official owner is not compulsory but must be
@ -132,11 +117,12 @@ void queue_enable_queue_send(struct event_queue *q,
if(send != NULL && q->send == NULL) if(send != NULL && q->send == NULL)
{ {
memset(send, 0, sizeof(*send)); memset(send, 0, sizeof(*send));
wait_queue_init(&send->list);
#ifdef HAVE_PRIORITY_SCHEDULING #ifdef HAVE_PRIORITY_SCHEDULING
send->blocker.priority = PRIORITY_IDLE; blocker_init(&send->blocker);
if(owner_id != 0) if(owner_id != 0)
{ {
send->blocker.thread = thread_id_entry(owner_id); send->blocker.thread = __thread_id_entry(owner_id);
q->blocker_p = &send->blocker; q->blocker_p = &send->blocker;
} }
#endif #endif
@ -154,24 +140,14 @@ static inline void queue_do_unblock_sender(struct queue_sender_list *send,
unsigned int i) unsigned int i)
{ {
if(send) if(send)
{ queue_release_sender(&send->senders[i], 0);
struct thread_entry **spp = &send->senders[i];
if(UNLIKELY(*spp))
{
queue_release_sender(spp, 0);
}
}
} }
/* Perform the auto-reply sequence */ /* Perform the auto-reply sequence */
static inline void queue_do_auto_reply(struct queue_sender_list *send) static inline void queue_do_auto_reply(struct queue_sender_list *send)
{ {
if(send && send->curr_sender) if(send)
{
/* auto-reply */
queue_release_sender(&send->curr_sender, 0); queue_release_sender(&send->curr_sender, 0);
}
} }
/* Moves waiting thread's refrence from the senders array to the /* Moves waiting thread's refrence from the senders array to the
@ -191,7 +167,6 @@ static inline void queue_do_fetch_sender(struct queue_sender_list *send,
/* Move thread reference from array to the next thread /* Move thread reference from array to the next thread
that queue_reply will release */ that queue_reply will release */
send->curr_sender = *spp; send->curr_sender = *spp;
(*spp)->retval = (intptr_t)spp;
*spp = NULL; *spp = NULL;
} }
/* else message was posted asynchronously with queue_post */ /* else message was posted asynchronously with queue_post */
@ -205,18 +180,28 @@ static inline void queue_do_fetch_sender(struct queue_sender_list *send,
#define queue_do_fetch_sender(send, rd) #define queue_do_fetch_sender(send, rd)
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */ #endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
static void queue_wake_waiter_inner(struct thread_entry *thread)
{
wakeup_thread(thread, WAKEUP_DEFAULT);
}
static inline void queue_wake_waiter(struct event_queue *q)
{
struct thread_entry *thread = WQ_THREAD_FIRST(&q->queue);
if(thread != NULL)
queue_wake_waiter_inner(thread);
}
/* Queue must not be available for use during this call */ /* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue) void queue_init(struct event_queue *q, bool register_queue)
{ {
int oldlevel = disable_irq_save(); int oldlevel = disable_irq_save();
if(register_queue) if(register_queue)
{
corelock_lock(&all_queues.cl); corelock_lock(&all_queues.cl);
}
corelock_init(&q->cl); corelock_init(&q->cl);
q->queue = NULL; wait_queue_init(&q->queue);
/* What garbage is in write is irrelevant because of the masking design- /* What garbage is in write is irrelevant because of the masking design-
* any other functions the empty the queue do this as well so that * any other functions the empty the queue do this as well so that
* queue_count and queue_empty return sane values in the case of a * queue_count and queue_empty return sane values in the case of a
@ -261,7 +246,7 @@ void queue_delete(struct event_queue *q)
corelock_unlock(&all_queues.cl); corelock_unlock(&all_queues.cl);
/* Release thread(s) waiting on queue head */ /* Release thread(s) waiting on queue head */
thread_queue_wake(&q->queue); wait_queue_wake(&q->queue);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
if(q->send) if(q->send)
@ -293,7 +278,7 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
#ifdef HAVE_PRIORITY_SCHEDULING #ifdef HAVE_PRIORITY_SCHEDULING
KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL || KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
QUEUE_GET_THREAD(q) == thread_self_entry(), QUEUE_GET_THREAD(q) == __running_self_entry(),
"queue_wait->wrong thread\n"); "queue_wait->wrong thread\n");
#endif #endif
@ -307,18 +292,12 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
while(1) while(1)
{ {
struct thread_entry *current;
rd = q->read; rd = q->read;
if (rd != q->write) /* A waking message could disappear */ if (rd != q->write) /* A waking message could disappear */
break; break;
current = thread_self_entry(); struct thread_entry *current = __running_self_entry();
block_thread(current, TIMEOUT_BLOCK, &q->queue, NULL);
IF_COP( current->obj_cl = &q->cl; )
current->bqp = &q->queue;
block_thread(current, TIMEOUT_BLOCK);
corelock_unlock(&q->cl); corelock_unlock(&q->cl);
switch_thread(); switch_thread();
@ -349,16 +328,9 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
int oldlevel; int oldlevel;
unsigned int rd, wr; unsigned int rd, wr;
/* this function works only with a positive number (or zero) of ticks */
if (ticks == TIMEOUT_BLOCK)
{
queue_wait(q, ev);
return;
}
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL || KERNEL_ASSERT(QUEUE_GET_THREAD(q) == NULL ||
QUEUE_GET_THREAD(q) == thread_self_entry(), QUEUE_GET_THREAD(q) == __running_self_entry(),
"queue_wait_w_tmo->wrong thread\n"); "queue_wait_w_tmo->wrong thread\n");
#endif #endif
@ -372,14 +344,10 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
rd = q->read; rd = q->read;
wr = q->write; wr = q->write;
if (rd == wr && ticks > 0) if (rd == wr && ticks != 0)
{ {
struct thread_entry *current = thread_self_entry(); struct thread_entry *current = __running_self_entry();
block_thread(current, ticks, &q->queue, NULL);
IF_COP( current->obj_cl = &q->cl; )
current->bqp = &q->queue;
block_thread(current, ticks);
corelock_unlock(&q->cl); corelock_unlock(&q->cl);
switch_thread(); switch_thread();
@ -389,6 +357,8 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
rd = q->read; rd = q->read;
wr = q->write; wr = q->write;
wait_queue_try_remove(current);
} }
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
@ -436,7 +406,7 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
queue_do_unblock_sender(q->send, wr); queue_do_unblock_sender(q->send, wr);
/* Wakeup a waiting thread if any */ /* Wakeup a waiting thread if any */
wakeup_thread(&q->queue, WAKEUP_DEFAULT); queue_wake_waiter(q);
corelock_unlock(&q->cl); corelock_unlock(&q->cl);
restore_irq(oldlevel); restore_irq(oldlevel);
@ -465,28 +435,17 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{ {
struct queue_sender_list *send = q->send; struct queue_sender_list *send = q->send;
struct thread_entry **spp = &send->senders[wr]; struct thread_entry **spp = &send->senders[wr];
struct thread_entry *current = thread_self_entry(); struct thread_entry *current = __running_self_entry();
if(UNLIKELY(*spp)) /* overflow protect - unblock any thread waiting at this index */
{ queue_release_sender(spp, 0);
/* overflow protect - unblock any thread waiting at this index */
queue_release_sender(spp, 0);
}
/* Wakeup a waiting thread if any */ /* Wakeup a waiting thread if any */
wakeup_thread(&q->queue, WAKEUP_DEFAULT); queue_wake_waiter(q);
/* Save thread in slot, add to list and wait for reply */ /* Save thread in slot, add to list and wait for reply */
*spp = current; *spp = current;
IF_COP( current->obj_cl = &q->cl; ) block_thread(current, TIMEOUT_BLOCK, &send->list, q->blocker_p);
IF_PRIO( current->blocker = q->blocker_p; )
#ifdef HAVE_WAKEUP_EXT_CB
current->wakeup_ext_cb = queue_remove_sender_thread_cb;
#endif
current->retval = (intptr_t)spp;
current->bqp = &send->list;
block_thread(current, TIMEOUT_BLOCK);
corelock_unlock(&q->cl); corelock_unlock(&q->cl);
switch_thread(); switch_thread();
@ -495,7 +454,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
} }
/* Function as queue_post if sending is not enabled */ /* Function as queue_post if sending is not enabled */
wakeup_thread(&q->queue, WAKEUP_DEFAULT); queue_wake_waiter(q);
corelock_unlock(&q->cl); corelock_unlock(&q->cl);
restore_irq(oldlevel); restore_irq(oldlevel);
@ -530,16 +489,12 @@ void queue_reply(struct event_queue *q, intptr_t retval)
{ {
if(q->send && q->send->curr_sender) if(q->send && q->send->curr_sender)
{ {
struct queue_sender_list *sender;
int oldlevel = disable_irq_save(); int oldlevel = disable_irq_save();
corelock_lock(&q->cl); corelock_lock(&q->cl);
sender = q->send; struct queue_sender_list *send = q->send;
if(send)
/* Double-check locking */ queue_release_sender(&send->curr_sender, retval);
if(LIKELY(sender && sender->curr_sender))
queue_release_sender(&sender->curr_sender, retval);
corelock_unlock(&q->cl); corelock_unlock(&q->cl);
restore_irq(oldlevel); restore_irq(oldlevel);

View file

@ -24,6 +24,7 @@
/**************************************************************************** /****************************************************************************
* Simple semaphore functions ;) * Simple semaphore functions ;)
****************************************************************************/ ****************************************************************************/
/* Initialize the semaphore object. /* Initialize the semaphore object.
* max = maximum up count the semaphore may assume (max >= 1) * max = maximum up count the semaphore may assume (max >= 1)
* start = initial count of semaphore (0 <= count <= max) */ * start = initial count of semaphore (0 <= count <= max) */
@ -31,7 +32,7 @@ void semaphore_init(struct semaphore *s, int max, int start)
{ {
KERNEL_ASSERT(max > 0 && start >= 0 && start <= max, KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
"semaphore_init->inv arg\n"); "semaphore_init->inv arg\n");
s->queue = NULL; wait_queue_init(&s->queue);
s->max = max; s->max = max;
s->count = start; s->count = start;
corelock_init(&s->cl); corelock_init(&s->cl);
@ -42,44 +43,49 @@ void semaphore_init(struct semaphore *s, int max, int start)
* safely be used in an ISR. */ * safely be used in an ISR. */
int semaphore_wait(struct semaphore *s, int timeout) int semaphore_wait(struct semaphore *s, int timeout)
{ {
int ret; int ret = OBJ_WAIT_TIMEDOUT;
int oldlevel;
int count;
oldlevel = disable_irq_save(); int oldlevel = disable_irq_save();
corelock_lock(&s->cl); corelock_lock(&s->cl);
count = s->count; int count = s->count;
if(LIKELY(count > 0)) if(LIKELY(count > 0))
{ {
/* count is not zero; down it */ /* count is not zero; down it */
s->count = count - 1; s->count = count - 1;
ret = OBJ_WAIT_SUCCEEDED; ret = OBJ_WAIT_SUCCEEDED;
} }
else if(timeout == 0) else if(timeout != 0)
{
/* just polling it */
ret = OBJ_WAIT_TIMEDOUT;
}
else
{ {
/* too many waits - block until count is upped... */ /* too many waits - block until count is upped... */
struct thread_entry * current = thread_self_entry(); struct thread_entry *current = __running_self_entry();
IF_COP( current->obj_cl = &s->cl; )
current->bqp = &s->queue;
/* return value will be OBJ_WAIT_SUCCEEDED after wait if wake was
* explicit in semaphore_release */
current->retval = OBJ_WAIT_TIMEDOUT;
block_thread(current, timeout); block_thread(current, timeout, &s->queue, NULL);
corelock_unlock(&s->cl); corelock_unlock(&s->cl);
/* ...and turn control over to next thread */ /* ...and turn control over to next thread */
switch_thread(); switch_thread();
return current->retval; /* if explicit wake indicated; do no more */
if(LIKELY(!wait_queue_ptr(current)))
return OBJ_WAIT_SUCCEEDED;
disable_irq();
corelock_lock(&s->cl);
/* see if anyone got us after the expired wait */
if(wait_queue_try_remove(current))
{
count = s->count;
if(count > 0)
{
/* down it lately */
s->count = count - 1;
ret = OBJ_WAIT_SUCCEEDED;
}
}
} }
/* else just polling it */
corelock_unlock(&s->cl); corelock_unlock(&s->cl);
restore_irq(oldlevel); restore_irq(oldlevel);
@ -93,18 +99,17 @@ int semaphore_wait(struct semaphore *s, int timeout)
void semaphore_release(struct semaphore *s) void semaphore_release(struct semaphore *s)
{ {
unsigned int result = THREAD_NONE; unsigned int result = THREAD_NONE;
int oldlevel;
oldlevel = disable_irq_save(); int oldlevel = disable_irq_save();
corelock_lock(&s->cl); corelock_lock(&s->cl);
if(LIKELY(s->queue != NULL)) struct thread_entry *thread = WQ_THREAD_FIRST(&s->queue);
if(LIKELY(thread != NULL))
{ {
/* a thread was queued - wake it up and keep count at 0 */ /* a thread was queued - wake it up and keep count at 0 */
KERNEL_ASSERT(s->count == 0, KERNEL_ASSERT(s->count == 0,
"semaphore_release->threads queued but count=%d!\n", s->count); "semaphore_release->threads queued but count=%d!\n", s->count);
s->queue->retval = OBJ_WAIT_SUCCEEDED; /* indicate explicit wake */ result = wakeup_thread(thread, WAKEUP_DEFAULT);
result = wakeup_thread(&s->queue, WAKEUP_DEFAULT);
} }
else else
{ {

View file

@ -18,39 +18,222 @@
* KIND, either express or implied. * KIND, either express or implied.
* *
****************************************************************************/ ****************************************************************************/
#include "thread-internal.h" #include "kernel-internal.h"
#include "system.h" #include "system.h"
/* Unless otherwise defined, do nothing */
#ifndef YIELD_KERNEL_HOOK
#define YIELD_KERNEL_HOOK() false
#endif
#ifndef SLEEP_KERNEL_HOOK
#define SLEEP_KERNEL_HOOK(ticks) false
#endif
const char __main_thread_name_str[] = "main";
/* Array indexing is more efficient in inlines if the elements are a native
word size (100s of bytes fewer instructions) */
#if NUM_CORES > 1
static struct core_entry __core_entries[NUM_CORES] IBSS_ATTR;
struct core_entry *__cores[NUM_CORES] IBSS_ATTR;
#else
struct core_entry __cores[NUM_CORES] IBSS_ATTR;
#endif
static struct thread_entry __thread_entries[MAXTHREADS] IBSS_ATTR;
struct thread_entry *__threads[MAXTHREADS] IBSS_ATTR;
/** Internal functions **/
/*---------------------------------------------------------------------------
* Find an empty thread slot or NULL if none found. The slot returned will
* be locked on multicore.
*---------------------------------------------------------------------------
*/
static struct threadalloc
{
threadbit_t avail;
#if NUM_CORES > 1
struct corelock cl;
#endif
} threadalloc SHAREDBSS_ATTR;
/*---------------------------------------------------------------------------
* Initialize the thread allocator
*---------------------------------------------------------------------------
*/
void thread_alloc_init(void)
{
corelock_init(&threadalloc.cl);
for (unsigned int core = 0; core < NUM_CORES; core++)
{
#if NUM_CORES > 1
struct core_entry *c = &__core_entries[core];
__cores[core] = c;
#else
struct core_entry *c = &__cores[core];
#endif
rtr_queue_init(&c->rtr);
corelock_init(&c->rtr_cl);
tmo_queue_init(&c->tmo);
c->next_tmo_check = current_tick; /* Something not in the past */
}
for (unsigned int slotnum = 0; slotnum < MAXTHREADS; slotnum++)
{
struct thread_entry *t = &__thread_entries[slotnum];
__threads[slotnum] = t;
corelock_init(&t->waiter_cl);
corelock_init(&t->slot_cl);
t->id = THREAD_ID_INIT(slotnum);
threadbit_set_bit(&threadalloc.avail, slotnum);
}
}
/*---------------------------------------------------------------------------
* Allocate a thread alot
*---------------------------------------------------------------------------
*/
struct thread_entry * thread_alloc(void)
{
struct thread_entry *thread = NULL;
corelock_lock(&threadalloc.cl);
unsigned int slotnum = threadbit_ffs(&threadalloc.avail);
if (slotnum < MAXTHREADS)
{
threadbit_clear_bit(&threadalloc.avail, slotnum);
thread = __threads[slotnum];
}
corelock_unlock(&threadalloc.cl);
return thread;
}
/*---------------------------------------------------------------------------
* Free the thread slot of 'thread'
*---------------------------------------------------------------------------
*/
void thread_free(struct thread_entry *thread)
{
corelock_lock(&threadalloc.cl);
threadbit_set_bit(&threadalloc.avail, THREAD_ID_SLOT(thread->id));
corelock_unlock(&threadalloc.cl);
}
/*---------------------------------------------------------------------------
* Assign the thread slot a new ID. Version is 0x00000100..0xffffff00.
*---------------------------------------------------------------------------
*/
void new_thread_id(struct thread_entry *thread)
{
uint32_t id = thread->id + (1u << THREAD_ID_VERSION_SHIFT);
/* If wrapped to 0, make it 1 */
if ((id & THREAD_ID_VERSION_MASK) == 0)
id |= (1u << THREAD_ID_VERSION_SHIFT);
thread->id = id;
}
/*--------------------------------------------------------------------------- /*---------------------------------------------------------------------------
* Wakeup an entire queue of threads - returns bitwise-or of return bitmask * Wakeup an entire queue of threads - returns bitwise-or of return bitmask
* from each operation or THREAD_NONE of nothing was awakened. Object owning * from each operation or THREAD_NONE of nothing was awakened.
* the queue must be locked first.
*
* INTERNAL: Intended for use by kernel objects and not for programs.
*--------------------------------------------------------------------------- *---------------------------------------------------------------------------
*/ */
unsigned int thread_queue_wake(struct thread_entry **list) unsigned int wait_queue_wake(struct __wait_queue *wqp)
{ {
unsigned result = THREAD_NONE; unsigned result = THREAD_NONE;
struct thread_entry *thread;
for (;;) while ((thread = WQ_THREAD_FIRST(wqp)))
{ result |= wakeup_thread(thread, WAKEUP_DEFAULT);
unsigned int rc = wakeup_thread(list, WAKEUP_DEFAULT);
if (rc == THREAD_NONE)
break; /* No more threads */
result |= rc;
}
return result; return result;
} }
/** Debug screen stuff **/ /** Public functions **/
#ifdef RB_PROFILE
void profile_thread(void)
{
profstart(THREAD_ID_SLOT(__running_self_entry()->id));
}
#endif
/*--------------------------------------------------------------------------- /*---------------------------------------------------------------------------
* returns the stack space used in bytes * Return the thread id of the calling thread
* --------------------------------------------------------------------------
*/
unsigned int thread_self(void)
{
return __running_self_entry()->id;
}
/*---------------------------------------------------------------------------
* Suspends a thread's execution for at least the specified number of ticks.
*
* May result in CPU core entering wait-for-interrupt mode if no other thread
* may be scheduled.
*
* NOTE: sleep(0) sleeps until the end of the current tick
* sleep(n) that doesn't result in rescheduling:
* n <= ticks suspended < n + 1
* n to n+1 is a lower bound. Other factors may affect the actual time
* a thread is suspended before it runs again.
*---------------------------------------------------------------------------
*/
unsigned sleep(unsigned ticks)
{
/* In certain situations, certain bootloaders in particular, a normal
* threading call is inappropriate. */
if (SLEEP_KERNEL_HOOK(ticks))
return 0; /* Handled */
disable_irq();
sleep_thread(ticks);
switch_thread();
return 0;
}
/*---------------------------------------------------------------------------
* Elects another thread to run or, if no other thread may be made ready to
* run, immediately returns control back to the calling thread.
*---------------------------------------------------------------------------
*/
void yield(void)
{
/* In certain situations, certain bootloaders in particular, a normal
* threading call is inappropriate. */
if (YIELD_KERNEL_HOOK())
return; /* Handled */
switch_thread();
}
/** Debug screen stuff **/
void format_thread_name(char *buf, size_t bufsize,
const struct thread_entry *thread)
{
const char *name = thread->name;
if (!name)
name = "";
const char *fmt = *name ? "%s" : "%s%08lX";
snprintf(buf, bufsize, fmt, name, thread->id);
}
#ifndef HAVE_SDL_THREADS
/*---------------------------------------------------------------------------
* Returns the maximum percentage of the stack ever used during runtime.
*--------------------------------------------------------------------------- *---------------------------------------------------------------------------
*/ */
static unsigned int stack_usage(uintptr_t *stackptr, size_t stack_size) static unsigned int stack_usage(uintptr_t *stackptr, size_t stack_size)
@ -69,13 +252,9 @@ static unsigned int stack_usage(uintptr_t *stackptr, size_t stack_size)
return usage; return usage;
} }
#endif /* HAVE_SDL_THREADS */
#if NUM_CORES > 1 #if NUM_CORES > 1
/*---------------------------------------------------------------------------
* Returns the maximum percentage of the core's idle stack ever used during
* runtime.
*---------------------------------------------------------------------------
*/
int core_get_debug_info(unsigned int core, struct core_debug_info *infop)
{
    extern uintptr_t * const idle_stacks[NUM_CORES];
@@ -105,29 +284,29 @@ int thread_get_debug_info(unsigned int thread_id,
    if (!infop)
        return -1;

    unsigned int slotnum = THREAD_ID_SLOT(thread_id);
    if (slotnum >= MAXTHREADS)
        return -1;

    struct thread_entry *thread = __thread_slot_entry(slotnum);

    int oldlevel = disable_irq_save();
    corelock_lock(&threadalloc.cl);
    corelock_lock(&thread->slot_cl);

    unsigned int state = thread->state;

    int ret = 0;

    if (threadbit_test_bit(&threadalloc.avail, slotnum) == 0)
    {
        bool cpu_boost = false;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
        cpu_boost = thread->cpu_boost;
#endif
#ifndef HAVE_SDL_THREADS
        infop->stack_usage = stack_usage(thread->stack, thread->stack_size);
#endif
#if NUM_CORES > 1
        infop->core = thread->core;
#endif
@@ -140,13 +319,13 @@ int thread_get_debug_info(unsigned int thread_id,
                 cpu_boost ? '+' : (state == STATE_RUNNING ? '*' : ' '),
                 status_chars[state]);

        format_thread_name(infop->name, sizeof (infop->name), thread);
        ret = 1;
    }

    corelock_unlock(&thread->slot_cl);
    corelock_unlock(&threadalloc.cl);
    restore_irq(oldlevel);

    return ret;
}


@@ -78,30 +78,11 @@ struct priority_distribution
#endif /* HAVE_PRIORITY_SCHEDULING */

#ifdef HAVE_CORELOCK_OBJECT
/* Operations to be performed just before stopping a thread and starting
   a new one if specified before calling switch_thread */
enum
{
    TBOP_CLEAR = 0,       /* No operation to do */
    TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
    TBOP_SWITCH_CORE,     /* Call the core switch preparation routine */
};

struct thread_blk_ops
{
    struct corelock *cl_p; /* pointer to corelock */
    unsigned char flags;   /* TBOP_* flags */
};
#endif /* NUM_CORES > 1 */

/* Link information for lists thread is in */
struct thread_entry; /* forward */
struct thread_list
{
    struct thread_entry *prev; /* Previous thread in a list */
    struct thread_entry *next; /* Next thread in a list */
};

#define __rtr_queue lldc_head
#define __rtr_queue_node lldc_node

#define __tmo_queue ll_head
#define __tmo_queue_node ll_node
/* Information kept in each thread slot
 * members are arranged according to size - largest first - in order
@@ -109,96 +90,55 @@ struct thread_list
 */
struct thread_entry
{
    struct regs context;          /* Register context at switch -
                                     _must_ be first member */
#ifndef HAVE_SDL_THREADS
    uintptr_t *stack;             /* Pointer to top of stack */
#endif
    const char *name;             /* Thread name */
    long tmo_tick;                /* Tick when thread should be woken */
    struct __rtr_queue_node rtr;  /* Node for run queue */
    struct __tmo_queue_node tmo;  /* Links for timeout list */
    struct __wait_queue_node wq;  /* Node for wait queue */
    struct __wait_queue *volatile wqp; /* Pointer to registered wait queue */
#if NUM_CORES > 1
    struct corelock waiter_cl;    /* Corelock for thread_wait */
    struct corelock slot_cl;      /* Corelock to lock thread slot */
    unsigned char core;           /* The core to which thread belongs */
#endif
    struct __wait_queue queue;    /* List of threads waiting for thread to be
                                     removed */
    volatile intptr_t retval;     /* Return value from a blocked operation/
                                     misc. use */
    uint32_t id;                  /* Current slot id */
    int __errno;                  /* Thread error number (errno tls) */
#ifdef HAVE_PRIORITY_SCHEDULING
    /* Priority summary of owned objects that support inheritance */
    struct blocker *blocker;      /* Pointer to blocker when this thread is blocked
                                     on an object that supports PIP -
                                     states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct priority_distribution pdist; /* Priority summary of owned objects
                                     that have blocked threads and thread's own
                                     base priority */
    int skip_count;               /* Number of times skipped if higher priority
                                     thread was running */
    unsigned char base_priority;  /* Base priority (set explicitly during
                                     creation or thread_set_priority) */
    unsigned char priority;       /* Scheduled priority (higher of base or
                                     all threads blocked by this one) */
#endif
#ifndef HAVE_SDL_THREADS
    unsigned short stack_size;    /* Size of stack in bytes */
#endif
    unsigned char state;          /* Thread slot state (STATE_*) */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    unsigned char cpu_boost;      /* CPU frequency boost flag */
#endif
#ifdef HAVE_IO_PRIORITY
    unsigned char io_priority;
#endif
};
/* Information kept for each core
* Members are arranged for the same reason as in thread_entry
*/
struct core_entry
{
/* "Active" lists - core is constantly active on these and are never
locked and interrupts do not access them */
struct thread_entry *running; /* threads that are running (RTR) */
struct thread_entry *timeout; /* threads that are on a timeout before
running again */
struct thread_entry *block_task; /* Task going off running list */
#ifdef HAVE_PRIORITY_SCHEDULING
struct priority_distribution rtr; /* Summary of running and ready-to-run
threads */
#endif
long next_tmo_check; /* soonest time to check tmo threads */
#ifdef HAVE_CORELOCK_OBJECT
struct thread_blk_ops blk_ops; /* operations to perform when
blocking a thread */
struct corelock rtr_cl; /* Lock for rtr list */
#endif /* NUM_CORES */
};
/* Thread ID, 32 bits = |VVVVVVVV|VVVVVVVV|VVVVVVVV|SSSSSSSS| */
#define THREAD_ID_VERSION_SHIFT 8
#define THREAD_ID_VERSION_MASK  0xffffff00
@@ -206,38 +146,128 @@ struct core_entry
#define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n))
#define THREAD_ID_SLOT(id) ((id) & THREAD_ID_SLOT_MASK)
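A worked example of the encoding, with values chosen purely for illustration:

    uint32_t id = THREAD_ID_INIT(5);        /* (1u << 8) | 5 == 0x00000105 */
    unsigned int slot = THREAD_ID_SLOT(id); /* low 8 bits -> slot 5 */
    /* When slot 5 is recycled, bumping only the version bits means a stale
     * id such as 0x00000105 no longer matches the slot's new id (0x00000205),
     * so lookups against the dead thread can be rejected. */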
/* Thread locking */
#if NUM_CORES > 1
#define LOCK_THREAD(thread) \
({ corelock_lock(&(thread)->slot_cl); })
#define TRY_LOCK_THREAD(thread) \
({ corelock_try_lock(&(thread)->slot_cl); })
#define UNLOCK_THREAD(thread) \
({ corelock_unlock(&(thread)->slot_cl); })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
({ unsigned int _core = (thread)->core; \
cores[_core].blk_ops.flags |= TBOP_UNLOCK_CORELOCK; \
cores[_core].blk_ops.cl_p = &(thread)->slot_cl; })
#else /* NUM_CORES == 1*/
#define LOCK_THREAD(thread) \
({ (void)(thread); })
#define TRY_LOCK_THREAD(thread) \
({ (void)(thread); })
#define UNLOCK_THREAD(thread) \
({ (void)(thread); })
#define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \
({ (void)(thread); })
#endif /* NUM_CORES */
#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)
/* Information kept for each core
* Members are arranged for the same reason as in thread_entry
*/
struct core_entry
{
/* "Active" lists - core is constantly active on these and are never
locked and interrupts do not access them */
struct __rtr_queue rtr; /* Threads that are runnable */
struct __tmo_queue tmo; /* Threads on a bounded wait */
struct thread_entry *running; /* Currently running thread */
#ifdef HAVE_PRIORITY_SCHEDULING
struct priority_distribution rtr_dist; /* Summary of runnables */
#endif
long next_tmo_check; /* Next due timeout check */
#if NUM_CORES > 1
struct corelock rtr_cl; /* Lock for rtr list */
#endif /* NUM_CORES */
};
/* Hide a few scheduler details from itself to make allocation more flexible */
#define __main_thread_name \
({ extern const char __main_thread_name_str[]; \
__main_thread_name_str; })
static FORCE_INLINE
void * __get_main_stack(size_t *stacksize)
{
#if (CONFIG_PLATFORM & PLATFORM_NATIVE)
extern uintptr_t stackbegin[];
extern uintptr_t stackend[];
#else
extern uintptr_t *stackbegin;
extern uintptr_t *stackend;
#endif
*stacksize = (uintptr_t)stackend - (uintptr_t)stackbegin;
return stackbegin;
}
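Hypothetical usage, just to show the calling convention:

    size_t stacksize;
    void *stack = __get_main_stack(&stacksize);
    /* 'stack' is stackbegin and 'stacksize' spans up to stackend, so the main
     * thread's slot can be filled in without the scheduler knowing how the
     * platform defines those symbols. */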
void format_thread_name(char *buf, size_t bufsize,
const struct thread_entry *thread);
static FORCE_INLINE
struct core_entry * __core_id_entry(unsigned int core)
{
#if NUM_CORES > 1
extern struct core_entry * __cores[NUM_CORES];
return __cores[core];
#else
extern struct core_entry __cores[NUM_CORES];
return &__cores[core];
#endif
}
#define __running_self_entry() \
__core_id_entry(CURRENT_CORE)->running
static FORCE_INLINE
struct thread_entry * __thread_slot_entry(unsigned int slotnum)
{
extern struct thread_entry * __threads[MAXTHREADS];
return __threads[slotnum];
}
#define __thread_id_entry(id) \
__thread_slot_entry(THREAD_ID_SLOT(id))
#define THREAD_FROM(p, member) \
container_of(p, struct thread_entry, member)
#define RTR_EMPTY(rtrp) \
({ (rtrp)->head == NULL; })
#define RTR_THREAD_FIRST(rtrp) \
({ THREAD_FROM((rtrp)->head, rtr); })
#define RTR_THREAD_NEXT(thread) \
({ THREAD_FROM((thread)->rtr.next, rtr); })
#define TMO_THREAD_FIRST(tmop) \
({ struct __tmo_queue *__tmop = (tmop); \
__tmop->head ? THREAD_FROM(__tmop->head, tmo) : NULL; })
#define TMO_THREAD_NEXT(thread) \
({ struct __tmo_queue_node *__next = (thread)->tmo.next; \
__next ? THREAD_FROM(__next, tmo) : NULL; })
#define WQ_THREAD_FIRST(wqp) \
({ struct __wait_queue *__wqp = (wqp); \
__wqp->head ? THREAD_FROM(__wqp->head, wq) : NULL; })
#define WQ_THREAD_NEXT(thread) \
({ struct __wait_queue_node *__next = (thread)->wq.next; \
__next ? THREAD_FROM(__next, wq) : NULL; })
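For example, walking a wait queue with these accessors (an illustrative helper, not something the commit adds):

    static inline unsigned int wait_queue_count(struct __wait_queue *wqp)
    {
        unsigned int count = 0;
        for (struct thread_entry *t = WQ_THREAD_FIRST(wqp);
             t != NULL; t = WQ_THREAD_NEXT(t))
            count++;
        return count; /* wait queues are NULL-terminated, so this terminates */
    }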
void thread_alloc_init(void) INIT_ATTR;
struct thread_entry * thread_alloc(void);
void thread_free(struct thread_entry *thread);
void new_thread_id(struct thread_entry *thread);
/* Switch to next runnable thread */
void switch_thread(void);

/* Blocks a thread for at least the specified number of ticks (0 = wait until
 * next tick) */
void sleep_thread(int ticks);

/* Blocks the current thread on a thread queue (< 0 == infinite) */
void block_thread_(struct thread_entry *current, int timeout);
#ifdef HAVE_PRIORITY_SCHEDULING
#define block_thread(thread, timeout, __wqp, bl) \
({ struct thread_entry *__t = (thread); \
__t->wqp = (__wqp); \
if (!__builtin_constant_p(bl) || (bl)) \
__t->blocker = (bl); \
block_thread_(__t, (timeout)); })
#else
#define block_thread(thread, timeout, __wqp, bl...) \
({ struct thread_entry *__t = (thread); \
__t->wqp = (__wqp); \
block_thread_(__t, (timeout)); })
#endif
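A sketch of the intended call pattern, mirroring thread_wait() in the SDL backend further down ('obj' is a hypothetical kernel object):

    struct thread_entry *current = __running_self_entry();
    /* With a constant NULL the macro skips the blocker assignment; a real
     * PIP-aware object would pass its struct blocker here. */
    block_thread(current, TIMEOUT_BLOCK, &obj->queue, NULL);
    switch_thread();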
/* Return bit flags for thread wakeup */
#define THREAD_NONE 0x0 /* No thread woken up (exclusive) */
@@ -246,7 +276,7 @@ void block_thread(struct thread_entry *current, int timeout);
                    higher priority than current were woken) */

/* A convenience function for waking an entire queue of threads. */
unsigned int wait_queue_wake(struct __wait_queue *wqp);
/* Wakeup a thread at the head of a list */
enum wakeup_thread_protocol
@@ -257,36 +287,139 @@ enum wakeup_thread_protocol
    WAKEUP_TRANSFER_MULTI,
};

unsigned int wakeup_thread_(struct thread_entry *thread
                            IF_PRIO(, enum wakeup_thread_protocol proto));

#ifdef HAVE_PRIORITY_SCHEDULING
#define wakeup_thread(thread, proto) \
    wakeup_thread_((thread), (proto))
#else
#define wakeup_thread(thread, proto...) \
    wakeup_thread_((thread));
#endif /* HAVE_PRIORITY_SCHEDULING */
#ifdef HAVE_IO_PRIORITY
void thread_set_io_priority(unsigned int thread_id, int io_priority);
int thread_get_io_priority(unsigned int thread_id);
#endif /* HAVE_IO_PRIORITY */
#if NUM_CORES > 1
unsigned int switch_core(unsigned int new_core);
#endif
/* Return the id of the calling thread. */
unsigned int thread_self(void);
/* Return the thread_entry for the calling thread */
struct thread_entry* thread_self_entry(void);
/* Return thread entry from id */
struct thread_entry *thread_id_entry(unsigned int thread_id);
#ifdef RB_PROFILE
void profile_thread(void);
#endif
static inline void rtr_queue_init(struct __rtr_queue *rtrp)
{
lldc_init(rtrp);
}
static inline void rtr_queue_make_first(struct __rtr_queue *rtrp,
struct thread_entry *thread)
{
rtrp->head = &thread->rtr;
}
static inline void rtr_queue_add(struct __rtr_queue *rtrp,
struct thread_entry *thread)
{
lldc_insert_last(rtrp, &thread->rtr);
}
static inline void rtr_queue_remove(struct __rtr_queue *rtrp,
struct thread_entry *thread)
{
lldc_remove(rtrp, &thread->rtr);
}
#define TMO_NOT_QUEUED (NULL + 1)
static inline bool tmo_is_queued(struct thread_entry *thread)
{
return thread->tmo.next != TMO_NOT_QUEUED;
}
static inline void tmo_set_dequeued(struct thread_entry *thread)
{
thread->tmo.next = TMO_NOT_QUEUED;
}
static inline void tmo_queue_init(struct __tmo_queue *tmop)
{
ll_init(tmop);
}
static inline void tmo_queue_expire(struct __tmo_queue *tmop,
struct thread_entry *prev,
struct thread_entry *thread)
{
ll_remove_next(tmop, prev ? &prev->tmo : NULL);
tmo_set_dequeued(thread);
}
static inline void tmo_queue_remove(struct __tmo_queue *tmop,
struct thread_entry *thread)
{
if (tmo_is_queued(thread))
{
ll_remove(tmop, &thread->tmo);
tmo_set_dequeued(thread);
}
}
static inline void tmo_queue_register(struct __tmo_queue *tmop,
struct thread_entry *thread)
{
if (!tmo_is_queued(thread))
ll_insert_last(tmop, &thread->tmo);
}
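Illustrative only ('corep' and 'self' are placeholders): the TMO_NOT_QUEUED sentinel makes the register/remove pair safe to call without first checking queue membership:

    struct core_entry *corep = __core_id_entry(CURRENT_CORE);
    struct thread_entry *self = __running_self_entry();

    tmo_queue_register(&corep->tmo, self); /* no-op if already queued */
    /* ... woken early by something other than the timeout ... */
    tmo_queue_remove(&corep->tmo, self);   /* no-op if it already expired off */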
static inline void wait_queue_init(struct __wait_queue *wqp)
{
lld_init(wqp);
}
static inline void wait_queue_register(struct thread_entry *thread)
{
lld_insert_last(thread->wqp, &thread->wq);
}
static inline struct __wait_queue *
wait_queue_ptr(struct thread_entry *thread)
{
return thread->wqp;
}
static inline struct __wait_queue *
wait_queue_remove(struct thread_entry *thread)
{
struct __wait_queue *wqp = thread->wqp;
thread->wqp = NULL;
lld_remove(wqp, &thread->wq);
return wqp;
}
static inline struct __wait_queue *
wait_queue_try_remove(struct thread_entry *thread)
{
struct __wait_queue *wqp = thread->wqp;
if (wqp)
{
thread->wqp = NULL;
lld_remove(wqp, &thread->wq);
}
return wqp;
}
static inline void blocker_init(struct blocker *bl)
{
bl->thread = NULL;
#ifdef HAVE_PRIORITY_SCHEDULING
bl->priority = PRIORITY_IDLE;
#endif
}
static inline void blocker_splay_init(struct blocker_splay *blsplay)
{
blocker_init(&blsplay->blocker);
#ifdef HAVE_PRIORITY_SCHEDULING
threadbit_clear(&blsplay->mask);
#endif
corelock_init(&blsplay->cl);
}
#endif /* THREAD_INTERNAL_H */

File diff suppressed because it is too large

@@ -1,5 +1,5 @@
#include "../thread-internal.h"

int * __errno(void)
{
    return &__running_self_entry()->__errno;
}


@@ -125,6 +125,7 @@ SECTIONS
    .idle_stacks (NOLOAD) :
    {
        *(.idle_stacks)
        . = ALIGN(8);
#if NUM_CORES > 1
        cpu_idlestackbegin = .;
        . += IDLE_STACK_SIZE;


@@ -82,46 +82,22 @@ static void INIT_ATTR core_thread_init(unsigned int core)
 * to use a stack from an unloaded module until another thread runs on it.
 *---------------------------------------------------------------------------
 */
static void __attribute__((naked, noinline, noreturn))
thread_exit_finalize(unsigned int core, struct thread_entry *current)
{
    asm volatile (
        "ldr    r2, =idle_stacks       \n" /* switch to idle stack */
        "ldr    sp, [r2, r0, lsl #2]   \n"
        "add    sp, sp, %0*4           \n"
        "cmp    r0, #0                 \n" /* CPU? */
        "mov    r4, r1                 \n"
        "blne   commit_dcache          \n"
        "mov    r0, r4                 \n"
        "b      thread_exit_final      \n"
        : : "i"(IDLE_STACK_WORDS));

    while (1);
    (void)core; (void)current;
}
/*---------------------------------------------------------------------------
@@ -136,31 +112,32 @@ static void core_switch_blk_op(unsigned int core, struct thread_entry *thread)
/*---------------------------------------------------------------------------
 * This actually performs the core switch.
 */
static void __attribute__((naked, noinline))
switch_thread_core(unsigned int old_core, struct thread_entry *thread)
{
    /* Pure asm for this because compiler behavior isn't sufficiently predictable.
     * Stack access also isn't permitted until restoring the original stack and
     * context. */
    asm volatile (
        "stmfd  sp!, { r4-r5, lr }     \n" /* can't use the first two ctx fields */
        "add    r2, r1, #8             \n"
        "stmia  r2, { r6-r11, sp }     \n" /* save remaining context */
        "adr    r2, .new_core_restart  \n" /* save context ptr + restart address */
        "str    r2, [r1, #40]          \n" /* make 'start' non-null */
        "stmia  r1, { r1-r2 }          \n"
        "ldr    r2, =idle_stacks       \n" /* switch to idle stack on old core */
        "ldr    sp, [r2, r0, lsl #2]   \n"
        "add    sp, sp, %0*4           \n"
        "stmfd  sp!, { r0-r1 }         \n"
        "bl     commit_dcache          \n" /* write back everything */
        "ldmfd  sp!, { r0-r1 }         \n"
        "b      switch_core_final      \n"
    ".new_core_restart:                \n"
        "mov    r1, #0                 \n" /* mark as started */
        "str    r1, [r0, #40]          \n"
        "add    r0, r0, #8             \n"
        "ldmia  r0, { r6-r11, sp }     \n" /* restore non-volatiles and stack */
        "bl     commit_discard_idcache \n" /* invalidate new core's cache */
        "ldmfd  sp!, { r4-r5, pc }     \n" /* restore remaining context */
        : : "i"(IDLE_STACK_WORDS));
    (void)old_core; (void)thread;
}

/** PP-model-specific dual-core code **/


@@ -32,13 +32,13 @@
#include "core_alloc.h"

/* Define this as 1 to show informational messages that are not errors. */
#define THREAD_SDL_DEBUGF_ENABLED 1

#if THREAD_SDL_DEBUGF_ENABLED
#define THREAD_SDL_DEBUGF(...) DEBUGF(__VA_ARGS__)
static char __name[sizeof (((struct thread_debug_info *)0)->name)];
#define THREAD_SDL_GET_NAME(thread) \
    ({ format_thread_name(__name, sizeof (__name), thread); __name; })
#else
#define THREAD_SDL_DEBUGF(...)
#define THREAD_SDL_GET_NAME(thread)
@@ -47,9 +47,6 @@ static char __name[32];
#define THREAD_PANICF(str...) \
    ({ fprintf(stderr, str); exit(-1); })
/* Thread/core entries as in rockbox core */
static struct core_entry cores[NUM_CORES];
struct thread_entry threads[MAXTHREADS];
/* Jump buffers for graceful exit - kernel threads don't stay neatly
 * in their start routines responding to messages so this is the only
 * way to get them back in there so they may exit */
@@ -74,7 +71,7 @@ void sim_thread_shutdown(void)
    /* Tell all threads jump back to their start routines, unlock and exit
       gracefully - we'll check each one in turn for its status. Threads
       _could_ terminate via thread_exit or multiple threads could exit
       on each unlock but that is safe. */

    /* Do this before trying to acquire lock */
@@ -86,7 +83,7 @@ void sim_thread_shutdown(void)
    /* Signal all threads on delay or block */
    for (i = 0; i < MAXTHREADS; i++)
    {
        struct thread_entry *thread = __thread_slot_entry(i);
        if (thread->context.s == NULL)
            continue;
        SDL_SemPost(thread->context.s);
@@ -95,7 +92,7 @@ void sim_thread_shutdown(void)
    /* Wait for all threads to finish and cleanup old ones. */
    for (i = 0; i < MAXTHREADS; i++)
    {
        struct thread_entry *thread = __thread_slot_entry(i);
        SDL_Thread *t = thread->context.t;

        if (t != NULL)
@@ -111,11 +108,11 @@ void sim_thread_shutdown(void)
        }
        else
        {
            /* Wait on any previous thread in this location-- could be one not
             * quite finished exiting but has just unlocked the mutex. If it's
             * NULL, the call returns immediately.
             *
             * See thread_exit below for more information. */
            SDL_WaitThread(thread->context.told, NULL);
        }
    }
@@ -126,103 +123,6 @@ void sim_thread_shutdown(void)
    threads_status = THREADS_EXIT_COMMAND_DONE;
}
static void new_thread_id(unsigned int slot_num,
struct thread_entry *thread)
{
unsigned int version =
(thread->id + (1u << THREAD_ID_VERSION_SHIFT))
& THREAD_ID_VERSION_MASK;
if (version == 0)
version = 1u << THREAD_ID_VERSION_SHIFT;
thread->id = version | (slot_num & THREAD_ID_SLOT_MASK);
}
static struct thread_entry * find_empty_thread_slot(void)
{
struct thread_entry *thread = NULL;
int n;
for (n = 0; n < MAXTHREADS; n++)
{
int state = threads[n].state;
if (state == STATE_KILLED)
{
thread = &threads[n];
break;
}
}
return thread;
}
/* Initialize SDL threading */
void init_threads(void)
{
static uintptr_t main_stack[] = { DEADBEEF, 0 };
struct thread_entry *thread;
int n;
memset(cores, 0, sizeof(cores));
memset(threads, 0, sizeof(threads));
m = SDL_CreateMutex();
if (SDL_LockMutex(m) == -1)
{
fprintf(stderr, "Couldn't lock mutex\n");
return;
}
/* Initialize all IDs */
for (n = 0; n < MAXTHREADS; n++)
threads[n].id = THREAD_ID_INIT(n);
/* Slot 0 is reserved for the main thread - initialize it here and
then create the SDL thread - it is possible to have a quick, early
shutdown try to access the structure. */
thread = &threads[0];
thread->stack = main_stack;
thread->stack_size = sizeof (main_stack);
thread->name = "main";
thread->state = STATE_RUNNING;
thread->context.s = SDL_CreateSemaphore(0);
thread->context.t = NULL; /* NULL for the implicit main thread */
cores[CURRENT_CORE].running = thread;
if (thread->context.s == NULL)
{
fprintf(stderr, "Failed to create main semaphore\n");
return;
}
/* Tell all threads jump back to their start routines, unlock and exit
gracefully - we'll check each one in turn for it's status. Threads
_could_ terminate via remove_thread or multiple threads could exit
on each unlock but that is safe. */
/* Setup jump for exit */
if (setjmp(thread_jmpbufs[0]) == 0)
{
THREAD_SDL_DEBUGF("Main thread: %p\n", thread);
return;
}
SDL_UnlockMutex(m);
/* Set to 'COMMAND_DONE' when other rockbox threads have exited. */
while (threads_status < THREADS_EXIT_COMMAND_DONE)
SDL_Delay(10);
SDL_DestroyMutex(m);
/* We're the main thead - perform exit - doesn't return. */
sim_do_exit();
}
void sim_thread_exception_wait(void)
{
    while (1)
@@ -237,7 +137,7 @@ void sim_thread_exception_wait(void)
void sim_thread_lock(void *me)
{
    SDL_LockMutex(m);
    __running_self_entry() = (struct thread_entry *)me;

    if (threads_status != THREADS_RUN)
        thread_exit();
@@ -245,70 +145,14 @@ void sim_thread_lock(void *me)
void * sim_thread_unlock(void)
{
    struct thread_entry *current = __running_self_entry();
    SDL_UnlockMutex(m);
    return current;
}
struct thread_entry * thread_id_entry(unsigned int thread_id)
{
return &threads[thread_id & THREAD_ID_SLOT_MASK];
}
static void add_to_list_l(struct thread_entry **list,
struct thread_entry *thread)
{
if (*list == NULL)
{
/* Insert into unoccupied list */
thread->l.next = thread;
thread->l.prev = thread;
*list = thread;
}
else
{
/* Insert last */
thread->l.next = *list;
thread->l.prev = (*list)->l.prev;
thread->l.prev->l.next = thread;
(*list)->l.prev = thread;
}
}
static void remove_from_list_l(struct thread_entry **list,
struct thread_entry *thread)
{
if (thread == thread->l.next)
{
/* The only item */
*list = NULL;
return;
}
if (thread == *list)
{
/* List becomes next item */
*list = thread->l.next;
}
/* Fix links to jump over the removed entry. */
thread->l.prev->l.next = thread->l.next;
thread->l.next->l.prev = thread->l.prev;
}
unsigned int thread_self(void)
{
return cores[CURRENT_CORE].running->id;
}
struct thread_entry* thread_self_entry(void)
{
return cores[CURRENT_CORE].running;
}
void switch_thread(void)
{
    struct thread_entry *current = __running_self_entry();

    enable_irq();
@@ -346,17 +190,7 @@ void switch_thread(void)
    oldlevel = disable_irq_save();

    current->state = STATE_RUNNING;

    if (result == SDL_MUTEX_TIMEDOUT)
    {
@@ -384,7 +218,7 @@ void switch_thread(void)
#ifdef DEBUG
    core_check_valid();
#endif
    __running_self_entry() = current;

    if (threads_status != THREADS_RUN)
        thread_exit();
@@ -392,7 +226,7 @@ void switch_thread(void)
void sleep_thread(int ticks)
{
    struct thread_entry *current = __running_self_entry();
    int rem;

    current->state = STATE_SLEEPING;
@@ -404,7 +238,7 @@ void sleep_thread(int ticks)
        current->tmo_tick = (1000/HZ) * ticks + ((1000/HZ)-1) - rem;
}

void block_thread_(struct thread_entry *current, int ticks)
{
    if (ticks < 0)
        current->state = STATE_BLOCKED;
@@ -414,24 +248,19 @@ void block_thread(struct thread_entry *current, int ticks)
        current->tmo_tick = (1000/HZ)*ticks;
    }

    wait_queue_register(current);
}
unsigned int wakeup_thread_(struct thread_entry *thread)
{
    switch (thread->state)
    {
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
        wait_queue_remove(thread);
        thread->state = STATE_RUNNING;
        SDL_SemPost(thread->context.s);
        return THREAD_OK;
    }

    return THREAD_NONE;
@@ -439,7 +268,7 @@ unsigned int wakeup_thread_(struct thread_entry **list)
void thread_thaw(unsigned int thread_id)
{
    struct thread_entry *thread = __thread_id_entry(thread_id);

    if (thread->id == thread_id && thread->state == STATE_FROZEN)
    {
@@ -450,15 +279,14 @@ void thread_thaw(unsigned int thread_id)
int runthread(void *data)
{
    /* Cannot access thread variables before locking the mutex as the
       data structures may not be filled-in yet. */
    SDL_LockMutex(m);

    struct thread_entry *current = (struct thread_entry *)data;
    __running_self_entry() = current;

    jmp_buf *current_jmpbuf = &thread_jmpbufs[THREAD_ID_SLOT(current->id)];

    /* Setup jump for exit */
    if (setjmp(*current_jmpbuf) == 0)
@@ -469,14 +297,15 @@ int runthread(void *data)
            SDL_UnlockMutex(m);
            SDL_SemWait(current->context.s);
            SDL_LockMutex(m);
            __running_self_entry() = current;
        }

        if (threads_status == THREADS_RUN)
        {
            current->context.start();
            THREAD_SDL_DEBUGF("Thread Done: %d (%s)\n",
                              THREAD_ID_SLOT(current->id),
                              THREAD_SDL_GET_NAME(current));
            /* Thread routine returned - suicide */
        }
@@ -495,27 +324,23 @@ unsigned int create_thread(void (*function)(void),
                           void* stack, size_t stack_size,
                           unsigned flags, const char *name)
{
    THREAD_SDL_DEBUGF("Creating thread: (%s)\n", name ? name : "");

    struct thread_entry *thread = thread_alloc();
    if (thread == NULL)
    {
        DEBUGF("Failed to find thread slot\n");
        return 0;
    }

    SDL_sem *s = SDL_CreateSemaphore(0);
    if (s == NULL)
    {
        DEBUGF("Failed to create semaphore\n");
        return 0;
    }

    SDL_Thread *t = SDL_CreateThread(runthread, thread);
    if (t == NULL)
    {
        DEBUGF("Failed to create SDL thread\n");
@@ -523,12 +348,6 @@ unsigned int create_thread(void (*function)(void),
        return 0;
    }

    thread->name = name;
    thread->state = (flags & CREATE_THREAD_FROZEN) ?
        STATE_FROZEN : STATE_RUNNING;
@@ -536,27 +355,22 @@ unsigned int create_thread(void (*function)(void),
    thread->context.t = t;
    thread->context.s = s;

    THREAD_SDL_DEBUGF("New Thread: %lu (%s)\n",
                      (unsigned long)thread->id,
                      THREAD_SDL_GET_NAME(thread));

    return thread->id;
    (void)stack; (void)stack_size;
}
void thread_exit(void)
{
    struct thread_entry *current = __running_self_entry();

    int oldlevel = disable_irq_save();

    SDL_Thread *t = current->context.t;
    SDL_sem *s = current->context.s;

    /* Wait the last thread here and keep this one or SDL will leak it since
     * it doesn't free its own library allocations unless a wait is performed.
@@ -566,59 +380,27 @@ static void remove_thread(unsigned int thread_id)
     *
     * However, see more below about SDL_KillThread.
     */
    SDL_WaitThread(current->context.told, NULL);

    current->context.t = NULL;
    current->context.s = NULL;
    current->context.told = t;

    unsigned int id = current->id;
    new_thread_id(current);
    current->state = STATE_KILLED;
    wait_queue_wake(&current->queue);

    SDL_DestroySemaphore(s);

    /* Do a graceful exit - perform the longjmp back into the thread
       function to return */

    restore_irq(oldlevel);

    thread_free(current);

    longjmp(thread_jmpbufs[THREAD_ID_SLOT(id)], 1);

    /* This should never and must never be reached - if it is, the
     * state is corrupted */
    THREAD_PANICF("thread_exit->K:*R (ID: %d)", id);
@@ -627,44 +409,73 @@ void thread_exit(void)
void thread_wait(unsigned int thread_id)
{
    struct thread_entry *current = __running_self_entry();
    struct thread_entry *thread = __thread_id_entry(thread_id);

    if (thread->id == thread_id && thread->state != STATE_KILLED)
    {
        block_thread(current, TIMEOUT_BLOCK, &thread->queue);
        switch_thread();
    }
}
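For context, the usual caller-side pairing of create_thread() and thread_wait() (a sketch; 'worker' and its stack are hypothetical):

    static uintptr_t worker_stack[256];
    static void worker(void) { /* ... do the work, then return ... */ }

    unsigned int id = create_thread(worker, worker_stack, sizeof (worker_stack),
                                    0, "worker");
    if (id != 0)
        thread_wait(id); /* blocks on the worker's queue until it exits */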
/* Initialize SDL threading */
void init_threads(void)
{
    m = SDL_CreateMutex();

    if (SDL_LockMutex(m) == -1)
    {
        fprintf(stderr, "Couldn't lock mutex\n");
        return;
    }

    thread_alloc_init();

    struct thread_entry *thread = thread_alloc();
    if (thread == NULL)
    {
        fprintf(stderr, "Main thread alloc failed\n");
        return;
    }

    /* Slot 0 is reserved for the main thread - initialize it here and
       then create the SDL thread - it is possible to have a quick, early
       shutdown try to access the structure. */
    thread->name = __main_thread_name;
    thread->state = STATE_RUNNING;
    thread->context.s = SDL_CreateSemaphore(0);
    thread->context.t = NULL; /* NULL for the implicit main thread */
    __running_self_entry() = thread;

    if (thread->context.s == NULL)
    {
        fprintf(stderr, "Failed to create main semaphore\n");
        return;
    }

    /* Tell all threads jump back to their start routines, unlock and exit
       gracefully - we'll check each one in turn for its status. Threads
       _could_ terminate via thread_exit or multiple threads could exit
       on each unlock but that is safe. */

    /* Setup jump for exit */
    if (setjmp(thread_jmpbufs[THREAD_ID_SLOT(thread->id)]) == 0)
    {
        THREAD_SDL_DEBUGF("Main Thread: %lu (%s)\n",
                          (unsigned long)thread->id,
                          THREAD_SDL_GET_NAME(thread));
        return;
    }

    SDL_UnlockMutex(m);

    /* Set to 'COMMAND_DONE' when other rockbox threads have exited. */
    while (threads_status < THREADS_EXIT_COMMAND_DONE)
        SDL_Delay(10);

    SDL_DestroyMutex(m);

    /* We're the main thread - perform exit - doesn't return. */
    sim_do_exit();
}