From 533d396761b630e372166f6f0522ba1c2d128d70 Mon Sep 17 00:00:00 2001
From: Michael Sevakis
Date: Thu, 24 Apr 2014 04:09:18 -0400
Subject: [PATCH] Add multi-reader, single-writer locks to kernel.

Any number of readers may be inside the critical section at a time,
while writers are mutually exclusive to all other threads. These locks
are a better choice than a mutex when data is read often but modified
rarely, since multiple threads may safely access it concurrently for
reading.

Priority inheritance is fully implemented, along with the other kernel
changes required to support it on multi-owner objects. This also cleans
up the priority code in the kernel and updates the associated
structures in existing objects to the cleaner form.

The mrsw_lock.[ch] files are not yet added since nothing depends on
them yet, but the supporting improvements are useful on their own. They
include a typed bit-array API (bitarray.h), which is fairly basic for
now.

Change-Id: Idbe43dcd9170358e06d48d00f1c69728ff45b0e3
Reviewed-on: http://gerrit.rockbox.org/801
Reviewed-by: Michael Sevakis
Tested: Michael Sevakis
---
Note: usage sketches for the new bitarray and wakeup APIs follow the
patch.

 firmware/SOURCES                        |    3 +
 firmware/include/bitarray.h             |  231 ++++
 firmware/kernel/include/kernel.h        |    1 +
 firmware/kernel/include/mutex.h         |   20 +-
 firmware/kernel/include/queue.h         |    2 +
 firmware/kernel/include/thread.h        |   81 +-
 firmware/kernel/mutex.c                 |   65 +-
 firmware/kernel/queue.c                 |   21 +-
 firmware/kernel/semaphore.c             |    8 +-
 firmware/kernel/thread.c                | 1013 ++++++++++++-----------
 firmware/target/hosted/sdl/thread-sdl.c |   36 +-
 11 files changed, 880 insertions(+), 601 deletions(-)
 create mode 100644 firmware/include/bitarray.h

diff --git a/firmware/SOURCES b/firmware/SOURCES
index 5e37892efe..584254a666 100644
--- a/firmware/SOURCES
+++ b/firmware/SOURCES
@@ -1825,6 +1825,9 @@ drivers/touchpad.c
 #ifdef HAVE_CORELOCK_OBJECT
 kernel/corelock.c
 #endif
+#if 0 /* pending dependent code */
+kernel/mrsw_lock.c
+#endif
 kernel/mutex.c
 kernel/queue.c
 #ifdef HAVE_SEMAPHORE_OBJECTS
diff --git a/firmware/include/bitarray.h b/firmware/include/bitarray.h
new file mode 100644
index 0000000000..4777ccb6a4
--- /dev/null
+++ b/firmware/include/bitarray.h
@@ -0,0 +1,231 @@
+/***************************************************************************
+ *             __________               __   ___.
+ *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
+ *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
+ *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
+ *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
+ *                     \/            \/     \/    \/            \/
+ * $Id$
+ *
+ * Copyright (C) 2014 by Michael Sevakis
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ****************************************************************************/
+#ifndef BITARRAY_H
+#define BITARRAY_H
+
+/* Type-checked bit array definitions */
+
+/* All this stuff gets optimized into very simple object code */
+
+#define BITARRAY_WORD_BITS \
+    (sizeof (unsigned int) * 8)
+#define BITARRAY_NWORDS(bits) \
+    (((bits) + BITARRAY_WORD_BITS - 1) / BITARRAY_WORD_BITS)
+#define BITARRAY_BITWORD(bitnum) \
+    ((bitnum) / BITARRAY_WORD_BITS)
+#define BITARRAY_WORDBIT(bitnum) \
+    ((bitnum) % BITARRAY_WORD_BITS)
+#define BITARRAY_NBIT(word, bit) \
+    ((word)*BITARRAY_WORD_BITS + (bit))
+#define BITARRAY_BITS(bits) \
+    (BITARRAY_NWORDS(bits)*BITARRAY_WORD_BITS)
+#define BITARRAY_BITN(bitnum) \
+    (1u << BITARRAY_WORDBIT(bitnum))
+
+
+/** Iterators **/
+#include "config.h"
+#include <stdint.h>
+
+#if (defined(CPU_ARM) && ARM_ARCH >= 5) || UINT32_MAX < UINT_MAX
+#define __BITARRAY_CTZ(wval) __builtin_ctz(wval)
+#else
+#include "system.h"
+#define __BITARRAY_CTZ(wval) find_first_set_bit(wval)
+#endif
+#define __BITARRAY_POPCNT(wval) __builtin_popcount(wval)
+
+#ifndef BIT_N
+#define BIT_N(n) (1u << (n))
+#endif
+
+/* Enumerate each word index */
+#define FOR_EACH_BITARRAY_WORD_INDEX(nwords, index) \
+    for (unsigned int index = 0, _nwords = (nwords); \
+         index < _nwords; index++)
+
+/* Enumerate each word value */
+#define FOR_EACH_BITARRAY_WORD(a, wval) \
+    FOR_EACH_BITARRAY_WORD_INDEX(ARRAYLEN((a)->words), _w) \
+    for (unsigned int wval = (a)->words[_w], _ = 1; _; _--)
+
+/* Enumerate the bit number of each set bit of a word in sequence */
+#define FOR_EACH_BITARRAY_SET_WORD_BIT(wval, bit) \
+    for (unsigned int _wval = (wval), bit; \
+         _wval ? (((bit) = __BITARRAY_CTZ(_wval)), 1) : 0; \
+         _wval &= ~BIT_N(bit))
+
+/* Enumerate the bit number of each set bit in the bit array in sequence */
+#define FOR_EACH_BITARRAY_SET_BIT_ARR(nwords, words, nbit) \
+    FOR_EACH_BITARRAY_WORD_INDEX(nwords, _w) \
+    FOR_EACH_BITARRAY_SET_WORD_BIT(words[_w], _bit) \
+    for (unsigned int nbit = BITARRAY_NBIT(_w, _bit), _ = 1; _; _--)
+
+/* As above but takes an array type for an argument */
+#define FOR_EACH_BITARRAY_SET_BIT(a, nbit) \
+    FOR_EACH_BITARRAY_SET_BIT_ARR(ARRAYLEN((a)->words), (a)->words, nbit)
+
+
+/** Base functions (called by typed functions) **/
+
+/* Return the word associated with the bit */
+static inline unsigned int
+__bitarray_get_word(unsigned int words[], unsigned int bitnum)
+{
+    return words[BITARRAY_BITWORD(bitnum)];
+}
+
+/* Set the word associated with the bit */
+static inline void
+__bitarray_set_word(unsigned int words[], unsigned int bitnum,
+                    unsigned int wordval)
+{
+    words[BITARRAY_BITWORD(bitnum)] = wordval;
+}
+
+/* Set the bit at index 'bitnum' to '1' */
+static inline void
+__bitarray_set_bit(unsigned int words[], unsigned int bitnum)
+{
+    unsigned int word = BITARRAY_BITWORD(bitnum);
+    unsigned int bit  = BITARRAY_BITN(bitnum);
+    words[word] |= bit;
+}
+
+/* Set the bit at index 'bitnum' to '0' */
+static inline void
+__bitarray_clear_bit(unsigned int words[], unsigned int bitnum)
+{
+    unsigned int word = BITARRAY_BITWORD(bitnum);
+    unsigned int bit  = BITARRAY_BITN(bitnum);
+    words[word] &= ~bit;
+}
+
+/* Return the value of the specified bit ('0' or '1') */
+static inline unsigned int
+__bitarray_test_bit(const unsigned int words[], unsigned int bitnum)
+{
+    unsigned int word = BITARRAY_BITWORD(bitnum);
+    unsigned int nbit = BITARRAY_WORDBIT(bitnum);
+    return (words[word] >> nbit) & 1u;
+}
+
+/* Check if all bits in the bit array are '0' */
+static inline bool
+__bitarray_is_clear(const unsigned int words[], unsigned int nbits)
+{
+    FOR_EACH_BITARRAY_WORD_INDEX(BITARRAY_NWORDS(nbits), word)
+    {
+        if (words[word] != 0)
+            return false;
+    }
+
+    return true;
+}
+
+/* Set every bit in the array to '0' */
+static inline void
+__bitarray_clear(unsigned int words[], unsigned int nbits)
+{
+    FOR_EACH_BITARRAY_WORD_INDEX(BITARRAY_NWORDS(nbits), word)
+        words[word] = 0;
+}
+
+/* Set every bit in the array to '1' */
+static inline void
+__bitarray_set(unsigned int words[], unsigned int nbits)
+{
+    FOR_EACH_BITARRAY_WORD_INDEX(BITARRAY_NWORDS(nbits), word)
+        words[word] = ~0u;
+}
+
+/* Find the lowest-indexed '1' bit in the bit array, returning the size of
+   the array if none are set */
+static inline unsigned int
+__bitarray_ffs(const unsigned int words[], unsigned int nbits)
+{
+    FOR_EACH_BITARRAY_SET_BIT_ARR(BITARRAY_NWORDS(nbits), words, nbit)
+        return nbit;
+
+    return BITARRAY_BITS(nbits);
+}
+
+/* Return the number of bits currently set to '1' in the bit array */
+static inline unsigned int
+__bitarray_popcount(const unsigned int words[], unsigned int nbits)
+{
+    unsigned int count = 0;
+
+    FOR_EACH_BITARRAY_WORD_INDEX(BITARRAY_NWORDS(nbits), word)
+        count += __BITARRAY_POPCNT(words[word]);
+
+    return count;
+}
+
+/**
+ * Giant macro to define all the typed functions
+ *   typename: The name of the type (e.g. myarr_t myarr;)
+ *   fnprefix: The prefix all functions get (e.g. myarr_set_bit)
+ *   nbits   : The minimum number of bits the array is meant to hold
+ *             (the implementation rounds this up to the word size
+ *              and all words may be fully utilized)
+ *
+ * Uses 'typedef' to allow freely changing from, e.g., struct to union
+ * without changing source code
+ */
+#define BITARRAY_TYPE_DECLARE(typename, fnprefix, nbits) \
+typedef struct \
+{ \
+    unsigned int words[BITARRAY_NWORDS(nbits)]; \
+} typename; \
+static inline unsigned int \
+fnprefix##_get_word(typename *array, unsigned int bitnum) \
+    { return __bitarray_get_word(array->words, bitnum); } \
+static inline void \
+fnprefix##_set_word(typename *array, unsigned int bitnum, \
+                    unsigned int wordval) \
+    { __bitarray_set_word(array->words, bitnum, wordval); } \
+static inline void \
+fnprefix##_set_bit(typename *array, unsigned int bitnum) \
+    { __bitarray_set_bit(array->words, bitnum); } \
+static inline void \
+fnprefix##_clear_bit(typename *array, unsigned int bitnum) \
+    { __bitarray_clear_bit(array->words, bitnum); } \
+static inline unsigned int \
+fnprefix##_test_bit(const typename *array, unsigned int bitnum) \
+    { return __bitarray_test_bit(array->words, bitnum); } \
+static inline bool \
+fnprefix##_is_clear(const typename *array) \
+    { return __bitarray_is_clear(array->words, nbits); } \
+static inline void \
+fnprefix##_clear(typename *array) \
+    { __bitarray_clear(array->words, nbits); } \
+static inline void \
+fnprefix##_set(typename *array) \
+    { __bitarray_set(array->words, nbits); } \
+static inline unsigned int \
+fnprefix##_ffs(const typename *array) \
+    { return __bitarray_ffs(array->words, nbits); } \
+static inline unsigned int \
+fnprefix##_popcount(const typename *array) \
+    { return __bitarray_popcount(array->words, nbits); }
+
+#endif /* BITARRAY_H */
diff --git a/firmware/kernel/include/kernel.h b/firmware/kernel/include/kernel.h
index fafff25ce4..d2ffffcda9 100644
--- a/firmware/kernel/include/kernel.h
+++ b/firmware/kernel/include/kernel.h
@@ -26,6 +26,7 @@
 #include "system.h"
 #include "queue.h"
 #include "mutex.h"
+#include "mrsw_lock.h"
 #include "tick.h"
 
 #ifdef 
INCLUDE_TIMEOUT_API diff --git a/firmware/kernel/include/mutex.h b/firmware/kernel/include/mutex.h index bcf5701bd9..02b85f331f 100644 --- a/firmware/kernel/include/mutex.h +++ b/firmware/kernel/include/mutex.h @@ -28,20 +28,14 @@ struct mutex { - struct thread_entry *queue; /* waiter list */ - int recursion; /* lock owner recursion count */ + struct thread_entry *queue; /* waiter list */ + int recursion; /* lock owner recursion count */ + struct blocker blocker; /* priority inheritance info + for waiters and owner*/ + IF_COP( struct corelock cl; ) /* multiprocessor sync */ #ifdef HAVE_PRIORITY_SCHEDULING - struct blocker blocker; /* priority inheritance info - for waiters */ - bool no_preempt; /* don't allow higher-priority thread - to be scheduled even if woken */ -#else - struct thread_entry *thread; /* Indicates owner thread - an owner - implies a locked state - same goes - for priority scheduling - (in blocker struct for that) */ + bool no_preempt; #endif - IF_COP( struct corelock cl; ) /* multiprocessor sync */ }; extern void mutex_init(struct mutex *m); @@ -56,7 +50,7 @@ static inline void mutex_set_preempt(struct mutex *m, bool preempt) #else /* Deprecated but needed for now - firmware/drivers/ata_mmc.c */ static inline bool mutex_test(const struct mutex *m) - { return m->thread != NULL; } + { return m->blocker.thread != NULL; } #endif /* HAVE_PRIORITY_SCHEDULING */ #endif /* MUTEX_H */ diff --git a/firmware/kernel/include/queue.h b/firmware/kernel/include/queue.h index 1b404f8297..3f24598d5b 100644 --- a/firmware/kernel/include/queue.h +++ b/firmware/kernel/include/queue.h @@ -143,6 +143,8 @@ extern bool queue_peek(struct event_queue *q, struct queue_event *ev); #define QPEEK_FILTER_COUNT_MASK (0xffu) /* 0x00=1 filter, 0xff=256 filters */ #define QPEEK_FILTER_HEAD_ONLY (1u << 8) /* Ignored if no filters */ #define QPEEK_REMOVE_EVENTS (1u << 9) /* Remove or discard events */ +#define QPEEK_FILTER1(a) QPEEK_FILTER2((a), (a)) +#define QPEEK_FILTER2(a, b) (&(const long [2]){ (a), (b) }) extern bool queue_peek_ex(struct event_queue *q, struct queue_event *ev, unsigned int flags, diff --git a/firmware/kernel/include/thread.h b/firmware/kernel/include/thread.h index 8c13b462e6..f181f867cb 100644 --- a/firmware/kernel/include/thread.h +++ b/firmware/kernel/include/thread.h @@ -28,6 +28,7 @@ #include #include "gcc_extensions.h" #include "corelock.h" +#include "bitarray.h" /* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works * by giving high priority threads more CPU time than lower priority threads @@ -80,6 +81,10 @@ #endif #define MAXTHREADS (BASETHREADS+TARGET_EXTRA_THREADS) + +BITARRAY_TYPE_DECLARE(threadbit_t, threadbit, MAXTHREADS) +BITARRAY_TYPE_DECLARE(priobit_t, priobit, NUM_PRIORITIES) + /* * We need more stack when we run under a host * maybe more expensive C lib functions? @@ -134,32 +139,39 @@ struct thread_list struct thread_entry *next; /* Next thread in a list */ }; -#ifdef HAVE_PRIORITY_SCHEDULING +/* Basic structure describing the owner of an object */ struct blocker { struct thread_entry * volatile thread; /* thread blocking other threads (aka. 
object owner) */ - int priority; /* highest priority waiter */ - struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread); +#ifdef HAVE_PRIORITY_SCHEDULING + int priority; /* highest priority waiter */ +#endif }; -/* Choices of wakeup protocol */ +/* If a thread has a blocker but the blocker's registered thread is NULL, + then it references this and the struct blocker pointer may be + reinterpreted as such. */ +struct blocker_splay +{ + struct blocker blocker; /* blocker info (first!) */ +#ifdef HAVE_PRIORITY_SCHEDULING + threadbit_t mask; /* mask of nonzero tcounts */ +#if NUM_CORES > 1 + struct corelock cl; /* mutual exclusion */ +#endif +#endif /* HAVE_PRIORITY_SCHEDULING */ +}; -/* For transfer of object ownership by one thread to another thread by - * the owning thread itself (mutexes) */ -struct thread_entry * - wakeup_priority_protocol_transfer(struct thread_entry *thread); - -/* For release by owner where ownership doesn't change - other threads, - * interrupts, timeouts, etc. (mutex timeout, queues) */ -struct thread_entry * - wakeup_priority_protocol_release(struct thread_entry *thread); +#ifdef HAVE_PRIORITY_SCHEDULING +/* Quick-disinherit of priority elevation. Must be a running thread. */ +void priority_disinherit(struct thread_entry *thread, struct blocker *bl); struct priority_distribution { - uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */ - uint32_t mask; /* Bitmask of hist entries that are not zero */ + uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */ + priobit_t mask; /* Bitmask of hist entries that are not zero */ }; #endif /* HAVE_PRIORITY_SCHEDULING */ @@ -210,6 +222,7 @@ struct thread_entry volatile intptr_t retval; /* Return value from a blocked operation/ misc. 
use */ #endif + uint32_t id; /* Current slot id */ int __errno; /* Thread error number (errno tls) */ #ifdef HAVE_PRIORITY_SCHEDULING /* Priority summary of owned objects that support inheritance */ @@ -226,7 +239,6 @@ struct thread_entry unsigned char priority; /* Scheduled priority (higher of base or all threads blocked by this one) */ #endif - uint16_t id; /* Current slot id */ unsigned short stack_size; /* Size of stack in bytes */ unsigned char state; /* Thread slot state (STATE_*) */ #ifdef HAVE_SCHEDULER_BOOSTCTRL @@ -238,11 +250,12 @@ struct thread_entry }; /*** Macros for internal use ***/ -/* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| */ -#define THREAD_ID_VERSION_SHIFT 8 -#define THREAD_ID_VERSION_MASK 0xff00 -#define THREAD_ID_SLOT_MASK 0x00ff +/* Thread ID, 32 bits = |VVVVVVVV|VVVVVVVV|VVVVVVVV|SSSSSSSS| */ +#define THREAD_ID_VERSION_SHIFT 8 +#define THREAD_ID_VERSION_MASK 0xffffff00 +#define THREAD_ID_SLOT_MASK 0x000000ff #define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n)) +#define THREAD_ID_SLOT(id) ((id) & THREAD_ID_SLOT_MASK) #ifdef HAVE_CORELOCK_OBJECT /* Operations to be performed just before stopping a thread and starting @@ -337,11 +350,8 @@ void switch_thread(void); /* Blocks a thread for at least the specified number of ticks (0 = wait until * next tick) */ void sleep_thread(int ticks); -/* Indefinitely blocks the current thread on a thread queue */ -void block_thread(struct thread_entry *current); -/* Blocks the current thread on a thread queue until explicitely woken or - * the timeout is reached */ -void block_thread_w_tmo(struct thread_entry *current, int timeout); +/* Blocks the current thread on a thread queue (< 0 == infinite) */ +void block_thread(struct thread_entry *current, int timeout); /* Return bit flags for thread wakeup */ #define THREAD_NONE 0x0 /* No thread woken up (exclusive) */ @@ -350,15 +360,32 @@ void block_thread_w_tmo(struct thread_entry *current, int timeout); higher priority than current were woken) */ /* A convenience function for waking an entire queue of threads. */ -unsigned int thread_queue_wake(struct thread_entry **list); +unsigned int thread_queue_wake(struct thread_entry **list, + volatile int *count); /* Wakeup a thread at the head of a list */ -unsigned int wakeup_thread(struct thread_entry **list); +enum wakeup_thread_protocol +{ + WAKEUP_DEFAULT, + WAKEUP_TRANSFER, + WAKEUP_RELEASE, + WAKEUP_TRANSFER_MULTI, +}; + +unsigned int wakeup_thread_(struct thread_entry **list + IF_PRIO(, enum wakeup_thread_protocol proto)); #ifdef HAVE_PRIORITY_SCHEDULING +#define wakeup_thread(list, proto) \ + wakeup_thread_((list), (proto)) + int thread_set_priority(unsigned int thread_id, int priority); int thread_get_priority(unsigned int thread_id); +#else /* !HAVE_PRIORITY_SCHEDULING */ +#define wakeup_thread(list, proto...) 
\
+    wakeup_thread_((list));
 #endif /* HAVE_PRIORITY_SCHEDULING */
+
 #ifdef HAVE_IO_PRIORITY
 void thread_set_io_priority(unsigned int thread_id, int io_priority);
 int thread_get_io_priority(unsigned int thread_id);
diff --git a/firmware/kernel/mutex.c b/firmware/kernel/mutex.c
index f1e4b3c722..2e90b0f4b1 100644
--- a/firmware/kernel/mutex.c
+++ b/firmware/kernel/mutex.c
@@ -27,31 +27,10 @@
 #include <stdbool.h>
 #include "config.h"
 #include "system.h"
-#include "mutex.h"
-#include "corelock.h"
+#include "kernel.h"
 #include "thread-internal.h"
 #include "kernel-internal.h"
 
-static inline void __attribute__((always_inline))
-mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
-{
-#ifdef HAVE_PRIORITY_SCHEDULING
-    mtx->blocker.thread = td;
-#else
-    mtx->thread = td;
-#endif
-}
-
-static inline struct thread_entry * __attribute__((always_inline))
-mutex_get_thread(volatile struct mutex *mtx)
-{
-#ifdef HAVE_PRIORITY_SCHEDULING
-    return mtx->blocker.thread;
-#else
-    return mtx->thread;
-#endif
-}
-
 /* Initialize a mutex object - call before any use and do not call again once
  * the object is available to other threads */
 void mutex_init(struct mutex *m)
@@ -59,10 +38,9 @@ void mutex_init(struct mutex *m)
     corelock_init(&m->cl);
     m->queue = NULL;
     m->recursion = 0;
-    mutex_set_thread(m, NULL);
+    m->blocker.thread = NULL;
 #ifdef HAVE_PRIORITY_SCHEDULING
     m->blocker.priority = PRIORITY_IDLE;
-    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
     m->no_preempt = false;
 #endif
 }
@@ -72,7 +50,7 @@ void mutex_lock(struct mutex *m)
 {
     struct thread_entry *current = thread_self_entry();
 
-    if(current == mutex_get_thread(m))
+    if(current == m->blocker.thread)
     {
         /* current thread already owns this mutex */
         m->recursion++;
@@ -83,10 +61,10 @@ void mutex_lock(struct mutex *m)
     corelock_lock(&m->cl);
 
     /* must read thread again inside cs (a multiprocessor concern really) */
-    if(LIKELY(mutex_get_thread(m) == NULL))
+    if(LIKELY(m->blocker.thread == NULL))
     {
         /* lock is open */
-        mutex_set_thread(m, current);
+        m->blocker.thread = current;
         corelock_unlock(&m->cl);
         return;
     }
@@ -97,7 +75,7 @@ void mutex_lock(struct mutex *m)
     current->bqp = &m->queue;
 
     disable_irq();
-    block_thread(current);
+    block_thread(current, TIMEOUT_BLOCK);
 
     corelock_unlock(&m->cl);
 
@@ -109,9 +87,9 @@ void mutex_lock(struct mutex *m)
 void mutex_unlock(struct mutex *m)
 {
     /* unlocker not being the owner is an unlocking violation */
-    KERNEL_ASSERT(mutex_get_thread(m) == thread_self_entry(),
+    KERNEL_ASSERT(m->blocker.thread == thread_self_entry(),
                   "mutex_unlock->wrong thread (%s != %s)\n",
-                  mutex_get_thread(m)->name,
+                  m->blocker.thread->name,
                   thread_self_entry()->name);
 
     if(m->recursion > 0)
@@ -128,25 +106,24 @@ void mutex_unlock(struct mutex *m)
     if(LIKELY(m->queue == NULL))
     {
         /* no threads waiting - open the lock */
-        mutex_set_thread(m, NULL);
+        m->blocker.thread = NULL;
         corelock_unlock(&m->cl);
         return;
     }
-    else
-    {
-        const int oldlevel = disable_irq_save();
-        /* Tranfer of owning thread is handled in the wakeup protocol
-         * if priorities are enabled otherwise just set it from the
-         * queue head. */
-        IFN_PRIO( mutex_set_thread(m, m->queue); )
-        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
-        restore_irq(oldlevel);
-        corelock_unlock(&m->cl);
+    const int oldlevel = disable_irq_save();
+    /* Transfer of the owning thread is handled in the wakeup protocol
+     * if priorities are enabled; otherwise just set it from the
+     * queue head. 
*/ + IFN_PRIO( m->blocker.thread = m->queue; ) + unsigned int result = wakeup_thread(&m->queue, WAKEUP_TRANSFER); + restore_irq(oldlevel); + + corelock_unlock(&m->cl); #ifdef HAVE_PRIORITY_SCHEDULING - if((result & THREAD_SWITCH) && !m->no_preempt) - switch_thread(); + if((result & THREAD_SWITCH) && !m->no_preempt) + switch_thread(); #endif - } + (void)result; } diff --git a/firmware/kernel/queue.c b/firmware/kernel/queue.c index 379e3f62c8..22a8da9bd3 100644 --- a/firmware/kernel/queue.c +++ b/firmware/kernel/queue.c @@ -84,7 +84,7 @@ static void queue_release_sender(struct thread_entry * volatile * sender, *thread->bqp = thread; /* Move blocking queue head to thread since wakeup_thread wakes the first thread in the list. */ - wakeup_thread(thread->bqp); + wakeup_thread(thread->bqp, WAKEUP_RELEASE); } /* Releases any waiting threads that are queued with queue_send - @@ -108,16 +108,16 @@ static void queue_release_all_senders(struct event_queue *q) } } +#ifdef HAVE_WAKEUP_EXT_CB /* Callback to do extra forced removal steps from sender list in addition * to the normal blocking queue removal and priority dis-inherit */ static void queue_remove_sender_thread_cb(struct thread_entry *thread) { *((struct thread_entry **)thread->retval) = NULL; -#ifdef HAVE_WAKEUP_EXT_CB thread->wakeup_ext_cb = NULL; -#endif thread->retval = 0; } +#endif /* HAVE_WAKEUP_EXT_CB */ /* Enables queue_send on the specified queue - caller allocates the extra * data structure. Only queues which are taken to be owned by a thread should @@ -139,7 +139,6 @@ void queue_enable_queue_send(struct event_queue *q, { memset(send, 0, sizeof(*send)); #ifdef HAVE_PRIORITY_SCHEDULING - send->blocker.wakeup_protocol = wakeup_priority_protocol_release; send->blocker.priority = PRIORITY_IDLE; if(owner_id != 0) { @@ -268,7 +267,7 @@ void queue_delete(struct event_queue *q) corelock_unlock(&all_queues.cl); /* Release thread(s) waiting on queue head */ - thread_queue_wake(&q->queue); + thread_queue_wake(&q->queue, NULL); #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME if(q->send) @@ -325,7 +324,7 @@ void queue_wait(struct event_queue *q, struct queue_event *ev) IF_COP( current->obj_cl = &q->cl; ) current->bqp = &q->queue; - block_thread(current); + block_thread(current, TIMEOUT_BLOCK); corelock_unlock(&q->cl); switch_thread(); @@ -386,7 +385,7 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks) IF_COP( current->obj_cl = &q->cl; ) current->bqp = &q->queue; - block_thread_w_tmo(current, ticks); + block_thread(current, ticks); corelock_unlock(&q->cl); switch_thread(); @@ -443,7 +442,7 @@ void queue_post(struct event_queue *q, long id, intptr_t data) queue_do_unblock_sender(q->send, wr); /* Wakeup a waiting thread if any */ - wakeup_thread(&q->queue); + wakeup_thread(&q->queue, WAKEUP_DEFAULT); corelock_unlock(&q->cl); restore_irq(oldlevel); @@ -481,7 +480,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data) } /* Wakeup a waiting thread if any */ - wakeup_thread(&q->queue); + wakeup_thread(&q->queue, WAKEUP_DEFAULT); /* Save thread in slot, add to list and wait for reply */ *spp = current; @@ -493,7 +492,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data) current->retval = (intptr_t)spp; current->bqp = &send->list; - block_thread(current); + block_thread(current, TIMEOUT_BLOCK); corelock_unlock(&q->cl); switch_thread(); @@ -502,7 +501,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data) } /* Function as queue_post if sending is not enabled */ - 
wakeup_thread(&q->queue); + wakeup_thread(&q->queue, WAKEUP_DEFAULT); corelock_unlock(&q->cl); restore_irq(oldlevel); diff --git a/firmware/kernel/semaphore.c b/firmware/kernel/semaphore.c index f9ff0ad987..b6ce7fd742 100644 --- a/firmware/kernel/semaphore.c +++ b/firmware/kernel/semaphore.c @@ -82,11 +82,7 @@ int semaphore_wait(struct semaphore *s, int timeout) * explicit in semaphore_release */ current->retval = OBJ_WAIT_TIMEDOUT; - if(timeout > 0) - block_thread_w_tmo(current, timeout); /* ...or timed out... */ - else - block_thread(current); /* -timeout = infinite */ - + block_thread(current, timeout); corelock_unlock(&s->cl); /* ...and turn control over to next thread */ @@ -118,7 +114,7 @@ void semaphore_release(struct semaphore *s) KERNEL_ASSERT(s->count == 0, "semaphore_release->threads queued but count=%d!\n", s->count); s->queue->retval = OBJ_WAIT_SUCCEEDED; /* indicate explicit wake */ - result = wakeup_thread(&s->queue); + result = wakeup_thread(&s->queue, WAKEUP_DEFAULT); } else { diff --git a/firmware/kernel/thread.c b/firmware/kernel/thread.c index 43ff584a68..0a47f97e93 100644 --- a/firmware/kernel/thread.c +++ b/firmware/kernel/thread.c @@ -246,13 +246,13 @@ static void thread_stkov(struct thread_entry *thread) cores[_core].blk_ops.cl_p = &(thread)->slot_cl; }) #else #define LOCK_THREAD(thread) \ - ({ }) + ({ (void)(thread); }) #define TRY_LOCK_THREAD(thread) \ - ({ }) + ({ (void)(thread); }) #define UNLOCK_THREAD(thread) \ - ({ }) + ({ (void)(thread); }) #define UNLOCK_THREAD_AT_TASK_SWITCH(thread) \ - ({ }) + ({ (void)(thread); }) #endif /* RTR list */ @@ -279,6 +279,100 @@ static void thread_stkov(struct thread_entry *thread) #define rtr_move_entry_inl(core, from, to) #endif +static inline void thread_store_context(struct thread_entry *thread) +{ +#if (CONFIG_PLATFORM & PLATFORM_HOSTED) + thread->__errno = errno; +#endif + store_context(&thread->context); +} + +static inline void thread_load_context(struct thread_entry *thread) +{ + load_context(&thread->context); +#if (CONFIG_PLATFORM & PLATFORM_HOSTED) + errno = thread->__errno; +#endif +} + +static inline unsigned int should_switch_tasks(void) +{ + unsigned int result = THREAD_OK; + +#ifdef HAVE_PRIORITY_SCHEDULING + struct thread_entry *current = cores[CURRENT_CORE].running; + if (current && + priobit_ffs(&cores[IF_COP_CORE(current->core)].rtr.mask) + < current->priority) + { + /* There is a thread ready to run of higher priority on the same + * core as the current one; recommend a task switch. 
*/ + result |= THREAD_SWITCH; + } +#endif /* HAVE_PRIORITY_SCHEDULING */ + + return result; +} + +#ifdef HAVE_PRIORITY_SCHEDULING +/*--------------------------------------------------------------------------- + * Locks the thread registered as the owner of the block and makes sure it + * didn't change in the meantime + *--------------------------------------------------------------------------- + */ +#if NUM_CORES == 1 +static inline struct thread_entry * lock_blocker_thread(struct blocker *bl) +{ + return bl->thread; +} +#else /* NUM_CORES > 1 */ +static struct thread_entry * lock_blocker_thread(struct blocker *bl) +{ + /* The blocker thread may change during the process of trying to + capture it */ + while (1) + { + struct thread_entry *t = bl->thread; + + /* TRY, or else deadlocks are possible */ + if (!t) + { + struct blocker_splay *blsplay = (struct blocker_splay *)bl; + if (corelock_try_lock(&blsplay->cl)) + { + if (!bl->thread) + return NULL; /* Still multi */ + + corelock_unlock(&blsplay->cl); + } + } + else + { + if (TRY_LOCK_THREAD(t)) + { + if (bl->thread == t) + return t; + + UNLOCK_THREAD(t); + } + } + } +} +#endif /* NUM_CORES */ + +static inline void unlock_blocker_thread(struct blocker *bl) +{ +#if NUM_CORES > 1 + struct thread_entry *blt = bl->thread; + if (blt) + UNLOCK_THREAD(blt); + else + corelock_unlock(&((struct blocker_splay *)bl)->cl); +#endif /* NUM_CORES > 1*/ + (void)bl; +} +#endif /* HAVE_PRIORITY_SCHEDULING */ + /*--------------------------------------------------------------------------- * Thread list structure - circular: * +------------------------------+ @@ -420,7 +514,6 @@ static void remove_from_list_tmo(struct thread_entry *thread) } } - #ifdef HAVE_PRIORITY_SCHEDULING /*--------------------------------------------------------------------------- * Priority distribution structure (one category for each possible priority): @@ -476,19 +569,9 @@ static void remove_from_list_tmo(struct thread_entry *thread) static inline unsigned int prio_add_entry( struct priority_distribution *pd, int priority) { - unsigned int count; - /* Enough size/instruction count difference for ARM makes it worth it to - * use different code (192 bytes for ARM). Only thing better is ASM. 
*/ -#ifdef CPU_ARM - count = pd->hist[priority]; - if (++count == 1) - pd->mask |= 1 << priority; - pd->hist[priority] = count; -#else /* This one's better for Coldfire */ - if ((count = ++pd->hist[priority]) == 1) - pd->mask |= 1 << priority; -#endif - + unsigned int count = ++pd->hist[priority]; + if (count == 1) + priobit_set_bit(&pd->mask, priority); return count; } @@ -499,18 +582,9 @@ static inline unsigned int prio_add_entry( static inline unsigned int prio_subtract_entry( struct priority_distribution *pd, int priority) { - unsigned int count; - -#ifdef CPU_ARM - count = pd->hist[priority]; - if (--count == 0) - pd->mask &= ~(1 << priority); - pd->hist[priority] = count; -#else - if ((count = --pd->hist[priority]) == 0) - pd->mask &= ~(1 << priority); -#endif - + unsigned int count = --pd->hist[priority]; + if (count == 0) + priobit_clear_bit(&pd->mask, priority); return count; } @@ -521,31 +595,38 @@ static inline unsigned int prio_subtract_entry( static inline void prio_move_entry( struct priority_distribution *pd, int from, int to) { - uint32_t mask = pd->mask; - -#ifdef CPU_ARM - unsigned int count; - - count = pd->hist[from]; - if (--count == 0) - mask &= ~(1 << from); - pd->hist[from] = count; - - count = pd->hist[to]; - if (++count == 1) - mask |= 1 << to; - pd->hist[to] = count; -#else if (--pd->hist[from] == 0) - mask &= ~(1 << from); + priobit_clear_bit(&pd->mask, from); if (++pd->hist[to] == 1) - mask |= 1 << to; -#endif + priobit_set_bit(&pd->mask, to); +} +#endif /* HAVE_PRIORITY_SCHEDULING */ - pd->mask = mask; +/*--------------------------------------------------------------------------- + * Move a thread back to a running state on its core. + *--------------------------------------------------------------------------- + */ +static void core_schedule_wakeup(struct thread_entry *thread) +{ + const unsigned int core = IF_COP_CORE(thread->core); + + RTR_LOCK(core); + + thread->state = STATE_RUNNING; + + add_to_list_l(&cores[core].running, thread); + rtr_add_entry(core, thread->priority); + + RTR_UNLOCK(core); + +#if NUM_CORES > 1 + if (core != CURRENT_CORE) + core_wake(core); +#endif } +#ifdef HAVE_PRIORITY_SCHEDULING /*--------------------------------------------------------------------------- * Change the priority and rtr entry for a running thread *--------------------------------------------------------------------------- @@ -605,191 +686,211 @@ static int find_highest_priority_in_list_l( * those are prevented, right? 
:-)
  *---------------------------------------------------------------------------
  */
-static struct thread_entry *
-    blocker_inherit_priority(struct thread_entry *current)
+static void inherit_priority(
+    struct blocker * const blocker0, struct blocker *bl,
+    struct thread_entry *blt, int newblpr)
 {
-    const int priority = current->priority;
-    struct blocker *bl = current->blocker;
-    struct thread_entry * const tstart = current;
-    struct thread_entry *bl_t = bl->thread;
+    int oldblpr = bl->priority;
 
-    /* Blocker cannot change since the object protection is held */
-    LOCK_THREAD(bl_t);
-
-    for (;;)
+    while (1)
     {
-        struct thread_entry *next;
-        int bl_pr = bl->priority;
-
-        if (priority >= bl_pr)
-            break; /* Object priority already high enough */
-
-        bl->priority = priority;
-
-        /* Add this one */
-        prio_add_entry(&bl_t->pdist, priority);
-
-        if (bl_pr < PRIORITY_IDLE)
+        if (blt == NULL)
         {
-            /* Not first waiter - subtract old one */
-            prio_subtract_entry(&bl_t->pdist, bl_pr);
+            /* Multiple owners */
+            struct blocker_splay *blsplay = (struct blocker_splay *)bl;
+
+            /* Recurse down all the branches of this; it's the only way.
+               We might meet the same queue several times if more than one
+               of these threads is waiting on the same queue. That isn't a
+               problem for us since we early-terminate, just notable. */
+            FOR_EACH_BITARRAY_SET_BIT(&blsplay->mask, slotnum)
+            {
+                bl->priority = oldblpr; /* To see the change each time */
+                blt = &threads[slotnum];
+                LOCK_THREAD(blt);
+                inherit_priority(blocker0, bl, blt, newblpr);
+            }
+
+            corelock_unlock(&blsplay->cl);
+            return;
         }
 
-        if (priority >= bl_t->priority)
-            break; /* Thread priority high enough */
+        bl->priority = newblpr;
 
-        if (bl_t->state == STATE_RUNNING)
+        /* Update blocker thread inheritance record */
+        if (newblpr < PRIORITY_IDLE)
+            prio_add_entry(&blt->pdist, newblpr);
+
+        if (oldblpr < PRIORITY_IDLE)
+            prio_subtract_entry(&blt->pdist, oldblpr);
+
+        int oldpr = blt->priority;
+        int newpr = priobit_ffs(&blt->pdist.mask);
+        if (newpr == oldpr)
+            break; /* No blocker thread priority change */
+
+        if (blt->state == STATE_RUNNING)
         {
-            /* Blocking thread is a running thread therefore there are no
-             * further blockers. Change the "run queue" on which it
-             * resides. */
-            set_running_thread_priority(bl_t, priority);
-            break;
+            set_running_thread_priority(blt, newpr);
+            break; /* Running: last in chain */
         }
 
-        bl_t->priority = priority;
+        /* Blocker is blocked */
+        blt->priority = newpr;
 
-        /* If blocking thread has a blocker, apply transitive inheritance */
-        bl = bl_t->blocker;
+        bl = blt->blocker;
+        if (LIKELY(bl == NULL))
+            break; /* Block doesn't support PIP */
 
-        if (bl == NULL)
-            break; /* End of chain or object doesn't support inheritance */
+        if (UNLIKELY(bl == blocker0))
+            break; /* Full circle - deadlock! */
 
-        next = bl->thread;
+        /* Blocker becomes current thread and the process repeats */
+        struct thread_entry **bqp = blt->bqp;
+        struct thread_entry *t = blt;
+        blt = lock_blocker_thread(bl);
 
-        if (UNLIKELY(next == tstart))
-            break; /* Full-circle - deadlock! 
*/ + UNLOCK_THREAD(t); - UNLOCK_THREAD(current); + /* Adjust this wait queue */ + oldblpr = bl->priority; + if (newpr <= oldblpr) + newblpr = newpr; + else if (oldpr <= oldblpr) + newblpr = find_highest_priority_in_list_l(*bqp); -#if NUM_CORES > 1 - for (;;) - { - LOCK_THREAD(next); - - /* Blocker could change - retest condition */ - if (LIKELY(bl->thread == next)) - break; - - UNLOCK_THREAD(next); - next = bl->thread; - } -#endif - current = bl_t; - bl_t = next; + if (newblpr == oldblpr) + break; /* Queue priority not changing */ } - UNLOCK_THREAD(bl_t); - - return current; + UNLOCK_THREAD(blt); } /*--------------------------------------------------------------------------- - * Readjust priorities when waking a thread blocked waiting for another - * in essence "releasing" the thread's effect on the object owner. Can be - * performed from any context. + * Quick-disinherit of priority elevation. 'thread' must be a running thread. *--------------------------------------------------------------------------- */ -struct thread_entry * - wakeup_priority_protocol_release(struct thread_entry *thread) +static void priority_disinherit_internal(struct thread_entry *thread, + int blpr) { - const int priority = thread->priority; - struct blocker *bl = thread->blocker; - struct thread_entry * const tstart = thread; - struct thread_entry *bl_t = bl->thread; - - /* Blocker cannot change since object will be locked */ - LOCK_THREAD(bl_t); - - thread->blocker = NULL; /* Thread not blocked */ - - for (;;) + if (blpr < PRIORITY_IDLE && + prio_subtract_entry(&thread->pdist, blpr) == 0 && + blpr <= thread->priority) { - struct thread_entry *next; - int bl_pr = bl->priority; + int priority = priobit_ffs(&thread->pdist.mask); + if (priority != thread->priority) + set_running_thread_priority(thread, priority); + } +} - if (priority > bl_pr) - break; /* Object priority higher */ +void priority_disinherit(struct thread_entry *thread, struct blocker *bl) +{ + LOCK_THREAD(thread); + priority_disinherit_internal(thread, bl->priority); + UNLOCK_THREAD(thread); +} - next = *thread->bqp; +/*--------------------------------------------------------------------------- + * Transfer ownership from a single owner to a multi-owner splay from a wait + * queue + *--------------------------------------------------------------------------- + */ +static void wakeup_thread_queue_multi_transfer(struct thread_entry *thread) +{ + /* All threads will have the same blocker and queue; only we are changing + it now */ + struct thread_entry **bqp = thread->bqp; + struct blocker_splay *blsplay = (struct blocker_splay *)thread->blocker; + struct thread_entry *blt = blsplay->blocker.thread; - if (next == NULL) - { - /* No more threads in queue */ - prio_subtract_entry(&bl_t->pdist, bl_pr); - bl->priority = PRIORITY_IDLE; - } - else - { - /* Check list for highest remaining priority */ - int queue_pr = find_highest_priority_in_list_l(next); + /* The first thread is already locked and is assumed tagged "multi" */ + int count = 1; + struct thread_entry *temp_queue = NULL; - if (queue_pr == bl_pr) - break; /* Object priority not changing */ + /* 'thread' is locked on entry */ + while (1) + { + LOCK_THREAD(blt); - /* Change queue priority */ - prio_move_entry(&bl_t->pdist, bl_pr, queue_pr); - bl->priority = queue_pr; - } + remove_from_list_l(bqp, thread); + thread->blocker = NULL; - if (bl_pr > bl_t->priority) - break; /* thread priority is higher */ - - bl_pr = find_first_set_bit(bl_t->pdist.mask); - - if (bl_pr == bl_t->priority) - break; /* Thread 
priority not changing */ - - if (bl_t->state == STATE_RUNNING) - { - /* No further blockers */ - set_running_thread_priority(bl_t, bl_pr); + struct thread_entry *tnext = *bqp; + if (tnext == NULL || tnext->retval == 0) break; + + add_to_list_l(&temp_queue, thread); + + UNLOCK_THREAD(thread); + UNLOCK_THREAD(blt); + + count++; + thread = tnext; + + LOCK_THREAD(thread); + } + + int blpr = blsplay->blocker.priority; + priority_disinherit_internal(blt, blpr); + + /* Locking order reverses here since the threads are no longer on the + queue side */ + if (count > 1) + { + add_to_list_l(&temp_queue, thread); + UNLOCK_THREAD(thread); + corelock_lock(&blsplay->cl); + + blpr = find_highest_priority_in_list_l(*bqp); + blsplay->blocker.thread = NULL; + + thread = temp_queue; + LOCK_THREAD(thread); + } + else + { + /* Becomes a simple, direct transfer */ + if (thread->priority <= blpr) + blpr = find_highest_priority_in_list_l(*bqp); + blsplay->blocker.thread = thread; + } + + blsplay->blocker.priority = blpr; + + while (1) + { + unsigned int slotnum = THREAD_ID_SLOT(thread->id); + threadbit_set_bit(&blsplay->mask, slotnum); + + if (blpr < PRIORITY_IDLE) + { + prio_add_entry(&thread->pdist, blpr); + if (blpr < thread->priority) + thread->priority = blpr; } - bl_t->priority = bl_pr; + if (count > 1) + remove_from_list_l(&temp_queue, thread); - /* If blocking thread has a blocker, apply transitive inheritance */ - bl = bl_t->blocker; - - if (bl == NULL) - break; /* End of chain or object doesn't support inheritance */ - - next = bl->thread; - - if (UNLIKELY(next == tstart)) - break; /* Full-circle - deadlock! */ + core_schedule_wakeup(thread); UNLOCK_THREAD(thread); -#if NUM_CORES > 1 - for (;;) - { - LOCK_THREAD(next); + thread = temp_queue; + if (thread == NULL) + break; - /* Blocker could change - retest condition */ - if (LIKELY(bl->thread == next)) - break; - - UNLOCK_THREAD(next); - next = bl->thread; - } -#endif - thread = bl_t; - bl_t = next; + LOCK_THREAD(thread); } - UNLOCK_THREAD(bl_t); + UNLOCK_THREAD(blt); -#if NUM_CORES > 1 - if (UNLIKELY(thread != tstart)) + if (count > 1) { - /* Relock original if it changed */ - LOCK_THREAD(tstart); + corelock_unlock(&blsplay->cl); } -#endif - return cores[CURRENT_CORE].running; + blt->retval = count; } /*--------------------------------------------------------------------------- @@ -801,67 +902,95 @@ struct thread_entry * * it is the running thread is made. 
*--------------------------------------------------------------------------- */ -struct thread_entry * - wakeup_priority_protocol_transfer(struct thread_entry *thread) +static void wakeup_thread_transfer(struct thread_entry *thread) { - /* Waking thread inherits priority boost from object owner */ + /* Waking thread inherits priority boost from object owner (blt) */ struct blocker *bl = thread->blocker; - struct thread_entry *bl_t = bl->thread; - struct thread_entry *next; - int bl_pr; + struct thread_entry *blt = bl->thread; - THREAD_ASSERT(cores[CURRENT_CORE].running == bl_t, + THREAD_ASSERT(cores[CURRENT_CORE].running == blt, "UPPT->wrong thread", cores[CURRENT_CORE].running); - LOCK_THREAD(bl_t); + LOCK_THREAD(blt); - bl_pr = bl->priority; + struct thread_entry **bqp = thread->bqp; + remove_from_list_l(bqp, thread); + thread->blocker = NULL; + + int blpr = bl->priority; /* Remove the object's boost from the owning thread */ - if (prio_subtract_entry(&bl_t->pdist, bl_pr) == 0 && - bl_pr <= bl_t->priority) + if (prio_subtract_entry(&blt->pdist, blpr) == 0 && blpr <= blt->priority) { /* No more threads at this priority are waiting and the old level is * at least the thread level */ - int priority = find_first_set_bit(bl_t->pdist.mask); - - if (priority != bl_t->priority) - { - /* Adjust this thread's priority */ - set_running_thread_priority(bl_t, priority); - } + int priority = priobit_ffs(&blt->pdist.mask); + if (priority != blt->priority) + set_running_thread_priority(blt, priority); } - next = *thread->bqp; + struct thread_entry *tnext = *bqp; - if (LIKELY(next == NULL)) + if (LIKELY(tnext == NULL)) { /* Expected shortcut - no more waiters */ - bl_pr = PRIORITY_IDLE; + blpr = PRIORITY_IDLE; } else { - if (thread->priority <= bl_pr) - { - /* Need to scan threads remaining in queue */ - bl_pr = find_highest_priority_in_list_l(next); - } + /* If lowering, we need to scan threads remaining in queue */ + int priority = thread->priority; + if (priority <= blpr) + blpr = find_highest_priority_in_list_l(tnext); - if (prio_add_entry(&thread->pdist, bl_pr) == 1 && - bl_pr < thread->priority) - { - /* Thread priority must be raised */ - thread->priority = bl_pr; - } + if (prio_add_entry(&thread->pdist, blpr) == 1 && blpr < priority) + thread->priority = blpr; /* Raise new owner */ } - bl->thread = thread; /* This thread pwns */ - bl->priority = bl_pr; /* Save highest blocked priority */ - thread->blocker = NULL; /* Thread not blocked */ + core_schedule_wakeup(thread); + UNLOCK_THREAD(thread); - UNLOCK_THREAD(bl_t); + bl->thread = thread; /* This thread pwns */ + bl->priority = blpr; /* Save highest blocked priority */ + UNLOCK_THREAD(blt); +} - return bl_t; +/*--------------------------------------------------------------------------- + * Readjust priorities when waking a thread blocked waiting for another + * in essence "releasing" the thread's effect on the object owner. Can be + * performed from any context. + *--------------------------------------------------------------------------- + */ +static void wakeup_thread_release(struct thread_entry *thread) +{ + struct blocker *bl = thread->blocker; + struct thread_entry *blt = lock_blocker_thread(bl); + struct thread_entry **bqp = thread->bqp; + remove_from_list_l(bqp, thread); + thread->blocker = NULL; + + /* Off to see the wizard... 
*/
+    core_schedule_wakeup(thread);
+
+    if (thread->priority > bl->priority)
+    {
+        /* Queue priority won't change */
+        UNLOCK_THREAD(thread);
+        unlock_blocker_thread(bl);
+        return;
+    }
+
+    UNLOCK_THREAD(thread);
+
+    int newblpr = find_highest_priority_in_list_l(*bqp);
+    if (newblpr == bl->priority)
+    {
+        /* Blocker priority won't change */
+        unlock_blocker_thread(bl);
+        return;
+    }
+
+    inherit_priority(bl, bl, blt, newblpr);
 }
 
 /*---------------------------------------------------------------------------
@@ -877,9 +1006,8 @@ static void __attribute__((noinline)) check_for_obj_waiters(
 {
     /* Only one bit in the mask should be set with a frequency on 1 which
      * represents the thread's own base priority */
-    uint32_t mask = thread->pdist.mask;
-    if ((mask & (mask - 1)) != 0 ||
-        thread->pdist.hist[find_first_set_bit(mask)] > 1)
+    if (priobit_popcount(&thread->pdist.mask) != 1 ||
+        thread->pdist.hist[priobit_ffs(&thread->pdist.mask)] > 1)
     {
         unsigned char name[32];
         thread_get_name(name, 32, thread);
@@ -889,26 +1017,72 @@
 #endif /* HAVE_PRIORITY_SCHEDULING */
 
 /*---------------------------------------------------------------------------
- * Move a thread back to a running state on its core.
+ * Explicitly wake up a thread on a blocking queue. Only affects threads of
+ * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
+ *
+ * This code should be considered a critical section by the caller, meaning
+ * that the object's corelock should be held.
+ *
+ * INTERNAL: Intended for use by kernel objects and not for programs.
  *---------------------------------------------------------------------------
  */
-static void core_schedule_wakeup(struct thread_entry *thread)
+unsigned int wakeup_thread_(struct thread_entry **list
+                            IF_PRIO(, enum wakeup_thread_protocol proto))
 {
-    const unsigned int core = IF_COP_CORE(thread->core);
+    struct thread_entry *thread = *list;
 
-    RTR_LOCK(core);
+    /* Check if there is a blocked thread at all. */
+    if (*list == NULL)
+        return THREAD_NONE;
 
-    thread->state = STATE_RUNNING;
+    LOCK_THREAD(thread);
 
-    add_to_list_l(&cores[core].running, thread);
-    rtr_add_entry(core, thread->priority);
+    /* Determine thread's current state. */
+    switch (thread->state)
+    {
+    case STATE_BLOCKED:
+    case STATE_BLOCKED_W_TMO:
+#ifdef HAVE_PRIORITY_SCHEDULING
+        /* Threads with PIP blockers cannot specify "WAKEUP_DEFAULT" */
+        if (thread->blocker != NULL)
+        {
+            static void (* const funcs[])(struct thread_entry *thread)
+                ICONST_ATTR =
+            {
+                [WAKEUP_DEFAULT] = NULL,
+                [WAKEUP_TRANSFER] = wakeup_thread_transfer,
+                [WAKEUP_RELEASE] = wakeup_thread_release,
+                [WAKEUP_TRANSFER_MULTI] = wakeup_thread_queue_multi_transfer,
+            };
 
-    RTR_UNLOCK(core);
+            /* Call the specified unblocking PIP (does the rest) */
+            funcs[proto](thread);
+        }
+        else
+#endif /* HAVE_PRIORITY_SCHEDULING */
+        {
+            /* No PIP - just boost the thread by aging */
+#ifdef HAVE_PRIORITY_SCHEDULING
+            IF_NO_SKIP_YIELD( if (thread->skip_count != -1) )
+                thread->skip_count = thread->priority;
+#endif /* HAVE_PRIORITY_SCHEDULING */
+            remove_from_list_l(list, thread);
+            core_schedule_wakeup(thread);
+            UNLOCK_THREAD(thread);
+        }
 
-#if NUM_CORES > 1
-    if (core != CURRENT_CORE)
-        core_wake(core);
+        return should_switch_tasks();
+
+        /* Nothing to do. State is not blocked. 
*/ + default: +#if THREAD_EXTRA_CHECKS + THREAD_PANICF("wakeup_thread->block invalid", thread); + case STATE_RUNNING: + case STATE_KILLED: #endif + UNLOCK_THREAD(thread); + return THREAD_NONE; + } } /*--------------------------------------------------------------------------- @@ -990,8 +1164,6 @@ void check_tmo_threads(void) } #endif /* NUM_CORES */ - remove_from_list_l(curr->bqp, curr); - #ifdef HAVE_WAKEUP_EXT_CB if (curr->wakeup_ext_cb != NULL) curr->wakeup_ext_cb(curr); @@ -999,8 +1171,11 @@ void check_tmo_threads(void) #ifdef HAVE_PRIORITY_SCHEDULING if (curr->blocker != NULL) - wakeup_priority_protocol_release(curr); + wakeup_thread_release(curr); + else #endif + remove_from_list_l(curr->bqp, curr); + corelock_unlock(ocl); } /* else state == STATE_SLEEPING */ @@ -1161,8 +1336,7 @@ void switch_thread(void) /* Begin task switching by saving our current context so that we can * restore the state of the current thread later to the point prior * to this call. */ - store_context(&thread->context); - + thread_store_context(thread); #ifdef DEBUG /* Check core_ctx buflib integrity */ core_check_valid(); @@ -1212,8 +1386,7 @@ void switch_thread(void) /* Select the new task based on priorities and the last time a * process got CPU time relative to the highest priority runnable * task. */ - struct priority_distribution *pd = &cores[core].rtr; - int max = find_first_set_bit(pd->mask); + int max = priobit_ffs(&cores[core].rtr.mask); if (block == NULL) { @@ -1269,7 +1442,7 @@ void switch_thread(void) } /* And finally give control to the next thread. */ - load_context(&thread->context); + thread_load_context(thread); #ifdef RB_PROFILE profile_thread_started(thread->id & THREAD_ID_SLOT_MASK); @@ -1291,140 +1464,59 @@ void sleep_thread(int ticks) LOCK_THREAD(current); /* Set our timeout, remove from run list and join timeout list. */ - current->tmo_tick = current_tick + ticks + 1; + current->tmo_tick = current_tick + MAX(ticks, 0) + 1; block_thread_on_l(current, STATE_SLEEPING); UNLOCK_THREAD(current); } /*--------------------------------------------------------------------------- - * Indefinitely block a thread on a blocking queue for explicit wakeup. + * Block a thread on a blocking queue for explicit wakeup. If timeout is + * negative, the block is infinite. * * INTERNAL: Intended for use by kernel objects and not for programs. *--------------------------------------------------------------------------- */ -void block_thread(struct thread_entry *current) +void block_thread(struct thread_entry *current, int timeout) { - /* Set the state to blocked and take us off of the run queue until we - * are explicitly woken */ LOCK_THREAD(current); - /* Set the list for explicit wakeup */ - block_thread_on_l(current, STATE_BLOCKED); - + struct blocker *bl = NULL; #ifdef HAVE_PRIORITY_SCHEDULING - if (current->blocker != NULL) - { - /* Object supports PIP */ - current = blocker_inherit_priority(current); - } -#endif - - UNLOCK_THREAD(current); -} - -/*--------------------------------------------------------------------------- - * Block a thread on a blocking queue for a specified time interval or until - * explicitly woken - whichever happens first. - * - * INTERNAL: Intended for use by kernel objects and not for programs. - *--------------------------------------------------------------------------- - */ -void block_thread_w_tmo(struct thread_entry *current, int timeout) -{ - /* Get the entry for the current running thread. 
*/ - LOCK_THREAD(current); - - /* Set the state to blocked with the specified timeout */ - current->tmo_tick = current_tick + timeout; - - /* Set the list for explicit wakeup */ - block_thread_on_l(current, STATE_BLOCKED_W_TMO); - -#ifdef HAVE_PRIORITY_SCHEDULING - if (current->blocker != NULL) - { - /* Object supports PIP */ - current = blocker_inherit_priority(current); - } -#endif - - UNLOCK_THREAD(current); -} - -/*--------------------------------------------------------------------------- - * Explicitly wakeup a thread on a blocking queue. Only effects threads of - * STATE_BLOCKED and STATE_BLOCKED_W_TMO. - * - * This code should be considered a critical section by the caller meaning - * that the object's corelock should be held. - * - * INTERNAL: Intended for use by kernel objects and not for programs. - *--------------------------------------------------------------------------- - */ -unsigned int wakeup_thread(struct thread_entry **list) -{ - struct thread_entry *thread = *list; - unsigned int result = THREAD_NONE; - - /* Check if there is a blocked thread at all. */ - if (thread == NULL) - return result; - - LOCK_THREAD(thread); - - /* Determine thread's current state. */ - switch (thread->state) - { - case STATE_BLOCKED: - case STATE_BLOCKED_W_TMO: - remove_from_list_l(list, thread); - - result = THREAD_OK; - -#ifdef HAVE_PRIORITY_SCHEDULING - struct thread_entry *current; - struct blocker *bl = thread->blocker; - - if (bl == NULL) - { - /* No inheritance - just boost the thread by aging */ - IF_NO_SKIP_YIELD( if (thread->skip_count != -1) ) - thread->skip_count = thread->priority; - current = cores[CURRENT_CORE].running; - } - else - { - /* Call the specified unblocking PIP */ - current = bl->wakeup_protocol(thread); - } - - if (current != NULL && - find_first_set_bit(cores[IF_COP_CORE(current->core)].rtr.mask) - < current->priority) - { - /* There is a thread ready to run of higher or same priority on - * the same core as the current one; recommend a task switch. - * Knowing if this is an interrupt call would be helpful here. */ - result |= THREAD_SWITCH; - } + bl = current->blocker; + struct thread_entry *blt = bl ? lock_blocker_thread(bl) : NULL; #endif /* HAVE_PRIORITY_SCHEDULING */ - core_schedule_wakeup(thread); - break; - - /* Nothing to do. State is not blocked. */ -#if THREAD_EXTRA_CHECKS - default: - THREAD_PANICF("wakeup_thread->block invalid", thread); - case STATE_RUNNING: - case STATE_KILLED: - break; -#endif + if (LIKELY(timeout < 0)) + { + /* Block until explicitly woken */ + block_thread_on_l(current, STATE_BLOCKED); + } + else + { + /* Set the state to blocked with the specified timeout */ + current->tmo_tick = current_tick + timeout; + block_thread_on_l(current, STATE_BLOCKED_W_TMO); } - UNLOCK_THREAD(thread); - return result; + if (bl == NULL) + { + UNLOCK_THREAD(current); + return; + } + +#ifdef HAVE_PRIORITY_SCHEDULING + int newblpr = current->priority; + UNLOCK_THREAD(current); + + if (newblpr >= bl->priority) + { + unlock_blocker_thread(bl); + return; /* Queue priority won't change */ + } + + inherit_priority(bl, bl, blt, newblpr); +#endif /* HAVE_PRIORITY_SCHEDULING */ } /*--------------------------------------------------------------------------- @@ -1435,25 +1527,31 @@ unsigned int wakeup_thread(struct thread_entry **list) * INTERNAL: Intended for use by kernel objects and not for programs. 
*--------------------------------------------------------------------------- */ -unsigned int thread_queue_wake(struct thread_entry **list) +unsigned int thread_queue_wake(struct thread_entry **list, + volatile int *count) { + int num = 0; unsigned result = THREAD_NONE; for (;;) { - unsigned int rc = wakeup_thread(list); + unsigned int rc = wakeup_thread(list, WAKEUP_DEFAULT); if (rc == THREAD_NONE) break; /* No more threads */ result |= rc; + num++; } + if (count) + *count = num; + return result; } /*--------------------------------------------------------------------------- - * Assign the thread slot a new ID. Version is 1-255. + * Assign the thread slot a new ID. Version is 0x00000100..0xffffff00. *--------------------------------------------------------------------------- */ static void new_thread_id(unsigned int slot_num, @@ -1693,7 +1791,7 @@ void thread_wait(unsigned int thread_id) current->bqp = &thread->queue; disable_irq(); - block_thread(current); + block_thread(current, TIMEOUT_BLOCK); corelock_unlock(&thread->waiter_cl); @@ -1723,7 +1821,7 @@ static inline void thread_final_exit(struct thread_entry *current) * execution except the slot itself. */ /* Signal this thread */ - thread_queue_wake(¤t->queue); + thread_queue_wake(¤t->queue, NULL); corelock_unlock(¤t->waiter_cl); switch_thread(); /* This should never and must never be reached - if it is, the @@ -1912,20 +2010,18 @@ IF_COP( retry_state: ) } } #endif - remove_from_list_l(thread->bqp, thread); - #ifdef HAVE_WAKEUP_EXT_CB if (thread->wakeup_ext_cb != NULL) thread->wakeup_ext_cb(thread); #endif #ifdef HAVE_PRIORITY_SCHEDULING + /* Remove thread's priority influence from its chain if needed */ if (thread->blocker != NULL) - { - /* Remove thread's priority influence from its chain */ wakeup_priority_protocol_release(thread); - } + else #endif + remove_from_list_l(thread->bqp, thread); #if NUM_CORES > 1 if (ocl != NULL) @@ -1970,130 +2066,77 @@ thread_killed: /* Thread was already killed */ */ int thread_set_priority(unsigned int thread_id, int priority) { + if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY) + return -1; /* Invalid priority argument */ + int old_base_priority = -1; struct thread_entry *thread = thread_id_entry(thread_id); - /* A little safety measure */ - if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY) - return -1; - /* Thread could be on any list and therefore on an interrupt accessible one - disable interrupts */ - int oldlevel = disable_irq_save(); - + const int oldlevel = disable_irq_save(); LOCK_THREAD(thread); - /* Make sure it's not killed */ - if (thread->id == thread_id && thread->state != STATE_KILLED) + if (thread->id != thread_id || thread->state == STATE_KILLED) + goto done; /* Invalid thread */ + + old_base_priority = thread->base_priority; + if (priority == old_base_priority) + goto done; /* No base priority change */ + + thread->base_priority = priority; + + /* Adjust the thread's priority influence on itself */ + prio_move_entry(&thread->pdist, old_base_priority, priority); + + int old_priority = thread->priority; + int new_priority = priobit_ffs(&thread->pdist.mask); + + if (old_priority == new_priority) + goto done; /* No running priority change */ + + if (thread->state == STATE_RUNNING) { - int old_priority = thread->priority; - - old_base_priority = thread->base_priority; - thread->base_priority = priority; - - prio_move_entry(&thread->pdist, old_base_priority, priority); - priority = find_first_set_bit(thread->pdist.mask); - - if (old_priority == priority) - { - 
/* No priority change - do nothing */ - } - else if (thread->state == STATE_RUNNING) - { - /* This thread is running - change location on the run - * queue. No transitive inheritance needed. */ - set_running_thread_priority(thread, priority); - } - else - { - thread->priority = priority; - - if (thread->blocker != NULL) - { - /* Bubble new priority down the chain */ - struct blocker *bl = thread->blocker; /* Blocker struct */ - struct thread_entry *bl_t = bl->thread; /* Blocking thread */ - struct thread_entry * const tstart = thread; /* Initial thread */ - const int highest = MIN(priority, old_priority); /* Higher of new or old */ - - for (;;) - { - struct thread_entry *next; /* Next thread to check */ - int bl_pr; /* Highest blocked thread */ - int queue_pr; /* New highest blocked thread */ -#if NUM_CORES > 1 - /* Owner can change but thread cannot be dislodged - thread - * may not be the first in the queue which allows other - * threads ahead in the list to be given ownership during the - * operation. If thread is next then the waker will have to - * wait for us and the owner of the object will remain fixed. - * If we successfully grab the owner -- which at some point - * is guaranteed -- then the queue remains fixed until we - * pass by. */ - for (;;) - { - LOCK_THREAD(bl_t); - - /* Double-check the owner - retry if it changed */ - if (LIKELY(bl->thread == bl_t)) - break; - - UNLOCK_THREAD(bl_t); - bl_t = bl->thread; - } -#endif - bl_pr = bl->priority; - - if (highest > bl_pr) - break; /* Object priority won't change */ - - /* This will include the thread being set */ - queue_pr = find_highest_priority_in_list_l(*thread->bqp); - - if (queue_pr == bl_pr) - break; /* Object priority not changing */ - - /* Update thread boost for this object */ - bl->priority = queue_pr; - prio_move_entry(&bl_t->pdist, bl_pr, queue_pr); - bl_pr = find_first_set_bit(bl_t->pdist.mask); - - if (bl_t->priority == bl_pr) - break; /* Blocking thread priority not changing */ - - if (bl_t->state == STATE_RUNNING) - { - /* Thread not blocked - we're done */ - set_running_thread_priority(bl_t, bl_pr); - break; - } - - bl_t->priority = bl_pr; - bl = bl_t->blocker; /* Blocking thread has a blocker? */ - - if (bl == NULL) - break; /* End of chain */ - - next = bl->thread; - - if (UNLIKELY(next == tstart)) - break; /* Full-circle */ - - UNLOCK_THREAD(thread); - - thread = bl_t; - bl_t = next; - } /* for (;;) */ - - UNLOCK_THREAD(bl_t); - } - } + /* This thread is running - just change location on the run queue. + Also sets thread->priority. 
*/ + set_running_thread_priority(thread, new_priority); + goto done; } + /* Thread is blocked */ + struct blocker *bl = thread->blocker; + if (bl == NULL) + { + thread->priority = new_priority; + goto done; /* End of transitive blocks */ + } + + struct thread_entry *blt = lock_blocker_thread(bl); + struct thread_entry **bqp = thread->bqp; + + thread->priority = new_priority; + UNLOCK_THREAD(thread); + thread = NULL; + int oldblpr = bl->priority; + int newblpr = oldblpr; + if (new_priority < oldblpr) + newblpr = new_priority; + else if (old_priority <= oldblpr) + newblpr = find_highest_priority_in_list_l(*bqp); + + if (newblpr == oldblpr) + { + unlock_blocker_thread(bl); + goto done; + } + + inherit_priority(bl, bl, blt, newblpr); +done: + if (thread) + UNLOCK_THREAD(thread); restore_irq(oldlevel); - return old_base_priority; } diff --git a/firmware/target/hosted/sdl/thread-sdl.c b/firmware/target/hosted/sdl/thread-sdl.c index c17e793833..eaf59e245d 100644 --- a/firmware/target/hosted/sdl/thread-sdl.c +++ b/firmware/target/hosted/sdl/thread-sdl.c @@ -406,20 +406,20 @@ void sleep_thread(int ticks) current->tmo_tick = (1000/HZ) * ticks + ((1000/HZ)-1) - rem; } -void block_thread(struct thread_entry *current) +void block_thread(struct thread_entry *current, int ticks) { - current->state = STATE_BLOCKED; + if (ticks < 0) + current->state = STATE_BLOCKED; + else + { + current->state = STATE_BLOCKED_W_TMO; + current->tmo_tick = (1000/HZ)*ticks; + } + add_to_list_l(current->bqp, current); } -void block_thread_w_tmo(struct thread_entry *current, int ticks) -{ - current->state = STATE_BLOCKED_W_TMO; - current->tmo_tick = (1000/HZ)*ticks; - add_to_list_l(current->bqp, current); -} - -unsigned int wakeup_thread(struct thread_entry **list) +unsigned int wakeup_thread_(struct thread_entry **list) { struct thread_entry *thread = *list; @@ -439,20 +439,26 @@ unsigned int wakeup_thread(struct thread_entry **list) return THREAD_NONE; } -unsigned int thread_queue_wake(struct thread_entry **list) +unsigned int thread_queue_wake(struct thread_entry **list, + volatile int *count) { unsigned int result = THREAD_NONE; + int num = 0; for (;;) { - unsigned int rc = wakeup_thread(list); + unsigned int rc = wakeup_thread_(list); if (rc == THREAD_NONE) break; - result |= rc; + result |= rc; + num++; } + if (count) + *count = num; + return result; } @@ -615,7 +621,7 @@ void remove_thread(unsigned int thread_id) new_thread_id(thread->id, thread); thread->state = STATE_KILLED; - thread_queue_wake(&thread->queue); + thread_queue_wake(&thread->queue, NULL); SDL_DestroySemaphore(s); @@ -652,7 +658,7 @@ void thread_wait(unsigned int thread_id) if (thread->id == thread_id && thread->state != STATE_KILLED) { current->bqp = &thread->queue; - block_thread(current); + block_thread(current, TIMEOUT_BLOCK); switch_thread(); } }
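
Usage sketches (not part of the patch):

The typed bit-array API is easiest to see from a concrete declaration.
This mirrors what thread.h now does for threadbit_t and priobit_t; the
'corebit', 'core_mask' and 'handle_core' names below are illustrative
only:

    #include "bitarray.h"

    /* Declares corebit_t plus corebit_set_bit(), corebit_clear_bit(),
     * corebit_clear(), corebit_is_clear(), corebit_ffs(),
     * corebit_popcount() and friends, all type-checked against corebit_t */
    BITARRAY_TYPE_DECLARE(corebit_t, corebit, NUM_CORES)

    static corebit_t core_mask;

    static void example(void)
    {
        corebit_clear(&core_mask);      /* every bit to '0' */
        corebit_set_bit(&core_mask, 1); /* mark core 1 */

        /* visits the number of each set bit in ascending order */
        FOR_EACH_BITARRAY_SET_BIT(&core_mask, corenum)
            handle_core(corenum);       /* hypothetical per-core handler */
    }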
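
The mrsw_lock.[ch] files themselves are deferred, so no reader/writer
entry points are defined anywhere in this patch. The sketch below only
illustrates the intended usage pattern, with guessed names that follow
the kernel's conventions:

    static struct mrsw_lock nodes_lock; /* hypothetical - not in this patch */

    void reader(void)
    {
        mrsw_read_acquire(&nodes_lock);  /* any number may hold this */
        /* ... read the shared data ... */
        mrsw_read_release(&nodes_lock);
    }

    void writer(void)
    {
        mrsw_write_acquire(&nodes_lock); /* excludes readers and writers */
        /* ... modify the shared data ... */
        mrsw_write_release(&nodes_lock);
    }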
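
Kernel objects now name the wakeup protocol at the call site instead of
storing a callback in struct blocker; on builds without
HAVE_PRIORITY_SCHEDULING the wakeup_thread() macro simply drops the
protocol argument. A sketch of an ownership-transferring unlock path,
patterned on mutex_unlock() ('obj' is illustrative; its corelock is
held and IRQs are disabled at this point):

    unsigned int result = wakeup_thread(&obj->queue, WAKEUP_TRANSFER);
    /* WAKEUP_RELEASE wakes without changing ownership (queues,
     * timeouts); WAKEUP_TRANSFER_MULTI hands the object to several
     * readers at once through a blocker_splay. */
    if (result & THREAD_SWITCH)
        switch_thread(); /* a higher-priority waiter became runnable */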
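
block_thread() and block_thread_w_tmo() merge into a single entry point
where a negative timeout blocks indefinitely, so callers pick the
behavior with one argument (compare semaphore_wait() in the diff). A
sketch of the blocking pattern the kernel objects share ('obj' again
illustrative):

    current->bqp = &obj->queue;           /* queue to block on */
    disable_irq();
    block_thread(current, TIMEOUT_BLOCK); /* < 0: until explicitly woken */
    /* or: block_thread(current, HZ/2);      >= 0: tick timeout as before */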
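
Thread IDs widen from 16 to 32 bits, keeping the slot number in the low
byte and a wrapping version in the upper 24 bits, so a stale ID naming a
recycled slot stays detectable for far longer:

    unsigned int id      = THREAD_ID_INIT(5);  /* version 1, slot 5 */
    unsigned int slot    = THREAD_ID_SLOT(id); /* == 5 */
    unsigned int version = (id & THREAD_ID_VERSION_MASK)
                               >> THREAD_ID_VERSION_SHIFT; /* == 1 */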
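
A worked example of the priority-distribution bookkeeping behind
inherit_priority() and priority_disinherit(). prio_add_entry() and
prio_subtract_entry() are internal to thread.c, so this is a model of
the mechanism rather than callable API; assume a zero-initialized pd
and that lower numbers mean higher priority:

    static struct priority_distribution pd;

    prio_add_entry(&pd, 20);      /* base priority: hist[20]=1, mask bit 20 */
    prio_add_entry(&pd, 10);      /* a waiter boosts: hist[10]=1, bit 10 */
    /* effective priority == priobit_ffs(&pd.mask) == 10 */
    prio_subtract_entry(&pd, 10); /* waiter leaves: hist[10]=0, bit cleared */
    /* effective priority == priobit_ffs(&pd.mask) == 20 again */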