Add multi-reader, single-writer locks to kernel.
Any number of readers may be in the critical section at a time; a writer is mutually exclusive to all other threads. These locks are a better choice than a mutex when data is read often but rarely modified, and multiple threads can safely read it concurrently.

Priority inheritance is fully implemented, along with the other kernel changes needed to support it on multi-owner objects. This also cleans up the priority code in the kernel and updates some associated structures in existing objects to the cleaner form.

The mrsw_lock.[ch] files themselves are not added yet, since nothing needs them so far, but the supporting improvements are useful on their own. This includes a typed bit-array API (bitarray.h), which is fairly basic for now.

Change-Id: Idbe43dcd9170358e06d48d00f1c69728ff45b0e3
Reviewed-on: http://gerrit.rockbox.org/801
Reviewed-by: Michael Sevakis <jethead71@rockbox.org>
Tested: Michael Sevakis <jethead71@rockbox.org>
parent 6536f1db3e
commit 533d396761
11 changed files with 880 additions and 601 deletions
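For orientation before the diff: a multi-reader, single-writer lock lets any number of readers hold it concurrently while a writer excludes everyone. Since mrsw_lock.[ch] are deliberately left out of this commit, the type and function names in this sketch are assumptions made for illustration, not the committed API:

/* Hypothetical reader/writer usage -- names are illustrative only,
 * as mrsw_lock.[ch] are not part of this commit. */
static struct mrsw_lock table_lock;
static int table[32];

int table_get(int i)
{
    mrsw_read_acquire(&table_lock);   /* any number of readers at once */
    int val = table[i];
    mrsw_read_release(&table_lock);
    return val;
}

void table_put(int i, int val)
{
    mrsw_write_acquire(&table_lock);  /* excludes all readers and writers */
    table[i] = val;
    mrsw_write_release(&table_lock);
}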
@@ -1825,6 +1825,9 @@ drivers/touchpad.c
 #ifdef HAVE_CORELOCK_OBJECT
 kernel/corelock.c
 #endif
+#if 0 /* pending dependent code */
+kernel/mrsw_lock.c
+#endif
 kernel/mutex.c
 kernel/queue.c
 #ifdef HAVE_SEMAPHORE_OBJECTS
firmware/include/bitarray.h (new file, 231 lines)
@@ -0,0 +1,231 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2014 by Michael Sevakis
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#ifndef BITARRAY_H
#define BITARRAY_H

/* Type-checked bit array definitions */

/* All this stuff gets optimized into very simple object code */

#define BITARRAY_WORD_BITS \
    (sizeof (unsigned int) * 8)
#define BITARRAY_NWORDS(bits) \
    (((bits) + BITARRAY_WORD_BITS - 1) / BITARRAY_WORD_BITS)
#define BITARRAY_BITWORD(bitnum) \
    ((bitnum) / BITARRAY_WORD_BITS)
#define BITARRAY_WORDBIT(bitnum) \
    ((bitnum) % BITARRAY_WORD_BITS)
#define BITARRAY_NBIT(word, bit) \
    ((word)*BITARRAY_WORD_BITS + (bit))
#define BITARRAY_BITS(bits) \
    (BITARRAY_NWORDS(bits)*BITARRAY_WORD_BITS)
#define BITARRAY_BITN(bitnum) \
    (1u << BITARRAY_WORDBIT(bitnum))


/** Iterators **/
#include "config.h"
#include <stdint.h>

#if (defined(CPU_ARM) && ARM_ARCH >= 5) || UINT32_MAX < UINT_MAX
#define __BITARRAY_CTZ(wval) __builtin_ctz(wval)
#else
#include "system.h"
#define __BITARRAY_CTZ(wval) find_first_set_bit(wval)
#endif
#define __BITARRAY_POPCNT(wval) __builtin_popcount(wval)

#ifndef BIT_N
#define BIT_N(n) (1u << (n))
#endif

/* Enumerate each word index */
#define FOR_EACH_BITARRAY_WORD_INDEX(nwords, index) \
    for (unsigned int index = 0, _nwords = (nwords); \
         index < _nwords; index++)

/* Enumerate each word value */
#define FOR_EACH_BITARRAY_WORD(a, wval) \
    FOR_EACH_BITARRAY_WORD_INDEX(ARRAYLEN((a)->words), _w) \
    for (unsigned int wval = (a)->words[_w], _ = 1; _; _--)

/* Enumerate the bit number of each set bit of a word in sequence */
#define FOR_EACH_BITARRAY_SET_WORD_BIT(wval, bit) \
    for (unsigned int _wval = (wval), bit; \
         _wval ? (((bit) = __BITARRAY_CTZ(_wval)), 1) : 0; \
         _wval &= ~BIT_N(bit))

/* Enumerate the bit number of each set bit in the bit array in sequence */
#define FOR_EACH_BITARRAY_SET_BIT_ARR(nwords, words, nbit) \
    FOR_EACH_BITARRAY_WORD_INDEX(nwords, _w) \
    FOR_EACH_BITARRAY_SET_WORD_BIT(words[_w], _bit) \
    for (unsigned int nbit = BITARRAY_NBIT(_w, _bit), _ = 1; _; _--)

/* As above but takes an array type for an argument */
#define FOR_EACH_BITARRAY_SET_BIT(a, nbit) \
    FOR_EACH_BITARRAY_SET_BIT_ARR(ARRAYLEN((a)->words), (a)->words, nbit)


/** Base functions (called by typed functions) **/

/* Return the word associated with the bit */
static inline unsigned int
__bitarray_get_word(unsigned int words[], unsigned int bitnum)
{
    return words[BITARRAY_BITWORD(bitnum)];
}

/* Set the word associated with the bit */
static inline void
__bitarray_set_word(unsigned int words[], unsigned int bitnum,
                    unsigned int wordval)
{
    words[BITARRAY_BITWORD(bitnum)] = wordval;
}

/* Set the bit at index 'bitnum' to '1' */
static inline void
__bitarray_set_bit(unsigned int words[], unsigned int bitnum)
{
    unsigned int word = BITARRAY_BITWORD(bitnum);
    unsigned int bit  = BITARRAY_BITN(bitnum);
    words[word] |= bit;
}

/* Set the bit at index 'bitnum' to '0' */
static inline void
__bitarray_clear_bit(unsigned int words[], unsigned int bitnum)
{
    unsigned int word = BITARRAY_BITWORD(bitnum);
    unsigned int bit  = BITARRAY_BITN(bitnum);
    words[word] &= ~bit;
}

/* Return the value of the specified bit ('0' or '1') */
static inline unsigned int
__bitarray_test_bit(const unsigned int words[], unsigned int bitnum)
{
    unsigned int word = BITARRAY_BITWORD(bitnum);
    unsigned int nbit = BITARRAY_WORDBIT(bitnum);
    return (words[word] >> nbit) & 1u;
}

/* Check if all bits in the bit array are '0' */
static inline bool
__bitarray_is_clear(const unsigned int words[], unsigned int nbits)
{
    FOR_EACH_BITARRAY_WORD_INDEX(BITARRAY_NWORDS(nbits), word)
    {
        if (words[word] != 0)
            return false;
    }

    return true;
}

/* Set every bit in the array to '0' */
static inline void
__bitarray_clear(unsigned int words[], unsigned int nbits)
{
    FOR_EACH_BITARRAY_WORD_INDEX(BITARRAY_NWORDS(nbits), word)
        words[word] = 0;
}

/* Set every bit in the array to '1' */
static inline void
__bitarray_set(unsigned int words[], unsigned int nbits)
{
    FOR_EACH_BITARRAY_WORD_INDEX(BITARRAY_NWORDS(nbits), word)
        words[word] = ~0u;
}

/* Find the lowest-indexed '1' bit in the bit array, returning the size of
   the array if none are set */
static inline unsigned int
__bitarray_ffs(const unsigned int words[], unsigned int nbits)
{
    FOR_EACH_BITARRAY_SET_BIT_ARR(BITARRAY_NWORDS(nbits), words, nbit)
        return nbit;

    return BITARRAY_BITS(nbits);
}

/* Return the number of bits currently set to '1' in the bit array */
static inline unsigned int
__bitarray_popcount(const unsigned int words[], unsigned int nbits)
{
    unsigned int count = 0;

    FOR_EACH_BITARRAY_WORD_INDEX(BITARRAY_NWORDS(nbits), word)
        count += __BITARRAY_POPCNT(words[word]);

    return count;
}

/**
 * Giant macro to define all the typed functions
 * typename : The name of the type (e.g. myarr_t myarr;)
 * fnprefix : The prefix all functions get (e.g. myarr_set_bit)
 * nbits    : The minimum number of bits the array is meant to hold
 *            (the implementation rounds this up to the word size
 *             and all words may be fully utilized)
 *
 * uses 'typedef' to freely change from, e.g., struct to union without
 * changing source code
 */
#define BITARRAY_TYPE_DECLARE(typename, fnprefix, nbits) \
typedef struct \
{ \
    unsigned int words[BITARRAY_NWORDS(nbits)]; \
} typename; \
static inline unsigned int \
fnprefix##_get_word(typename *array, unsigned int bitnum) \
    { return __bitarray_get_word(array->words, bitnum); } \
static inline void \
fnprefix##_set_word(typename *array, unsigned int bitnum, \
                    unsigned int wordval) \
    { __bitarray_set_word(array->words, bitnum, wordval); } \
static inline void \
fnprefix##_set_bit(typename *array, unsigned int bitnum) \
    { __bitarray_set_bit(array->words, bitnum); } \
static inline void \
fnprefix##_clear_bit(typename *array, unsigned int bitnum) \
    { __bitarray_clear_bit(array->words, bitnum); } \
static inline unsigned int \
fnprefix##_test_bit(const typename *array, unsigned int bitnum) \
    { return __bitarray_test_bit(array->words, bitnum); } \
static inline bool \
fnprefix##_is_clear(const typename *array) \
    { return __bitarray_is_clear(array->words, nbits); } \
static inline void \
fnprefix##_clear(typename *array) \
    { __bitarray_clear(array->words, nbits); } \
static inline void \
fnprefix##_set(typename *array) \
    { __bitarray_set(array->words, nbits); } \
static inline unsigned int \
fnprefix##_ffs(const typename *array) \
    { return __bitarray_ffs(array->words, nbits); } \
static inline unsigned int \
fnprefix##_popcount(const typename *array) \
    { return __bitarray_popcount(array->words, nbits); }

#endif /* BITARRAY_H */
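To make the new header concrete, here is a small usage sketch assembled only from the macros and inline functions above. The type name, bit count and function are invented for illustration, and ARRAYLEN (used by the iterator macros) comes from the firmware's other headers:

#include "bitarray.h"

/* Declares jobmask_t plus jobmask_set_bit(), jobmask_clear(), jobmask_ffs(),
 * jobmask_popcount(), ...  40 bits rounds up to two words on 32-bit targets. */
BITARRAY_TYPE_DECLARE(jobmask_t, jobmask, 40)

static unsigned int first_pending_job(void)
{
    jobmask_t pending;

    jobmask_clear(&pending);        /* every bit to '0' */
    jobmask_set_bit(&pending, 3);
    jobmask_set_bit(&pending, 37);

    /* Visits the set bits in ascending order: 3, then 37 */
    FOR_EACH_BITARRAY_SET_BIT(&pending, nbit)
    {
        /* handle job 'nbit' ... */
    }

    return jobmask_ffs(&pending);   /* lowest set bit: 3 */
}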
@@ -26,6 +26,7 @@
 #include "system.h"
 #include "queue.h"
 #include "mutex.h"
+#include "mrsw_lock.h"
 #include "tick.h"

 #ifdef INCLUDE_TIMEOUT_API
@@ -30,18 +30,12 @@ struct mutex
 {
     struct thread_entry *queue; /* waiter list */
     int recursion;              /* lock owner recursion count */
-#ifdef HAVE_PRIORITY_SCHEDULING
     struct blocker blocker;     /* priority inheritance info
-                                   for waiters */
-    bool no_preempt;            /* don't allow higher-priority thread
-                                   to be scheduled even if woken */
-#else
-    struct thread_entry *thread; /* Indicates owner thread - an owner
-                                    implies a locked state - same goes
-                                    for priority scheduling
-                                    (in blocker struct for that) */
-#endif
+                                   for waiters and owner*/
     IF_COP( struct corelock cl; ) /* multiprocessor sync */
+#ifdef HAVE_PRIORITY_SCHEDULING
+    bool no_preempt;
+#endif
 };

 extern void mutex_init(struct mutex *m);
@@ -56,7 +50,7 @@ static inline void mutex_set_preempt(struct mutex *m, bool preempt)
 #else
 /* Deprecated but needed for now - firmware/drivers/ata_mmc.c */
 static inline bool mutex_test(const struct mutex *m)
-    { return m->thread != NULL; }
+    { return m->blocker.thread != NULL; }
 #endif /* HAVE_PRIORITY_SCHEDULING */

 #endif /* MUTEX_H */
@@ -143,6 +143,8 @@ extern bool queue_peek(struct event_queue *q, struct queue_event *ev);
 #define QPEEK_FILTER_COUNT_MASK (0xffu)   /* 0x00=1 filter, 0xff=256 filters */
 #define QPEEK_FILTER_HEAD_ONLY  (1u << 8) /* Ignored if no filters */
 #define QPEEK_REMOVE_EVENTS     (1u << 9) /* Remove or discard events */
+#define QPEEK_FILTER1(a)        QPEEK_FILTER2((a), (a))
+#define QPEEK_FILTER2(a, b)     (&(const long [2]){ (a), (b) })
 extern bool queue_peek_ex(struct event_queue *q,
                           struct queue_event *ev,
                           unsigned int flags,
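The new filter macros let a short filter list be written inline at the call site: QPEEK_FILTER2() takes the address of a two-element compound literal, and QPEEK_FILTER1(x) is just QPEEK_FILTER2(x, x). A minimal sketch of what the macro produces; the event IDs are placeholders, and since the rest of the queue_peek_ex() parameter list is truncated in this hunk, no call to it is shown:

static void peek_filter_demo(void)
{
    /* Inline form built by the macro... */
    const long (*filters)[2] = QPEEK_FILTER2(SYS_TIMEOUT, SYS_USB_CONNECTED);

    /* ...equivalent to spelling the array out by hand: */
    static const long pair[2] = { SYS_TIMEOUT, SYS_USB_CONNECTED };
    const long (*filters_by_hand)[2] = &pair;

    (void)filters; (void)filters_by_hand;
}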
@@ -28,6 +28,7 @@
 #include <stdbool.h>
 #include "gcc_extensions.h"
 #include "corelock.h"
+#include "bitarray.h"

 /* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
  * by giving high priority threads more CPU time than lower priority threads
@@ -80,6 +81,10 @@
 #endif

 #define MAXTHREADS (BASETHREADS+TARGET_EXTRA_THREADS)
+
+BITARRAY_TYPE_DECLARE(threadbit_t, threadbit, MAXTHREADS)
+BITARRAY_TYPE_DECLARE(priobit_t, priobit, NUM_PRIORITIES)
+
 /*
  * We need more stack when we run under a host
  * maybe more expensive C lib functions?
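These two typed arrays give the scheduler bit sets sized to the thread-slot and priority ranges, with the type checking from bitarray.h. As a purely illustrative sketch (not code from this commit), a threadbit_t can record a set of thread slots, for example which threads currently hold some resource:

/* Illustrative only: THREAD_ID_SLOT(), defined further down in this header,
 * extracts the slot number from a thread id. */
static threadbit_t holder_slots;

static void mark_holder(unsigned int thread_id)
{
    threadbit_set_bit(&holder_slots, THREAD_ID_SLOT(thread_id));
}

static bool is_holder(unsigned int thread_id)
{
    return threadbit_test_bit(&holder_slots, THREAD_ID_SLOT(thread_id)) != 0;
}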
@@ -134,32 +139,39 @@ struct thread_list
     struct thread_entry *next; /* Next thread in a list */
 };

-#ifdef HAVE_PRIORITY_SCHEDULING
+/* Basic structure describing the owner of an object */
 struct blocker
 {
     struct thread_entry * volatile thread; /* thread blocking other threads
                                               (aka. object owner) */
+#ifdef HAVE_PRIORITY_SCHEDULING
     int priority;                          /* highest priority waiter */
-    struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
+#endif
 };

-/* Choices of wakeup protocol */
+/* If a thread has a blocker but the blocker's registered thread is NULL,
+   then it references this and the struct blocker pointer may be
+   reinterpreted as such. */
+struct blocker_splay
+{
+    struct blocker blocker;   /* blocker info (first!) */
+#ifdef HAVE_PRIORITY_SCHEDULING
+    threadbit_t mask;         /* mask of nonzero tcounts */
+#if NUM_CORES > 1
+    struct corelock cl;       /* mutual exclusion */
+#endif
+#endif /* HAVE_PRIORITY_SCHEDULING */
+};

-/* For transfer of object ownership by one thread to another thread by
- * the owning thread itself (mutexes) */
-struct thread_entry *
-    wakeup_priority_protocol_transfer(struct thread_entry *thread);
+#ifdef HAVE_PRIORITY_SCHEDULING

-/* For release by owner where ownership doesn't change - other threads,
- * interrupts, timeouts, etc. (mutex timeout, queues) */
-struct thread_entry *
-    wakeup_priority_protocol_release(struct thread_entry *thread);
+/* Quick-disinherit of priority elevation. Must be a running thread. */
+void priority_disinherit(struct thread_entry *thread, struct blocker *bl);

 struct priority_distribution
 {
     uint8_t   hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
-    uint32_t  mask;                 /* Bitmask of hist entries that are not zero */
+    priobit_t mask;                 /* Bitmask of hist entries that are not zero */
 };

 #endif /* HAVE_PRIORITY_SCHEDULING */
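priority_distribution's mask is now a typed priobit_t rather than a bare uint32_t, so it stays correct even if NUM_PRIORITIES ever exceeds the word size. The scheduler code that maintains these fields is not shown in this excerpt; the sketch below only illustrates how the generated priobit_* helpers pair with the histogram, and the helper names are invented:

static void prio_hist_add(struct priority_distribution *pd, int priority)
{
    if (pd->hist[priority]++ == 0)
        priobit_set_bit(&pd->mask, priority);    /* first entry at this level */
}

static void prio_hist_remove(struct priority_distribution *pd, int priority)
{
    if (--pd->hist[priority] == 0)
        priobit_clear_bit(&pd->mask, priority);  /* last entry at this level */
}

/* Lower numbers are higher priorities in this scheduler, so the most
 * urgent nonempty level is simply the lowest set bit. */
static unsigned int prio_hist_highest(const struct priority_distribution *pd)
{
    return priobit_ffs(&pd->mask);
}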
@@ -210,6 +222,7 @@ struct thread_entry
     volatile intptr_t retval; /* Return value from a blocked operation/
                                  misc. use */
 #endif
+    uint32_t id;               /* Current slot id */
     int __errno;               /* Thread error number (errno tls) */
 #ifdef HAVE_PRIORITY_SCHEDULING
     /* Priority summary of owned objects that support inheritance */
@@ -226,7 +239,6 @@ struct thread_entry
     unsigned char priority;    /* Scheduled priority (higher of base or
                                   all threads blocked by this one) */
 #endif
-    uint16_t id;               /* Current slot id */
     unsigned short stack_size; /* Size of stack in bytes */
     unsigned char state;       /* Thread slot state (STATE_*) */
 #ifdef HAVE_SCHEDULER_BOOSTCTRL
@@ -238,11 +250,12 @@ struct thread_entry
 };

 /*** Macros for internal use ***/
-/* Thread ID, 16 bits = |VVVVVVVV|SSSSSSSS| */
+/* Thread ID, 32 bits = |VVVVVVVV|VVVVVVVV|VVVVVVVV|SSSSSSSS| */
 #define THREAD_ID_VERSION_SHIFT 8
-#define THREAD_ID_VERSION_MASK  0xff00
-#define THREAD_ID_SLOT_MASK     0x00ff
+#define THREAD_ID_VERSION_MASK  0xffffff00
+#define THREAD_ID_SLOT_MASK     0x000000ff
 #define THREAD_ID_INIT(n)       ((1u << THREAD_ID_VERSION_SHIFT) | (n))
 #define THREAD_ID_SLOT(id)      ((id) & THREAD_ID_SLOT_MASK)

 #ifdef HAVE_CORELOCK_OBJECT
 /* Operations to be performed just before stopping a thread and starting
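Thread IDs now use the full 32 bits: the low byte is the slot index and the upper 24 bits are a version, presumably advanced when a slot is reused (new_thread_id() appears in the hosted thread code further below). A quick worked example with the macros above:

static void thread_id_demo(void)
{
    uint32_t id = THREAD_ID_INIT(5);        /* 0x00000105: version 1, slot 5 */
    unsigned int slot = THREAD_ID_SLOT(id); /* 5 */

    /* After the slot is recycled once, the version differs, so a stale
     * handle no longer compares equal: */
    uint32_t reused = id + (1u << THREAD_ID_VERSION_SHIFT); /* 0x00000205 */

    /* 24 version bits take ~16.7 million reuses to wrap around, versus
     * only 256 with the old 16-bit IDs. */
    (void)slot; (void)reused;
}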
@@ -337,11 +350,8 @@ void switch_thread(void);
 /* Blocks a thread for at least the specified number of ticks (0 = wait until
  * next tick) */
 void sleep_thread(int ticks);
-/* Indefinitely blocks the current thread on a thread queue */
-void block_thread(struct thread_entry *current);
-/* Blocks the current thread on a thread queue until explicitely woken or
- * the timeout is reached */
-void block_thread_w_tmo(struct thread_entry *current, int timeout);
+/* Blocks the current thread on a thread queue (< 0 == infinite) */
+void block_thread(struct thread_entry *current, int timeout);

 /* Return bit flags for thread wakeup */
 #define THREAD_NONE 0x0 /* No thread woken up (exclusive) */
@@ -350,15 +360,32 @@ void block_thread_w_tmo(struct thread_entry *current, int timeout);
                        higher priority than current were woken) */

 /* A convenience function for waking an entire queue of threads. */
-unsigned int thread_queue_wake(struct thread_entry **list);
+unsigned int thread_queue_wake(struct thread_entry **list,
+                               volatile int *count);

 /* Wakeup a thread at the head of a list */
-unsigned int wakeup_thread(struct thread_entry **list);
+enum wakeup_thread_protocol
+{
+    WAKEUP_DEFAULT,
+    WAKEUP_TRANSFER,
+    WAKEUP_RELEASE,
+    WAKEUP_TRANSFER_MULTI,
+};
+
+unsigned int wakeup_thread_(struct thread_entry **list
+                            IF_PRIO(, enum wakeup_thread_protocol proto));
+
 #ifdef HAVE_PRIORITY_SCHEDULING
+#define wakeup_thread(list, proto) \
+    wakeup_thread_((list), (proto))
+
 int thread_set_priority(unsigned int thread_id, int priority);
 int thread_get_priority(unsigned int thread_id);
+#else /* !HAVE_PRIORITY_SCHEDULING */
+#define wakeup_thread(list, proto...) \
+    wakeup_thread_((list));
 #endif /* HAVE_PRIORITY_SCHEDULING */

 #ifdef HAVE_IO_PRIORITY
 void thread_set_io_priority(unsigned int thread_id, int io_priority);
 int thread_get_io_priority(unsigned int thread_id);
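Call sites name the handoff protocol uniformly: on priority builds the wakeup_thread() macro forwards the protocol to wakeup_thread_(), otherwise the argument is swallowed and only the list is passed. A minimal sketch, assuming the declarations above are in scope (the list variable is illustrative):

static struct thread_entry *sleepers = NULL;

static void wake_one_sleeper(void)
{
    /* One source line for both build types: */
    unsigned int result = wakeup_thread(&sleepers, WAKEUP_DEFAULT);

    /* With HAVE_PRIORITY_SCHEDULING this expands to
     *     wakeup_thread_(&sleepers, WAKEUP_DEFAULT);
     * and otherwise to
     *     wakeup_thread_(&sleepers);                 */
    (void)result;
}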
@@ -27,31 +27,10 @@
 #include <stdbool.h>
 #include "config.h"
 #include "system.h"
 #include "mutex.h"
 #include "corelock.h"
-#include "kernel.h"
-#include "thread-internal.h"
+#include "kernel-internal.h"

-static inline void __attribute__((always_inline))
-mutex_set_thread(struct mutex *mtx, struct thread_entry *td)
-{
-#ifdef HAVE_PRIORITY_SCHEDULING
-    mtx->blocker.thread = td;
-#else
-    mtx->thread = td;
-#endif
-}
-
-static inline struct thread_entry * __attribute__((always_inline))
-mutex_get_thread(volatile struct mutex *mtx)
-{
-#ifdef HAVE_PRIORITY_SCHEDULING
-    return mtx->blocker.thread;
-#else
-    return mtx->thread;
-#endif
-}
-
 /* Initialize a mutex object - call before any use and do not call again once
  * the object is available to other threads */
 void mutex_init(struct mutex *m)
@@ -59,10 +38,9 @@ void mutex_init(struct mutex *m)
     corelock_init(&m->cl);
     m->queue = NULL;
     m->recursion = 0;
-    mutex_set_thread(m, NULL);
+    m->blocker.thread = NULL;
 #ifdef HAVE_PRIORITY_SCHEDULING
     m->blocker.priority = PRIORITY_IDLE;
-    m->blocker.wakeup_protocol = wakeup_priority_protocol_transfer;
     m->no_preempt = false;
 #endif
 }
@@ -72,7 +50,7 @@ void mutex_lock(struct mutex *m)
 {
     struct thread_entry *current = thread_self_entry();

-    if(current == mutex_get_thread(m))
+    if(current == m->blocker.thread)
     {
         /* current thread already owns this mutex */
         m->recursion++;
@@ -83,10 +61,10 @@ void mutex_lock(struct mutex *m)
     corelock_lock(&m->cl);

     /* must read thread again inside cs (a multiprocessor concern really) */
-    if(LIKELY(mutex_get_thread(m) == NULL))
+    if(LIKELY(m->blocker.thread == NULL))
     {
         /* lock is open */
-        mutex_set_thread(m, current);
+        m->blocker.thread = current;
         corelock_unlock(&m->cl);
         return;
     }
@@ -97,7 +75,7 @@ void mutex_lock(struct mutex *m)
     current->bqp = &m->queue;

     disable_irq();
-    block_thread(current);
+    block_thread(current, TIMEOUT_BLOCK);

     corelock_unlock(&m->cl);

@@ -109,9 +87,9 @@ void mutex_lock(struct mutex *m)
 void mutex_unlock(struct mutex *m)
 {
     /* unlocker not being the owner is an unlocking violation */
-    KERNEL_ASSERT(mutex_get_thread(m) == thread_self_entry(),
+    KERNEL_ASSERT(m->blocker.thread == thread_self_entry(),
                   "mutex_unlock->wrong thread (%s != %s)\n",
-                  mutex_get_thread(m)->name,
+                  m->blocker.thread->name,
                   thread_self_entry()->name);

     if(m->recursion > 0)
@@ -128,18 +106,17 @@ void mutex_unlock(struct mutex *m)
     if(LIKELY(m->queue == NULL))
     {
         /* no threads waiting - open the lock */
-        mutex_set_thread(m, NULL);
+        m->blocker.thread = NULL;
         corelock_unlock(&m->cl);
         return;
     }
     else
     {
         const int oldlevel = disable_irq_save();
         /* Tranfer of owning thread is handled in the wakeup protocol
          * if priorities are enabled otherwise just set it from the
          * queue head. */
-        IFN_PRIO( mutex_set_thread(m, m->queue); )
-        IF_PRIO( unsigned int result = ) wakeup_thread(&m->queue);
+        IFN_PRIO( m->blocker.thread = m->queue; )
+        unsigned int result = wakeup_thread(&m->queue, WAKEUP_TRANSFER);
         restore_irq(oldlevel);

         corelock_unlock(&m->cl);
@@ -148,5 +125,5 @@ void mutex_unlock(struct mutex *m)
     if((result & THREAD_SWITCH) && !m->no_preempt)
         switch_thread();
 #endif
-    }
+    (void)result;
 }
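None of this changes the caller-side picture; a brief reminder of the pattern with illustrative names (the owner bookkeeping through 'blocker' stays internal to the kernel):

static struct mutex log_mutex;
static int log_pos;

void log_init(void)
{
    mutex_init(&log_mutex);
}

void log_advance(void)
{
    mutex_lock(&log_mutex);    /* blocks indefinitely; recursive locking
                                  by the owner is allowed */
    log_pos++;
    mutex_unlock(&log_mutex);  /* on priority builds, ownership handoff to the
                                  next waiter uses the WAKEUP_TRANSFER protocol */
}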
@@ -84,7 +84,7 @@ static void queue_release_sender(struct thread_entry * volatile * sender,
     *thread->bqp = thread; /* Move blocking queue head to thread since
                               wakeup_thread wakes the first thread in
                               the list. */
-    wakeup_thread(thread->bqp);
+    wakeup_thread(thread->bqp, WAKEUP_RELEASE);
 }

 /* Releases any waiting threads that are queued with queue_send -
@@ -108,16 +108,16 @@ static void queue_release_all_senders(struct event_queue *q)
     }
 }

-#ifdef HAVE_WAKEUP_EXT_CB
 /* Callback to do extra forced removal steps from sender list in addition
  * to the normal blocking queue removal and priority dis-inherit */
 static void queue_remove_sender_thread_cb(struct thread_entry *thread)
 {
     *((struct thread_entry **)thread->retval) = NULL;
+#ifdef HAVE_WAKEUP_EXT_CB
     thread->wakeup_ext_cb = NULL;
+#endif
     thread->retval = 0;
 }
-#endif /* HAVE_WAKEUP_EXT_CB */

 /* Enables queue_send on the specified queue - caller allocates the extra
  * data structure. Only queues which are taken to be owned by a thread should
@@ -139,7 +139,6 @@ void queue_enable_queue_send(struct event_queue *q,
     {
         memset(send, 0, sizeof(*send));
 #ifdef HAVE_PRIORITY_SCHEDULING
-        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
         send->blocker.priority = PRIORITY_IDLE;
         if(owner_id != 0)
         {
@@ -268,7 +267,7 @@ void queue_delete(struct event_queue *q)
     corelock_unlock(&all_queues.cl);

     /* Release thread(s) waiting on queue head */
-    thread_queue_wake(&q->queue);
+    thread_queue_wake(&q->queue, NULL);

 #ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
     if(q->send)
@@ -325,7 +324,7 @@ void queue_wait(struct event_queue *q, struct queue_event *ev)
         IF_COP( current->obj_cl = &q->cl; )
         current->bqp = &q->queue;

-        block_thread(current);
+        block_thread(current, TIMEOUT_BLOCK);

         corelock_unlock(&q->cl);
         switch_thread();
@@ -386,7 +385,7 @@ void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
         IF_COP( current->obj_cl = &q->cl; )
         current->bqp = &q->queue;

-        block_thread_w_tmo(current, ticks);
+        block_thread(current, ticks);
         corelock_unlock(&q->cl);

         switch_thread();
@@ -443,7 +442,7 @@ void queue_post(struct event_queue *q, long id, intptr_t data)
     queue_do_unblock_sender(q->send, wr);

     /* Wakeup a waiting thread if any */
-    wakeup_thread(&q->queue);
+    wakeup_thread(&q->queue, WAKEUP_DEFAULT);

     corelock_unlock(&q->cl);
     restore_irq(oldlevel);
@@ -481,7 +480,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
         }

         /* Wakeup a waiting thread if any */
-        wakeup_thread(&q->queue);
+        wakeup_thread(&q->queue, WAKEUP_DEFAULT);

         /* Save thread in slot, add to list and wait for reply */
         *spp = current;
@@ -493,7 +492,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
         current->retval = (intptr_t)spp;
         current->bqp = &send->list;

-        block_thread(current);
+        block_thread(current, TIMEOUT_BLOCK);

         corelock_unlock(&q->cl);
         switch_thread();
@@ -502,7 +501,7 @@ intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
     }

     /* Function as queue_post if sending is not enabled */
-    wakeup_thread(&q->queue);
+    wakeup_thread(&q->queue, WAKEUP_DEFAULT);

     corelock_unlock(&q->cl);
     restore_irq(oldlevel);
|
@ -82,11 +82,7 @@ int semaphore_wait(struct semaphore *s, int timeout)
|
|||
* explicit in semaphore_release */
|
||||
current->retval = OBJ_WAIT_TIMEDOUT;
|
||||
|
||||
if(timeout > 0)
|
||||
block_thread_w_tmo(current, timeout); /* ...or timed out... */
|
||||
else
|
||||
block_thread(current); /* -timeout = infinite */
|
||||
|
||||
block_thread(current, timeout);
|
||||
corelock_unlock(&s->cl);
|
||||
|
||||
/* ...and turn control over to next thread */
|
||||
|
@@ -118,7 +114,7 @@ void semaphore_release(struct semaphore *s)
         KERNEL_ASSERT(s->count == 0,
                       "semaphore_release->threads queued but count=%d!\n", s->count);
         s->queue->retval = OBJ_WAIT_SUCCEEDED; /* indicate explicit wake */
-        result = wakeup_thread(&s->queue);
+        result = wakeup_thread(&s->queue, WAKEUP_DEFAULT);
     }
     else
     {
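From the caller's side, the single timeout argument now covers both cases that semaphore_wait() previously split internally. A sketch with illustrative names (TIMEOUT_BLOCK is the kernel's "wait forever" value):

static struct semaphore dma_done;

int wait_for_dma(void)
{
    /* Wait up to one second for the interrupt handler to post the semaphore. */
    if (semaphore_wait(&dma_done, HZ) == OBJ_WAIT_TIMEDOUT)
        return -1;                      /* gave up */

    return 0;                           /* OBJ_WAIT_SUCCEEDED */
}

void wait_for_dma_forever(void)
{
    semaphore_wait(&dma_done, TIMEOUT_BLOCK);   /* negative = infinite */
}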
File diff suppressed because it is too large
@@ -406,20 +406,20 @@ void sleep_thread(int ticks)
         current->tmo_tick = (1000/HZ) * ticks + ((1000/HZ)-1) - rem;
 }

-void block_thread(struct thread_entry *current)
+void block_thread(struct thread_entry *current, int ticks)
 {
+    if (ticks < 0)
         current->state = STATE_BLOCKED;
-    add_to_list_l(current->bqp, current);
-}
-
-void block_thread_w_tmo(struct thread_entry *current, int ticks)
+    else
+    {
         current->state = STATE_BLOCKED_W_TMO;
         current->tmo_tick = (1000/HZ)*ticks;
+    }

     add_to_list_l(current->bqp, current);
 }

-unsigned int wakeup_thread(struct thread_entry **list)
+unsigned int wakeup_thread_(struct thread_entry **list)
 {
     struct thread_entry *thread = *list;

@@ -439,20 +439,26 @@ unsigned int wakeup_thread(struct thread_entry **list)
     return THREAD_NONE;
 }

-unsigned int thread_queue_wake(struct thread_entry **list)
+unsigned int thread_queue_wake(struct thread_entry **list,
+                               volatile int *count)
 {
     unsigned int result = THREAD_NONE;
+    int num = 0;

     for (;;)
     {
-        unsigned int rc = wakeup_thread(list);
+        unsigned int rc = wakeup_thread_(list);

         if (rc == THREAD_NONE)
             break;

         result |= rc;
+        num++;
     }

+    if (count)
+        *count = num;
+
     return result;
 }
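The new out-parameter lets a caller learn how many threads were actually released; passing NULL keeps the old behaviour, as queue_delete() and remove_thread() do. A small sketch with an illustrative list:

static struct thread_entry *waiters = NULL;

static void broadcast_waiters(void)
{
    int woken = 0;
    unsigned int flags = thread_queue_wake(&waiters, &woken);

    if (flags & THREAD_SWITCH)
    {
        /* at least one woken thread outranks the current one */
    }

    /* 'woken' now holds the number of threads taken off the list */
    (void)woken;
}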
@@ -615,7 +621,7 @@ void remove_thread(unsigned int thread_id)

     new_thread_id(thread->id, thread);
     thread->state = STATE_KILLED;
-    thread_queue_wake(&thread->queue);
+    thread_queue_wake(&thread->queue, NULL);

     SDL_DestroySemaphore(s);

@@ -652,7 +658,7 @@ void thread_wait(unsigned int thread_id)
     if (thread->id == thread_id && thread->state != STATE_KILLED)
     {
         current->bqp = &thread->queue;
-        block_thread(current);
+        block_thread(current, TIMEOUT_BLOCK);
         switch_thread();
     }
 }