Clean up preprocessor around corelock usage and move its definition outside #ifdef ASSEMBLER_THREADS

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@29330 a1c6a512-1295-4272-9138-f99709370657
This commit is contained in:
Thomas Martitz 2011-02-19 00:09:08 +00:00
parent 8bc2801e7c
commit 70bb128ae9
3 changed files with 25 additions and 22 deletions

View file

@ -125,22 +125,6 @@ struct regs
uint32_t start; /* 40 - Thread start address, or NULL when started */
};
#ifdef CPU_PP
#ifdef HAVE_CORELOCK_OBJECT
/* No reliable atomic instruction available - use Peterson's algorithm */
struct corelock
{
volatile unsigned char myl[NUM_CORES];
volatile unsigned char turn;
} __attribute__((packed));
/* Too big to inline everywhere */
void corelock_init(struct corelock *cl);
void corelock_lock(struct corelock *cl);
int corelock_try_lock(struct corelock *cl);
void corelock_unlock(struct corelock *cl);
#endif /* HAVE_CORELOCK_OBJECT */
#endif /* CPU_PP */
#elif defined(CPU_MIPS)
struct regs
{
@ -171,6 +155,23 @@ struct regs
#endif
#endif /* PLATFORM_NATIVE */
#ifdef CPU_PP
#ifdef HAVE_CORELOCK_OBJECT
/* No reliable atomic instruction available - use Peterson's algorithm */
struct corelock
{
volatile unsigned char myl[NUM_CORES];
volatile unsigned char turn;
} __attribute__((packed));
/* Too big to inline everywhere */
void corelock_init(struct corelock *cl);
void corelock_lock(struct corelock *cl);
int corelock_try_lock(struct corelock *cl);
void corelock_unlock(struct corelock *cl);
#endif /* HAVE_CORELOCK_OBJECT */
#endif /* CPU_PP */
/* NOTE: The use of the word "queue" may also refer to a linked list of
threads being maintained that are normally dealt with in FIFO order
and not necessarily kernel event_queue */
@ -266,7 +267,7 @@ struct thread_entry
object where thread is blocked - used
for implicit unblock and explicit wake
states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
#if NUM_CORES > 1
#ifdef HAVE_CORELOCK_OBJECT
struct corelock *obj_cl; /* Object corelock where thead is blocked -
states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
struct corelock waiter_cl; /* Corelock for thread_wait */
@ -323,7 +324,7 @@ struct thread_entry
/* Specify current thread in a function taking an ID. */
#define THREAD_ID_CURRENT ((unsigned int)-1)
#if NUM_CORES > 1
#ifdef HAVE_CORELOCK_OBJECT
/* Operations to be performed just before stopping a thread and starting
a new one if specified before calling switch_thread */
enum
@ -356,7 +357,7 @@ struct core_entry
threads */
#endif
long next_tmo_check; /* soonest time to check tmo threads */
#if NUM_CORES > 1
#ifdef HAVE_CORELOCK_OBJECT
struct thread_blk_ops blk_ops; /* operations to perform when
blocking a thread */
struct corelock rtr_cl; /* Lock for rtr list */

View file

@ -68,7 +68,9 @@ void (*tick_funcs[MAX_NUM_TICK_TASKS+1])(void);
static struct
{
struct event_queue *queues[MAX_NUM_QUEUES+1];
IF_COP( struct corelock cl; )
#ifdef HAVE_CORELOCK_OBJECT
struct corelock cl;
#endif
} all_queues SHAREDBSS_ATTR;
/****************************************************************************

View file

@ -967,7 +967,7 @@ void check_tmo_threads(void)
* life again. */
if (state == STATE_BLOCKED_W_TMO)
{
#if NUM_CORES > 1
#ifdef HAVE_CORELOCK_OBJECT
/* Lock the waiting thread's kernel object */
struct corelock *ocl = curr->obj_cl;
@ -1782,7 +1782,7 @@ void thread_exit(void)
*/
void remove_thread(unsigned int thread_id)
{
#if NUM_CORES > 1
#ifdef HAVE_CORELOCK_OBJECT
/* core is not constant here because of core switching */
unsigned int core = CURRENT_CORE;
unsigned int old_core = NUM_CORES;