2003-02-07 09:41:57 +00:00
|
|
|
/***************************************************************************
|
|
|
|
* __________ __ ___.
|
|
|
|
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
|
|
|
|
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
|
|
|
|
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
|
|
|
|
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
|
|
|
|
* \/ \/ \/ \/ \/
|
|
|
|
* $Id$
|
|
|
|
*
|
|
|
|
* Copyright (C) 2002 by Ulf Ralberg
|
|
|
|
*
|
2008-06-28 18:10:04 +00:00
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
* as published by the Free Software Foundation; either version 2
|
|
|
|
* of the License, or (at your option) any later version.
|
2003-02-07 09:41:57 +00:00
|
|
|
*
|
|
|
|
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
|
|
|
|
* KIND, either express or implied.
|
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
#ifndef THREAD_H
|
|
|
|
#define THREAD_H
|
|
|
|
|
2006-09-17 15:10:22 +00:00
|
|
|
#include "config.h"
|
2007-03-21 22:58:53 +00:00
|
|
|
#include <inttypes.h>
|
2007-10-16 01:25:17 +00:00
|
|
|
#include <stddef.h>
|
2003-02-14 09:44:34 +00:00
|
|
|
#include <stdbool.h>
|
2010-08-12 13:38:25 +00:00
|
|
|
#include "gcc_extensions.h"
|
2003-02-14 09:44:34 +00:00
|
|
|
|
2006-09-16 16:18:11 +00:00
|
|
|
/* Priority scheduling (when enabled with HAVE_PRIORITY_SCHEDULING) works
 * by giving high priority threads more CPU time than lower priority threads
 * when they need it. Priority is differential such that the priority
 * difference between a lower priority runnable thread and the highest priority
 * runnable thread determines the amount of aging necessary for the lower
 * priority thread to be scheduled in order to prevent starvation.
 *
 * If software playback codec pcm buffer is going down to critical, codec
 * can gradually raise its own priority to override user interface and
 * prevent playback skipping.
 *
 * Note: a numerically SMALLER value is a HIGHER priority
 * (HIGHEST_PRIORITY == 1, LOWEST_PRIORITY == 31).
 */
#define PRIORITY_RESERVED_HIGH   0 /* Reserved */
#define PRIORITY_RESERVED_LOW    32 /* Reserved */
#define HIGHEST_PRIORITY         1 /* The highest possible thread priority */
#define LOWEST_PRIORITY          31 /* The lowest possible thread priority */
/* Realtime range reserved for threads that will not allow threads of lower
 * priority to age and run (future expansion) */
#define PRIORITY_REALTIME_1      1
#define PRIORITY_REALTIME_2      2
#define PRIORITY_REALTIME_3      3
#define PRIORITY_REALTIME_4      4
#define PRIORITY_REALTIME        4 /* Lowest realtime range */
#define PRIORITY_BUFFERING       15 /* Codec buffering thread */
#define PRIORITY_USER_INTERFACE  16 /* The main thread */
#define PRIORITY_RECORDING       16 /* Recording thread */
#define PRIORITY_PLAYBACK        16 /* Variable between this and MAX */
#define PRIORITY_PLAYBACK_MAX    5  /* Maximum allowable playback priority */
#define PRIORITY_SYSTEM          18 /* All other firmware threads */
#define PRIORITY_BACKGROUND      20 /* Normal application threads */
#define NUM_PRIORITIES           32
#define PRIORITY_IDLE            32 /* Priority representative of no tasks */

/* I/O priorities: same orientation as thread priorities - lower value is
 * serviced more urgently (IMMEDIATE == 0, BACKGROUND == 32) */
#define IO_PRIORITY_IMMEDIATE    0
#define IO_PRIORITY_BACKGROUND   32
|
|
|
|
|
2005-11-07 20:09:08 +00:00
|
|
|
/* Number of statically-allocated thread slots. BASETHREADS is the count of
 * core firmware threads for the build configuration; targets may request
 * extra slots via TARGET_EXTRA_THREADS. */
#if CONFIG_CODEC == SWCODEC

#ifdef HAVE_RECORDING
#define BASETHREADS  17
#else
#define BASETHREADS  16
#endif

#else
#define BASETHREADS  11
#endif /* CONFIG_CODEC == * */

#ifndef TARGET_EXTRA_THREADS
#define TARGET_EXTRA_THREADS 0
#endif

/* Total number of thread slots available to the scheduler */
#define MAXTHREADS (BASETHREADS+TARGET_EXTRA_THREADS)
|
|
|
|
|
2010-08-02 20:34:47 +00:00
|
|
|
/*
 * Threads need a larger default stack when running under a host OS
 * (hosted/simulator builds), likely because host C library functions
 * consume more stack; the simulator doesn't simulate stack usage anyway,
 * so the larger size is harmless there.
 */
#if ((CONFIG_PLATFORM & PLATFORM_NATIVE) || defined(SIMULATOR))
#define DEFAULT_STACK_SIZE 0x400 /* Bytes */
#else
#define DEFAULT_STACK_SIZE 0x1000 /* Bytes */
#endif
|
|
|
|
|
2003-02-07 09:41:57 +00:00
|
|
|
|
2010-08-02 20:34:47 +00:00
|
|
|
#if (CONFIG_PLATFORM & (PLATFORM_NATIVE|PLATFORM_ANDROID))
/* Need to keep structures inside the header file because debug_menu
 * needs them. */
#ifdef CPU_COLDFIRE
/* Saved register context for a Coldfire thread. The numbers in the member
 * comments are byte offsets within the struct - presumably relied upon by
 * the context-switch assembly (verify against thread.c). */
struct regs
{
    uint32_t macsr; /*  0 - EMAC status register */
    uint32_t d[6];  /*  4-24 - d2-d7 */
    uint32_t a[5];  /* 28-44 - a2-a6 */
    uint32_t sp;    /* 48 - Stack pointer (a7) */
    uint32_t start; /* 52 - Thread start address, or NULL when started */
};
|
2007-09-28 10:20:02 +00:00
|
|
|
#elif CONFIG_CPU == SH7034
/* Saved register context for an SH7034 thread; member comments give byte
 * offsets within the struct. */
struct regs
{
    uint32_t r[7];  /*  0-24 - Registers r8 thru r14 */
    uint32_t sp;    /* 28 - Stack pointer (r15) */
    uint32_t pr;    /* 32 - Procedure register */
    uint32_t start; /* 36 - Thread start address, or NULL when started */
};
|
2010-08-02 20:34:47 +00:00
|
|
|
#elif defined(CPU_ARM) || (CONFIG_PLATFORM & PLATFORM_ANDROID)
/* Saved register context for an ARM thread; member comments give byte
 * offsets within the struct. */
struct regs
{
    uint32_t r[8];  /*  0-28 - Registers r4-r11 */
    uint32_t sp;    /* 32 - Stack pointer (r13) */
    uint32_t lr;    /* 36 - r14 (lr) */
    uint32_t start; /* 40 - Thread start address, or NULL when started */
};
|
2010-06-10 17:31:45 +00:00
|
|
|
|
|
|
|
#ifdef CPU_PP
|
|
|
|
#ifdef HAVE_CORELOCK_OBJECT
|
|
|
|
/* No reliable atomic instruction available - use Peterson's algorithm */
|
|
|
|
struct corelock
|
|
|
|
{
|
|
|
|
volatile unsigned char myl[NUM_CORES];
|
|
|
|
volatile unsigned char turn;
|
|
|
|
} __attribute__((packed));
|
|
|
|
|
|
|
|
/* Too big to inline everywhere */
|
|
|
|
void corelock_init(struct corelock *cl);
|
|
|
|
void corelock_lock(struct corelock *cl);
|
|
|
|
int corelock_try_lock(struct corelock *cl);
|
|
|
|
void corelock_unlock(struct corelock *cl);
|
|
|
|
#endif /* HAVE_CORELOCK_OBJECT */
|
|
|
|
#endif /* CPU_PP */
|
2008-07-14 15:03:10 +00:00
|
|
|
#elif defined(CPU_MIPS)
/* Saved register context for a MIPS thread; member comments give byte
 * offsets within the struct. */
struct regs
{
    uint32_t r[9];  /*  0-32 - Registers s0-s7, fp */
    uint32_t sp;    /* 36 - Stack pointer */
    uint32_t ra;    /* 40 - Return address */
    uint32_t start; /* 44 - Thread start address, or NULL when started */
};
#endif /* CONFIG_CPU */
|
2010-06-21 16:53:00 +00:00
|
|
|
#elif (CONFIG_PLATFORM & PLATFORM_HOSTED)
/* "Context" for a hosted build: opaque handles for the host OS thread
 * rather than saved CPU registers. */
struct regs
{
    void *t;             /* OS thread */
    void *told;          /* Last thread in slot (explained in thread-sdl.c) */
    void *s;             /* Semaphore for blocking and wakeup */
    void (*start)(void); /* Start function */
};
#endif /* PLATFORM_NATIVE || PLATFORM_HOSTED */
|
2006-09-16 16:18:11 +00:00
|
|
|
|
2007-10-16 01:25:17 +00:00
|
|
|
/* NOTE: The use of the word "queue" may also refer to a linked list of
   threads being maintained that are normally dealt with in FIFO order
   and not necessarily kernel event_queue */

/* Thread slot states (stored in thread_entry.state). Ordering matters:
 * timeout-capable states are grouped at the end, starting at
 * TIMEOUT_STATE_FIRST. */
enum
{
    /* States without a timeout must be first */
    STATE_KILLED = 0,    /* Thread is killed (default) */
    STATE_RUNNING,       /* Thread is currently running */
    STATE_BLOCKED,       /* Thread is indefinitely blocked on a queue */
    /* These states involve adding the thread to the tmo list */
    STATE_SLEEPING,      /* Thread is sleeping with a timeout */
    STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
    /* Miscellaneous states */
    STATE_FROZEN,        /* Thread is suspended and will not run until
                            thread_thaw is called with its ID */
    THREAD_NUM_STATES,
    TIMEOUT_STATE_FIRST = STATE_SLEEPING,
};
|
|
|
|
|
|
|
|
#if NUM_CORES > 1
|
2008-03-25 02:34:12 +00:00
|
|
|
/* Pointer value for name field to indicate thread is being killed. Using
|
|
|
|
* an alternate STATE_* won't work since that would interfere with operation
|
|
|
|
* while the thread is still running. */
|
|
|
|
#define THREAD_DESTRUCT ((const char *)~(intptr_t)0)
|
2007-03-21 22:58:53 +00:00
|
|
|
#endif
|
2007-10-16 01:25:17 +00:00
|
|
|
|
|
|
|
/* Link information for lists thread is in */
struct thread_entry; /* forward */
/* Doubly-linked list node embedded in each thread_entry (one for the
 * run/blocked list, one for the timeout list). */
struct thread_list
{
    struct thread_entry *prev; /* Previous thread in a list */
    struct thread_entry *next; /* Next thread in a list */
};
|
|
|
|
|
2010-06-10 17:31:45 +00:00
|
|
|
#ifndef HAVE_CORELOCK_OBJECT
/* No atomic corelock op needed or just none defined - all corelock
 * operations compile away to nothing on single-core builds.
 * NOTE(review): corelock_try_lock(cl) expands to an empty token sequence,
 * so its "return value" must never be used on these builds - verify no
 * shared code does `if (corelock_try_lock(...))`. */
#define corelock_init(cl)
#define corelock_lock(cl)
#define corelock_try_lock(cl)
#define corelock_unlock(cl)
#endif /* HAVE_CORELOCK_OBJECT */
|
2007-10-16 01:25:17 +00:00
|
|
|
|
2008-03-25 02:34:12 +00:00
|
|
|
#ifdef HAVE_PRIORITY_SCHEDULING
|
|
|
|
struct blocker
|
2007-10-16 01:25:17 +00:00
|
|
|
{
|
2010-12-29 13:53:30 +00:00
|
|
|
struct thread_entry * volatile thread; /* thread blocking other threads
|
|
|
|
(aka. object owner) */
|
2008-03-25 02:34:12 +00:00
|
|
|
int priority; /* highest priority waiter */
|
|
|
|
struct thread_entry * (*wakeup_protocol)(struct thread_entry *thread);
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Choices of wakeup protocol */
|
|
|
|
|
|
|
|
/* For transfer of object ownership by one thread to another thread by
|
|
|
|
* the owning thread itself (mutexes) */
|
|
|
|
struct thread_entry *
|
|
|
|
wakeup_priority_protocol_transfer(struct thread_entry *thread);
|
|
|
|
|
|
|
|
/* For release by owner where ownership doesn't change - other threads,
|
|
|
|
* interrupts, timeouts, etc. (mutex timeout, queues) */
|
|
|
|
struct thread_entry *
|
|
|
|
wakeup_priority_protocol_release(struct thread_entry *thread);
|
|
|
|
|
|
|
|
|
|
|
|
struct priority_distribution
|
|
|
|
{
|
|
|
|
uint8_t hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
|
|
|
|
uint32_t mask; /* Bitmask of hist entries that are not zero */
|
2007-10-16 01:25:17 +00:00
|
|
|
};
|
|
|
|
|
2008-03-25 02:34:12 +00:00
|
|
|
#endif /* HAVE_PRIORITY_SCHEDULING */
|
|
|
|
|
2007-10-16 01:25:17 +00:00
|
|
|
/* Information kept in each thread slot
 * members are arranged according to size - largest first - in order
 * to ensure both alignment and packing at the same time.
 */
struct thread_entry
{
    struct regs context;       /* Register context at switch -
                                  _must_ be first member */
    uintptr_t *stack;          /* Pointer to top of stack */
    const char *name;          /* Thread name */
    long tmo_tick;             /* Tick when thread should be woken from
                                  timeout -
                                  states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_list l;      /* Links for blocked/waking/running -
                                  circular linkage in both directions */
    struct thread_list tmo;    /* Links for timeout list -
                                  Circular in reverse direction, NULL-terminated in
                                  forward direction -
                                  states: STATE_SLEEPING/STATE_BLOCKED_W_TMO */
    struct thread_entry **bqp; /* Pointer to list variable in kernel
                                  object where thread is blocked - used
                                  for implicit unblock and explicit wake
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
#if NUM_CORES > 1
    struct corelock *obj_cl;   /* Object corelock where thread is blocked -
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct corelock waiter_cl; /* Corelock for thread_wait */
    struct corelock slot_cl;   /* Corelock to lock thread slot */
    unsigned char core;        /* The core to which thread belongs */
#endif
    struct thread_entry *queue; /* List of threads waiting for thread to be
                                   removed */
#ifdef HAVE_WAKEUP_EXT_CB
    void (*wakeup_ext_cb)(struct thread_entry *thread); /* Callback that
                                  performs special steps needed when being
                                  forced off of an object's wait queue that
                                  go beyond the standard wait queue removal
                                  and priority disinheritance */
    /* Only enabled when using queue_send for now */
#endif
#if defined(HAVE_EXTENDED_MESSAGING_AND_NAME) || NUM_CORES > 1
    volatile intptr_t retval;  /* Return value from a blocked operation/
                                  misc. use */
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
    /* Priority summary of owned objects that support inheritance */
    struct blocker *blocker;   /* Pointer to blocker when this thread is blocked
                                  on an object that supports PIP -
                                  states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct priority_distribution pdist; /* Priority summary of owned objects
                                  that have blocked threads and thread's own
                                  base priority */
    int skip_count;            /* Number of times skipped if higher priority
                                  thread was running */
    unsigned char base_priority; /* Base priority (set explicitly during
                                  creation or thread_set_priority) */
    unsigned char priority;    /* Scheduled priority (higher of base or
                                  all threads blocked by this one) */
#endif
    uint16_t id;               /* Current slot id */
    unsigned short stack_size; /* Size of stack in bytes */
    unsigned char state;       /* Thread slot state (STATE_*) */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    unsigned char cpu_boost;   /* CPU frequency boost flag */
#endif
#ifdef HAVE_IO_PRIORITY
    unsigned char io_priority; /* I/O priority (IO_PRIORITY_*) - set via
                                  thread_set_io_priority() */
#endif
};
|
|
|
|
|
2008-12-10 08:57:10 +00:00
|
|
|
/*** Macros for internal use ***/

/* A thread ID is 16 bits wide and packs two fields: |VVVVVVVV|SSSSSSSS|
 * The high byte (V) is a version counter, the low byte (S) the slot index;
 * a freshly initialized ID for slot n carries version 1. */
#define THREAD_ID_VERSION_SHIFT 8
#define THREAD_ID_VERSION_MASK  0xff00
#define THREAD_ID_SLOT_MASK     0x00ff

/* Initial ID for slot n: version field set to 1, slot field set to n */
#define THREAD_ID_INIT(n) ((1u << THREAD_ID_VERSION_SHIFT) | (n))

/* Sentinel meaning "the calling thread" when passed to a function
 * taking a thread ID. */
#define THREAD_ID_CURRENT ((unsigned int)-1)
|
|
|
|
|
2007-10-16 01:25:17 +00:00
|
|
|
#if NUM_CORES > 1
|
|
|
|
/* Operations to be performed just before stopping a thread and starting
|
|
|
|
a new one if specified before calling switch_thread */
|
2008-03-25 02:34:12 +00:00
|
|
|
enum
|
|
|
|
{
|
|
|
|
TBOP_CLEAR = 0, /* No operation to do */
|
|
|
|
TBOP_UNLOCK_CORELOCK, /* Unlock a corelock variable */
|
|
|
|
TBOP_SWITCH_CORE, /* Call the core switch preparation routine */
|
|
|
|
};
|
2007-10-16 01:25:17 +00:00
|
|
|
|
|
|
|
struct thread_blk_ops
|
|
|
|
{
|
2008-03-25 02:34:12 +00:00
|
|
|
struct corelock *cl_p; /* pointer to corelock */
|
|
|
|
unsigned char flags; /* TBOP_* flags */
|
2007-10-16 01:25:17 +00:00
|
|
|
};
|
|
|
|
#endif /* NUM_CORES > 1 */
|
|
|
|
|
|
|
|
/* Information kept for each core
 * Members are arranged for the same reason as in thread_entry
 */
struct core_entry
{
    /* "Active" lists - core is constantly active on these and are never
       locked and interrupts do not access them */
    struct thread_entry *running;  /* threads that are running (RTR) */
    struct thread_entry *timeout;  /* threads that are on a timeout before
                                      running again */
    struct thread_entry *block_task; /* Task going off running list */
#ifdef HAVE_PRIORITY_SCHEDULING
    struct priority_distribution rtr; /* Summary of running and ready-to-run
                                         threads */
#endif
    long next_tmo_check;           /* soonest time to check tmo threads */
#if NUM_CORES > 1
    struct thread_blk_ops blk_ops; /* operations to perform when
                                      blocking a thread */
    struct corelock rtr_cl;        /* Lock for rtr list */
#endif /* NUM_CORES */
};
|
2006-09-16 16:18:11 +00:00
|
|
|
|
|
|
|
/* Conditional-compilation helpers: IF_PRIO(...) expands its arguments only
 * when priority scheduling is enabled; IFN_PRIO(...) is the complement.
 * Used to optionally splice parameters/arguments into declarations
 * (e.g. create_thread's `IF_PRIO(, int priority)`). */
#ifdef HAVE_PRIORITY_SCHEDULING
#define IF_PRIO(...)  __VA_ARGS__
#define IFN_PRIO(...)
#else
#define IF_PRIO(...)
#define IFN_PRIO(...) __VA_ARGS__
#endif
|
2006-09-02 07:56:52 +00:00
|
|
|
|
2007-10-16 01:25:17 +00:00
|
|
|
/* Idle the calling core until work arrives.
 * NOTE(review): the exact low-power behavior is target-defined - see the
 * per-target implementation. */
void core_idle(void);
/* Wake a core out of core_idle(); the core argument exists only on
 * multi-core builds (IF_COP_VOID) */
void core_wake(IF_COP_VOID(unsigned int core));

/* Initialize the scheduler */
void init_threads(void) INIT_ATTR;

/* Allocate a thread in the scheduler */
#define CREATE_THREAD_FROZEN   0x00000001 /* Thread is frozen at create time */
/* Create a thread running `function` on the supplied stack and return its
 * thread ID. `priority`/`core` parameters exist only when priority
 * scheduling / multi-core are compiled in.
 * NOTE(review): the failure return value is not visible in this header -
 * confirm against thread.c before checking for errors. */
unsigned int create_thread(void (*function)(void),
                           void* stack, size_t stack_size,
                           unsigned flags, const char *name
                           IF_PRIO(, int priority)
                           IF_COP(, unsigned int core));
|
2006-09-16 16:18:11 +00:00
|
|
|
|
2008-03-25 02:34:12 +00:00
|
|
|
/* Set and clear the CPU frequency boost flag for the calling thread */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
void trigger_cpu_boost(void);
void cancel_cpu_boost(void);
#else
/* Without boost control these compile away to no-ops */
#define trigger_cpu_boost() do { } while(0)
#define cancel_cpu_boost() do { } while(0)
#endif
/* Return thread entry from id */
struct thread_entry *thread_id_entry(unsigned int thread_id);
/* Make a frozen thread runnable (when started with CREATE_THREAD_FROZEN).
 * Has no effect on a thread not frozen. */
void thread_thaw(unsigned int thread_id);
/* Wait for a thread to exit */
void thread_wait(unsigned int thread_id);
/* Exit the current thread */
void thread_exit(void) NORETURN_ATTR;
#if defined(DEBUG) || defined(ROCKBOX_HAS_LOGF)
#define ALLOW_REMOVE_THREAD
/* Remove a thread from the scheduler */
void remove_thread(unsigned int thread_id);
#endif
|
2007-10-16 01:25:17 +00:00
|
|
|
|
2008-03-25 02:34:12 +00:00
|
|
|
/* Switch to next runnable thread */
void switch_thread(void);
/* Blocks a thread for at least the specified number of ticks (0 = wait until
 * next tick) */
void sleep_thread(int ticks);
/* Indefinitely blocks the current thread on a thread queue */
void block_thread(struct thread_entry *current);
/* Blocks the current thread on a thread queue until explicitly woken or
 * the timeout is reached */
void block_thread_w_tmo(struct thread_entry *current, int timeout);

/* Return bit flags for thread wakeup */
#define THREAD_NONE   0x0 /* No thread woken up (exclusive) */
#define THREAD_OK     0x1 /* A thread was woken up */
#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
                             higher priority than current were woken) */

/* A convenience function for waking an entire queue of threads;
 * returns THREAD_* flags */
unsigned int thread_queue_wake(struct thread_entry **list);

/* Wakeup a thread at the head of a list; returns THREAD_* flags */
unsigned int wakeup_thread(struct thread_entry **list);
|
2007-10-16 01:25:17 +00:00
|
|
|
|
2006-11-06 18:07:30 +00:00
|
|
|
#ifdef HAVE_PRIORITY_SCHEDULING
/* Set/get a thread's base priority.
 * NOTE(review): thread_set_priority's int return semantics (old priority?
 * error code?) are not visible in this header - confirm in thread.c. */
int thread_set_priority(unsigned int thread_id, int priority);
int thread_get_priority(unsigned int thread_id);
#endif /* HAVE_PRIORITY_SCHEDULING */
#ifdef HAVE_IO_PRIORITY
/* Set/get a thread's I/O priority (IO_PRIORITY_*) */
void thread_set_io_priority(unsigned int thread_id, int io_priority);
int thread_get_io_priority(unsigned int thread_id);
#endif /* HAVE_IO_PRIORITY */
#if NUM_CORES > 1
/* Move the calling thread to another core.
 * NOTE(review): return value presumably the previous core - verify. */
unsigned int switch_core(unsigned int new_core);
#endif
/* Return the ID of the current (calling) thread */
unsigned int thread_get_current(void);

/* Debugging info - only! */
/* Stack usage of the given thread, for the debug menu */
int thread_stack_usage(const struct thread_entry *thread);
#if NUM_CORES > 1
/* Stack usage of a core's idle stack */
int idle_stack_usage(unsigned int core);
#endif
/* Copy the thread's name (truncated to `size`) into `buffer` */
void thread_get_name(char *buffer, int size,
                     struct thread_entry *thread);
#ifdef RB_PROFILE
void profile_thread(void);
#endif
|
2003-02-07 09:41:57 +00:00
|
|
|
|
2007-10-16 01:25:17 +00:00
|
|
|
#endif /* THREAD_H */
|