/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/  \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#ifndef _KERNEL_H_
#define _KERNEL_H_

#include <stdbool.h>
#include <inttypes.h>
#include "config.h"

#include "thread.h"

/* wrap-safe macros for tick comparison */
#define TIME_AFTER(a,b) ((long)(b) - (long)(a) < 0)
#define TIME_BEFORE(a,b) TIME_AFTER(b,a)

#define HZ 100 /* number of ticks per second */
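
/*
 * Illustrative sketch (not part of the original header): TIME_AFTER/
 * TIME_BEFORE rely on signed wrap-around arithmetic, so a deadline derived
 * from current_tick stays valid even when the tick counter overflows.
 * The variable name below is hypothetical.
 *
 *     long deadline = current_tick + 2*HZ;      // two seconds from now
 *     while (!TIME_AFTER(current_tick, deadline))
 *         yield();                              // wait until it passes
 */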

#define MAX_NUM_TICK_TASKS 8

#define MAX_NUM_QUEUES 32
#define QUEUE_LENGTH 16 /* MUST be a power of 2 */
#define QUEUE_LENGTH_MASK (QUEUE_LENGTH - 1)
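
/*
 * Illustrative sketch (assumption, not from the original header): because
 * QUEUE_LENGTH is a power of two, the read/write indices can increment
 * forever and are reduced to an array slot with a cheap mask instead of a
 * modulo:
 *
 *     struct queue_event *e = &q->events[q->write & QUEUE_LENGTH_MASK];
 *     // increment q->write after storing; (write - read) is the fill count
 */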

/* System defined message ID's - |sign bit = 1|class|id| */
/* Event class list */
#define SYS_EVENT_CLS_QUEUE 0
#define SYS_EVENT_CLS_USB 1
#define SYS_EVENT_CLS_POWER 2
#define SYS_EVENT_CLS_FILESYS 3
#define SYS_EVENT_CLS_PLUG 4
#define SYS_EVENT_CLS_MISC 5
/* make sure SYS_EVENT_CLS_BITS has enough range */

/* Bit 31->|S|c...c|i...i| */
#define SYS_EVENT ((long)(int)(1 << 31))
#define SYS_EVENT_CLS_BITS (3)
#define SYS_EVENT_CLS_SHIFT (31-SYS_EVENT_CLS_BITS)
#define SYS_EVENT_CLS_MASK (((1l << SYS_EVENT_CLS_BITS)-1) << SYS_EVENT_CLS_SHIFT)
#define MAKE_SYS_EVENT(cls, id) (SYS_EVENT | ((long)(cls) << SYS_EVENT_CLS_SHIFT) | (long)(id))
/* Macros for extracting codes */
#define SYS_EVENT_CLS(e) (((e) & SYS_EVENT_CLS_MASK) >> SYS_EVENT_CLS_SHIFT)
#define SYS_EVENT_ID(e) ((e) & ~(SYS_EVENT|SYS_EVENT_CLS_MASK))
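
/*
 * Illustrative sketch (assumption, not from the original header): with
 * SYS_EVENT_CLS_BITS == 3 the class field occupies bits 30..28, so e.g.
 *
 *     MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 1)
 *         == 0x80000000 | (1 << 28) | 1 == 0x90000001
 *
 * and SYS_EVENT_CLS()/SYS_EVENT_ID() recover class 1 (USB) and id 1.
 */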

#define SYS_TIMEOUT MAKE_SYS_EVENT(SYS_EVENT_CLS_QUEUE, 0)
#define SYS_USB_CONNECTED MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 0)
#define SYS_USB_CONNECTED_ACK MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 1)
#define SYS_USB_DISCONNECTED MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 2)
#define SYS_USB_DISCONNECTED_ACK MAKE_SYS_EVENT(SYS_EVENT_CLS_USB, 3)
#define SYS_POWEROFF MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 0)
#define SYS_CHARGER_CONNECTED MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 1)
#define SYS_CHARGER_DISCONNECTED MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 2)
#define SYS_BATTERY_UPDATE MAKE_SYS_EVENT(SYS_EVENT_CLS_POWER, 3)
#define SYS_FS_CHANGED MAKE_SYS_EVENT(SYS_EVENT_CLS_FILESYS, 0)
#define SYS_HOTSWAP_INSERTED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 0)
#define SYS_HOTSWAP_EXTRACTED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 1)
#define SYS_PHONE_PLUGGED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 2)
#define SYS_PHONE_UNPLUGGED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 3)
#define SYS_REMOTE_PLUGGED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 4)
#define SYS_REMOTE_UNPLUGGED MAKE_SYS_EVENT(SYS_EVENT_CLS_PLUG, 5)
#define SYS_SCREENDUMP MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 0)
#define SYS_CAR_ADAPTER_RESUME MAKE_SYS_EVENT(SYS_EVENT_CLS_MISC, 1)

#define IS_SYSEVENT(ev) (((ev) & SYS_EVENT) == SYS_EVENT)
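
/*
 * Illustrative sketch (assumption, not from the original header): a typical
 * thread loop pulls events from its queue and forwards anything flagged as
 * a system event to common handling before acting on its own message IDs.
 * "my_queue", "handle_sys_event" and "handle_app_message" are hypothetical.
 *
 *     struct queue_event ev;
 *     queue_wait(&my_queue, &ev);
 *     if (IS_SYSEVENT(ev.id))
 *         handle_sys_event(&ev);          // e.g. SYS_USB_CONNECTED
 *     else
 *         handle_app_message(ev.id, ev.data);
 */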

#ifndef TIMEOUT_BLOCK
#define TIMEOUT_BLOCK -1
#define TIMEOUT_NOBLOCK 0
#endif

struct queue_event
{
    long id;
    intptr_t data;
};

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
struct queue_sender_list
{
    /* If non-NULL, there is a thread waiting for the corresponding event */
    /* Must be statically allocated to put in non-cached ram. */
    struct thread_entry *senders[QUEUE_LENGTH]; /* message->thread map */
    struct thread_entry *list;                  /* list of senders in map */
    /* Send info for last message dequeued or NULL if replied or not sent */
    struct thread_entry *curr_sender;
#ifdef HAVE_PRIORITY_SCHEDULING
    struct blocker blocker;
#endif
};
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

#ifdef HAVE_PRIORITY_SCHEDULING
#define QUEUE_GET_THREAD(q) \
    (((q)->send == NULL) ? NULL : (q)->send->blocker.thread)
#else
/* Queues without priority scheduling have no owner provision _at this time_ */
#define QUEUE_GET_THREAD(q) \
    (NULL)
#endif

struct event_queue
{
    struct thread_entry *queue;              /* waiter list */
    struct queue_event events[QUEUE_LENGTH]; /* list of events */
    unsigned int read;                       /* head of queue */
    unsigned int write;                      /* tail of queue */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    struct queue_sender_list *send;          /* list of threads waiting for
                                                reply to an event */
#ifdef HAVE_PRIORITY_SCHEDULING
    struct blocker *blocker_p;               /* priority inheritance info
                                                for sync message senders */
#endif
#endif
    IF_COP( struct corelock cl; )            /* multiprocessor sync */
};

#ifdef HAVE_PRIORITY_SCHEDULING
#define MUTEX_SET_THREAD(m, t) ((m)->blocker.thread = (t))
#define MUTEX_GET_THREAD(m)    ((m)->blocker.thread)
#else
#define MUTEX_SET_THREAD(m, t) ((m)->thread = (t))
#define MUTEX_GET_THREAD(m)    ((m)->thread)
#endif

struct mutex
{
    struct thread_entry *queue;   /* waiter list */
    int count;                    /* lock owner recursion count */
#ifdef HAVE_PRIORITY_SCHEDULING
    struct blocker blocker;       /* priority inheritance info
                                     for waiters */
    bool no_preempt;              /* don't allow higher-priority thread
                                     to be scheduled even if woken */
#else
    struct thread_entry *thread;
#endif
    IF_COP( struct corelock cl; ) /* multiprocessor sync */
    unsigned char locked;         /* locked flag */
};

#if NUM_CORES > 1
struct spinlock
{
    struct thread_entry *thread;  /* lock owner */
    int count;                    /* lock owner recursion count */
    struct corelock cl;           /* multiprocessor sync */
};
#endif

#ifdef HAVE_SEMAPHORE_OBJECTS
struct semaphore
{
    struct thread_entry *queue;   /* Waiter list */
    int count;                    /* # of waits remaining before unsignaled */
    int max;                      /* maximum # of waits to remain signaled */
    IF_COP( struct corelock cl; ) /* multiprocessor sync */
};
#endif

#ifdef HAVE_EVENT_OBJECTS
struct event
{
    struct thread_entry *queues[2]; /* waiters for each state */
    unsigned char automatic;        /* event performs auto-reset */
    unsigned char state;            /* state: 1 = signaled */
    IF_COP( struct corelock cl; )   /* multiprocessor sync */
};
#endif

#ifdef HAVE_WAKEUP_OBJECTS
struct wakeup
{
    struct thread_entry *queue;   /* waiter list */
    unsigned char signalled;      /* signalled status */
    IF_COP( struct corelock cl; ) /* multiprocessor sync */
};
#endif

/* global tick variable */
#if defined(CPU_PP) && defined(BOOTLOADER)
/* We don't enable interrupts in the iPod bootloader, so we need to fake
   the current_tick variable */
#define current_tick (signed)(USEC_TIMER/10000)
#else
extern volatile long current_tick;
#endif

#ifdef SIMULATOR
#define sleep(x) sim_sleep(x)
#endif

/* kernel functions */
extern void kernel_init(void);
extern void yield(void);
extern void sleep(int ticks);
int tick_add_task(void (*f)(void));
int tick_remove_task(void (*f)(void));
extern void tick_start(unsigned int interval_in_ms);
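
/*
 * Illustrative sketch (assumption, not from the original header): a tick
 * task is a short callback run from the timer interrupt on every tick, so
 * it must not block or sleep. "poll_buttons" is a hypothetical function.
 *
 *     static void poll_buttons(void)    // runs HZ times per second
 *     {
 *         ...
 *     }
 *
 *     tick_add_task(poll_buttons);      // check the return value for failure
 *     tick_remove_task(poll_buttons);   // unregister when no longer needed
 */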

struct timeout;

/* timeout callback type
 * tmo - pointer to struct timeout associated with event
 */
typedef bool (* timeout_cb_type)(struct timeout *tmo);

struct timeout
{
    /* for use by callback/internal - read/write */
    timeout_cb_type callback;          /* callback - returning false cancels */
    int ticks;                         /* timeout period in ticks */
    intptr_t data;                     /* data passed to callback */
    /* internal use - read-only */
    const struct timeout * const next; /* next timeout in list */
    const long expires;                /* expiration tick */
};

void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data);
void timeout_cancel(struct timeout *tmo);
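
/*
 * Illustrative sketch (assumption, not from the original header): a timeout
 * object fires its callback after the given number of ticks; returning true
 * re-arms it, returning false stops it. The names below are hypothetical.
 *
 *     static struct timeout backlight_tmo;
 *
 *     static bool backlight_off(struct timeout *tmo)
 *     {
 *         (void)tmo;
 *         ...                // e.g. turn the backlight off
 *         return false;      // one-shot: do not re-register
 *     }
 *
 *     timeout_register(&backlight_tmo, backlight_off, 5*HZ, 0);
 *     timeout_cancel(&backlight_tmo);   // abort before it fires
 */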

#define STATE_NONSIGNALED 0
#define STATE_SIGNALED    1

#define WAIT_TIMEDOUT  (-1)
#define WAIT_FAILED    0
#define WAIT_SUCCEEDED 1

extern void queue_init(struct event_queue *q, bool register_queue);
extern void queue_delete(struct event_queue *q);
extern void queue_wait(struct event_queue *q, struct queue_event *ev);
extern void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev,
                             int ticks);
extern void queue_post(struct event_queue *q, long id, intptr_t data);
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
extern void queue_enable_queue_send(struct event_queue *q,
                                    struct queue_sender_list *send,
                                    struct thread_entry *owner);
extern intptr_t queue_send(struct event_queue *q, long id, intptr_t data);
extern void queue_reply(struct event_queue *q, intptr_t retval);
extern bool queue_in_queue_send(struct event_queue *q);
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
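
/*
 * Illustrative sketch (assumption, not from the original header): with
 * queue sending enabled, queue_send() blocks the sender until the receiving
 * thread answers with queue_reply(); the reply value becomes queue_send()'s
 * return value. "audio_queue" and Q_AUDIO_STOP are hypothetical names.
 *
 *     // sender thread
 *     intptr_t res = queue_send(&audio_queue, Q_AUDIO_STOP, 0);
 *
 *     // receiver thread, after queue_wait() returned Q_AUDIO_STOP
 *     queue_reply(&audio_queue, 1);    // wakes the sender, res == 1
 */
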
extern bool queue_empty(const struct event_queue* q);
extern bool queue_peek(struct event_queue *q, struct queue_event *ev);
extern void queue_clear(struct event_queue* q);
extern void queue_remove_from_head(struct event_queue *q, long id);
extern int queue_count(const struct event_queue *q);
extern int queue_broadcast(long id, intptr_t data);
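
/*
 * Illustrative sketch (assumption, not from the original header): the usual
 * thread pattern initialises a queue once and then services it in a loop,
 * using queue_wait_w_tmo() to get periodic SYS_TIMEOUT wake-ups.
 * "my_queue" and "handle_message" are hypothetical names.
 *
 *     static struct event_queue my_queue;
 *     struct queue_event ev;
 *
 *     queue_init(&my_queue, true);        // register so it sees broadcasts
 *     while (1)
 *     {
 *         queue_wait_w_tmo(&my_queue, &ev, HZ);
 *         switch (ev.id)
 *         {
 *         case SYS_TIMEOUT:               // no message within HZ ticks
 *             break;
 *         default:
 *             handle_message(&ev);
 *             break;
 *         }
 *     }
 */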

extern void mutex_init(struct mutex *m);
extern void mutex_lock(struct mutex *m);
extern void mutex_unlock(struct mutex *m);
#ifdef HAVE_PRIORITY_SCHEDULING
/* Temporary function to disable mutex preempting a thread on unlock */
static inline void mutex_set_preempt(struct mutex *m, bool preempt)
    { m->no_preempt = !preempt; }
#endif
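
/*
 * Illustrative sketch (assumption, not from the original header): a mutex
 * serialises access to shared state between cooperating threads; the
 * recursion count lets the owning thread lock it again without deadlocking.
 * "list_mutex" is a hypothetical name.
 *
 *     static struct mutex list_mutex;
 *
 *     mutex_init(&list_mutex);     // once, before first use
 *
 *     mutex_lock(&list_mutex);
 *     ...                          // touch the shared data
 *     mutex_unlock(&list_mutex);
 */
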
#if NUM_CORES > 1
extern void spinlock_init(struct spinlock *l);
extern void spinlock_lock(struct spinlock *l);
extern void spinlock_unlock(struct spinlock *l);
#endif
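
/*
 * Illustrative sketch (assumption, not from the original header): spinlocks
 * exist only on multi-core targets and busy-wait instead of sleeping, so
 * they suit very short critical sections shared between cores.
 *
 *     spinlock_lock(&fifo_lock);   // "fifo_lock" is hypothetical
 *     ...                          // a few instructions at most
 *     spinlock_unlock(&fifo_lock);
 */
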
#ifdef HAVE_SEMAPHORE_OBJECTS
extern void semaphore_init(struct semaphore *s, int max, int start);
extern void semaphore_wait(struct semaphore *s);
extern void semaphore_release(struct semaphore *s);
#endif /* HAVE_SEMAPHORE_OBJECTS */
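
/*
 * Illustrative sketch (assumption, not from the original header): a counting
 * semaphore used as a simple "data ready" signal between a producer (often
 * interrupt code) and a consumer thread. "dma_sem" is a hypothetical name.
 *
 *     semaphore_init(&dma_sem, 1, 0);  // max count 1, initially unsignaled
 *
 *     semaphore_wait(&dma_sem);        // consumer blocks until released
 *     semaphore_release(&dma_sem);     // producer signals completion
 */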

#ifdef HAVE_EVENT_OBJECTS
#define EVENT_AUTOMATIC 0x10
#define EVENT_MANUAL    0x00
extern void event_init(struct event *e, unsigned int flags);
extern void event_wait(struct event *e, unsigned int for_state);
extern void event_set_state(struct event *e, unsigned int state);
#endif /* HAVE_EVENT_OBJECTS */
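
/*
 * Illustrative sketch (assumption, not from the original header): an event
 * object lets threads wait for a particular state; an EVENT_AUTOMATIC event
 * resets itself after releasing a waiter, while a manual one stays signaled
 * until changed with event_set_state(). "buffer_done" is hypothetical.
 *
 *     event_init(&buffer_done, EVENT_MANUAL | STATE_NONSIGNALED);
 *
 *     event_wait(&buffer_done, STATE_SIGNALED);       // waiter blocks here
 *     event_set_state(&buffer_done, STATE_SIGNALED);  // releases waiters
 */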

#ifdef HAVE_WAKEUP_OBJECTS
extern void wakeup_init(struct wakeup *w);
extern int wakeup_wait(struct wakeup *w, int timeout);
extern int wakeup_signal(struct wakeup *w);
#endif /* HAVE_WAKEUP_OBJECTS */
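
/*
 * Illustrative sketch (assumption, not from the original header): a wakeup
 * object is a lightweight one-shot signal, typically posted from interrupt
 * context; the waiter can bound the wait with TIMEOUT_BLOCK, TIMEOUT_NOBLOCK
 * or a tick count. "transfer_wake" is a hypothetical name.
 *
 *     wakeup_init(&transfer_wake);
 *
 *     // waiting thread
 *     if (wakeup_wait(&transfer_wake, HZ/2) != WAIT_SUCCEEDED)
 *         ...                          // timed out or failed
 *
 *     // ISR or other thread
 *     wakeup_signal(&transfer_wake);
 */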

#endif /* _KERNEL_H_ */