/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Felix Arends
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include <stdlib.h>
#include <string.h> /* for memset() */
#include "memory.h"
#include "uisdl.h"
#include "kernel.h"
#include "thread-sdl.h"
#include "thread.h"
#include "debug.h"

static void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

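/* The simulator has no interrupt controller; this stub only records the
   requested level and returns it, so that code written for the target can
   call it unchanged. */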
int set_irq_level (int level)
{
    static int _lv = 0;

    return (_lv = level);
}

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Moves a waiting thread's descriptor to the current sender when a
   message is dequeued */
static void queue_fetch_sender(struct queue_sender_list *send,
                               unsigned int i)
{
    int old_level = set_irq_level(15<<4);
    struct thread_entry **spp = &send->senders[i];

    if(*spp)
    {
        send->curr_sender = *spp;
        *spp = NULL;
    }

    set_irq_level(old_level);
}

/* Puts the specified return value in the waiting thread's return value
   and wakes the thread - a sender should be confirmed to exist first */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    (*sender)->retval = retval;
    *sender = NULL;
}

/* Releases any waiting threads that are queued with queue_send -
   reply with 0 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}

/* Enables queue_send on the specified queue - caller allocates the extra
   data structure */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send)
{
    q->send = send;
    memset(send, 0, sizeof(*send));
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

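/* Initialise an event queue. The register_queue flag exists for API
   compatibility with the target kernel and is ignored in the simulator. */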
void queue_init(struct event_queue *q, bool register_queue)
{
    (void)register_queue;

    q->read = 0;
    q->write = 0;
    q->thread = NULL;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
#endif
}

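/* Queue teardown is a no-op in the simulator. */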
void queue_delete(struct event_queue *q)
{
    (void)q;
}

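/* Block until an event is available, then dequeue it into *ev. Waiting is
   cooperative: the caller yields until another thread posts an event. */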
void queue_wait(struct event_queue *q, struct event *ev)
{
    unsigned int rd;

    while(q->read == q->write)
    {
        switch_thread(true, NULL);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->senders[rd])
    {
        /* Get data for a waiting thread if there is one */
        queue_fetch_sender(q->send, rd);
    }
#endif
}

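/* As queue_wait(), but gives up after 'ticks' ticks, reporting the timeout
   by setting ev->id to SYS_TIMEOUT. */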
void queue_wait_w_tmo(struct event_queue *q, struct event *ev, int ticks)
{
    unsigned int timeout = current_tick + ticks;

    while(q->read == q->write && TIME_BEFORE( current_tick, timeout ))
    {
        sim_sleep(1);
    }

    if(q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send && q->send->senders[rd])
        {
            /* Get data for a waiting thread if there is one */
            queue_fetch_sender(q->send, rd);
        }
#endif
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }
}

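/* Append an event to the queue with interrupts "disabled" (simulated via
   set_irq_level), mirroring the target implementation. If the slot being
   reused still has a blocked sender, that sender is released with a reply
   of 0. */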
void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(15<<4);
    unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id = id;
    q->events[wr].data = data;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }
    }
#endif

    set_irq_level(oldlevel);
}

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
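/* Post an event and block until the receiving thread answers it with
   queue_reply(); the reply value is returned. If sending is not enabled
   on this queue, the call behaves like queue_post() and returns 0. */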
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel = set_irq_level(15<<4);
    unsigned int wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id = id;
    q->events[wr].data = data;

    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];
        struct thread_entry sender;

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        *spp = &sender;

        set_irq_level(oldlevel);
        while (*spp != NULL)
        {
            switch_thread(true, NULL);
        }

        return sender.retval;
    }

    /* Functions as queue_post if sending is not enabled */
    set_irq_level(oldlevel);
    return 0;
}

#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    return q->send && q->send->curr_sender;
}
#endif

/* Replies with retval to any dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
        queue_release_sender(&q->send->curr_sender, retval);
    }
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}

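/* Discard all queued events and reset the read/write indices. */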
void queue_clear(struct event_queue* q)
{
    /* fixme: This is potentially unsafe in case we do interrupt-like processing */
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release all threads waiting in the queue for a reply -
       dequeued sent messages will be handled by the owning thread */
    queue_release_all_senders(q);
#endif
    q->read = 0;
    q->write = 0;
}

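/* Remove events matching 'id' from the head of the queue, stopping at the
   first non-matching event. Any sender blocked on a removed event is
   released with a reply of 0. */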
void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel = set_irq_level(15<<4);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send)
        {
            struct thread_entry **spp = &q->send->senders[rd];

            if(*spp)
            {
                /* Release any thread waiting on this message */
                queue_release_sender(spp, 0);
            }
        }
#endif
        q->read++;
    }

    set_irq_level(oldlevel);
}

int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}

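/* Hand the CPU to another thread. The context-saving arguments used on
   target hardware are accepted but ignored; the simulator simply calls
   yield(). */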
void switch_thread(bool save_context, struct thread_entry **blocked_list)
{
    (void)save_context;
    (void)blocked_list;

    yield ();
}

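/* Run every registered tick task once; presumably invoked periodically by
   the simulator's timer code to emulate the target's tick interrupt. */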
void sim_tick_tasks(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }
}

int tick_add_task(void (*f)(void))
{
    int i;

    /* Add a task if there is room */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == NULL)
        {
            tick_funcs[i] = f;
            return 0;
        }
    }

    DEBUGF("Error! tick_add_task(): out of tasks");
    return -1;
}

int tick_remove_task(void (*f)(void))
{
    int i;

    /* Remove a task if it is there */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == f)
        {
            tick_funcs[i] = NULL;
            return 0;
        }
    }

    return -1;
}

/* Very simple mutex simulation - won't work with pre-emptive
   multitasking, but is better than nothing at all */
void mutex_init(struct mutex *m)
{
    m->locked = false;
}

void mutex_lock(struct mutex *m)
{
    while(m->locked)
        switch_thread(true, NULL);
    m->locked = true;
}

void mutex_unlock(struct mutex *m)
{
    m->locked = false;
}

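/* In the simulator, spinlocks behave exactly like the mutexes above:
   a cooperative wait-and-yield rather than a true busy spin. */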
void spinlock_lock(struct mutex *m)
{
    while(m->locked)
        switch_thread(true, NULL);
    m->locked = true;
}

void spinlock_unlock(struct mutex *m)
{
    m->locked = false;
}