/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * All files in this archive are subject to the GNU General Public License.
 * See the file COPYING in the source tree root for full license agreement.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "kernel.h"
#include "thread.h"
#include "cpu.h"
#include "system.h"
#include "panic.h"

/* Make this nonzero to enable more elaborate checks on objects */
#ifdef DEBUG
#define KERNEL_OBJECT_CHECKS 1 /* Always 1 for DEBUG */
#else
#define KERNEL_OBJECT_CHECKS 0
#endif

#if KERNEL_OBJECT_CHECKS
#define KERNEL_ASSERT(exp, msg...) \
    ({ if (!({ exp; })) panicf(msg); })
#else
#define KERNEL_ASSERT(exp, msg...) ({})
#endif

#if !defined(CPU_PP) || !defined(BOOTLOADER)
volatile long current_tick NOCACHEDATA_ATTR = 0;
#endif

void (*tick_funcs[MAX_NUM_TICK_TASKS])(void);

extern struct core_entry cores[NUM_CORES];

/* This array holds all queues that are initialized. It is used for broadcast. */
static struct
{
    int count;
    struct event_queue *queues[MAX_NUM_QUEUES];
#if NUM_CORES > 1
    struct corelock cl;
#endif
} all_queues NOCACHEBSS_ATTR;

/****************************************************************************
 * Standard kernel stuff
 ****************************************************************************/
void kernel_init(void)
{
    /* Init the threading API */
    init_threads();

    /* Other processors will not reach this point in a multicore build.
     * In a single-core build with multiple cores they fall through and
     * sleep in cop_main without returning. */
    if (CURRENT_CORE == CPU)
    {
        memset(tick_funcs, 0, sizeof(tick_funcs));
        memset(&all_queues, 0, sizeof(all_queues));
        corelock_init(&all_queues.cl);
        tick_start(1000/HZ);
    }
}

void sleep(int ticks)
{
#if CONFIG_CPU == S3C2440 && defined(BOOTLOADER)
    volatile int counter;
    TCON &= ~(1 << 20); // stop timer 4
    // TODO: this constant depends on divider settings inherited from
    // firmware. Set them explicitly somewhere.
    TCNTB4 = 12193 * ticks / HZ;
    TCON |= 1 << 21; // set manual bit
    TCON &= ~(1 << 21); // reset manual bit
    TCON &= ~(1 << 22); // autoreload off
    TCON |= (1 << 20); // start timer 4
    do {
       counter = TCNTO4;
    } while(counter > 0);

#elif defined(CPU_PP) && defined(BOOTLOADER)
    unsigned stop = USEC_TIMER + ticks * (1000000/HZ);
    while (TIME_BEFORE(USEC_TIMER, stop))
        switch_thread(NULL);
#else
    sleep_thread(ticks);
#endif
}

void yield(void)
{
#if ((CONFIG_CPU == S3C2440 || defined(ELIO_TPJ1022)) && defined(BOOTLOADER))
    /* Some targets don't like yielding in the bootloader */
#else
    switch_thread(NULL);
#endif
}

/****************************************************************************
 * Queue handling stuff
 ****************************************************************************/

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* Moves waiting thread's descriptor to the current sender when a
   message is dequeued */
static void queue_fetch_sender(struct queue_sender_list *send,
                               unsigned int i)
{
    struct thread_entry **spp = &send->senders[i];

    if(*spp)
    {
        send->curr_sender = *spp;
        *spp = NULL;
    }
}

/* Puts the specified return value in the waiting thread's return value
 * and wakes the thread.
 * 1) A sender should be confirmed to exist before calling, which makes it
 *    more efficient to reject the majority of cases that don't need this
 *    call.
 * 2) Requires interrupts disabled since queue overflows can cause posts
 *    from interrupt handlers to wake threads. Not doing so could cause
 *    an attempt at multiple wakes or other problems.
 */
static void queue_release_sender(struct thread_entry **sender,
                                 intptr_t retval)
{
    (*sender)->retval = retval;
    wakeup_thread_no_listlock(sender);
    /* This should _never_ happen - there must never be multiple
       threads in this list and it is a corrupt state */
    KERNEL_ASSERT(*sender == NULL, "queue->send slot ovf: %08X", (int)*sender);
}

/* Releases any waiting threads that are queued with queue_send -
 * reply with 0.
 * Disable IRQs and lock before calling since it uses
 * queue_release_sender.
 */
static void queue_release_all_senders(struct event_queue *q)
{
    if(q->send)
    {
        unsigned int i;
        for(i = q->read; i != q->write; i++)
        {
            struct thread_entry **spp =
                &q->send->senders[i & QUEUE_LENGTH_MASK];

            if(*spp)
            {
                queue_release_sender(spp, 0);
            }
        }
    }
}

/* Enables queue_send on the specified queue - caller allocates the extra
   data structure. Only queues which are taken to be owned by a thread should
   enable this. Public waiting is not permitted. */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    q->send = NULL;
    if(send != NULL)
    {
        memset(send, 0, sizeof(*send));
        q->send = send;
    }

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */

/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    thread_queue_init(&q->queue);
    q->read = 0;
    q->write = 0;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL; /* No message sending by default */
#endif

    if(register_queue)
    {
        if(all_queues.count >= MAX_NUM_QUEUES)
        {
            panicf("queue_init->out of queues");
        }
        /* Add it to the all_queues array */
        all_queues.queues[all_queues.count++] = q;
        corelock_unlock(&all_queues.cl);
    }

    set_irq_level(oldlevel);
}

/* Queue must not be available for use during this call */
void queue_delete(struct event_queue *q)
{
    int oldlevel;
    int i;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&all_queues.cl);
    corelock_lock(&q->cl);

    /* Find the queue to be deleted */
    for(i = 0;i < all_queues.count;i++)
    {
        if(all_queues.queues[i] == q)
        {
            /* Move the following queues up in the list */
            all_queues.count--;

            for(;i < all_queues.count;i++)
            {
                all_queues.queues[i] = all_queues.queues[i+1];
            }

            break;
        }
    }

    corelock_unlock(&all_queues.cl);

    /* Release threads waiting on queue head */
    thread_queue_wake(&q->queue);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release waiting threads for reply and reply to any dequeued
       message waiting for one. */
    queue_release_all_senders(q);
    queue_reply(q, 0);
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}

/* NOTE: multiple threads waiting on a queue head cannot have a well-
   defined release order if timeouts are used. If multiple threads must
   access the queue head, use a dispatcher or queue_wait only. */
void queue_wait(struct event_queue *q, struct queue_event *ev)
{
    int oldlevel;
    unsigned int rd;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write)
    {
        do
        {
#if CONFIG_CORELOCK == CORELOCK_NONE
#elif CONFIG_CORELOCK == SW_CORELOCK
            const unsigned int core = CURRENT_CORE;
            cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
            cores[core].blk_ops.cl_p = &q->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
            const unsigned int core = CURRENT_CORE;
            cores[core].blk_ops.flags = TBOP_SET_VARu8;
            cores[core].blk_ops.var_u8p = &q->cl.locked;
            cores[core].blk_ops.var_u8v = 0;
#endif /* CONFIG_CORELOCK */
            block_thread(&q->queue);

            oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
            corelock_lock(&q->cl);
        }
        /* A message that woke us could now be gone */
        while (q->read == q->write);
    }

    rd = q->read++ & QUEUE_LENGTH_MASK;
    *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send && q->send->senders[rd])
    {
        /* Get data for a waiting thread if one */
        queue_fetch_sender(q->send, rd);
    }
#endif

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}

void queue_wait_w_tmo(struct event_queue *q, struct queue_event *ev, int ticks)
{
    int oldlevel;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if (q->send && q->send->curr_sender)
    {
        /* auto-reply */
        queue_release_sender(&q->send->curr_sender, 0);
    }
#endif

    if (q->read == q->write && ticks > 0)
    {
#if CONFIG_CORELOCK == CORELOCK_NONE
#elif CONFIG_CORELOCK == SW_CORELOCK
        const unsigned int core = CURRENT_CORE;
        cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
        cores[core].blk_ops.cl_p = &q->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        const unsigned int core = CURRENT_CORE;
        cores[core].blk_ops.flags = TBOP_SET_VARu8;
        cores[core].blk_ops.var_u8p = &q->cl.locked;
        cores[core].blk_ops.var_u8v = 0;
#endif
        block_thread_w_tmo(&q->queue, ticks);
        oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        corelock_lock(&q->cl);
    }

    /* no worry about a removed message here - status is checked inside
       locks - perhaps verify if timeout or false alarm */
    if (q->read != q->write)
    {
        unsigned int rd = q->read++ & QUEUE_LENGTH_MASK;
        *ev = q->events[rd];

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send && q->send->senders[rd])
        {
            /* Get data for a waiting thread if one */
            queue_fetch_sender(q->send, rd);
        }
#endif
    }
    else
    {
        ev->id = SYS_TIMEOUT;
    }

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}

void queue_post(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    if(q->send)
    {
        struct thread_entry **spp = &q->send->senders[wr];

        if (*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }
    }
#endif

    /* Wakeup a waiting thread if any */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}
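
/* Usage sketch (illustrative only, not part of the kernel): the usual
 * pattern is one owning thread that initializes a queue and blocks in
 * queue_wait(), while other threads or interrupt handlers deliver work with
 * queue_post(), which never blocks. The queue name and event ids below are
 * invented for the example; real code defines its own ids.
 */
#if 0
#define Q_EXAMPLE_REDRAW  1
#define Q_EXAMPLE_QUIT    2

static struct event_queue example_queue;

static void example_thread(void)
{
    struct queue_event ev;

    queue_init(&example_queue, true);     /* register for queue_broadcast */

    while(1)
    {
        queue_wait(&example_queue, &ev);  /* sleeps until something arrives */

        switch(ev.id)
        {
            case Q_EXAMPLE_REDRAW:
                /* handle the request; ev.data carries the intptr_t payload */
                break;
            case Q_EXAMPLE_QUIT:
                return;
        }
    }
}

/* Producer side, from another thread or an IRQ handler: */
/*     queue_post(&example_queue, Q_EXAMPLE_REDRAW, 0); */
#endif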

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
/* IRQ handlers are not allowed use of this function - we only aim to
   protect the queue integrity by turning them off. */
intptr_t queue_send(struct event_queue *q, long id, intptr_t data)
{
    int oldlevel;
    unsigned int wr;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    wr = q->write++ & QUEUE_LENGTH_MASK;

    q->events[wr].id   = id;
    q->events[wr].data = data;

    if(q->send)
    {
        const unsigned int core = CURRENT_CORE;
        struct thread_entry **spp = &q->send->senders[wr];

        if(*spp)
        {
            /* overflow protect - unblock any thread waiting at this index */
            queue_release_sender(spp, 0);
        }

        /* Wakeup a waiting thread if any */
        wakeup_thread(&q->queue);

#if CONFIG_CORELOCK == CORELOCK_NONE
#elif CONFIG_CORELOCK == SW_CORELOCK
        cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
        cores[core].blk_ops.cl_p = &q->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        cores[core].blk_ops.flags = TBOP_SET_VARu8;
        cores[core].blk_ops.var_u8p = &q->cl.locked;
        cores[core].blk_ops.var_u8v = 0;
#endif
        block_thread_no_listlock(spp);
        return cores[core].running->retval;
    }

    /* Function as queue_post if sending is not enabled */
    wakeup_thread(&q->queue);

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);

    return 0;
}

#if 0 /* not used now but probably will be later */
/* Query if the last message dequeued was added by queue_send or not */
bool queue_in_queue_send(struct event_queue *q)
{
    bool in_send;

#if NUM_CORES > 1
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);
#endif

    in_send = q->send && q->send->curr_sender;

#if NUM_CORES > 1
    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
#endif

    return in_send;
}
#endif

/* Replies with retval to the last dequeued message sent with queue_send */
void queue_reply(struct event_queue *q, intptr_t retval)
{
    if(q->send && q->send->curr_sender)
    {
#if NUM_CORES > 1
        int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
        corelock_lock(&q->cl);
        /* Double-check locking */
        if(q->send && q->send->curr_sender)
        {
#endif

            queue_release_sender(&q->send->curr_sender, retval);

#if NUM_CORES > 1
        }
        corelock_unlock(&q->cl);
        set_irq_level(oldlevel);
#endif
    }
}
#endif /* HAVE_EXTENDED_MESSAGING_AND_NAME */
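
/* Usage sketch (illustrative only): synchronous messaging with queue_send().
 * The owning thread enables sending on its queue and answers each sent
 * message with queue_reply(); the sender blocks in queue_send() until that
 * reply arrives. All names below are invented for the example.
 */
#if 0
#define Q_SRV_PING 1

static struct event_queue srv_queue;
static struct queue_sender_list srv_send;

static void server_thread(void)
{
    struct queue_event ev;

    queue_init(&srv_queue, true);
    queue_enable_queue_send(&srv_queue, &srv_send);

    while(1)
    {
        queue_wait(&srv_queue, &ev);

        if(ev.id == Q_SRV_PING)
            queue_reply(&srv_queue, 42);  /* unblocks the sender with 42 */
        /* a dequeued sent message that is never replied to explicitly gets
           auto-replied with 0 on the next queue_wait()/queue_wait_w_tmo() */
    }
}

/* Client side (must not be an IRQ handler): */
/*     intptr_t answer = queue_send(&srv_queue, Q_SRV_PING, 0); */
#endif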

/* Poll queue to see if a message exists - careful in using the result if
 * queue_remove_from_head is called when messages are posted - possibly use
 * queue_wait_w_tmo(&q, 0) in that case or else a removed message that
 * unsignals the queue may cause an unwanted block */
bool queue_empty(const struct event_queue* q)
{
    return ( q->read == q->write );
}

bool queue_peek(struct event_queue *q, struct queue_event *ev)
{
    if (q->read == q->write)
        return false;

    bool have_msg = false;

    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    if (q->read != q->write)
    {
        *ev = q->events[q->read & QUEUE_LENGTH_MASK];
        have_msg = true;
    }

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);

    return have_msg;
}

void queue_clear(struct event_queue* q)
{
    int oldlevel;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    /* Release all threads waiting in the queue for a reply -
       dequeued sent message will be handled by owning thread */
    queue_release_all_senders(q);
#endif

    q->read = 0;
    q->write = 0;

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}

void queue_remove_from_head(struct event_queue *q, long id)
{
    int oldlevel;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&q->cl);

    while(q->read != q->write)
    {
        unsigned int rd = q->read & QUEUE_LENGTH_MASK;

        if(q->events[rd].id != id)
        {
            break;
        }

#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
        if(q->send)
        {
            struct thread_entry **spp = &q->send->senders[rd];

            if (*spp)
            {
                /* Release any thread waiting on this message */
                queue_release_sender(spp, 0);
            }
        }
#endif
        q->read++;
    }

    corelock_unlock(&q->cl);
    set_irq_level(oldlevel);
}

/**
 * The number of events waiting in the queue.
 *
 * @param q the event_queue to query
 * @return number of events in the queue
 */
int queue_count(const struct event_queue *q)
{
    return q->write - q->read;
}

int queue_broadcast(long id, intptr_t data)
{
    int i;

#if NUM_CORES > 1
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);
    corelock_lock(&all_queues.cl);
#endif

    for(i = 0;i < all_queues.count;i++)
    {
        queue_post(all_queues.queues[i], id, data);
    }

#if NUM_CORES > 1
    corelock_unlock(&all_queues.cl);
    set_irq_level(oldlevel);
#endif

    return i;
}

/****************************************************************************
 * Timer tick
 ****************************************************************************/
#if CONFIG_CPU == SH7034
void tick_start(unsigned int interval_in_ms)
{
    unsigned long count;

    count = CPU_FREQ * interval_in_ms / 1000 / 8;

    if(count > 0x10000)
    {
        panicf("Error! The tick interval is too long (%d ms)\n",
               interval_in_ms);
        return;
    }

    /* We are using timer 0 */

    TSTR &= ~0x01; /* Stop the timer */
    TSNC &= ~0x01; /* No synchronization */
    TMDR &= ~0x01; /* Operate normally */

    TCNT0 = 0;     /* Start counting at 0 */
    GRA0 = (unsigned short)(count - 1);
    TCR0 = 0x23;   /* Clear at GRA match, sysclock/8 */

    /* Enable interrupt on level 1 */
    IPRC = (IPRC & ~0x00f0) | 0x0010;

    TSR0 &= ~0x01;
    TIER0 = 0xf9;  /* Enable GRA match interrupt */

    TSTR |= 0x01;  /* Start timer 0 */
}

void IMIA0(void) __attribute__ ((interrupt_handler));
void IMIA0(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

    current_tick++;

    TSR0 &= ~0x01;
}
#elif defined(CPU_COLDFIRE)
void tick_start(unsigned int interval_in_ms)
{
    unsigned long count;
    int prescale;

    count = CPU_FREQ/2 * interval_in_ms / 1000 / 16;

    if(count > 0x10000)
    {
        panicf("Error! The tick interval is too long (%d ms)\n",
               interval_in_ms);
        return;
    }

    prescale = cpu_frequency / CPU_FREQ;
    /* Note: The prescaler is later adjusted on-the-fly on CPU frequency
       changes within timer.c */

    /* We are using timer 0 */

    TRR0 = (unsigned short)(count - 1); /* The reference count */
    TCN0 = 0; /* reset the timer */
    TMR0 = 0x001d | ((unsigned short)(prescale - 1) << 8);
          /* restart, CLK/16, enabled, prescaler */

    TER0 = 0xff; /* Clear all events */

    ICR1 = 0x8c; /* Interrupt on level 3.0 */
    IMR &= ~0x200;
}

void TIMER0(void) __attribute__ ((interrupt_handler));
void TIMER0(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

    current_tick++;

    TER0 = 0xff; /* Clear all events */
}

#elif defined(CPU_PP)

#ifndef BOOTLOADER
void TIMER1(void)
{
    int i;

    TIMER1_VAL; /* Read value to ack IRQ */

    /* Run through the list of tick tasks using main CPU core -
       wake up the COP through its control interface to provide pulse */
    for (i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if (tick_funcs[i])
        {
            tick_funcs[i]();
        }
    }

#if NUM_CORES > 1
    /* Pulse the COP */
    core_wake(COP);
#endif /* NUM_CORES */

    current_tick++;
}
#endif

/* Must be the last function called in kernel/thread initialization */
void tick_start(unsigned int interval_in_ms)
{
#ifndef BOOTLOADER
    TIMER1_CFG = 0x0;
    TIMER1_VAL;
    /* enable timer */
    TIMER1_CFG = 0xc0000000 | (interval_in_ms*1000 - 1);
    /* unmask interrupt source */
    CPU_INT_EN = TIMER1_MASK;
#else
    /* We don't enable interrupts in the bootloader */
    (void)interval_in_ms;
#endif
}

#elif CONFIG_CPU == PNX0101

void timer_handler(void)
{
    int i;

    /* Run through the list of tick tasks */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i])
            tick_funcs[i]();
    }

    current_tick++;

    TIMER0.clr = 0;
}

void tick_start(unsigned int interval_in_ms)
{
    TIMER0.ctrl &= ~0x80; /* Disable the counter */
    TIMER0.ctrl |= 0x40;  /* Reload after counting down to zero */
    TIMER0.load = 3000000 * interval_in_ms / 1000;
    TIMER0.ctrl &= ~0xc;  /* No prescaler */
    TIMER0.clr = 1;       /* Clear the interrupt request */

    irq_set_int_handler(IRQ_TIMER0, timer_handler);
    irq_enable_int(IRQ_TIMER0);

    TIMER0.ctrl |= 0x80;  /* Enable the counter */
}
#endif

int tick_add_task(void (*f)(void))
{
    int i;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Add a task if there is room */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == NULL)
        {
            tick_funcs[i] = f;
            set_irq_level(oldlevel);
            return 0;
        }
    }
    set_irq_level(oldlevel);
    panicf("Error! tick_add_task(): out of tasks");
    return -1;
}

int tick_remove_task(void (*f)(void))
{
    int i;
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* Remove a task if it is there */
    for(i = 0;i < MAX_NUM_TICK_TASKS;i++)
    {
        if(tick_funcs[i] == f)
        {
            tick_funcs[i] = NULL;
            set_irq_level(oldlevel);
            return 0;
        }
    }

    set_irq_level(oldlevel);
    return -1;
}
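
/* Usage sketch (illustrative only): a tick task is a void(void) function
 * that runs from the timer interrupt once per kernel tick, so it must be
 * short and must not block. The counter and function names below are
 * invented for the example.
 */
#if 0
static volatile long example_seconds;

static void example_tick_task(void)
{
    /* called HZ times per second, in interrupt context */
    if((current_tick % HZ) == 0)
        example_seconds++;
}

static void example_install(void)
{
    tick_add_task(example_tick_task);    /* 0 on success, panics when full */
    /* ... later ... */
    tick_remove_task(example_tick_task);
}
#endif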

/****************************************************************************
 * Tick-based interval timers/one-shots - be mindful this is not really
 * intended for continuous timers but for events that need to run for a short
 * time and be cancelled without further software intervention.
 ****************************************************************************/
#ifdef INCLUDE_TIMEOUT_API
static struct timeout *tmo_list = NULL; /* list of active timeout events */

/* timeout tick task - calls event handlers when they expire
 * Event handlers may alter ticks, callback and data during operation.
 */
static void timeout_tick(void)
{
    unsigned long tick = current_tick;
    struct timeout *curr, *next;

    for (curr = tmo_list; curr != NULL; curr = next)
    {
        next = (struct timeout *)curr->next;

        if (TIME_BEFORE(tick, curr->expires))
            continue;

        /* this event has expired - call callback */
        if (curr->callback(curr))
            *(long *)&curr->expires = tick + curr->ticks; /* reload */
        else
            timeout_cancel(curr); /* cancel */
    }
}

/* Cancels a timeout callback - can be called from the ISR */
void timeout_cancel(struct timeout *tmo)
{
    int oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    if (tmo_list != NULL)
    {
        struct timeout *curr = tmo_list;
        struct timeout *prev = NULL;

        while (curr != tmo && curr != NULL)
        {
            prev = curr;
            curr = (struct timeout *)curr->next;
        }

        if (curr != NULL)
        {
            /* in list */
            if (prev == NULL)
                tmo_list = (struct timeout *)curr->next;
            else
                *(const struct timeout **)&prev->next = curr->next;

            if (tmo_list == NULL)
                tick_remove_task(timeout_tick); /* last one - remove task */
        }
        /* not in list or tmo == NULL */
    }

    set_irq_level(oldlevel);
}

/* Adds a timeout callback - calling with an active timeout resets the
   interval - can be called from the ISR */
void timeout_register(struct timeout *tmo, timeout_cb_type callback,
                      int ticks, intptr_t data)
{
    int oldlevel;
    struct timeout *curr;

    if (tmo == NULL)
        return;

    oldlevel = set_irq_level(HIGHEST_IRQ_LEVEL);

    /* see if this one is already registered */
    curr = tmo_list;
    while (curr != tmo && curr != NULL)
        curr = (struct timeout *)curr->next;

    if (curr == NULL)
    {
        /* not found - add it */
        if (tmo_list == NULL)
            tick_add_task(timeout_tick); /* first one - add task */

        *(struct timeout **)&tmo->next = tmo_list;
        tmo_list = tmo;
    }

    tmo->callback = callback;
    tmo->ticks = ticks;
    tmo->data = data;
    *(long *)&tmo->expires = current_tick + ticks;

    set_irq_level(oldlevel);
}

#endif /* INCLUDE_TIMEOUT_API */
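
/* Usage sketch (illustrative only): arming a one-shot callback roughly half
 * a second from now. The callback runs in interrupt context via the
 * timeout_tick() task above; returning 0 lets it expire, returning non-zero
 * re-arms it for another tmo->ticks ticks. The names below and the exact
 * return type of timeout_cb_type are assumptions for the example - see
 * kernel.h for the real declaration.
 */
#if 0
static struct timeout example_tmo;

static int example_timeout_cb(struct timeout *tmo)
{
    (void)tmo->data;  /* the intptr_t passed to timeout_register() */
    return 0;         /* one-shot: do not reload */
}

static void example_arm(void)
{
    timeout_register(&example_tmo, example_timeout_cb, HZ/2, 0);
    /* timeout_cancel(&example_tmo) would disarm it before it fires */
}
#endif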

/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/
void mutex_init(struct mutex *m)
{
    m->queue = NULL;
    m->thread = NULL;
    m->count = 0;
    m->locked = 0;
#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_init(&m->cl);
#endif
}

void mutex_lock(struct mutex *m)
{
    const unsigned int core = CURRENT_CORE;
    struct thread_entry *const thread = cores[core].running;

    if(thread == m->thread)
    {
        m->count++;
        return;
    }

    /* Repeat some stuff here or else all the variation is too difficult to
       read */
#if CONFIG_CORELOCK == CORELOCK_SWAP
    /* peek at lock until it's no longer busy */
    unsigned int locked;
    while ((locked = xchg8(&m->locked, STATE_BUSYu8)) == STATE_BUSYu8);
    if(locked == 0)
    {
        m->thread = thread;
        m->locked = 1;
        return;
    }

    /* Block until the lock is open... */
    cores[core].blk_ops.flags = TBOP_SET_VARu8;
    cores[core].blk_ops.var_u8p = &m->locked;
    cores[core].blk_ops.var_u8v = 1;
#else
    corelock_lock(&m->cl);
    if (m->locked == 0)
    {
        m->locked = 1;
        m->thread = thread;
        corelock_unlock(&m->cl);
        return;
    }

    /* Block until the lock is open... */
#if CONFIG_CORELOCK == SW_CORELOCK
    cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
    cores[core].blk_ops.cl_p = &m->cl;
#endif
#endif /* CONFIG_CORELOCK */

    block_thread_no_listlock(&m->queue);
}

void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(m->thread == cores[CURRENT_CORE].running,
                  "mutex_unlock->wrong thread (recurse)");

    if(m->count > 0)
    {
        /* this thread still owns lock */
        m->count--;
        return;
    }

#if CONFIG_CORELOCK == SW_CORELOCK
    /* lock out other cores */
    corelock_lock(&m->cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    /* wait for peeker to move on */
    while (xchg8(&m->locked, STATE_BUSYu8) == STATE_BUSYu8);
#endif

    /* transfer to next queued thread if any */

    /* This can become busy using SWP but is safe since only one thread
       will be changing things at a time. Allowing timeout waits will
       change that however but not now. There is also a hazard the thread
       could be killed before performing the wakeup but that's just
       irresponsible. :-) */
    m->thread = m->queue;

    if(m->thread == NULL)
    {
        m->locked = 0; /* release lock */
#if CONFIG_CORELOCK == SW_CORELOCK
        corelock_unlock(&m->cl);
#endif
    }
    else /* another thread is waiting - remain locked */
    {
        wakeup_thread_no_listlock(&m->queue);
#if CONFIG_CORELOCK == SW_CORELOCK
        corelock_unlock(&m->cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        m->locked = 1;
#endif
    }
}
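
/* Usage sketch (illustrative only): mutexes here are recursive - the owner
 * may call mutex_lock() again and must balance every lock with an unlock.
 * The protected variable and function names below are invented.
 */
#if 0
static struct mutex example_mtx;
static int example_shared_value;

static void example_init(void)
{
    mutex_init(&example_mtx);        /* once, before any lock/unlock */
}

static void example_update(void)
{
    mutex_lock(&example_mtx);        /* blocks while another thread owns it */
    example_shared_value++;          /* critical section */
    mutex_unlock(&example_mtx);
}
#endif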

/****************************************************************************
 * Simpl-er mutex functions ;)
 ****************************************************************************/
#if NUM_CORES > 1
void spinlock_init(struct spinlock *l)
{
    corelock_init(&l->cl);
    l->thread = NULL;
    l->count = 0;
}

void spinlock_lock(struct spinlock *l)
{
    struct thread_entry *const thread = cores[CURRENT_CORE].running;

    if (l->thread == thread)
    {
        l->count++;
        return;
    }

    corelock_lock(&l->cl);

    l->thread = thread;
}

void spinlock_unlock(struct spinlock *l)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(l->thread == cores[CURRENT_CORE].running,
                  "spinlock_unlock->wrong thread");

    if (l->count > 0)
    {
        /* this thread still owns lock */
        l->count--;
        return;
    }

    /* clear owner */
    l->thread = NULL;

    /* release lock */
    corelock_unlock(&l->cl);
}
#endif /* NUM_CORES > 1 */

/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
#ifdef HAVE_SEMAPHORE_OBJECTS
void semaphore_init(struct semaphore *s, int max, int start)
{
    KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
                  "semaphore_init->inv arg");
    s->queue = NULL;
    s->max = max;
    s->count = start;
#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_init(&s->cl);
#endif
}

void semaphore_wait(struct semaphore *s)
{
#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
    corelock_lock(&s->cl);
    if(--s->count >= 0)
    {
        corelock_unlock(&s->cl);
        return;
    }
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    int count;
    while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi);
    if(--count >= 0)
    {
        s->count = count;
        return;
    }
#endif

    /* too many waits - block until dequeued */
#if CONFIG_CORELOCK == SW_CORELOCK
    const unsigned int core = CURRENT_CORE;
    cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
    cores[core].blk_ops.cl_p = &s->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    const unsigned int core = CURRENT_CORE;
    cores[core].blk_ops.flags = TBOP_SET_VARi;
    cores[core].blk_ops.var_ip = &s->count;
    cores[core].blk_ops.var_iv = count;
#endif
    block_thread_no_listlock(&s->queue);
}

void semaphore_release(struct semaphore *s)
{
#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
    corelock_lock(&s->cl);
    if (s->count < s->max)
    {
        if (++s->count <= 0)
        {
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    int count;
    while ((count = xchg32(&s->count, STATE_BUSYi)) == STATE_BUSYi);
    if(count < s->max)
    {
        if(++count <= 0)
        {
#endif /* CONFIG_CORELOCK */

            /* there should be threads in this queue */
            KERNEL_ASSERT(s->queue != NULL, "semaphore->wakeup");
            /* a thread was queued - wake it up */
            wakeup_thread_no_listlock(&s->queue);
        }
    }

#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_unlock(&s->cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    s->count = count;
#endif
}
#endif /* HAVE_SEMAPHORE_OBJECTS */
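
/* Usage sketch (illustrative only): a counting semaphore used as an
 * "items available" counter between a producer and a consumer. The starting
 * count 0 and maximum 4, like the names, are arbitrary example values.
 */
#if 0
static struct semaphore example_sem;

static void example_setup(void)
{
    semaphore_init(&example_sem, 4, 0);  /* max 4 pending, none at start */
}

static void example_producer(void)
{
    /* make one item available; wakes a waiter if one is blocked */
    semaphore_release(&example_sem);
}

static void example_consumer(void)
{
    semaphore_wait(&example_sem);        /* blocks until an item is available */
    /* consume one item here */
}
#endif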

/****************************************************************************
 * Simple event functions ;)
 ****************************************************************************/
#ifdef HAVE_EVENT_OBJECTS
void event_init(struct event *e, unsigned int flags)
{
    e->queues[STATE_NONSIGNALED] = NULL;
    e->queues[STATE_SIGNALED] = NULL;
    e->state = flags & STATE_SIGNALED;
    e->automatic = (flags & EVENT_AUTOMATIC) ? 1 : 0;
#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_init(&e->cl);
#endif
}

void event_wait(struct event *e, unsigned int for_state)
{
    unsigned int last_state;
#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
    corelock_lock(&e->cl);
    last_state = e->state;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8);
#endif

    if(e->automatic != 0)
    {
        /* wait for false always satisfied by definition
           or if it just changed to false */
        if(last_state == STATE_SIGNALED || for_state == STATE_NONSIGNALED)
        {
            /* automatic - unsignal */
            e->state = STATE_NONSIGNALED;
#if CONFIG_CORELOCK == SW_CORELOCK
            corelock_unlock(&e->cl);
#endif
            return;
        }
        /* block until state matches */
    }
    else if(for_state == last_state)
    {
        /* the state being waited for is the current state */
#if CONFIG_CORELOCK == SW_CORELOCK
        corelock_unlock(&e->cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        e->state = last_state;
#endif
        return;
    }

    {
        /* current state does not match wait-for state */
#if CONFIG_CORELOCK == SW_CORELOCK
        const unsigned int core = CURRENT_CORE;
        cores[core].blk_ops.flags = TBOP_UNLOCK_CORELOCK;
        cores[core].blk_ops.cl_p = &e->cl;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        const unsigned int core = CURRENT_CORE;
        cores[core].blk_ops.flags = TBOP_SET_VARu8;
        cores[core].blk_ops.var_u8p = &e->state;
        cores[core].blk_ops.var_u8v = last_state;
#endif
        block_thread_no_listlock(&e->queues[for_state]);
    }
}

void event_set_state(struct event *e, unsigned int state)
{
    unsigned int last_state;
#if CONFIG_CORELOCK == CORELOCK_NONE || CONFIG_CORELOCK == SW_CORELOCK
    corelock_lock(&e->cl);
    last_state = e->state;
#elif CONFIG_CORELOCK == CORELOCK_SWAP
    while ((last_state = xchg8(&e->state, STATE_BUSYu8)) == STATE_BUSYu8);
#endif

    if(last_state == state)
    {
        /* no change */
#if CONFIG_CORELOCK == SW_CORELOCK
        corelock_unlock(&e->cl);
#elif CONFIG_CORELOCK == CORELOCK_SWAP
        e->state = last_state;
#endif
        return;
    }

    if(state == STATE_SIGNALED)
    {
        if(e->automatic != 0)
        {
            struct thread_entry *thread;
            /* no thread should have ever blocked for unsignaled */
            KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL,
                          "set_event_state->queue[NS]:S");
            /* pass to next thread and keep unsignaled - "pulse" */
            thread = wakeup_thread_no_listlock(&e->queues[STATE_SIGNALED]);
            e->state = thread != NULL ? STATE_NONSIGNALED : STATE_SIGNALED;
        }
        else
        {
            /* release all threads waiting for signaled */
            thread_queue_wake_no_listlock(&e->queues[STATE_SIGNALED]);
            e->state = STATE_SIGNALED;
        }
    }
    else
    {
        /* release all threads waiting for unsignaled */

        /* no thread should have ever blocked if automatic */
        KERNEL_ASSERT(e->queues[STATE_NONSIGNALED] == NULL ||
                      e->automatic == 0, "set_event_state->queue[NS]:NS");

        thread_queue_wake_no_listlock(&e->queues[STATE_NONSIGNALED]);
        e->state = STATE_NONSIGNALED;
    }

#if CONFIG_CORELOCK == SW_CORELOCK
    corelock_unlock(&e->cl);
#endif
}
#endif /* HAVE_EVENT_OBJECTS */
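
/* Usage sketch (illustrative only): an automatic event behaves like a
 * "pulse" - event_set_state(..., STATE_SIGNALED) wakes one waiter and the
 * event drops back to non-signaled. The names below are invented, and
 * passing only EVENT_AUTOMATIC assumes the event should start non-signaled.
 */
#if 0
static struct event example_evt;

static void example_event_setup(void)
{
    event_init(&example_evt, EVENT_AUTOMATIC);  /* starts non-signaled */
}

static void example_waiter(void)
{
    event_wait(&example_evt, STATE_SIGNALED);   /* blocks until signaled */
}

static void example_signaller(void)
{
    event_set_state(&example_evt, STATE_SIGNALED);
}
#endif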