/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
|
|
|
|
#include "kernel-internal.h"
|
2014-08-08 05:39:29 +00:00
|
|
|
#include "semaphore.h"
|
2013-12-04 16:06:17 +00:00
|
|
|
|
|
|
|
/****************************************************************************
 * Simple semaphore functions ;)
 ****************************************************************************/
|
|
|
/* Initialize the semaphore object.
|
|
|
|
* max = maximum up count the semaphore may assume (max >= 1)
|
|
|
|
* start = initial count of semaphore (0 <= count <= max) */
|
|
|
|
void semaphore_init(struct semaphore *s, int max, int start)
|
|
|
|
{
|
|
|
|
KERNEL_ASSERT(max > 0 && start >= 0 && start <= max,
|
|
|
|
"semaphore_init->inv arg\n");
|
2014-08-08 10:33:51 +00:00
|
|
|
wait_queue_init(&s->queue);
|
2013-12-04 16:06:17 +00:00
|
|
|
s->max = max;
|
|
|
|
s->count = start;
|
|
|
|
corelock_init(&s->cl);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Down the semaphore's count or wait for 'timeout' ticks for it to go up if
 * it is already 0. 'timeout' as TIMEOUT_NOBLOCK (0) will not block and may
 * safely be used in an ISR.
 *
 * s       = semaphore to operate on (must have been semaphore_init()'d)
 * timeout = 0 to poll only; nonzero to block on the semaphore's wait queue
 *           for up to 'timeout' ticks (NOTE(review): a negative timeout
 *           presumably means "wait forever" -- confirm against the
 *           block_thread() contract)
 *
 * Returns OBJ_WAIT_SUCCEEDED if the count was downed, OBJ_WAIT_TIMEDOUT
 * otherwise. */
int semaphore_wait(struct semaphore *s, int timeout)
{
    int ret = OBJ_WAIT_TIMEDOUT;

    /* guard count and queue against ISRs and, via the corelock, the
     * other core */
    int oldlevel = disable_irq_save();
    corelock_lock(&s->cl);

    int count = s->count;
    if(LIKELY(count > 0))
    {
        /* count is not zero; down it */
        s->count = count - 1;
        ret = OBJ_WAIT_SUCCEEDED;
    }
    else if(timeout != 0)
    {
        /* blocking is only legal from thread context, never from an ISR */
        ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT, oldlevel);

        /* too many waits - block until count is upped... */
        struct thread_entry *current = __running_self_entry();

        block_thread(current, timeout, &s->queue, NULL);
        corelock_unlock(&s->cl);

        /* ...and turn control over to next thread */
        switch_thread();

        /* if explicit wake indicated; do no more */
        if(LIKELY(!wait_queue_ptr(current)))
            return OBJ_WAIT_SUCCEEDED;

        /* timeout path: retake the locks to inspect and repair queue
         * state */
        disable_irq();
        corelock_lock(&s->cl);

        /* see if anyone got us after the expired wait */
        if(wait_queue_try_remove(current))
        {
            /* we were still queued, so no release explicitly woke us;
             * a release may still have raced with the timeout and upped
             * the count, so recheck it before reporting a timeout */
            count = s->count;
            if(count > 0)
            {
                /* down it lately */
                s->count = count - 1;
                ret = OBJ_WAIT_SUCCEEDED;
            }
        }
    }
    /* else just polling it */

    corelock_unlock(&s->cl);
    restore_irq(oldlevel);

    return ret;
}
|
|
|
|
|
|
|
|
/* Up the semaphore's count and release any thread waiting at the head of the
 * queue. The count is saturated to the value of the 'max' parameter specified
 * in 'semaphore_init'.
 *
 * A thread switch may occur on exit, but only when priority scheduling is
 * built in and the caller is in thread context (see the #if guard below). */
void semaphore_release(struct semaphore *s)
{
    unsigned int result = THREAD_NONE;

    /* guard count and queue against ISRs and, via the corelock, the
     * other core */
    int oldlevel = disable_irq_save();
    corelock_lock(&s->cl);

    struct thread_entry *thread = WQ_THREAD_FIRST(&s->queue);
    if(LIKELY(thread != NULL))
    {
        /* a thread was queued - wake it up and keep count at 0 */
        KERNEL_ASSERT(s->count == 0,
            "semaphore_release->threads queued but count=%d!\n", s->count);
        /* hand the "unit" directly to the waiter instead of upping the
         * count; result carries wakeup flags (e.g. THREAD_SWITCH) */
        result = wakeup_thread(thread, WAKEUP_DEFAULT);
    }
    else
    {
        int count = s->count;
        if(count < s->max)
        {
            /* nothing waiting - up it */
            s->count = count + 1;
        }
        /* else already at max: saturate, extra releases are dropped */
    }

    corelock_unlock(&s->cl);
    restore_irq(oldlevel);

#if defined(HAVE_PRIORITY_SCHEDULING) && defined(is_thread_context)
    /* No thread switch if not thread context */
    if((result & THREAD_SWITCH) && is_thread_context())
        switch_thread();
#endif
    /* silence "unused" warning when the #if branch is compiled out */
    (void)result;
}
|