/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Björn Stenberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

/****************************************************************************
 * Simple mutex functions ;)
 ****************************************************************************/

#include "kernel-internal.h"
#include "mutex.h"
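
/* Illustrative usage sketch (not part of the original file): a mutex is a
 * long-lived object that is initialized once, before any other thread can
 * reach it, and then locked around every access to the state it protects.
 * The names below (ui_mutex, ui_state, ui_init, ui_update) are hypothetical.
 *
 *     static struct mutex ui_mutex;
 *     static int ui_state;             // hypothetical shared data
 *
 *     void ui_init(void)
 *     {
 *         mutex_init(&ui_mutex);       // once, before other threads see it
 *     }
 *
 *     void ui_update(int value)
 *     {
 *         mutex_lock(&ui_mutex);       // blocks until ownership is gained
 *         ui_state = value;
 *         mutex_unlock(&ui_mutex);     // only the owning thread may unlock
 *     }
 */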

/* Initialize a mutex object - call before any use and do not call again once
 * the object is available to other threads */
void mutex_init(struct mutex *m)
{
    wait_queue_init(&m->queue);
    m->recursion = 0;
    blocker_init(&m->blocker);
#ifdef HAVE_PRIORITY_SCHEDULING
    m->no_preempt = false;
#endif
    corelock_init(&m->cl);
}

/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    struct thread_entry *current = __running_self_entry();

    if(current == m->blocker.thread)
    {
        /* current thread already owns this mutex */
        m->recursion++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* must read thread again inside cs (a multiprocessor concern really) */
    if(LIKELY(m->blocker.thread == NULL))
    {
        /* lock is open */
        m->blocker.thread = current;
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    disable_irq();
    block_thread(current, TIMEOUT_BLOCK, &m->queue, &m->blocker);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
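
/* Sketch of the recursive-lock behaviour implemented above (illustrative
 * only, assuming a mutex m already set up with mutex_init()): the owning
 * thread may call mutex_lock() again without blocking; m->recursion counts
 * the nested acquisitions, so each lock must be balanced by a mutex_unlock()
 * before ownership is actually given up.
 *
 *     mutex_lock(&m);      // becomes owner, recursion stays 0
 *     mutex_lock(&m);      // same thread re-enters: recursion == 1, no block
 *     mutex_unlock(&m);    // recursion drops back to 0, still the owner
 *     mutex_unlock(&m);    // lock is opened or handed to the first waiter
 */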

/* Release ownership of a mutex object - only owning thread must call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(m->blocker.thread == __running_self_entry(),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  m->blocker.thread->name,
                  __running_self_entry()->name);

    if(m->recursion > 0)
    {
        /* this thread still owns lock */
        m->recursion--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    struct thread_entry *thread = WQ_THREAD_FIRST(&m->queue);
    if(LIKELY(thread == NULL))
    {
        /* no threads waiting - open the lock */
        m->blocker.thread = NULL;
        corelock_unlock(&m->cl);
        return;
    }

    const int oldlevel = disable_irq_save();
    unsigned int result = wakeup_thread(thread, WAKEUP_TRANSFER);
    restore_irq(oldlevel);

    corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if((result & THREAD_SWITCH) && !m->no_preempt)
        switch_thread();
#endif
    (void)result;
}
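
/* Note on the unlock path above: when a waiter exists, ownership is handed
 * straight to the first queued thread via wakeup_thread(..., WAKEUP_TRANSFER)
 * instead of opening the lock, and with HAVE_PRIORITY_SCHEDULING the unlocker
 * yields immediately when the wakeup requests a switch and m->no_preempt is
 * not set. */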