buflib: Check the validity of handles passed to buflib_get_data() in DEBUG builds.
Change-Id: Ic274bfb4a8e1a1a10f9a54186b9173dbc0faa4c8
This commit is contained in:
parent
d608d2203a
commit
d66346789c
6 changed files with 326 additions and 2 deletions
|
@ -898,6 +898,15 @@ const char* buflib_get_name(struct buflib_context *ctx, int handle)
|
|||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
|
||||
/* DEBUG variant of buflib_get_data(): validate the handle before use.
 *
 * ctx    - buflib context owning the handle table
 * handle - allocation handle; valid handles are > 0 and index the
 *          handle table backwards from its end (hence the negation)
 *
 * Panics via buflib_panic() on a non-positive handle instead of
 * silently returning a bogus pointer.
 * NOTE(review): out-of-range positive handles are not caught here —
 * presumably bounds are validated elsewhere; confirm against the
 * context's handle table size. */
void *buflib_get_data(struct buflib_context *ctx, int handle)
{
    if (handle <= 0)
        buflib_panic(ctx, "invalid handle access: %d", handle);

    return (void*)(ctx->handle_table[-handle].alloc);
}
|
||||
|
||||
void buflib_check_valid(struct buflib_context *ctx)
|
||||
{
|
||||
union buflib_data *crc_slot;
|
||||
|
|
0
firmware/export/config/librockplay.h
Normal file
0
firmware/export/config/librockplay.h
Normal file
|
@ -237,10 +237,14 @@ int buflib_alloc_maximum(struct buflib_context* ctx, const char* name,
|
|||
*
|
||||
* Returns: The start pointer of the allocation
|
||||
*/
|
||||
static inline void* buflib_get_data(struct buflib_context *context, int handle)
|
||||
#ifdef DEBUG
|
||||
void* buflib_get_data(struct buflib_context *ctx, int handle);
|
||||
#else
|
||||
static inline void* buflib_get_data(struct buflib_context *ctx, int handle)
|
||||
{
|
||||
return (void*)(context->handle_table[-handle].alloc);
|
||||
return (void*)(ctx->handle_table[-handle].alloc);
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Shrink the memory allocation associated with the given handle
|
||||
|
|
18
firmware/kernel/pthread/corelock.c
Normal file
18
firmware/kernel/pthread/corelock.c
Normal file
|
@ -0,0 +1,18 @@
|
|||
#include <pthread.h>
|
||||
#include "kernel.h"
|
||||
|
||||
void corelock_init(struct corelock *lk)
|
||||
{
|
||||
lk->mutex = (pthread_mutex_t) PTHREAD_MUTEX_INITIALIZER;
|
||||
}
|
||||
|
||||
void corelock_lock(struct corelock *lk)
|
||||
{
|
||||
pthread_mutex_lock(&lk->mutex);
|
||||
}
|
||||
|
||||
|
||||
void corelock_unlock(struct corelock *lk)
|
||||
{
|
||||
pthread_mutex_unlock(&lk->mutex);
|
||||
}
|
21
firmware/kernel/pthread/mutex.c
Normal file
21
firmware/kernel/pthread/mutex.c
Normal file
|
@ -0,0 +1,21 @@
|
|||
#include <pthread.h>
|
||||
#include "kernel.h"
|
||||
|
||||
void mutex_init(struct mutex *m)
|
||||
{
|
||||
pthread_mutexattr_t attr;
|
||||
pthread_mutexattr_init(&attr);
|
||||
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
|
||||
pthread_mutex_init(&m->mutex, &attr);
|
||||
pthread_mutexattr_destroy(&attr);
|
||||
}
|
||||
|
||||
void mutex_lock(struct mutex *m)
|
||||
{
|
||||
pthread_mutex_lock(&m->mutex);
|
||||
}
|
||||
|
||||
void mutex_unlock(struct mutex *m)
|
||||
{
|
||||
pthread_mutex_unlock(&m->mutex);
|
||||
}
|
272
firmware/kernel/pthread/thread.c
Normal file
272
firmware/kernel/pthread/thread.c
Normal file
|
@ -0,0 +1,272 @@
|
|||
#include <errno.h>
#include <pthread.h>
#include <semaphore.h>   /* was "/usr/include/semaphore.h" — absolute path is non-portable */
#include <stdbool.h>
#include <stdlib.h>

#include "kernel.h"
#include "thread.h"
|
||||
|
||||
#define NSEC_PER_SEC 1000000000L

/* Advance *a by ns nanoseconds, renormalising tv_nsec back into
 * [0, NSEC_PER_SEC). */
static inline void timespec_add_ns(struct timespec *a, uint64_t ns)
{
    uint64_t nsec = (uint64_t)a->tv_nsec + ns;

    a->tv_sec += (time_t)(nsec / NSEC_PER_SEC);
    a->tv_nsec = (long)(nsec % NSEC_PER_SEC);
}
|
||||
|
||||
/* Set once init_threads() has run; create_thread() aborts before that. */
static int threads_initialized;

/* Handed to trampoline() by create_thread(); the new thread reads it,
 * posts init_sem, and frees it. */
struct thread_init_data {
    void (*function)(void);       /* the thread's entry function */
    bool start_frozen;            /* park in trampoline until thread_thaw() */
    sem_t init_sem;               /* posted once the thread has started */
    struct thread_entry *entry;   /* the new thread's bookkeeping entry */
};

/* Thread-local pointer to the calling thread's own entry. */
__thread struct thread_entry *_current;
|
||||
|
||||
/* Return the calling thread's thread_entry (from thread-local storage). */
struct thread_entry* thread_self_entry(void)
{
    return _current;
}
|
||||
|
||||
/* Return the calling thread's id.
 * NOTE(review): pthread_t is cast to unsigned; this assumes pthread_t is
 * an integral type that fits in unsigned int (true on Linux/NPTL, not
 * guaranteed by POSIX) — confirm for the targeted hosts. */
unsigned int thread_self(void)
{
    return (unsigned) pthread_self();
}
|
||||
|
||||
/* Fixed-size table mapping pthread ids to their thread_entry.
 * Slot 0 is claimed by init_threads() for the main thread; free slots
 * are recognised by thread_id == 0. */
static struct thread_entry_item {
    unsigned thread_id;
    struct thread_entry *entry;
} entry_lookup[32];


/* Return the lookup slot whose id equals thread_id, or NULL when no
 * slot matches. */
static struct thread_entry_item *__find_thread_entry(unsigned thread_id)
{
    /* Derive the bound from the array itself instead of repeating the
     * magic constant 32. */
    for (size_t i = 0; i < sizeof(entry_lookup) / sizeof(entry_lookup[0]); i++)
    {
        if (entry_lookup[i].thread_id == thread_id)
            return &entry_lookup[i];
    }
    return NULL;
}

/* Return the thread_entry registered for thread_id, or NULL when the id
 * is unknown.  (Previously an unknown id dereferenced the NULL slot
 * pointer; callers must treat NULL as "no such thread".) */
static struct thread_entry *find_thread_entry(unsigned thread_id)
{
    struct thread_entry_item *item = __find_thread_entry(thread_id);
    return item ? item->entry : NULL;
}
|
||||
|
||||
/* pthread entry point for every thread made by create_thread().
 *
 * arg is a heap-allocated struct thread_init_data.  The trampoline
 * copies out what it needs, signals the creator via init_sem,
 * optionally parks itself until thread_thaw(), frees the init data and
 * finally runs the real thread function. */
static void *trampoline(void *arg)
{
    struct thread_init_data *data = arg;

    void (*thread_fn)(void) = data->function;

    /* Publish this thread's entry in its TLS slot before the creator
     * is released. */
    _current = data->entry;

    if (data->start_frozen)
    {
        /* Park on a private corelock/queue until thread_thaw() wakes
         * us.  The lock is published through _current->lock so that
         * thread_thaw() can find and take it. */
        struct corelock thaw_lock;
        struct thread_entry *queue = NULL;
        corelock_init(&thaw_lock);
        corelock_lock(&thaw_lock);

        _current->lock = &thaw_lock;
        _current->bqp = &queue;
        /* From here on the creator may return from create_thread(). */
        sem_post(&data->init_sem);
        block_thread_switch(_current, _current->lock);
        _current->lock = NULL;

        corelock_unlock(&thaw_lock);
    }
    else
        sem_post(&data->init_sem);

    /* NOTE(review): data is freed here, but the creator may still be
     * inside sem_wait() on data->init_sem at this instant — potential
     * use-after-free race on the semaphore.  Confirm, and consider
     * having the creator free the init data after its sem_wait(). */
    free(data);
    thread_fn();

    return NULL;
}
|
||||
|
||||
void thread_thaw(unsigned int thread_id)
|
||||
{
|
||||
struct thread_entry *e = find_thread_entry(thread_id);
|
||||
if (e->lock)
|
||||
{
|
||||
corelock_lock(e->lock);
|
||||
wakeup_thread(e->bqp);
|
||||
corelock_unlock(e->lock);
|
||||
}
|
||||
/* else: no lock. must be running already */
|
||||
}
|
||||
|
||||
void init_threads(void)
|
||||
{
|
||||
struct thread_entry_item *item0 = &entry_lookup[0];
|
||||
item0->entry = calloc(1, sizeof(struct thread_entry));
|
||||
item0->thread_id = pthread_self();
|
||||
|
||||
_current = item0->entry;
|
||||
pthread_cond_init(&item0->entry->cond, NULL);
|
||||
threads_initialized = 1;
|
||||
}
|
||||
|
||||
|
||||
unsigned int create_thread(void (*function)(void),
|
||||
void* stack, size_t stack_size,
|
||||
unsigned flags, const char *name
|
||||
//~ IF_PRIO(, int priority)
|
||||
IF_COP(, unsigned int core))
|
||||
{
|
||||
pthread_t retval;
|
||||
|
||||
struct thread_init_data *data = calloc(1, sizeof(struct thread_init_data));
|
||||
struct thread_entry *entry = calloc(1, sizeof(struct thread_entry));
|
||||
struct thread_entry_item *item;
|
||||
|
||||
if (!threads_initialized)
|
||||
abort();
|
||||
|
||||
data->function = function;
|
||||
data->start_frozen = flags & CREATE_THREAD_FROZEN;
|
||||
data->entry = entry;
|
||||
pthread_cond_init(&entry->cond, NULL);
|
||||
entry->runnable = true;
|
||||
entry->l = (struct thread_list) { NULL, NULL };
|
||||
sem_init(&data->init_sem, 0, 0);
|
||||
|
||||
if (pthread_create(&retval, NULL, trampoline, data) < 0)
|
||||
return -1;
|
||||
|
||||
sem_wait(&data->init_sem);
|
||||
|
||||
item = __find_thread_entry(0);
|
||||
item->thread_id = retval;
|
||||
item->entry = entry;
|
||||
|
||||
pthread_setname_np(retval, name);
|
||||
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
||||
static void add_to_list_l(struct thread_entry **list,
|
||||
struct thread_entry *thread)
|
||||
{
|
||||
if (*list == NULL)
|
||||
{
|
||||
/* Insert into unoccupied list */
|
||||
thread->l.next = thread;
|
||||
thread->l.prev = thread;
|
||||
*list = thread;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Insert last */
|
||||
thread->l.next = *list;
|
||||
thread->l.prev = (*list)->l.prev;
|
||||
thread->l.prev->l.next = thread;
|
||||
(*list)->l.prev = thread;
|
||||
}
|
||||
}
|
||||
|
||||
static void remove_from_list_l(struct thread_entry **list,
|
||||
struct thread_entry *thread)
|
||||
{
|
||||
if (thread == thread->l.next)
|
||||
{
|
||||
/* The only item */
|
||||
*list = NULL;
|
||||
return;
|
||||
}
|
||||
|
||||
if (thread == *list)
|
||||
{
|
||||
/* List becomes next item */
|
||||
*list = thread->l.next;
|
||||
}
|
||||
|
||||
/* Fix links to jump over the removed entry. */
|
||||
thread->l.prev->l.next = thread->l.next;
|
||||
thread->l.next->l.prev = thread->l.prev;
|
||||
}
|
||||
|
||||
unsigned int thread_queue_wake(struct thread_entry **list)
|
||||
{
|
||||
unsigned int result = THREAD_NONE;
|
||||
|
||||
for (;;)
|
||||
{
|
||||
unsigned int rc = wakeup_thread(list);
|
||||
|
||||
if (rc == THREAD_NONE)
|
||||
break;
|
||||
|
||||
result |= rc;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/* for block_thread(), _w_tmp() and wakeup_thread() t->lock must point
|
||||
* to a corelock instance, and this corelock must be held by the caller */
|
||||
void block_thread_switch(struct thread_entry *t, struct corelock *cl)
|
||||
{
|
||||
t->runnable = false;
|
||||
add_to_list_l(t->bqp, t);
|
||||
while(!t->runnable)
|
||||
pthread_cond_wait(&t->cond, &cl->mutex);
|
||||
}
|
||||
|
||||
void block_thread_switch_w_tmo(struct thread_entry *t, int timeout, struct corelock *cl)
|
||||
{
|
||||
int err = 0;
|
||||
struct timespec ts;
|
||||
|
||||
clock_gettime(CLOCK_REALTIME, &ts);
|
||||
timespec_add_ns(&ts, timeout * (NSEC_PER_SEC/HZ));
|
||||
|
||||
t->runnable = false;
|
||||
add_to_list_l(t->bqp, t);
|
||||
while(!t->runnable && !err)
|
||||
err = pthread_cond_timedwait(&t->cond, &cl->mutex, &ts);
|
||||
|
||||
if (err == ETIMEDOUT)
|
||||
{ /* the thread timed out and was not explicitely woken up.
|
||||
* we need to do this now to mark it runnable again */
|
||||
remove_from_list_l(t->bqp, t);
|
||||
t->runnable = true;
|
||||
if (t->wakeup_ext_cb)
|
||||
t->wakeup_ext_cb(t);
|
||||
}
|
||||
}
|
||||
|
||||
unsigned int wakeup_thread(struct thread_entry **list)
|
||||
{
|
||||
struct thread_entry *t = *list;
|
||||
if (t)
|
||||
{
|
||||
remove_from_list_l(list, t);
|
||||
t->runnable = true;
|
||||
pthread_cond_signal(&t->cond);
|
||||
}
|
||||
return THREAD_NONE;
|
||||
}
|
||||
|
||||
|
||||
/* Cooperative yield is a no-op on the pthread backend: the host OS
 * scheduler preempts threads on its own. */
void yield(void) {}
|
||||
|
||||
unsigned sleep(unsigned ticks)
|
||||
{
|
||||
struct timespec ts;
|
||||
|
||||
ts.tv_sec = ticks/HZ;
|
||||
ts.tv_nsec = (ticks % HZ) * (NSEC_PER_SEC/HZ);
|
||||
|
||||
nanosleep(&ts, NULL);
|
||||
|
||||
return 0;
|
||||
}
|
Loading…
Reference in a new issue