/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2007 Nicolas Pennequin
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include "config.h"
#include <string.h>
#include "strlcpy.h"
#include "system.h"
#include "storage.h"
#include "thread.h"
#include "kernel.h"
#include "panic.h"
#include "debug.h"
#include "file.h"
#include "appevents.h"
#include "metadata.h"
#include "bmp.h"
#ifdef HAVE_ALBUMART
#include "albumart.h"
#include "jpeg_load.h"
#include "playback.h"
#endif
#include "buffering.h"

/* Define LOGF_ENABLE to enable logf output in this file */
/* #define LOGF_ENABLE */
#include "logf.h"

/* macros to enable logf for queues
   logging on SYS_TIMEOUT can be disabled */
#ifdef SIMULATOR
/* Define this for logf output of all queuing except SYS_TIMEOUT */
#define BUFFERING_LOGQUEUES
/* Define this to logf SYS_TIMEOUT messages */
/* #define BUFFERING_LOGQUEUES_SYS_TIMEOUT */
#endif

#ifdef BUFFERING_LOGQUEUES
#define LOGFQUEUE logf
#else
#define LOGFQUEUE(...)
#endif

#ifdef BUFFERING_LOGQUEUES_SYS_TIMEOUT
#define LOGFQUEUE_SYS_TIMEOUT logf
#else
#define LOGFQUEUE_SYS_TIMEOUT(...)
#endif

#define GUARD_BUFSIZE   (32*1024)

/* amount of data to read in one read() call */
#define BUFFERING_DEFAULT_FILECHUNK      (1024*32)

#define BUF_HANDLE_MASK                  0x7FFFFFFF

enum handle_flags
{
    H_CANWRAP   = 0x1,   /* Handle data may wrap in buffer */
    H_ALLOCALL  = 0x2,   /* All data must be allocated up front */
    H_FIXEDDATA = 0x4,   /* Data is fixed in position */
};

struct memory_handle {
    int id;                /* A unique ID for the handle */
    enum data_type type;   /* Type of data buffered with this handle */
    uint8_t flags;         /* Handle property flags */
    int8_t pinned;         /* Count of pinnings */
    int8_t signaled;       /* Stop any attempt at waiting to get the data */
    char path[MAX_PATH];   /* Path if data originated in a file */
    int fd;                /* File descriptor to path (-1 if closed) */
    size_t data;           /* Start index of the handle's data buffer */
    size_t ridx;           /* Read pointer, relative to the main buffer */
    size_t widx;           /* Write pointer, relative to the main buffer */
    ssize_t filesize;      /* File total length */
    off_t start;           /* Offset at which we started reading the file */
    off_t pos;             /* Read position in file */
    off_t volatile end;    /* Offset at which we stopped reading the file */
    struct memory_handle *next;
};

struct buf_message_data
{
    int handle_id;
    intptr_t data;
};

static char *buffer;
static char *guard_buffer;

static size_t buffer_len;

/* Configuration */
static size_t conf_watermark = 0; /* Level to trigger filebuf fill */
static size_t high_watermark = 0; /* High watermark for rebuffer */

/* current memory handle in the linked list. NULL when the list is empty. */
static struct memory_handle *cur_handle;
/* first memory handle in the linked list. NULL when the list is empty. */
static struct memory_handle *first_handle;

static int num_handles;  /* number of handles in the list */

static int base_handle_id;

/* Main lock for adding / removing handles */
static struct mutex llist_mutex SHAREDBSS_ATTR;

/* Handle cache (makes find_handle faster).
   This is global so that move_handle and rm_handle can invalidate it. */
static struct memory_handle *cached_handle = NULL;

static struct data_counters
{
    size_t remaining;  /* Amount of data needing to be buffered */
    size_t buffered;   /* Amount of data currently in the buffer */
    size_t useful;     /* Amount of data still useful to the user */
} data_counters;

/* Messages available to communicate with the buffering thread */
enum
{
    Q_BUFFER_HANDLE = 1, /* Request buffering of a handle, this should not be
                            used in a low buffer situation. */
    Q_REBUFFER_HANDLE,   /* Request reset and rebuffering of a handle at a new
                            file starting position. */
    Q_CLOSE_HANDLE,      /* Request closing a handle */

    /* Configuration: */
    Q_START_FILL,        /* Request that the buffering thread initiate a buffer
                            fill at its earliest convenience */
    Q_HANDLE_ADDED,      /* Inform the buffering thread that a handle was added
                            (which means the disk is spinning) */
};

/* Buffering thread */
static void buffering_thread(void);
static long buffering_stack[(DEFAULT_STACK_SIZE + 0x2000)/sizeof(long)];
static const char buffering_thread_name[] = "buffering";
static unsigned int buffering_thread_id = 0;
static struct event_queue buffering_queue SHAREDBSS_ATTR;
static struct queue_sender_list buffering_queue_sender_list SHAREDBSS_ATTR;

static void close_fd(int *fd_p)
{
    int fd = *fd_p;
    if (fd >= 0) {
        close(fd);
        *fd_p = -1;
    }
}

/* Ring buffer helper functions */

static inline void * ringbuf_ptr(uintptr_t p)
{
    return buffer + p;
}

static inline uintptr_t ringbuf_offset(const void *ptr)
{
    return (uintptr_t)(ptr - (void *)buffer);
}

/* Buffer pointer (p) plus value (v), wrapped if necessary */
static inline uintptr_t ringbuf_add(uintptr_t p, size_t v)
{
    uintptr_t res = p + v;
    if (res >= buffer_len)
        res -= buffer_len; /* wrap if necessary */
    return res;
}

/* Buffer pointer (p) minus value (v), wrapped if necessary */
static inline uintptr_t ringbuf_sub(uintptr_t p, size_t v)
{
    uintptr_t res = p;
    if (p < v)
        res += buffer_len; /* wrap */

    return res - v;
}

/* How far value (v) plus buffer pointer (p1) will cross buffer pointer (p2) */
static inline ssize_t ringbuf_add_cross(uintptr_t p1, size_t v, uintptr_t p2)
{
    ssize_t res = p1 + v - p2;
    if (p1 >= p2) /* wrap if necessary */
        res -= buffer_len;

    return res;
}
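
/* Illustrative example (not part of the build): with buffer_len == 100,
 *   ringbuf_add(90, 15)           == 5   (90 + 15 wraps past the end)
 *   ringbuf_sub(5, 15)            == 90  (5 - 15 wraps past the start)
 *   ringbuf_add_cross(90, 15, 10) == -5  (writing 15 bytes at 90 stops
 *                                         5 bytes short of pointer 10)
 * A positive ringbuf_add_cross() result means the write would overrun p2.
 */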

/* Real buffer watermark */
#define BUF_WATERMARK MIN(conf_watermark, high_watermark)

/*
LINKED LIST MANAGEMENT
======================

add_handle  : Add a handle to the list
rm_handle   : Remove a handle from the list
find_handle : Get a handle pointer from an ID
move_handle : Move a handle in the buffer (with or without its data)

These functions only handle the linked list structure. They don't touch the
contents of the struct memory_handle headers.

The first and current (== last) handle are kept track of.
A new handle is added at the end and becomes the current one.

num_handles = N
first_handle -> h0 -> h1 -> h2 -> ... hN-1 -> NULL
                                      ^
cur_handle -------------------------+
*/

static int next_handle_id(void)
{
    static int cur_handle_id = 0;

    /* Wrapping the signed int is safe and the value 0 is never produced */
    int next_hid = (cur_handle_id + 1) & BUF_HANDLE_MASK;
    if (next_hid == 0)
        next_hid = 1;

    cur_handle_id = next_hid;

    return next_hid;
}
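
/* Illustrative note (not part of the build): IDs count up from 1 and are
 * masked to 31 bits, so the sequence is 1, 2, ... 0x7FFFFFFF and then wraps
 * back to 1. Zero is skipped, and negative values stay free to serve as
 * error codes such as ERR_HANDLE_NOT_FOUND.
 */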

/* adds the handle to the linked list */
static void link_cur_handle(struct memory_handle *h)
{
    h->next = NULL;

    if (first_handle)
        cur_handle->next = h;
    else
        first_handle = h; /* the first one */

    cur_handle = h;
    num_handles++;
}

/* Add a new handle to the linked list and return it. It will have become the
   new current handle.
   flags contains information on how this may be allocated
   data_size must contain the size of what will be in the handle.
   data_out points to a variable to receive the first available byte of the
   data area.
   returns a valid memory handle if all conditions for allocation are met.
           NULL if the memory_handle itself cannot be allocated or if the
           data_size cannot be allocated and H_ALLOCALL is set. */
static struct memory_handle *
add_handle(unsigned int flags, size_t data_size, size_t *data_out)
{
    /* Limit the number of open handles */
    if (num_handles >= BUF_MAX_HANDLES)
        return NULL;

    size_t ridx = 0, widx = 0;
    off_t cur_total = 0;

    if (first_handle) {
        /* Buffer is not empty */
        ridx = ringbuf_offset(first_handle);
        widx = cur_handle->data;
        cur_total = cur_handle->filesize - cur_handle->start;
    }

    if (cur_total > 0) {
        /* the current handle hasn't finished buffering. We can only add
           a new one if there is already enough free space to finish
           the buffering. */
        if (ringbuf_add_cross(widx, cur_total, ridx) >= 0) {
            /* Not enough space to finish allocation */
            return NULL;
        } else {
            /* Apply all the needed reserve */
            widx = ringbuf_add(widx, cur_total);
        }
    }

    /* Align to pointer size up */
    size_t adjust = ALIGN_UP(widx, sizeof(intptr_t)) - widx;
    size_t index = ringbuf_add(widx, adjust);
    size_t len = data_size + sizeof(struct memory_handle);

    /* First, will the handle wrap? */
    /* If the handle would wrap, move to the beginning of the buffer,
     * or if the data must not but would wrap, move it to the beginning */
    if (index + sizeof(struct memory_handle) > buffer_len ||
            (!(flags & H_CANWRAP) && index + len > buffer_len)) {
        index = 0;
    }

    /* How far we shifted index to align things, must be < buffer_len */
    size_t shift = ringbuf_sub(index, widx);

    /* How much space are we short in the actual ring buffer? */
    ssize_t overlap = ringbuf_add_cross(widx, shift + len, ridx);
    if (overlap >= 0 &&
        ((flags & H_ALLOCALL) || (size_t)overlap >= data_size)) {
        /* Not enough space for required allocations */
        return NULL;
    }

    /* There is enough space for the required data, initialize the struct */
    struct memory_handle *h = ringbuf_ptr(index);

    h->id = next_handle_id();
    h->flags = flags;
    h->pinned = 0; /* Can be moved */
    h->signaled = 0; /* Data can be waited for */

    /* Return the start of the data area */
    *data_out = ringbuf_add(index, sizeof (struct memory_handle));

    return h;
}
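
/* Illustrative layout sketch (not part of the build): a successful
 * add_handle() carves one region out of the ring:
 *
 *   index                         *data_out
 *     |                               |
 *     v                               v
 *     [ struct memory_handle ][ data_size bytes ... ]
 *
 * index is the pointer-aligned write position; the struct itself never
 * wraps, and the data area may wrap only when H_CANWRAP is set.
 */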

/* Delete a given memory handle from the linked list
   and return true for success. Nothing is actually erased from memory. */
static bool rm_handle(const struct memory_handle *h)
{
    if (h == NULL)
        return true;

    struct memory_handle *m = first_handle;
    struct memory_handle *c = cur_handle;

    if (h == m) {
        m = m->next;
        first_handle = m;
        if (!m) {
            /* h was the first and last handle: the buffer is now empty */
            cur_handle = NULL;
        }
    } else {
        /* Find the previous handle */
        while (m && m->next != h) {
            m = m->next;
        }
        if (m && m->next == h) {
            m->next = h->next;
            if (h == c)
                cur_handle = m;
        } else {
            /* If we don't find ourselves, this is a seriously incoherent
               state with a corrupted list and severe action is needed! */
            panicf("rm_handle fail: %d", h->id);
            return false;
        }
    }

    /* Invalidate the cache to prevent it from keeping the old location of h */
    if (h == cached_handle)
        cached_handle = NULL;

    num_handles--;
    return true;
}

/* Return a pointer to the memory handle of given ID.
   NULL if the handle wasn't found */
static struct memory_handle *find_handle(int handle_id)
{
    if (handle_id < 0 || !first_handle)
        return NULL;

    /* Simple caching because most of the time the requested handle
       will either be the same as the last, or the one after the last */
    struct memory_handle *cached = cached_handle;
    if (cached) {
        if (cached->id == handle_id) {
            return cached;
        } else {
            cached = cached->next;
            if (cached && cached->id == handle_id) {
                cached_handle = cached;
                return cached;
            }
        }
    }

    struct memory_handle *m = first_handle;
    while (m && m->id != handle_id) {
        m = m->next;
    }

    /* This condition can only be reached with !m or m->id == handle_id */
    if (m)
        cached_handle = m;

    return m;
}

/* Move a memory handle and data_size of its data delta bytes along the buffer.
   delta is the maximum number of bytes available to move the handle; if the
   move is performed it is set to the actual distance moved.
   data_size is the amount of data to move along with the struct.
   returns true if the move is successful and false if the handle is NULL,
           the move would be less than the size of a memory_handle after
           correcting for wraps, or if the handle is not found in the linked
           list for adjustment. This function has no side effects if false
           is returned. */
static bool move_handle(struct memory_handle **h, size_t *delta,
                        size_t data_size)
{
    const struct memory_handle *src;

    if (h == NULL || (src = *h) == NULL)
        return false;

    size_t size_to_move = sizeof(struct memory_handle) + data_size;

    /* Align to pointer size down */
    size_t final_delta = *delta;
    final_delta = ALIGN_DOWN(final_delta, sizeof(intptr_t));
    if (final_delta < sizeof(struct memory_handle)) {
        /* It's not legal to move less than the size of the struct */
        return false;
    }

    uintptr_t oldpos = ringbuf_offset(src);
    uintptr_t newpos = ringbuf_add(oldpos, final_delta);
    intptr_t overlap = ringbuf_add_cross(newpos, size_to_move, buffer_len);
    intptr_t overlap_old = ringbuf_add_cross(oldpos, size_to_move, buffer_len);

    if (overlap > 0) {
        /* Some part of the struct + data would wrap, maybe ok */
        ssize_t correction = 0;
        /* If the overlap lands inside the memory_handle */
        if (!(src->flags & H_CANWRAP)) {
            /* Otherwise the overlap falls in the data area and must all be
             * backed out. This may become conditional if ever we move
             * data that is allowed to wrap (ie audio) */
            correction = overlap;
        } else if ((uintptr_t)overlap > data_size) {
            /* Correct the position and real delta to prevent the struct from
             * wrapping, this guarantees an aligned delta if the struct size is
             * aligned and the buffer is aligned */
            correction = overlap - data_size;
        }
        if (correction) {
            /* Align correction to pointer size up */
            correction = ALIGN_UP(correction, sizeof(intptr_t));
            if (final_delta < correction + sizeof(struct memory_handle)) {
                /* Delta cannot end up less than the size of the struct */
                return false;
            }
            newpos -= correction;
            overlap -= correction; /* Used below to know how to split the data */
            final_delta -= correction;
        }
    }

    struct memory_handle *dest = ringbuf_ptr(newpos);

    if (src == first_handle) {
        first_handle = dest;
    } else {
        struct memory_handle *m = first_handle;
        while (m && m->next != src) {
            m = m->next;
        }
        if (m && m->next == src) {
            m->next = dest;
        } else {
            return false;
        }
    }

    /* Update the cache to prevent it from keeping the old location of h */
    if (src == cached_handle)
        cached_handle = dest;

    /* the cur_handle pointer might need updating */
    if (src == cur_handle)
        cur_handle = dest;

    /* x = handle(s) following this one...
     * ...if last handle, unmoveable if metadata, only shrinkable if audio.
     * In other words, no legal move can be made that would have the src head
     * and dest tail of the data overlap itself. These facts reduce the
     * problem to four essential permutations.
     *
     * movement: always "clockwise" >>>>
     *
     * (src nowrap, dest nowrap)
     * |0123  x |
     * | 0123x  | etc...
     * move: "0123"
     *
     * (src nowrap, dest wrap)
     * |  x0123 |
     * |23x   01|
     * move: "23", "01"
     *
     * (src wrap, dest nowrap)
     * |23   x01|
     * | 0123x  |
     * move: "23", "01"
     *
     * (src wrap, dest wrap)
     * |23 x  01|
     * |123x   0|
     * move: "23", "1", "0"
     */
    if (overlap_old > 0) {
        /* Move over already wrapped data by the final delta */
        memmove(ringbuf_ptr(final_delta), ringbuf_ptr(0), overlap_old);
        if (overlap <= 0)
            size_to_move -= overlap_old;
    }

    if (overlap > 0) {
        /* Move data that now wraps to the beginning */
        size_to_move -= overlap;
        memmove(ringbuf_ptr(0), SKIPBYTES(src, size_to_move),
                overlap_old > 0 ? final_delta : (size_t)overlap);
    }

    /* Move leading fragment containing handle struct */
    memmove(dest, src, size_to_move);

    /* Update the caller with the new location of h and the distance moved */
    *h = dest;
    *delta = final_delta;
    return true;
}
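
/* Illustrative example with small made-up numbers (not part of the build):
 * suppose buffer_len == 100 and a struct+data block of 30 bytes sits at
 * offset 80. A tentative delta of 16 puts the block at 96, overlapping the
 * end of the buffer by 26 bytes. With H_CANWRAP clear the whole overlap must
 * be backed out: the 26 bytes are rounded up to pointer alignment and
 * subtracted from newpos, overlap and final_delta. If the corrected delta
 * drops below sizeof(struct memory_handle), the move is abandoned and false
 * is returned.
 */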

/*
BUFFER SPACE MANAGEMENT
=======================

update_data_counters: Updates the values in data_counters
buffer_is_low   : Returns true if the amount of useful data in the buffer is low
buffer_handle   : Buffer data for a handle
rebuffer_handle : Seek to a nonbuffered part of a handle by rebuffering the data
shrink_handle   : Free buffer space by moving a handle
fill_buffer     : Call buffer_handle for all handles that have data to buffer

These functions are used by the buffering thread to manage buffer space.
*/

static int update_data_counters(struct data_counters *dc)
{
    size_t buffered = 0;
    size_t remaining = 0;
    size_t useful = 0;

    if (dc == NULL)
        dc = &data_counters;

    mutex_lock(&llist_mutex);

    int num = num_handles;
    struct memory_handle *m = find_handle(base_handle_id);
    bool is_useful = m == NULL;

    for (m = first_handle; m; m = m->next)
    {
        off_t pos = m->pos;
        off_t end = m->end;

        buffered += end - m->start;
        remaining += m->filesize - end;

        if (m->id == base_handle_id)
            is_useful = true;

        if (is_useful)
            useful += end - pos;
    }

    mutex_unlock(&llist_mutex);

    dc->buffered = buffered;
    dc->remaining = remaining;
    dc->useful = useful;

    return num;
}

static inline bool buffer_is_low(void)
{
    update_data_counters(NULL);
    return data_counters.useful < BUF_WATERMARK / 2;
}
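
/* Illustrative example (not part of the build): with conf_watermark set to
 * 512 KiB and high_watermark to 300 KiB, BUF_WATERMARK is the smaller value,
 * 300 KiB, so buffer_is_low() reports true once less than 150 KiB of data
 * ahead of the read positions remains buffered.
 */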

/* Q_BUFFER_HANDLE event and buffer data for the given handle.
   Return whether or not the buffering should continue explicitly. */
static bool buffer_handle(int handle_id, size_t to_buffer)
{
    logf("buffer_handle(%d, %lu)", handle_id, (unsigned long)to_buffer);
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return true;

    logf("  type: %d", (int)h->type);

    if (h->end >= h->filesize) {
        /* nothing left to buffer */
        return true;
    }

    if (h->fd < 0) { /* file closed, reopen */
        if (h->path[0] != '\0')
            h->fd = open(h->path, O_RDONLY);

        if (h->fd < 0) {
            /* could not open the file, truncate it where it is */
            h->filesize = h->end;
            return true;
        }

        if (h->start)
            lseek(h->fd, h->start, SEEK_SET);
    }

    trigger_cpu_boost();

    if (h->type == TYPE_ID3) {
        if (!get_metadata(ringbuf_ptr(h->data), h->fd, h->path)) {
            /* metadata parsing failed: clear the buffer. */
            wipe_mp3entry(ringbuf_ptr(h->data));
        }
        close_fd(&h->fd);
        h->widx = ringbuf_add(h->data, h->filesize);
        h->end = h->filesize;
        send_event(BUFFER_EVENT_FINISHED, &handle_id);
        return true;
    }

    bool stop = false;
    while (h->end < h->filesize && !stop)
    {
        /* max amount to copy */
        size_t widx = h->widx;

        ssize_t copy_n = h->filesize - h->end;
        copy_n = MIN(copy_n, BUFFERING_DEFAULT_FILECHUNK);
        copy_n = MIN(copy_n, (off_t)(buffer_len - widx));

        uintptr_t offset = ringbuf_offset(h->next ?: first_handle);
        ssize_t overlap = ringbuf_add_cross(widx, copy_n, offset);

        /* read only up to available space and stop if it would overwrite
           the next handle; stop one byte early for last handle to avoid
           empty/full alias */
        if (!h->next)
            overlap++;

        if (overlap > 0) {
            stop = true;
            copy_n -= overlap;
        }

        if (copy_n <= 0)
            return false; /* no space for read */

        /* rc is the actual amount read */
        ssize_t rc = read(h->fd, ringbuf_ptr(widx), copy_n);

        if (rc <= 0) {
            /* Some kind of filesystem error, maybe recoverable if not codec */
            if (h->type == TYPE_CODEC) {
                logf("Partial codec");
                break;
            }

            logf("File ended %lu bytes early\n",
                 (unsigned long)(h->filesize - h->end));
            h->filesize = h->end;
            break;
        }

        /* Advance buffer and make data available to users */
        h->widx = ringbuf_add(widx, rc);
        h->end += rc;

        yield();

        if (to_buffer == 0) {
            /* Normal buffering - check queue */
            if (!queue_empty(&buffering_queue))
                break;
        } else {
            if (to_buffer <= (size_t)rc)
                break; /* Done */
            to_buffer -= rc;
        }
    }

    if (h->end >= h->filesize) {
        /* finished buffering the file */
        close_fd(&h->fd);
        send_event(BUFFER_EVENT_FINISHED, &handle_id);
    }

    return !stop;
}

/* Close the specified handle id and free its allocation. */
/* Q_CLOSE_HANDLE */
static bool close_handle(int handle_id)
{
    bool retval = true;

    mutex_lock(&llist_mutex);
    struct memory_handle *h = find_handle(handle_id);

    /* If the handle is not found, it is closed */
    if (h) {
        close_fd(&h->fd);
        /* rm_handle returns true unless the handle somehow persists after
           exit */
        retval = rm_handle(h);
    }

    mutex_unlock(&llist_mutex);
    return retval;
}

/* Free buffer space by moving the handle struct right before the useful
   part of its data buffer or by moving all the data. */
static void shrink_handle(struct memory_handle *h)
{
    if (!h)
        return;

    if (h->type == TYPE_PACKET_AUDIO) {
        /* only move the handle struct */
        /* data is pinned by default - if we start moving packet audio,
           the semantics will determine whether or not data is movable
           but the handle will remain movable in either case */
        size_t delta = ringbuf_sub(h->ridx, h->data);

        /* The value of delta might change for alignment reasons */
        if (!move_handle(&h, &delta, 0))
            return;

        h->data = ringbuf_add(h->data, delta);
        h->start += delta;
    } else {
        /* metadata handle: we can move all of it */
        if (h->pinned || !h->next)
            return; /* Pinned, last handle */

        size_t data_size = h->filesize - h->start;
        uintptr_t handle_distance =
            ringbuf_sub(ringbuf_offset(h->next), h->data);
        size_t delta = handle_distance - data_size;

        /* The value of delta might change for alignment reasons */
        if (!move_handle(&h, &delta, data_size))
            return;

        size_t olddata = h->data;
        h->data = ringbuf_add(h->data, delta);
        h->ridx = ringbuf_add(h->ridx, delta);
        h->widx = ringbuf_add(h->widx, delta);

        switch (h->type)
        {
        case TYPE_ID3:
            if (h->filesize != sizeof(struct mp3entry))
                break;
            /* when moving an mp3entry we need to readjust its pointers */
            adjust_mp3entry(ringbuf_ptr(h->data), ringbuf_ptr(h->data),
                            ringbuf_ptr(olddata));
            break;

        case TYPE_BITMAP:
            /* adjust the bitmap's pointer */
            ((struct bitmap *)ringbuf_ptr(h->data))->data =
                ringbuf_ptr(h->data + sizeof(struct bitmap));
            break;

        default:
            break;
        }
    }
}
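
/* Illustrative example (not part of the build): for a packet-audio handle
 * whose reader has already consumed 40 KiB (ridx sits 40 KiB past data), the
 * struct is slid forward by up to that delta so the consumed bytes become
 * free space; for a movable metadata handle the struct and data are instead
 * packed up against the following handle, reclaiming the slack between them.
 */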

/* Fill the buffer by buffering as much data as possible for handles that still
   have data left to buffer
   Return whether or not to continue filling after this */
static bool fill_buffer(void)
{
    logf("fill_buffer()");
    struct memory_handle *m = first_handle;

    shrink_handle(m);

    while (queue_empty(&buffering_queue) && m) {
        if (m->end < m->filesize && !buffer_handle(m->id, 0)) {
            m = NULL;
            break;
        }
        m = m->next;
    }

    if (m) {
        return true;
    } else {
        /* only spin the disk down if the filling wasn't interrupted by an
           event arriving in the queue. */
        storage_sleep();
        return false;
    }
}

#ifdef HAVE_ALBUMART
/* Given a file descriptor to a bitmap file, write the bitmap data to the
   buffer, with a struct bitmap and the actual data immediately following.
   Return value is the total size (struct + data). */
static int load_image(int fd, const char *path,
                      struct bufopen_bitmap_data *data,
                      size_t bufidx)
{
    int rc;
    struct bitmap *bmp = ringbuf_ptr(bufidx);
    struct dim *dim = data->dim;
    struct mp3_albumart *aa = data->embedded_albumart;

    /* get the desired image size */
    bmp->width = dim->width, bmp->height = dim->height;
    /* FIXME: alignment may be needed for the data buffer. */
    bmp->data = ringbuf_ptr(bufidx + sizeof(struct bitmap));

#if (LCD_DEPTH > 1) || defined(HAVE_REMOTE_LCD) && (LCD_REMOTE_DEPTH > 1)
    bmp->maskdata = NULL;
#endif
    int free = (int)MIN(buffer_len - buf_used(), buffer_len - bufidx)
               - sizeof(struct bitmap);

#ifdef HAVE_JPEG
    if (aa != NULL) {
        lseek(fd, aa->pos, SEEK_SET);
        rc = clip_jpeg_fd(fd, aa->size, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
                          FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
    }
    else if (strcmp(path + strlen(path) - 4, ".bmp"))
        rc = read_jpeg_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
                          FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);
    else
#endif
        rc = read_bmp_fd(fd, bmp, free, FORMAT_NATIVE|FORMAT_DITHER|
                         FORMAT_RESIZE|FORMAT_KEEP_ASPECT, NULL);

    return rc + (rc > 0 ? sizeof(struct bitmap) : 0);
    (void)path;
}
#endif /* HAVE_ALBUMART */

/*
MAIN BUFFERING API CALLS
========================

bufopen     : Request the opening of a new handle for a file
bufalloc    : Open a new handle for data other than a file.
bufclose    : Close an open handle
bufseek     : Set the read pointer in a handle
bufadvance  : Move the read pointer in a handle
bufread     : Copy data from a handle into a given buffer
bufgetdata  : Give a pointer to the handle's data

These functions are exported, to allow interaction with the buffer.
They take care of the content of the structs, and rely on the linked list
management functions for all the actual handle management work.
*/
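
/* Illustrative usage sketch (not part of the build; the real callers live in
 * the playback engine). A typical client opens a handle, reads from it, and
 * closes it when done:
 *
 *     int id = bufopen("/music/track.mp3", 0, TYPE_PACKET_AUDIO, NULL);
 *     if (id >= 0) {
 *         char chunk[512];
 *         ssize_t n = bufread(id, 512, chunk);
 *         // ... consume n bytes, bufadvance()/bufseek() as needed ...
 *         bufclose(id);
 *     }
 */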

/* Reserve space in the buffer for a file.
   file: name of the file to open
   offset: offset at which to start buffering the file, useful when the first
           offset bytes of the file aren't needed.
   type: one of the supported data types (audio, image, cuesheet, others)
   user_data: user data possibly passed in subcalls specific to a
              data_type (only used for image (albumart) buffering so far)
   return value: <0 if the file cannot be opened, or one file is already
                 queued to be opened, otherwise the handle for the file in
                 the buffer
*/
int bufopen(const char *file, size_t offset, enum data_type type,
            void *user_data)
{
    int handle_id = ERR_BUFFER_FULL;
    size_t data;
    struct memory_handle *h;

    /* No buffer refs until after the mutex_lock call! */

    if (type == TYPE_ID3) {
        /* ID3 case: allocate space, init the handle and return. */
        mutex_lock(&llist_mutex);

        h = add_handle(H_ALLOCALL, sizeof(struct mp3entry), &data);

        if (h) {
            handle_id = h->id;

            h->type = type;
            strlcpy(h->path, file, MAX_PATH);
            h->fd = -1;
            h->data = data;
            h->ridx = data;
            h->widx = data;
            h->filesize = sizeof(struct mp3entry);
            h->start = 0;
            h->pos = 0;
            h->end = 0;

            link_cur_handle(h);

            /* Inform the buffering thread that we added a handle */
            LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id);
            queue_post(&buffering_queue, Q_HANDLE_ADDED, handle_id);
        }

        mutex_unlock(&llist_mutex);
        return handle_id;
    }
    else if (type == TYPE_UNKNOWN)
        return ERR_UNSUPPORTED_TYPE;
#ifdef APPLICATION
    /* Loading code from memory is not supported in application builds */
    else if (type == TYPE_CODEC)
        return ERR_UNSUPPORTED_TYPE;
#endif

    /* Other cases: there is a little more work. */
    int fd = open(file, O_RDONLY);
    if (fd < 0)
        return ERR_FILE_ERROR;

    size_t size = 0;
#ifdef HAVE_ALBUMART
    if (type == TYPE_BITMAP) {
        /* If albumart is embedded, the complete file is not buffered,
         * but only the jpeg part; filesize() would be wrong */
        struct bufopen_bitmap_data *aa = user_data;
        if (aa->embedded_albumart)
            size = aa->embedded_albumart->size;
    }
#endif

    if (size == 0)
        size = filesize(fd);

    unsigned int hflags = 0;
    if (type == TYPE_PACKET_AUDIO || type == TYPE_CODEC)
        hflags = H_CANWRAP;

    size_t adjusted_offset = offset;
    if (adjusted_offset > size)
        adjusted_offset = 0;

    /* Reserve extra space because alignment can move data forward */
    size_t padded_size = STORAGE_PAD(size - adjusted_offset);

    mutex_lock(&llist_mutex);

    h = add_handle(hflags, padded_size, &data);
    if (!h) {
        DEBUGF("%s(): failed to add handle\n", __func__);
        mutex_unlock(&llist_mutex);
        close(fd);
        return ERR_BUFFER_FULL;
    }

    handle_id = h->id;

    h->type = type;
    strlcpy(h->path, file, MAX_PATH);
    h->fd = -1;

#ifdef STORAGE_WANTS_ALIGN
    /* Don't bother to storage align bitmaps because they are not
     * loaded directly into the buffer.
     */
    if (type != TYPE_BITMAP) {
        /* Align to desired storage alignment */
        size_t alignment_pad = STORAGE_OVERLAP((uintptr_t)adjusted_offset -
                                               (uintptr_t)ringbuf_ptr(data));
        data = ringbuf_add(data, alignment_pad);
    }
#endif /* STORAGE_WANTS_ALIGN */

    h->data = data;
    h->ridx = data;
    h->start = adjusted_offset;
    h->pos = adjusted_offset;

#ifdef HAVE_ALBUMART
    if (type == TYPE_BITMAP) {
        /* Bitmap file: we load the data instead of the file */
        int rc = load_image(fd, file, user_data, data);
        if (rc <= 0) {
            handle_id = ERR_FILE_ERROR;
        } else {
            data = ringbuf_add(data, rc);
            size = rc;
            adjusted_offset = rc;
        }
    }
    else
#endif
    if (type == TYPE_CUESHEET) {
        h->fd = fd;
    }

    if (handle_id >= 0) {
        h->widx = data;
        h->filesize = size;
        h->end = adjusted_offset;
        link_cur_handle(h);
    }

    mutex_unlock(&llist_mutex);

    if (type == TYPE_CUESHEET) {
        /* Immediately start buffering those */
        LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", handle_id);
        queue_send(&buffering_queue, Q_BUFFER_HANDLE, handle_id);
    } else {
        /* Other types will get buffered in the course of normal operations */
        close(fd);

        if (handle_id >= 0) {
            /* Inform the buffering thread that we added a handle */
            LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", handle_id);
            queue_post(&buffering_queue, Q_HANDLE_ADDED, handle_id);
        }
    }

    logf("bufopen: new hdl %d", handle_id);
    return handle_id;

    /* Currently only used for aa loading */
    (void)user_data;
}

/* Open a new handle from data that needs to be copied from memory.
   src is the source buffer from which to copy data. It can be NULL to simply
   reserve buffer space.
   size is the requested size. The call will only be successful if the
   requested amount of data can entirely fit in the buffer without wrapping.
   Return value is the handle id for success or <0 for failure.
*/
int bufalloc(const void *src, size_t size, enum data_type type)
{
    if (type == TYPE_UNKNOWN)
        return ERR_UNSUPPORTED_TYPE;

    int handle_id = ERR_BUFFER_FULL;

    mutex_lock(&llist_mutex);

    size_t data;
    struct memory_handle *h = add_handle(H_ALLOCALL, size, &data);

    if (h) {
        handle_id = h->id;

        if (src) {
            if (type == TYPE_ID3 && size == sizeof(struct mp3entry)) {
                /* specially take care of struct mp3entry */
                copy_mp3entry(ringbuf_ptr(data), src);
            } else {
                memcpy(ringbuf_ptr(data), src, size);
            }
        }

        h->type = type;
        h->path[0] = '\0';
        h->fd = -1;
        h->data = data;
        h->ridx = data;
        h->widx = ringbuf_add(data, size);
        h->filesize = size;
        h->start = 0;
        h->pos = 0;
        h->end = size;

        link_cur_handle(h);
    }

    mutex_unlock(&llist_mutex);

    logf("bufalloc: new hdl %d", handle_id);
    return handle_id;
}
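
/* Illustrative usage sketch (not part of the build): bufalloc() is how a
 * caller hands an in-memory mp3entry to the buffer, e.g.
 *
 *     struct mp3entry id3;  // filled in by the caller beforehand
 *     int id = bufalloc(&id3, sizeof(id3), TYPE_ID3);
 *
 * The H_ALLOCALL flag used internally guarantees the whole allocation
 * succeeds up front or the call fails with ERR_BUFFER_FULL.
 */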

/* Close the handle. Return true for success and false for failure */
bool bufclose(int handle_id)
{
    logf("bufclose(%d)", handle_id);
#if 0
    /* Don't interrupt the buffering thread if the handle is already
       stale */
    if (!find_handle(handle_id)) {
        logf("  handle already closed");
        return true;
    }
#endif
    LOGFQUEUE("buffering >| Q_CLOSE_HANDLE %d", handle_id);
    return queue_send(&buffering_queue, Q_CLOSE_HANDLE, handle_id);
}

/* Backend to bufseek and bufadvance. Call only in response to
   Q_REBUFFER_HANDLE! */
static void rebuffer_handle(int handle_id, off_t newpos)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h) {
        queue_reply(&buffering_queue, ERR_HANDLE_NOT_FOUND);
        return;
    }

    /* Check that we still need to do this since the request could have
       possibly been met by this time */
    if (newpos >= h->start && newpos <= h->end) {
        h->ridx = ringbuf_add(h->data, newpos - h->start);
        h->pos = newpos;
        queue_reply(&buffering_queue, 0);
        return;
    }

    /* When seeking forward off of the buffer, if it is a short seek attempt
       to avoid rebuffering the whole track, just read enough to satisfy */
    off_t amount = newpos - h->pos;

    if (amount > 0 && amount <= BUFFERING_DEFAULT_FILECHUNK) {
        h->ridx = ringbuf_add(h->data, newpos - h->start);
        h->pos = newpos;

        if (buffer_handle(handle_id, amount + 1) && h->end >= h->pos) {
            /* It really did succeed */
            queue_reply(&buffering_queue, 0);
            buffer_handle(handle_id, 0); /* Ok, try the rest */
            return;
        }
        /* Data collision or other file error - must reset */

        if (newpos > h->filesize)
            newpos = h->filesize; /* file truncation happened above */
    }

    size_t next = ringbuf_offset(h->next ?: first_handle);

#ifdef STORAGE_WANTS_ALIGN
    /* Strip alignment padding then redo */
    size_t new_index = ringbuf_add(ringbuf_offset(h), sizeof (*h));

    /* Align to desired storage alignment if space permits - handle could
       have been shrunken too close to the following one after a previous
       rebuffer. */
    size_t alignment_pad = STORAGE_OVERLAP((uintptr_t)newpos -
                                           (uintptr_t)ringbuf_ptr(new_index));

    if (ringbuf_add_cross(new_index, alignment_pad, next) >= 0)
        alignment_pad = 0; /* Forego storage alignment this time */

    new_index = ringbuf_add(new_index, alignment_pad);
#else
    /* Just clear the data buffer */
    size_t new_index = h->data;
#endif /* STORAGE_WANTS_ALIGN */

    /* Reset the handle to its new position */
    h->ridx = h->widx = h->data = new_index;
    h->start = h->pos = h->end = newpos;

    if (h->fd >= 0)
        lseek(h->fd, newpos, SEEK_SET);

    off_t filerem = h->filesize - newpos;
    if (h->next && ringbuf_add_cross(new_index, filerem, next) > 0) {
        /* There isn't enough space to rebuffer all of the track from its new
           offset, so we ask the user to free some */
        DEBUGF("%s(): space is needed\n", __func__);
        send_event(BUFFER_EVENT_REBUFFER, &(int){ handle_id });
    }

    /* Now we do the rebuffer */
    queue_reply(&buffering_queue, 0);
    buffer_handle(handle_id, 0);
}

/* Backend to bufseek and bufadvance */
static int seek_handle(struct memory_handle *h, off_t newpos)
{
    if ((newpos < h->start || newpos >= h->end) &&
        (newpos < h->filesize || h->end < h->filesize)) {
        /* access before or after buffered data and not to end of file or file
           is not buffered to the end -- a rebuffer is needed. */
        return queue_send(&buffering_queue, Q_REBUFFER_HANDLE,
                (intptr_t)&(struct buf_message_data){ h->id, newpos });
    }
    else {
        h->ridx = ringbuf_add(h->data, newpos - h->start);
        h->pos = newpos;
        return 0;
    }
}
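
/* Illustrative example (not part of the build): with a handle buffered from
 * start == 0 to end == 1 MiB of a 4 MiB file, seek_handle(h, 512 KiB) only
 * moves ridx and pos locally, while seek_handle(h, 3 MiB) falls outside the
 * buffered range and synchronously asks the buffering thread to rebuffer via
 * Q_REBUFFER_HANDLE.
 */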

/* Set the reading index in a handle (relative to the start of the file).
   Access before the available data will trigger a rebuffer.
   Return 0 for success and for failure:
     ERR_HANDLE_NOT_FOUND if the handle wasn't found
     ERR_INVALID_VALUE if the new requested position was beyond the end of
     the file
*/
int bufseek(int handle_id, size_t newpos)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (newpos > (size_t)h->filesize)
        return ERR_INVALID_VALUE;

    return seek_handle(h, newpos);
}

/* Advance the reading index in a handle (relative to its current position).
   Return 0 for success and for failure:
     ERR_HANDLE_NOT_FOUND if the handle wasn't found
     ERR_INVALID_VALUE if the new requested position was before the beginning
     or beyond the end of the file
*/
int bufadvance(int handle_id, off_t offset)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    off_t pos = h->pos;

    if ((offset < 0 && offset < -pos) ||
        (offset >= 0 && offset > h->filesize - pos))
        return ERR_INVALID_VALUE;

    return seek_handle(h, pos + offset);
}

/* Get the read position from the start of the file
   Returns the offset from byte 0 of the file and for failure:
     ERR_HANDLE_NOT_FOUND if the handle wasn't found
*/
off_t bufftell(int handle_id)
{
    const struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    return h->pos;
}
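
/* Illustrative usage sketch (not part of this file): how a reader might
   combine bufseek, bufadvance and bufftell. Assumes "id" is a valid handle
   id obtained elsewhere (e.g. from bufopen); error handling abbreviated.

       int err = bufseek(id, 0);        // rewind to the start of the file
       if (err == 0)
           err = bufadvance(id, 4096);  // skip a hypothetical 4 KiB header
       if (err == 0)
           logf("now at %ld", (long)bufftell(id));  // prints 4096
*/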

/* Used by bufread and bufgetdata to prepare the buffer and retrieve the
 * actual amount of data available for reading. It does range checks on
 * size and returns a valid (and explicit) amount of data for reading */
static struct memory_handle *prep_bufdata(int handle_id, size_t *size,
                                          bool guardbuf_limit)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return NULL;

    if (h->pos >= h->filesize) {
        /* File is finished reading */
        *size = 0;
        return h;
    }

    off_t realsize = *size;
    off_t filerem = h->filesize - h->pos;

    if (realsize <= 0 || realsize > filerem)
        realsize = filerem; /* clip to eof */

    if (guardbuf_limit && realsize > GUARD_BUFSIZE) {
        logf("data request > guardbuf");
        /* If more than the size of the guard buffer is requested and this is
         * a bufgetdata, limit to GUARD_BUFSIZE over the end of the buffer */
        realsize = MIN((size_t)realsize, buffer_len - h->ridx + GUARD_BUFSIZE);
        /* this ensures *size <= buffer_len - h->ridx + GUARD_BUFSIZE */
    }

    off_t end = h->end;
    off_t wait_end = h->pos + realsize;

    if (end < wait_end && end < h->filesize) {
        /* Wait for the data to be ready */
        unsigned int request = 1;

        do
        {
            if (--request == 0) {
                request = 100;
                /* Data (still) isn't ready; ping the buffering thread */
                LOGFQUEUE("buffering >| Q_START_FILL %d", handle_id);
                queue_send(&buffering_queue, Q_START_FILL, handle_id);
            }

            sleep(0);
            /* it is not safe for a non-buffering thread to sleep while
             * holding a handle */
            h = find_handle(handle_id);
            if (!h)
                return NULL;

            if (h->signaled != 0)
                return NULL; /* Wait must be abandoned */

            end = h->end;
        }
        while (end < wait_end && end < h->filesize);

        filerem = h->filesize - h->pos;
        if (realsize > filerem)
            realsize = filerem;
    }

    *size = realsize;
    return h;
}

/* Note: It is safe for the thread responsible for handling the rebuffer
 * cleanup request to call bufread or bufgetdata only when the data will
 * be available -- not if it could be blocked waiting for it in prep_bufdata.
 * It should be apparent that if said thread is being forced to wait for
 * buffering but has not yet responded to the cleanup request, the space
 * can never be cleared to allow further reading of the file, because the
 * thread is no longer listening to callbacks. */

/* Copy data from the given handle to the dest buffer.
   Return the number of bytes copied or < 0 for failure (handle not found).
   The caller is blocked until the requested amount of data is available.
*/
ssize_t bufread(int handle_id, size_t size, void *dest)
{
    const struct memory_handle *h =
        prep_bufdata(handle_id, &size, false);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (h->ridx + size > buffer_len) {
        /* the data wraps around the end of the buffer */
        size_t read = buffer_len - h->ridx;
        memcpy(dest, ringbuf_ptr(h->ridx), read);
        memcpy(dest + read, ringbuf_ptr(0), size - read);
    } else {
        memcpy(dest, ringbuf_ptr(h->ridx), size);
    }

    return size;
}
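
/* Illustrative usage sketch (not part of this file): draining a handle with
   bufread plus bufadvance. "id" is assumed to be a valid handle id and
   process() a hypothetical consumer; error handling abbreviated. Note that
   bufread copies data but does not move the read index itself.

       unsigned char chunk[4096];
       ssize_t n;
       while ((n = bufread(id, sizeof(chunk), chunk)) > 0) {
           process(chunk, n);      // hypothetical consumer
           bufadvance(id, n);      // advance past the data just copied
       }
*/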

/* Update the "data" pointer to make the handle's data available to the caller.
   Return the length of the available linear data or < 0 for failure (handle
   not found).
   The caller is blocked until the requested amount of data is available.
   size is the amount of linear data requested. It can be 0 to get as
   much as possible.
   The guard buffer may be used to provide the requested size. This means it's
   unsafe to request more than the size of the guard buffer.
*/
ssize_t bufgetdata(int handle_id, size_t size, void **data)
{
    struct memory_handle *h =
        prep_bufdata(handle_id, &size, true);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (h->ridx + size > buffer_len) {
        /* the data wraps around the end of the buffer:
           use the guard buffer to provide the requested amount of data. */
        size_t copy_n = h->ridx + size - buffer_len;
        /* prep_bufdata ensures
           adjusted_size <= buffer_len - h->ridx + GUARD_BUFSIZE,
           so copy_n <= GUARD_BUFSIZE */
        memcpy(guard_buffer, ringbuf_ptr(0), copy_n);
    }

    if (data)
        *data = ringbuf_ptr(h->ridx);

    return size;
}
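
/* Illustrative usage sketch (not part of this file): zero-copy access via
   bufgetdata. A codec-style reader asks for a linear window, consumes it in
   place, then advances. "id" and parse() are assumptions for the example;
   requests must stay within GUARD_BUFSIZE.

       void *p;
       ssize_t n = bufgetdata(id, 1024, &p);
       if (n > 0) {
           parse(p, n);       // data is linear even across the buffer wrap
           bufadvance(id, n); // release the window by moving the read index
       }
*/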

ssize_t bufgettail(int handle_id, size_t size, void **data)
{
    if (thread_self() != buffering_thread_id)
        return ERR_WRONG_THREAD; /* only from buffering thread */

    /* We don't support tail requests of > GUARD_BUFSIZE, for simplicity */
    if (size > GUARD_BUFSIZE)
        return ERR_INVALID_VALUE;

    const struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (h->end >= h->filesize) {
        size_t tidx = ringbuf_sub(h->widx, size);

        if (tidx + size > buffer_len) {
            size_t copy_n = tidx + size - buffer_len;
            memcpy(guard_buffer, ringbuf_ptr(0), copy_n);
        }

        *data = ringbuf_ptr(tidx);
    }
    else {
        size = ERR_HANDLE_NOT_DONE;
    }

    return size;
}

ssize_t bufcuttail(int handle_id, size_t size)
{
    if (thread_self() != buffering_thread_id)
        return ERR_WRONG_THREAD; /* only from buffering thread */

    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (h->end >= h->filesize) {
        /* Cannot trim to before read position */
        size_t available = h->end - MAX(h->start, h->pos);
        if (available < size)
            size = available;

        h->widx = ringbuf_sub(h->widx, size);
        h->filesize -= size;
        h->end -= size;
    } else {
        size = ERR_HANDLE_NOT_DONE;
    }

    return size;
}
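
/* Illustrative usage sketch (not part of this file): bufgettail/bufcuttail
   are buffering-thread-only helpers for inspecting and trimming the end of
   a fully buffered handle -- e.g. stripping trailing bytes once buffering
   has finished. The size and the tag check are assumptions.

       void *tail;
       if (bufgettail(id, 128, &tail) == 128 &&
           looks_like_trailing_tag(tail, 128))  // hypothetical check
           bufcuttail(id, 128);                 // shrink handle by 128 bytes
*/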

/*
SECONDARY EXPORTED FUNCTIONS
============================

buf_handle_offset
buf_set_base_handle
buf_handle_data_type
buf_handle_remaining
buf_is_handle
buf_pin_handle
buf_signal_handle
buf_length
buf_used
buf_set_watermark
buf_get_watermark

These functions are exported to allow interaction with the buffer.
They take care of the content of the structs, and rely on the linked-list
management functions for all the actual handle management work.
*/

ssize_t buf_handle_offset(int handle_id)
{
    const struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;
    return h->start;
}

void buf_set_base_handle(int handle_id)
{
    mutex_lock(&llist_mutex);
    base_handle_id = handle_id;
    mutex_unlock(&llist_mutex);
}

enum data_type buf_handle_data_type(int handle_id)
{
    const struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return TYPE_UNKNOWN;
    return h->type;
}

ssize_t buf_handle_remaining(int handle_id)
{
    const struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return ERR_HANDLE_NOT_FOUND;
    return h->filesize - h->end;
}

bool buf_is_handle(int handle_id)
{
    return find_handle(handle_id) != NULL;
}

bool buf_pin_handle(int handle_id, bool pin)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return false;

    if (pin) {
        h->pinned++;
    } else if (h->pinned > 0) {
        h->pinned--;
    }

    return true;
}
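
/* Illustrative usage sketch (not part of this file): pinning maintains a
   count that the buffering code can consult before disturbing a handle's
   data, so pin/unpin calls must balance. "id" is an assumption for the
   example.

       if (buf_pin_handle(id, true)) {     // pinned, count incremented
           ...                             // work on the handle's data
           buf_pin_handle(id, false);      // unpin, count decremented
       }
*/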

bool buf_signal_handle(int handle_id, bool signal)
{
    struct memory_handle *h = find_handle(handle_id);
    if (!h)
        return false;

    h->signaled = signal ? 1 : 0;
    return true;
}

/* Return the size of the ringbuffer */
size_t buf_length(void)
{
    return buffer_len;
}

/* Return the amount of buffer space used */
size_t buf_used(void)
{
    struct memory_handle *first = first_handle;
    if (!first)
        return 0;

    return ringbuf_sub(cur_handle->widx, ringbuf_offset(first));
}

void buf_set_watermark(size_t bytes)
{
    conf_watermark = bytes;
}

size_t buf_get_watermark(void)
{
    return BUF_WATERMARK;
}

/** -- buffer thread helpers -- **/
static void shrink_buffer_inner(struct memory_handle *h)
{
    if (h == NULL)
        return;

    shrink_buffer_inner(h->next);

    shrink_handle(h);
}

static void shrink_buffer(void)
{
    logf("shrink_buffer()");
    shrink_buffer_inner(first_handle);
}

static void NORETURN_ATTR buffering_thread(void)
{
    bool filling = false;
    struct queue_event ev;

    while (true)
    {
        if (num_handles > 0) {
            if (!filling) {
                cancel_cpu_boost();
            }
            queue_wait_w_tmo(&buffering_queue, &ev, filling ? 1 : HZ/2);
        } else {
            filling = false;
            cancel_cpu_boost();
            queue_wait(&buffering_queue, &ev);
        }

        switch (ev.id)
        {
            case Q_START_FILL:
                LOGFQUEUE("buffering < Q_START_FILL %d", (int)ev.data);
                shrink_buffer();
                queue_reply(&buffering_queue, 1);
                if (buffer_handle((int)ev.data, 0)) {
                    filling = true;
                }
                else if (num_handles > 0 && conf_watermark > 0) {
                    update_data_counters(NULL);
                    if (data_counters.useful >= BUF_WATERMARK) {
                        send_event(BUFFER_EVENT_BUFFER_LOW, NULL);
                    }
                }
                break;

            case Q_BUFFER_HANDLE:
                LOGFQUEUE("buffering < Q_BUFFER_HANDLE %d", (int)ev.data);
                queue_reply(&buffering_queue, 1);
                buffer_handle((int)ev.data, 0);
                break;

            case Q_REBUFFER_HANDLE:
            {
                struct buf_message_data *parm =
                    (struct buf_message_data *)ev.data;
                LOGFQUEUE("buffering < Q_REBUFFER_HANDLE %d %ld",
                          parm->handle_id, parm->data);
                rebuffer_handle(parm->handle_id, parm->data);
                break;
            }

            case Q_CLOSE_HANDLE:
                LOGFQUEUE("buffering < Q_CLOSE_HANDLE %d", (int)ev.data);
                queue_reply(&buffering_queue, close_handle((int)ev.data));
                break;

            case Q_HANDLE_ADDED:
                LOGFQUEUE("buffering < Q_HANDLE_ADDED %d", (int)ev.data);
                /* A handle was added: the disk is spinning, so we can fill */
                filling = true;
                break;

            case SYS_TIMEOUT:
                LOGFQUEUE_SYS_TIMEOUT("buffering < SYS_TIMEOUT");
                break;
        }

        if (num_handles == 0 || !queue_empty(&buffering_queue))
            continue;

        update_data_counters(NULL);
#if 0
        /* TODO: This needs to be fixed to use the idle callback; disable it
         * for simplicity until it's done right */
#if MEMORYSIZE > 8
        /* If the disk is spinning, take advantage by filling the buffer */
        else if (storage_disk_is_active()) {
            if (num_handles > 0 && data_counters.useful <= high_watermark)
                send_event(BUFFER_EVENT_BUFFER_LOW, 0);

            if (data_counters.remaining > 0 && buf_used() <= high_watermark) {
                /* This is a new fill, shrink the buffer up first */
                if (!filling)
                    shrink_buffer();
                filling = fill_buffer();
                update_data_counters(NULL);
            }
        }
#endif
#endif

        if (filling) {
            filling = data_counters.remaining > 0 ? fill_buffer() : false;
        } else if (ev.id == SYS_TIMEOUT) {
            if (data_counters.useful < BUF_WATERMARK) {
                /* The buffer is low and we're idle, just watching the levels
                   - call the callbacks to get new data */
                send_event(BUFFER_EVENT_BUFFER_LOW, NULL);

                /* Continue anything else we haven't finished - it might
                   get booted off or stop early because the receiver hasn't
                   had a chance to clear anything yet */
                if (data_counters.remaining > 0) {
                    shrink_buffer();
                    filling = fill_buffer();
                }
            }
        }
    }
}

void INIT_ATTR buffering_init(void)
{
    mutex_init(&llist_mutex);

    /* The thread should absolutely not respond to USB, because if it waits
       first, then it cannot properly service the handles and leaks will
       happen - this is a worker thread and shouldn't need to care about any
       system notifications.
       ***
       Whoever is using buffering should be responsible enough to clear all
       the handles at the right time. */
    queue_init(&buffering_queue, false);
    buffering_thread_id = create_thread( buffering_thread, buffering_stack,
            sizeof(buffering_stack), CREATE_THREAD_FROZEN,
            buffering_thread_name IF_PRIO(, PRIORITY_BUFFERING)
            IF_COP(, CPU));

    queue_enable_queue_send(&buffering_queue, &buffering_queue_sender_list,
                            buffering_thread_id);
}

/* Initialise the buffering subsystem */
bool buffering_reset(char *buf, size_t buflen)
{
    /* Wraps of storage-aligned data must also be storage aligned,
       thus buf and buflen must be aligned to an integer multiple of
       the storage alignment */

    if (buf) {
        buflen -= MIN(buflen, GUARD_BUFSIZE);

        STORAGE_ALIGN_BUFFER(buf, buflen);

        if (!buf || !buflen)
            return false;
    } else {
        buflen = 0;
    }

    send_event(BUFFER_EVENT_BUFFER_RESET, NULL);

    /* If handles weren't closed above, just do it */
    while (num_handles != 0)
        bufclose(first_handle->id);

    buffer = buf;
    buffer_len = buflen;
    guard_buffer = buf + buflen;

    first_handle = NULL;
    cur_handle = NULL;
    cached_handle = NULL;
    num_handles = 0;
    base_handle_id = -1;

    /* Set the high watermark as 75% full... or 25% empty :)
       This is the greatest fullness that will trigger low-buffer events
       no matter what the setting, because high-bitrate files can have
       ludicrous margins that even exceed the buffer size - most commonly
       with a huge anti-skip buffer, but even without that setting,
       staying constantly active in buffering is pointless */
    high_watermark = 3*buflen / 4;

    thread_thaw(buffering_thread_id);

    return true;
}
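
/* Illustrative usage sketch (not part of this file): expected startup order.
   buffering_init() creates the (frozen) worker thread once at boot;
   buffering_reset() then hands over the audio buffer, carves out the guard
   buffer, aligns, and thaws the thread. "audiobuf"/"audiobuf_len" are
   assumptions standing in for the caller's buffer.

       buffering_init();
       if (!buffering_reset(audiobuf, audiobuf_len))
           panicf("buffering_reset failed");
*/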

void buffering_get_debugdata(struct buffering_debug *dbgdata)
{
    struct data_counters dc;
    dbgdata->num_handles = update_data_counters(&dc);
    dbgdata->data_rem = dc.remaining;
    dbgdata->buffered_data = dc.buffered;
    dbgdata->useful_data = dc.useful;
    dbgdata->watermark = BUF_WATERMARK;
}