buflib: Add pinned get/put data functions

These are more efficient than separate pin/unpin calls because
pin count increment and decrement can be done cheaply when the
data pointer is known.

Secondly, pinned access can be made safe against preemption by
hardware interrupts or other CPU cores; buflib_get_data() can't.
This makes it more useful under different threading models and
for SMP targets; both of which are not particularly relevant to
Rockbox now, but might be in the future.

Change-Id: I09284251b83bbbc59ef88a494c8fda26a7f7ef26
This commit is contained in:
Aidan MacDonald 2023-01-14 18:20:59 +00:00
parent 92565e9246
commit 800bc000a0
5 changed files with 109 additions and 35 deletions

View file

@ -168,6 +168,18 @@ unsigned buflib_pin_count(struct buflib_context *ctx, int handle)
return h->pin_count;
}
/* Release one pin reference on 'data', a pointer previously returned
 * by buflib_get_data_pinned().
 *
 * The context's allocation table is scanned linearly for the matching
 * user pointer; a pointer not found in the table is silently ignored
 * (no underflow or validity checking is done here).
 */
void _buflib_malloc_put_data_pinned(struct buflib_context *ctx, void *data)
{
    for (int i = 0; i < ctx->num_allocs; ++i)
    {
        if (ctx->allocs[i].user != data)
            continue;

        ctx->allocs[i].pin_count--;
        return;
    }
}
int buflib_free(struct buflib_context *ctx, int handle)
{
if (handle <= 0)

View file

@ -107,22 +107,14 @@
(PARANOIA_CHECK_LENGTH | \
PARANOIA_CHECK_BLOCK_HANDLE | PARANOIA_CHECK_PINNING)
/* Indices used to access block fields as block[idx_XXX].
 * Every allocation is preceded by these metadata words, in this order. */
enum {
idx_LEN, /* length of the block, in units of union buflib_data; must come first */
idx_HANDLE, /* pointer to this block's entry in the handle table */
idx_OPS, /* pointer to the user-supplied callbacks struct, or NULL */
idx_PIN, /* pin count; a block with pin count > 0 must not be moved */
BUFLIB_NUM_FIELDS, /* number of metadata words preceding the user data */
};
/* Predefined callbacks marking an allocation as "locked": ops is
 * non-NULL but move_callback is NULL, so IS_MOVABLE() is false and
 * compaction will never move the block; a NULL shrink_callback also
 * exempts it from shrink requests. */
struct buflib_callbacks buflib_ops_locked = {
.move_callback = NULL, /* never move this allocation */
.shrink_callback = NULL, /* never ask it to shrink */
.sync_callback = NULL, /* block never moves, so nothing to sync */
};
#define IS_MOVABLE(a) (!a[idx_OPS].ops || a[idx_OPS].ops->move_callback)
#define IS_MOVABLE(a) \
(!a[BUFLIB_IDX_OPS].ops || a[BUFLIB_IDX_OPS].ops->move_callback)
static union buflib_data* find_first_free(struct buflib_context *ctx);
static union buflib_data* find_block_before(struct buflib_context *ctx,
@ -281,8 +273,7 @@ union buflib_data* handle_to_block(struct buflib_context* ctx, int handle)
if (!ptr)
return NULL;
union buflib_data *data = ALIGN_DOWN(ptr, sizeof(*data));
return data - BUFLIB_NUM_FIELDS;
return _buflib_get_block_header(ptr);
}
/* Shrink the handle table, returning true if its size was reduced, false if
@ -318,9 +309,9 @@ move_block(struct buflib_context* ctx, union buflib_data* block, int shift)
union buflib_data *new_block;
check_block_handle(ctx, block);
union buflib_data *h_entry = block[idx_HANDLE].handle;
union buflib_data *h_entry = block[BUFLIB_IDX_HANDLE].handle;
if (!IS_MOVABLE(block) || block[idx_PIN].pincount > 0)
if (!IS_MOVABLE(block) || block[BUFLIB_IDX_PIN].pincount > 0)
return false;
int handle = ctx->handle_table - h_entry;
@ -329,7 +320,7 @@ move_block(struct buflib_context* ctx, union buflib_data* block, int shift)
new_block = block + shift;
new_start = h_entry->alloc + shift*sizeof(union buflib_data);
struct buflib_callbacks *ops = block[idx_OPS].ops;
struct buflib_callbacks *ops = block[BUFLIB_IDX_OPS].ops;
/* If move must be synchronized with use, user should have specified a
callback that handles this */
@ -459,12 +450,12 @@ buflib_compact_and_shrink(struct buflib_context *ctx, unsigned shrink_hints)
if (this->val < 0)
continue;
struct buflib_callbacks *ops = this[idx_OPS].ops;
struct buflib_callbacks *ops = this[BUFLIB_IDX_OPS].ops;
if (!ops || !ops->shrink_callback)
continue;
check_block_handle(ctx, this);
union buflib_data* h_entry = this[idx_HANDLE].handle;
union buflib_data* h_entry = this[BUFLIB_IDX_HANDLE].handle;
int handle = ctx->handle_table - h_entry;
unsigned pos_hints = shrink_hints & BUFLIB_SHRINK_POS_MASK;
@ -596,7 +587,7 @@ handle_alloc:
*/
union buflib_data* last_block = find_block_before(ctx,
ctx->alloc_end, false);
struct buflib_callbacks* ops = last_block[idx_OPS].ops;
struct buflib_callbacks* ops = last_block[BUFLIB_IDX_OPS].ops;
unsigned hints = 0;
if (!ops || !ops->shrink_callback)
{ /* the last one isn't shrinkable
@ -666,10 +657,10 @@ buffer_alloc:
/* Set up the allocated block, by marking the size allocated, and storing
* a pointer to the handle.
*/
block[idx_LEN].val = size;
block[idx_HANDLE].handle = handle;
block[idx_OPS].ops = ops;
block[idx_PIN].pincount = 0;
block[BUFLIB_IDX_LEN].val = size;
block[BUFLIB_IDX_HANDLE].handle = handle;
block[BUFLIB_IDX_OPS].ops = ops;
block[BUFLIB_IDX_PIN].pincount = 0;
handle->alloc = (char*)&block[BUFLIB_NUM_FIELDS];
@ -916,10 +907,10 @@ buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t ne
metadata_size.val = aligned_oldstart - block;
/* update val and the handle table entry */
new_block = aligned_newstart - metadata_size.val;
block[idx_LEN].val = new_next_block - new_block;
block[BUFLIB_IDX_LEN].val = new_next_block - new_block;
check_block_handle(ctx, block);
block[idx_HANDLE].handle->alloc = newstart;
block[BUFLIB_IDX_HANDLE].handle->alloc = newstart;
if (block != new_block)
{
/* move metadata over, i.e. pointer to handle table entry and name
@ -964,7 +955,7 @@ void buflib_pin(struct buflib_context *ctx, int handle)
buflib_panic(ctx, "invalid handle pin: %d", handle);
union buflib_data *data = handle_to_block(ctx, handle);
data[idx_PIN].pincount++;
data[BUFLIB_IDX_PIN].pincount++;
}
void buflib_unpin(struct buflib_context *ctx, int handle)
@ -975,11 +966,11 @@ void buflib_unpin(struct buflib_context *ctx, int handle)
union buflib_data *data = handle_to_block(ctx, handle);
if (BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING)
{
if (data[idx_PIN].pincount == 0)
if (data[BUFLIB_IDX_PIN].pincount == 0)
buflib_panic(ctx, "handle pin underflow: %d", handle);
}
data[idx_PIN].pincount--;
data[BUFLIB_IDX_PIN].pincount--;
}
unsigned buflib_pin_count(struct buflib_context *ctx, int handle)
@ -988,7 +979,7 @@ unsigned buflib_pin_count(struct buflib_context *ctx, int handle)
buflib_panic(ctx, "invalid handle: %d", handle);
union buflib_data *data = handle_to_block(ctx, handle);
return data[idx_PIN].pincount;
return data[BUFLIB_IDX_PIN].pincount;
}
#ifdef BUFLIB_DEBUG_GET_DATA
@ -1062,13 +1053,13 @@ static void check_block_length(struct buflib_context *ctx,
{
if (BUFLIB_PARANOIA & PARANOIA_CHECK_LENGTH)
{
intptr_t length = block[idx_LEN].val;
intptr_t length = block[BUFLIB_IDX_LEN].val;
/* Check the block length does not pass beyond the end */
if (length == 0 || block > ctx->alloc_end - abs(length))
{
buflib_panic(ctx, "block len wacky [%p]=%ld",
(void*)&block[idx_LEN], (long)length);
(void*)&block[BUFLIB_IDX_LEN], (long)length);
}
}
}
@ -1078,8 +1069,8 @@ static void check_block_handle(struct buflib_context *ctx,
{
if (BUFLIB_PARANOIA & PARANOIA_CHECK_BLOCK_HANDLE)
{
intptr_t length = block[idx_LEN].val;
union buflib_data *h_entry = block[idx_HANDLE].handle;
intptr_t length = block[BUFLIB_IDX_LEN].val;
union buflib_data *h_entry = block[BUFLIB_IDX_HANDLE].handle;
/* Check the handle pointer is properly aligned */
/* TODO: Can we ensure the compiler doesn't optimize this out?
@ -1088,14 +1079,14 @@ static void check_block_handle(struct buflib_context *ctx,
if (!IS_ALIGNED((uintptr_t)h_entry, alignof(*h_entry)))
{
buflib_panic(ctx, "handle unaligned [%p]=%p",
&block[idx_HANDLE], h_entry);
&block[BUFLIB_IDX_HANDLE], h_entry);
}
/* Check the pointer is actually inside the handle table */
if (h_entry < ctx->last_handle || h_entry >= ctx->handle_table)
{
buflib_panic(ctx, "handle out of bounds [%p]=%p",
&block[idx_HANDLE], h_entry);
&block[BUFLIB_IDX_HANDLE], h_entry);
}
/* Now check the allocation is within the block.

View file

@ -316,6 +316,32 @@ void *buflib_get_data(struct buflib_context *ctx, int handle);
static inline void *buflib_get_data(struct buflib_context *ctx, int handle);
#endif
/**
* \brief Get a pinned pointer to a buflib allocation
* \param ctx Buflib context of the allocation
* \param handle Handle identifying the allocation
* \return Pointer to the allocation's memory.
*
* Functionally equivalent to buflib_pin() followed by buflib_get_data(),
* but this call is more efficient and should be preferred over separate
* calls.
*
* To unpin the data, call buflib_put_data_pinned() and pass the pointer
* returned by this function.
*/
static inline void *buflib_get_data_pinned(struct buflib_context *ctx, int handle);
/**
* \brief Release a pinned pointer to a buflib allocation
* \param ctx Buflib context of the allocation
* \param data Pointer returned by buflib_get_data_pinned()
*
* Decrements the pin count, allowing the buffer to be moved once the
* pin count drops to zero. This is more efficient than buflib_unpin()
* and should be preferred when you have a pointer to the buflib data.
*/
static inline void buflib_put_data_pinned(struct buflib_context *ctx, void *data);
/**
* \brief Shift allocations up to free space at the start of the pool
* \param ctx Context to operate on

View file

@ -50,4 +50,16 @@ static inline void *buflib_get_data(struct buflib_context *ctx, int handle)
}
#endif
/* Pin the allocation, then return a pointer to its data.
 * Equivalent to buflib_pin() followed by buflib_get_data(); release
 * with buflib_put_data_pinned() on the returned pointer. */
static inline void *buflib_get_data_pinned(struct buflib_context *ctx, int handle)
{
    buflib_pin(ctx, handle);

    void *data = buflib_get_data(ctx, handle);
    return data;
}
/* Backend implementation, defined in the malloc-based buflib source file. */
void _buflib_malloc_put_data_pinned(struct buflib_context *ctx, void *data);
/* Release a pinned data pointer obtained from buflib_get_data_pinned(),
 * decrementing the allocation's pin count. */
static inline void buflib_put_data_pinned(struct buflib_context *ctx, void *data)
{
_buflib_malloc_put_data_pinned(ctx, data);
}
#endif /* _BUFLIB_MALLOC_H_ */

View file

@ -30,6 +30,17 @@
# error "include buflib.h instead"
#endif
#include "system.h"
/* Indices used to access block fields as block[BUFLIB_IDX_XXX].
 * Every allocation is preceded by these metadata words, in this order;
 * the user data begins at block[BUFLIB_NUM_FIELDS]. */
enum {
BUFLIB_IDX_LEN, /* length of the block, in units of union buflib_data; must come first */
BUFLIB_IDX_HANDLE, /* pointer to this block's entry in the handle table */
BUFLIB_IDX_OPS, /* pointer to the user-supplied callbacks struct, or NULL */
BUFLIB_IDX_PIN, /* pin count; a block with pin count > 0 must not be moved */
BUFLIB_NUM_FIELDS, /* number of metadata words preceding the user data */
};
union buflib_data
{
intptr_t val; /* length of the block in n*sizeof(union buflib_data).
@ -52,7 +63,7 @@ struct buflib_context
bool compact;
};
#define BUFLIB_ALLOC_OVERHEAD (4 * sizeof(union buflib_data))
#define BUFLIB_ALLOC_OVERHEAD (BUFLIB_NUM_FIELDS * sizeof(union buflib_data))
#ifndef BUFLIB_DEBUG_GET_DATA
static inline void *buflib_get_data(struct buflib_context *ctx, int handle)
@ -61,4 +72,26 @@ static inline void *buflib_get_data(struct buflib_context *ctx, int handle)
}
#endif
/* Map a user data pointer back to its block header.
 * The pointer is aligned down to the alignment of union buflib_data,
 * then stepped back over the BUFLIB_NUM_FIELDS metadata words that
 * precede the user data in every allocation. Assumes 'data' was
 * returned by buflib for a live allocation -- no validation is done. */
static inline union buflib_data *_buflib_get_block_header(void *data)
{
union buflib_data *bd = ALIGN_DOWN(data, sizeof(*bd));
return bd - BUFLIB_NUM_FIELDS;
}
/* Return a pointer to the allocation's data and raise its pin count
 * in a single step; cheaper than separate buflib_pin() plus
 * buflib_get_data() calls because the block header is located
 * directly from the data pointer. */
static inline void *buflib_get_data_pinned(struct buflib_context *ctx, int handle)
{
    void *data = buflib_get_data(ctx, handle);
    _buflib_get_block_header(data)[BUFLIB_IDX_PIN].pincount++;
    return data;
}
/* Drop one pin reference taken by buflib_get_data_pinned().
 * The block header is located from the data pointer itself, so the
 * context argument is unused here. */
static inline void buflib_put_data_pinned(struct buflib_context *ctx, void *data)
{
    (void)ctx;

    union buflib_data *header = _buflib_get_block_header(data);
    header[BUFLIB_IDX_PIN].pincount--;
}
#endif /* _BUFLIB_MEMPOOL_H_ */