/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * This is a memory allocator designed to provide reasonable management of free
 * space and fast access to allocated data. More than one allocator can be used
 * at a time by initializing multiple contexts.
 *
 * Copyright (C) 2009 Andrew Mahone
 * Copyright (C) 2011 Thomas Martitz
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include <stdarg.h>
#include <stdlib.h> /* for abs() */
#include <stdio.h>  /* for snprintf() */
#include <stddef.h> /* for ptrdiff_t */
#include "buflib.h"
#include "string-extra.h" /* strmemccpy() */
#include "debug.h"
#include "panic.h"
#include "system.h" /* for ALIGN_*() */

/* FIXME: This comment is pretty out of date now and wrong in some details.
 *
 * The main goal of this design is fast fetching of the pointer for a handle.
 * For that reason, the handles are stored in a table at the end of the buffer
 * with a fixed address, so that returning the pointer for a handle is a simple
 * table lookup. To reduce the frequency with which allocated blocks will need
 * to be moved to free space, allocations grow up in address from the start of
 * the buffer. The buffer is treated as an array of union buflib_data. Blocks
 * start with a length marker, which is included in their length. Free blocks
 * are marked by a negative length. Allocated blocks have a positive length
 * marker, followed by additional metadata: first a pointer
 * (union buflib_data*) to the corresponding handle table entry, so that it can
 * be quickly found and updated during compaction. After that follows
 * the pointer to the struct buflib_callbacks associated with this allocation
 * (may be NULL). That pointer is followed by a variable length character array
 * containing the nul-terminated string identifier of the allocation. After this
 * array there's a length marker for the length of the character array including
 * this length marker (counted in n*sizeof(union buflib_data)), which allows
 * finding the start of the character array (and therefore the start of the
 * entire block) when only the handle or payload start is known.
 *
 * UPDATE BUFLIB_ALLOC_OVERHEAD (buflib.h) WHEN THE METADATA CHANGES!
 *
 * Example:
 * |<- alloc block #1 ->|<- unalloc block ->|<- alloc block #2 ->|<-handle table->|
 * |L|H|C|cccc|L2|crc|XXXXXX|-L|YYYYYYYYYYYYYYYY|L|H|C|cc|L2|crc|XXXXXXXXXXXXX|AAA|
 *
 * L   - length marker (negative if the block is unallocated)
 * H   - handle table entry pointer
 * C   - pointer to struct buflib_callbacks
 * c   - variable sized string identifier
 * L2  - length of the metadata
 * crc - crc32 protecting buflib metadata integrity
 * X   - actual payload
 * Y   - unallocated space
 *
 * A - pointer to start of payload (first X) in the handle table (may be null)
 *
 * The blocks can be walked by jumping the abs() of the L length marker, i.e.
 *     union buflib_data* L;
 *     for(L = start; L < end; L += abs(L->val)) { .... }
 *
 *
 * The allocator functions are passed a context struct so that two allocators
 * can be run; for example, one per core may be used, with convenience wrappers
 * for the single-allocator case that use a predefined context.
 */
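
/* For orientation against the out-of-date sketch above: the code below
 * addresses the per-block header through the BUFLIB_IDX_* indices defined
 * in buflib.h. As actually used in this file:
 *
 *   block[BUFLIB_IDX_LEN].val        - length marker, in buflib_data units
 *   block[BUFLIB_IDX_HANDLE].handle  - pointer into the handle table
 *   block[BUFLIB_IDX_OPS].ops        - struct buflib_callbacks* (may be NULL)
 *   block[BUFLIB_IDX_PIN].pincount   - pin count; nonzero prevents moving
 *
 * and the user payload begins at &block[BUFLIB_NUM_FIELDS].
 */
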
#define B_ALIGN_DOWN(x) \
    ALIGN_DOWN(x, sizeof(union buflib_data))

#define B_ALIGN_UP(x) \
    ALIGN_UP(x, sizeof(union buflib_data))

#ifdef DEBUG
#include <stdio.h>
#define BDEBUGF DEBUGF
#else
#define BDEBUGF(...) do { } while(0)
#endif

#define BPANICF panicf

/* Available paranoia checks */
#define PARANOIA_CHECK_LENGTH       (1 << 0)
#define PARANOIA_CHECK_BLOCK_HANDLE (1 << 1)
#define PARANOIA_CHECK_PINNING      (1 << 2)
/* Bitmask of enabled paranoia checks */
#define BUFLIB_PARANOIA \
    (PARANOIA_CHECK_LENGTH | \
     PARANOIA_CHECK_BLOCK_HANDLE | PARANOIA_CHECK_PINNING)

struct buflib_callbacks buflib_ops_locked = {
    .move_callback = NULL,
    .shrink_callback = NULL,
    .sync_callback = NULL,
};

#define IS_MOVABLE(a) \
    (!a[BUFLIB_IDX_OPS].ops || a[BUFLIB_IDX_OPS].ops->move_callback)
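
/* A minimal sketch of how buflib_ops_locked is meant to be used (the handle
 * name is hypothetical): passing it as the ops argument yields an allocation
 * with a non-NULL ops pointer but no move_callback, so IS_MOVABLE() is false
 * and the buffer stays put until it is freed or shrunk by its owner:
 *
 *     int h = buflib_alloc_ex(&ctx, size, &buflib_ops_locked);
 *     void *p = buflib_get_data(&ctx, h);
 *     // p stays stable across other allocations; compaction
 *     // cannot relocate this block.
 */
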
static union buflib_data* find_first_free(struct buflib_context *ctx);
static union buflib_data* find_block_before(struct buflib_context *ctx,
                                            union buflib_data* block,
                                            bool is_free);

/* Check the length of a block to ensure it does not go beyond the end
 * of the allocated area. The block can be either allocated or free.
 *
 * This verifies that it is safe to iterate to the next block in a loop.
 */
static void check_block_length(struct buflib_context *ctx,
                               union buflib_data *block);

/* Check a block's handle pointer to ensure it is within the handle
 * table, and that the user pointer is pointing within the block.
 *
 * This verifies that it is safe to dereference the entry and ensures
 * that the pointer in the handle table points within the block, as
 * determined by the length field at the start of the block.
 */
static void check_block_handle(struct buflib_context *ctx,
                               union buflib_data *block);

/* Initialize buffer manager */
void
buflib_init(struct buflib_context *ctx, void *buf, size_t size)
{
    union buflib_data *bd_buf = buf;
    BDEBUGF("buflib initialized with %lu.%02lu kiB\n",
            (unsigned long)size / 1024,
            ((unsigned long)size % 1024) * 100 / 1024);

    /* Align on sizeof(buflib_data), to prevent unaligned access */
    ALIGN_BUFFER(bd_buf, size, sizeof(union buflib_data));
    size /= sizeof(union buflib_data);
    /* The handle table is initialized with no entries */
    ctx->handle_table = bd_buf + size;
    ctx->last_handle = bd_buf + size;
    ctx->first_free_handle = bd_buf + size - 1;
    ctx->buf_start = bd_buf;
    /* A marker is needed for the end of allocated data, to make sure that it
     * does not collide with the handle table, and to detect end-of-buffer.
     */
    ctx->alloc_end = bd_buf;
    ctx->compact = true;

    if (size == 0)
    {
        BPANICF("buflib_init error (CTX:%p, %zd bytes):\n", ctx,
                (ctx->handle_table - ctx->buf_start) * sizeof(union buflib_data));
    }
}
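
/* A minimal usage sketch (buffer size and names are hypothetical):
 *
 *     static union buflib_data storage[4096];
 *     struct buflib_context ctx;
 *     buflib_init(&ctx, storage, sizeof(storage));
 *
 *     int h = buflib_alloc(&ctx, 1000);
 *     if (h > 0)
 *     {
 *         char *data = buflib_get_data(&ctx, h);
 *         // ... use data; re-fetch it after anything that may compact ...
 *         buflib_free(&ctx, h);
 *     }
 */
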
bool buflib_context_relocate(struct buflib_context *ctx, void *buf)
{
    union buflib_data *handle, *bd_buf = buf;
    ptrdiff_t diff = bd_buf - ctx->buf_start;

    /* cannot continue if the buffer is not aligned, since we would need
     * to reduce the size of the buffer for aligning */
    if (!IS_ALIGNED((uintptr_t)buf, sizeof(union buflib_data)))
        return false;

    /* relocate the handle table entries */
    for (handle = ctx->last_handle; handle < ctx->handle_table; handle++)
    {
        if (handle->alloc)
            handle->alloc += diff * sizeof(union buflib_data);
    }
    /* relocate the pointers in the context */
    ctx->handle_table += diff;
    ctx->last_handle += diff;
    ctx->first_free_handle += diff;
    ctx->buf_start += diff;
    ctx->alloc_end += diff;

    return true;
}

static void buflib_panic(struct buflib_context *ctx, const char *message, ...)
{
    char buf[128];
    va_list ap;

    va_start(ap, message);
    vsnprintf(buf, sizeof(buf), message, ap);
    va_end(ap);

    BPANICF("buflib error (CTX:%p, %zd bytes):\n%s", ctx,
            (ctx->handle_table - ctx->buf_start) * sizeof(union buflib_data), buf);
}

/* Allocate a new handle, returning NULL on failure */
static inline
union buflib_data* handle_alloc(struct buflib_context *ctx)
{
    union buflib_data *handle;
    /* first_free_handle is a lower bound on free handles, work through the
     * table from there until a handle containing NULL is found, or the end
     * of the table is reached.
     */
    for (handle = ctx->first_free_handle; handle >= ctx->last_handle; handle--)
        if (!handle->alloc)
            break;
    /* If the search went past the end of the table, it means we need to extend
     * the table to get a new handle.
     */
    if (handle < ctx->last_handle)
    {
        if (handle >= ctx->alloc_end)
            ctx->last_handle--;
        else
        {
            /* We know the table is full, so update first_free_handle */
            ctx->first_free_handle = ctx->last_handle - 1;
            return NULL;
        }
    }

    /* We know there are no free handles between the old first_free_handle
     * and the found handle, therefore we can update first_free_handle */
    ctx->first_free_handle = handle - 1;

    /* We need to set the table entry to a non-NULL value to ensure that
     * compactions triggered by an allocation do not compact the handle
     * table and delete this handle. */
    handle->val = -1;

    return handle;
}

/* Free one handle, shrinking the handle table if it's the last one */
static inline
void handle_free(struct buflib_context *ctx, union buflib_data *handle)
{
    handle->alloc = 0;
    /* Update free handle lower bound if this handle has a lower index than the
     * old one.
     */
    if (handle > ctx->first_free_handle)
        ctx->first_free_handle = handle;
    if (handle == ctx->last_handle)
        ctx->last_handle++;
    else
        ctx->compact = false;
}

/* Get the start block of an allocation */
static inline
union buflib_data* handle_to_block(struct buflib_context* ctx, int handle)
{
    void *ptr = buflib_get_data(ctx, handle);

    /* this is a valid case for shrinking if the handle
     * was freed by the shrink callback */
    if (!ptr)
        return NULL;

    return _buflib_get_block_header(ptr);
}

/* Shrink the handle table, returning true if its size was reduced, false if
 * not
 */
static inline bool handle_table_shrink(struct buflib_context *ctx)
{
    union buflib_data *handle;
    union buflib_data *old_last = ctx->last_handle;

    for (handle = ctx->last_handle; handle != ctx->handle_table; ++handle)
        if (handle->alloc)
            break;

    if (handle > ctx->first_free_handle)
        ctx->first_free_handle = handle - 1;

    ctx->last_handle = handle;
    return handle != old_last;
}

/* If shift is non-zero, it represents the number of places to move
 * blocks in memory. Calculate the new address for this block,
 * update its entry in the handle table, and then move its contents.
 *
 * Returns false if moving was unsuccessful
 * (NULL callback or BUFLIB_CB_CANNOT_MOVE was returned)
 */
static bool
move_block(struct buflib_context* ctx, union buflib_data* block, int shift)
{
    char* new_start;
    union buflib_data *new_block;

    check_block_handle(ctx, block);
    union buflib_data *h_entry = block[BUFLIB_IDX_HANDLE].handle;

    if (!IS_MOVABLE(block) || block[BUFLIB_IDX_PIN].pincount > 0)
        return false;

    int handle = ctx->handle_table - h_entry;
    BDEBUGF("%s(): moving id=%d by %d(%d)\n", __func__,
            handle, shift, shift*(int)sizeof(union buflib_data));
    new_block = block + shift;
    new_start = h_entry->alloc + shift*sizeof(union buflib_data);

    struct buflib_callbacks *ops = block[BUFLIB_IDX_OPS].ops;

    /* If the move must be synchronized with use, the user should have
       specified a callback that handles this */
    if (ops && ops->sync_callback)
        ops->sync_callback(handle, true);

    bool retval = false;
    if (!ops || ops->move_callback(handle, h_entry->alloc, new_start)
                    != BUFLIB_CB_CANNOT_MOVE)
    {
        h_entry->alloc = new_start; /* update handle table */
        memmove(new_block, block, block->val * sizeof(union buflib_data));
        retval = true;
    }

    if (ops && ops->sync_callback)
        ops->sync_callback(handle, false);

    return retval;
}
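
/* A sketch of the move protocol seen from the user's side (the my_* names
 * are hypothetical). move_block() calls sync_callback(handle, true) before
 * the move and sync_callback(handle, false) after; the move_callback only
 * has to fix up cached pointers, buflib itself memmove()s the data once the
 * callback agrees:
 *
 *     static char *my_cached_ptr;
 *
 *     static int my_move_cb(int handle, void *current, void *new)
 *     {
 *         if (my_buffer_busy)
 *             return BUFLIB_CB_CANNOT_MOVE;
 *         my_cached_ptr = new;  // data is memmove()d by buflib afterwards
 *         return BUFLIB_CB_OK;
 *     }
 */
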
/* Compact allocations and handle table, adjusting handle pointers as needed.
 * Return true if any space was freed or consolidated, false otherwise.
 */
static bool
buflib_compact(struct buflib_context *ctx)
{
    BDEBUGF("%s(): Compacting!\n", __func__);
    union buflib_data *block,
                      *hole = NULL;
    int shift = 0, len;
    /* Store the results of attempting to shrink the handle table */
    bool ret = handle_table_shrink(ctx);
    /* compaction has basically two modes of operation:
     * 1) the buffer is nicely movable: In this mode, blocks can simply be
     *    moved towards the beginning. Free blocks add to a shift value,
     *    which is the amount to move.
     * 2) the buffer contains unmovable blocks: unmovable blocks create
     *    holes and reset the shift. Once a hole is found, we try to fill
     *    holes first; moving by shift is the fallback. As the shift is
     *    reset, this effectively splits the buffer into portions of movable
     *    blocks. This mode cannot be used while no hole has been found yet,
     *    as it only works when it moves blocks across the portions. On the
     *    other hand, moving by shift only works within the same portion.
     * For simplicity only 1 hole at a time is considered */
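    /* An illustrative sketch of the two modes (one letter per buflib_data
     * cell; U is unmovable, '.' is free):
     *
     *     |AAAA|....|UU|BBB|...
     *
     * Mode 2 tries to move B into the hole before U, crossing the portion
     * boundary (the hole shrinks, shift is unchanged); mode 1 would instead
     * slide B down by the accumulated shift, which can never cross U. */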
    for(block = find_first_free(ctx); block < ctx->alloc_end; block += len)
    {
        check_block_length(ctx, block);

        bool movable = true; /* cache result to avoid 2nd call to move_block */
        len = block->val;
        /* This block is free, add its length to the shift value */
        if (len < 0)
        {
            shift += len;
            len = -len;
            continue;
        }
        /* attempt to fill any hole */
        if (hole && -hole->val >= len)
        {
            intptr_t hlen = -hole->val;
            if ((movable = move_block(ctx, block, hole - block)))
            {
                ret = true;
                /* Move was successful. The memory at block is now free */
                block->val = -len;

                /* add its length to shift */
                shift += -len;
                /* Reduce the size of the hole accordingly
                 * but be careful to not overwrite an existing block */
                if (hlen != len)
                {
                    hole += len;
                    hole->val = len - hlen; /* negative */
                }
                else /* hole closed */
                    hole = NULL;
                continue;
            }
        }
        /* attempt to move the allocation by shift */
        if (shift)
        {
            union buflib_data* target_block = block + shift;
            if (!movable || !move_block(ctx, block, shift))
            {
                /* free space before an unmovable block becomes a hole,
                 * therefore mark this block free and track the hole */
                target_block->val = shift;
                hole = target_block;
                shift = 0;
            }
            else
                ret = true;
        }
    }
    /* Move the end-of-allocation mark, and return true if any new space has
     * been freed.
     */
    ctx->alloc_end += shift;
    ctx->compact = true;
    return ret || shift;
}

/* Compact the buffer by trying both shrinking and moving.
 *
 * Try to move first. If unsuccessful, try to shrink. If that was successful
 * try to move once more as there might be more room now.
 */
static bool
buflib_compact_and_shrink(struct buflib_context *ctx, unsigned shrink_hints)
{
    bool result = false;
    /* if the buffer was compacted before already there will be no further gain */
    if (!ctx->compact)
        result = buflib_compact(ctx);
    if (!result)
    {
        union buflib_data *this, *before;
        for(this = ctx->buf_start, before = this;
            this < ctx->alloc_end;
            before = this, this += abs(this->val))
        {
            check_block_length(ctx, this);

            if (this->val < 0)
                continue;

            struct buflib_callbacks *ops = this[BUFLIB_IDX_OPS].ops;
            if (!ops || !ops->shrink_callback)
                continue;

            check_block_handle(ctx, this);
            union buflib_data* h_entry = this[BUFLIB_IDX_HANDLE].handle;
            int handle = ctx->handle_table - h_entry;

            unsigned pos_hints = shrink_hints & BUFLIB_SHRINK_POS_MASK;
            /* adjust what we ask for if there's free space in the front;
             * this isn't too unlikely assuming this block is
             * shrinkable but not movable */
            if (pos_hints == BUFLIB_SHRINK_POS_FRONT &&
                before != this && before->val < 0)
            {
                size_t free_space = (-before->val) * sizeof(union buflib_data);
                size_t wanted = shrink_hints & BUFLIB_SHRINK_SIZE_MASK;
                if (wanted < free_space) /* no shrink needed? */
                    continue;
                wanted -= free_space;
                shrink_hints = pos_hints | wanted;
            }

            char* data = h_entry->alloc;
            char* data_end = (char*)(this + this->val);
            bool last = (data_end == (char*)ctx->alloc_end);

            int ret = ops->shrink_callback(handle, shrink_hints,
                                           data, data_end - data);
            result |= (ret == BUFLIB_CB_OK);

            /* 'this' might have changed in the callback (if it shrank
             * from the top or even freed the handle), get it again */
            this = handle_to_block(ctx, handle);

            /* The handle may have been freed in the callback,
             * re-run the loop with the block before */
            if (!this)
                this = before;
            /* it could also change with shrinking from the back */
            else if (last)
                ctx->alloc_end = this + this->val;
        }
        /* shrinking was successful at least once, try compaction again */
        if (result)
            result |= buflib_compact(ctx);
    }

    return result;
}
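
/* A sketch of a cooperating shrink_callback (the my_* names and the
 * reachable my_ctx pointer are hypothetical). The hints encode where to
 * give up room (BUFLIB_SHRINK_POS_MASK) and how many bytes are wanted
 * (BUFLIB_SHRINK_SIZE_MASK); a callback honors them by calling
 * buflib_shrink() on its own handle before returning BUFLIB_CB_OK:
 *
 *     static int my_shrink_cb(int handle, unsigned hints,
 *                             void *start, size_t old_size)
 *     {
 *         size_t wanted = hints & BUFLIB_SHRINK_SIZE_MASK;
 *         if (wanted == 0 || wanted > old_size)
 *             return BUFLIB_CB_CANNOT_SHRINK;
 *
 *         if ((hints & BUFLIB_SHRINK_POS_MASK) == BUFLIB_SHRINK_POS_FRONT)
 *             buflib_shrink(my_ctx, handle, (char*)start + wanted,
 *                           old_size - wanted);
 *         else
 *             buflib_shrink(my_ctx, handle, start, old_size - wanted);
 *         return BUFLIB_CB_OK;
 *     }
 */
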
/* Shift buffered items by size units, and update handle pointers. The shift
 * value must be determined to be safe *before* calling.
 */
static void
buflib_buffer_shift(struct buflib_context *ctx, int shift)
{
    memmove(ctx->buf_start + shift, ctx->buf_start,
            (ctx->alloc_end - ctx->buf_start) * sizeof(union buflib_data));
    ctx->buf_start += shift;
    ctx->alloc_end += shift;
    shift *= sizeof(union buflib_data);
    union buflib_data *handle;
    for (handle = ctx->last_handle; handle < ctx->handle_table; handle++)
        if (handle->alloc)
            handle->alloc += shift;
}

/* Shift buffered items up by size bytes, or as many as possible if size == 0.
 * Set size to the number of bytes freed.
 */
void*
buflib_buffer_out(struct buflib_context *ctx, size_t *size)
{
    if (!ctx->compact)
        buflib_compact(ctx);
    size_t avail = ctx->last_handle - ctx->alloc_end;
    size_t avail_b = avail * sizeof(union buflib_data);
    if (*size && *size < avail_b)
    {
        avail = (*size + sizeof(union buflib_data) - 1)
                / sizeof(union buflib_data);
        avail_b = avail * sizeof(union buflib_data);
    }
    *size = avail_b;
    void *ret = ctx->buf_start;
    buflib_buffer_shift(ctx, avail);
    return ret;
}

/* Shift buffered items down by size bytes */
void
buflib_buffer_in(struct buflib_context *ctx, int size)
{
    size /= sizeof(union buflib_data);
    buflib_buffer_shift(ctx, -size);
}
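
/* A sketch of the out/in pairing (the byte count is hypothetical):
 * buflib_buffer_out() lends the caller a chunk from the front of the pool,
 * and buflib_buffer_in() gives the same number of bytes back afterwards:
 *
 *     size_t sz = 65536;
 *     void *borrowed = buflib_buffer_out(&ctx, &sz);
 *     // ... use the sz bytes at borrowed; buflib won't touch them ...
 *     buflib_buffer_in(&ctx, sz);
 */
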
/* Allocate a buffer of size bytes, returning a handle for it.
 * Note: Buffers are movable since NULL is passed for "ops".
 *       Don't pass them to functions that call yield() */
int
buflib_alloc(struct buflib_context *ctx, size_t size)
{
    return buflib_alloc_ex(ctx, size, NULL);
}

/* Allocate a buffer of size bytes, returning a handle for it.
 *
 * The ops parameter points to caller-implemented callbacks for moving and
 * shrinking.
 *
 * If you pass NULL for "ops", buffers are movable by default.
 * Don't pass them to functions that call yield(), like I/O.
 * Buffers are only shrinkable when a shrink callback is given.
 */
int
buflib_alloc_ex(struct buflib_context *ctx, size_t size,
                struct buflib_callbacks *ops)
{
    union buflib_data *handle, *block;
    bool last;
    /* This really is assigned a value before use */
    int block_len;
    size = (size + sizeof(union buflib_data) - 1) /
           sizeof(union buflib_data)
           + BUFLIB_NUM_FIELDS;
handle_alloc:
    handle = handle_alloc(ctx);
    if (!handle)
    {
        /* If handle allocation has failed, and compaction succeeds, it may
         * be possible to get a handle by trying again.
         */
        union buflib_data* last_block = find_block_before(ctx,
                                            ctx->alloc_end, false);
        struct buflib_callbacks* ops = last_block[BUFLIB_IDX_OPS].ops;
        unsigned hints = 0;
        if (!ops || !ops->shrink_callback)
        {   /* the last one isn't shrinkable;
             * make room in front of a shrinkable one and move this alloc */
            hints = BUFLIB_SHRINK_POS_FRONT;
            hints |= last_block->val * sizeof(union buflib_data);
        }
        else
        {   /* the last one is shrinkable, make room for handles directly */
            hints = BUFLIB_SHRINK_POS_BACK;
            hints |= 16*sizeof(union buflib_data);
        }
        /* buflib_compact_and_shrink() will compact and move last_block
         * if possible */
        if (buflib_compact_and_shrink(ctx, hints))
            goto handle_alloc;
        return -1;
    }

buffer_alloc:
    /* need to re-evaluate last before the loop because the last allocation
     * possibly made room in its front to fit this, so last would be wrong */
    last = false;
    for (block = find_first_free(ctx);; block += block_len)
    {
        /* If the last used block extends all the way to the handle table, the
         * block "after" it doesn't have a header. Because of this, it's easier
         * to always find the end of allocation by saving a pointer, and always
         * calculate the free space at the end by comparing it to the
         * last_handle pointer.
         */
        if(block == ctx->alloc_end)
        {
            last = true;
            block_len = ctx->last_handle - block;
            if ((size_t)block_len < size)
                block = NULL;
            break;
        }

        check_block_length(ctx, block);
        block_len = block->val;
        /* blocks with positive length are already allocated. */
        if(block_len > 0)
            continue;
        block_len = -block_len;
        /* The search is first-fit, any fragmentation this causes will be
         * handled at compaction.
         */
        if ((size_t)block_len >= size)
            break;
    }
    if (!block)
    {
        /* Try compacting if allocation failed */
        unsigned hint = BUFLIB_SHRINK_POS_FRONT |
                    ((size*sizeof(union buflib_data))&BUFLIB_SHRINK_SIZE_MASK);
        if (buflib_compact_and_shrink(ctx, hint))
        {
            goto buffer_alloc;
        } else {
            handle_free(ctx, handle);
            return -2;
        }
    }

    /* Set up the allocated block, by marking the size allocated, and storing
     * a pointer to the handle.
     */
    block[BUFLIB_IDX_LEN].val = size;
    block[BUFLIB_IDX_HANDLE].handle = handle;
    block[BUFLIB_IDX_OPS].ops = ops;
    block[BUFLIB_IDX_PIN].pincount = 0;

    handle->alloc = (char*)&block[BUFLIB_NUM_FIELDS];

    BDEBUGF("buflib_alloc_ex: size=%u handle=%p clb=%p\n",
            (unsigned int)size, (void *)handle, (void *)ops);

    block += size;
    /* alloc_end must be kept current if we're taking the last block. */
    if (last)
        ctx->alloc_end = block;
    /* Only free blocks *before* alloc_end have tagged length. */
    else if ((size_t)block_len > size)
        block->val = size - block_len;
    /* Return the handle index as a positive integer. */
    return ctx->handle_table - handle;
}
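
/* A sketch of an allocation that participates in compaction (the my_*
 * names are hypothetical; see move_block() and buflib_compact_and_shrink()
 * for when each callback fires):
 *
 *     static struct buflib_callbacks my_ops = {
 *         .move_callback   = my_move_cb,    // fix up cached pointers
 *         .shrink_callback = my_shrink_cb,  // give up memory on request
 *         .sync_callback   = NULL,
 *     };
 *
 *     int h = buflib_alloc_ex(&ctx, 4096, &my_ops);
 *     if (h <= 0)
 *     {
 *         // handle failure; -1: no free handle, -2: no fitting block
 *     }
 */
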
static union buflib_data*
find_first_free(struct buflib_context *ctx)
{
    union buflib_data *ret;
    for(ret = ctx->buf_start; ret < ctx->alloc_end; ret += ret->val)
    {
        check_block_length(ctx, ret);
        if (ret->val < 0)
            break;
    }

    /* ret is now either a free block or the same as alloc_end, both are fine */
    return ret;
}

/* Find the block before 'block', returning NULL if there is none or if
 * is_free is set and the found block isn't free */
static union buflib_data*
find_block_before(struct buflib_context *ctx, union buflib_data* block,
                  bool is_free)
{
    union buflib_data *ret = ctx->buf_start,
                      *next_block = ret;

    /* no previous block */
    if (next_block == block)
        return NULL;

    /* find the block that's before the current one */
    while (next_block != block)
    {
        check_block_length(ctx, next_block);
        ret = next_block;
        next_block += abs(ret->val);
    }

    /* don't return it if the found block isn't free */
    if (is_free && ret->val >= 0)
        return NULL;

    return ret;
}

/* Free the buffer associated with handle_num. */
int
buflib_free(struct buflib_context *ctx, int handle_num)
{
    if (handle_num <= 0) /* invalid or already free */
        return handle_num;
    union buflib_data *handle = ctx->handle_table - handle_num,
                      *freed_block = handle_to_block(ctx, handle_num),
                      *block, *next_block;
    /* We need to find the block before the current one, to see if it is free
     * and can be merged with this one.
     */
    block = find_block_before(ctx, freed_block, true);
    if (block)
    {
        block->val -= freed_block->val;
    }
    else
    {
        /* Otherwise, set block to the newly-freed block, and mark it free,
         * before continuing on, since the code below expects block to point
         * to a free block which may have free space after it. */
        block = freed_block;
        block->val = -block->val;
    }

    next_block = block - block->val;
    /* Check if we are merging with the free space at alloc_end. */
    if (next_block == ctx->alloc_end)
        ctx->alloc_end = block;
    /* Otherwise, the next block might still be a "normal" free block, and the
     * mid-allocation free means that the buffer is no longer compact.
     */
    else {
        ctx->compact = false;
        if (next_block->val < 0)
            block->val += next_block->val;
    }
    handle_free(ctx, handle);
    handle->alloc = NULL;

    return 0; /* unconditionally */
}

static size_t
free_space_at_end(struct buflib_context* ctx)
{
    /* subtract the BUFLIB_NUM_FIELDS header elements (length marker, handle
     * table entry pointer, ops, pin count) that any new block would need */
    ptrdiff_t diff = (ctx->last_handle - ctx->alloc_end - BUFLIB_NUM_FIELDS);
    diff -= 16; /* space for future handles */
    diff *= sizeof(union buflib_data); /* make it bytes */

    if (diff > 0)
        return diff;
    else
        return 0;
}

/* Return the maximum allocatable contiguous memory in bytes */
size_t
buflib_allocatable(struct buflib_context* ctx)
{
    size_t free_space = 0, max_free_space = 0;
    intptr_t block_len;

    /* make sure the buffer is as contiguous as possible */
    if (!ctx->compact)
        buflib_compact(ctx);

    /* now look for free space in the holes */
    for(union buflib_data *block = find_first_free(ctx);
        block < ctx->alloc_end;
        block += block_len)
    {
        check_block_length(ctx, block);
        block_len = block->val;

        if (block_len < 0)
        {
            block_len = -block_len;
            free_space += block_len;
            continue;
        }

        /* an unmovable section resets the count as free space
         * can't be contiguous */
        if (!IS_MOVABLE(block))
        {
            if (max_free_space < free_space)
                max_free_space = free_space;
            free_space = 0;
        }
    }

    /* select the best */
    max_free_space = MAX(max_free_space, free_space);
    max_free_space *= sizeof(union buflib_data);
    max_free_space = MAX(max_free_space, free_space_at_end(ctx));

    if (max_free_space > 0)
        return max_free_space;
    else
        return 0;
}

/* Return the amount of unallocated memory in bytes (even if not contiguous) */
size_t
buflib_available(struct buflib_context* ctx)
{
    size_t free_space = 0;

    /* add up all holes */
    for(union buflib_data *block = find_first_free(ctx);
        block < ctx->alloc_end;
        block += abs(block->val))
    {
        check_block_length(ctx, block);
        if (block->val < 0)
            free_space += -block->val;
    }

    free_space *= sizeof(union buflib_data); /* make it bytes */
    free_space += free_space_at_end(ctx);

    return free_space;
}

/*
 * Allocate all available (as returned by buflib_available()) memory and return
 * a handle to it
 *
 * This grabs a lock which can only be unlocked by buflib_free() or
 * buflib_shrink(), to protect from further allocations (which couldn't be
 * serviced anyway).
 */
int
buflib_alloc_maximum(struct buflib_context* ctx, size_t *size, struct buflib_callbacks *ops)
{
    /* ignore ctx->compact because it's true if all movable blocks are
     * contiguous, even if the buffer has holes due to unmovable allocations */
    unsigned hints;
    size_t bufsize = ctx->handle_table - ctx->buf_start;
    bufsize = MIN(BUFLIB_SHRINK_SIZE_MASK, bufsize*sizeof(union buflib_data)); /* make it bytes */
    /* try as hard as possible to free up space. allocations are
     * welcome to give up some or all of their memory */
    hints = BUFLIB_SHRINK_POS_BACK | BUFLIB_SHRINK_POS_FRONT | bufsize;
    /* compact until no more space can be gained */
    while (buflib_compact_and_shrink(ctx, hints));

    *size = buflib_allocatable(ctx);
    if (*size <= 0) /* OOM */
        return -1;

    return buflib_alloc_ex(ctx, *size, ops);
}
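
/* A sketch of the typical grab-everything pattern (the names and the
 * 'needed' byte count are hypothetical):
 *
 *     size_t sz;
 *     int h = buflib_alloc_maximum(&ctx, &sz, &buflib_ops_locked);
 *     if (h > 0)
 *     {
 *         // ... use the sz bytes exclusively ...
 *         buflib_shrink(&ctx, h, buflib_get_data(&ctx, h), needed);
 *         // or buflib_free(&ctx, h) to give everything back
 *     }
 */
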
/* Shrink the allocation indicated by the handle according to new_start and
 * new_size. Growing is not possible, therefore new_start and
 * new_start + new_size must be within the original allocation
 */
bool
buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t new_size)
{
    char* oldstart = buflib_get_data(ctx, handle);
    char* newstart = new_start != NULL ? new_start : oldstart;
    char* newend = newstart + new_size;

    /* newstart must be higher and new_size not "negative" */
    if (newstart < oldstart || newend < newstart)
        return false;
    union buflib_data *block = handle_to_block(ctx, handle),
                      *old_next_block = block + block->val,
                      /* newstart isn't necessarily properly aligned but it
                       * needn't be since it's only dereferenced by the user code */
                      *aligned_newstart = (union buflib_data*)B_ALIGN_DOWN(newstart),
                      *aligned_oldstart = (union buflib_data*)B_ALIGN_DOWN(oldstart),
                      *new_next_block = (union buflib_data*)B_ALIGN_UP(newend),
                      *new_block, metadata_size;

    /* growing is not supported */
    if (new_next_block > old_next_block)
        return false;

    metadata_size.val = aligned_oldstart - block;
    /* update val and the handle table entry */
    new_block = aligned_newstart - metadata_size.val;
    block[BUFLIB_IDX_LEN].val = new_next_block - new_block;

    check_block_handle(ctx, block);
    block[BUFLIB_IDX_HANDLE].handle->alloc = newstart;
    if (block != new_block)
    {
        /* move the metadata over, i.e. the pointer to the handle table entry
         * and the name.
         * This is actually the point of no return. Data in the allocation is
         * being modified, and therefore we must successfully finish the shrink
         * operation */
        memmove(new_block, block, metadata_size.val*sizeof(metadata_size));
        /* mark the old block unallocated */
        block->val = block - new_block;
        /* find the block before in order to merge with the new free space */
        union buflib_data *free_before = find_block_before(ctx, block, true);
        if (free_before)
            free_before->val += block->val;

        /* We didn't handle size changes yet, so assign block to the new one;
         * the code below wants block whether it changed or not */
        block = new_block;
    }

    /* Now deal with size changes that create free blocks after the allocation */
    if (old_next_block != new_next_block)
    {
        if (ctx->alloc_end == old_next_block)
            ctx->alloc_end = new_next_block;
        else if (old_next_block->val < 0)
        {   /* enlarge the next block by moving it up */
            new_next_block->val = old_next_block->val - (old_next_block - new_next_block);
        }
        else
        {   /* creating a hole */
            /* must be negative to indicate being unallocated */
            new_next_block->val = new_next_block - old_next_block;
        }
    }

    return true;
}

void buflib_pin(struct buflib_context *ctx, int handle)
{
    if ((BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING) && handle <= 0)
        buflib_panic(ctx, "invalid handle pin: %d", handle);

    union buflib_data *data = handle_to_block(ctx, handle);
    data[BUFLIB_IDX_PIN].pincount++;
}

void buflib_unpin(struct buflib_context *ctx, int handle)
{
    if ((BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING) && handle <= 0)
        buflib_panic(ctx, "invalid handle unpin: %d", handle);

    union buflib_data *data = handle_to_block(ctx, handle);
    if (BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING)
    {
        if (data[BUFLIB_IDX_PIN].pincount == 0)
            buflib_panic(ctx, "handle pin underflow: %d", handle);
    }

    data[BUFLIB_IDX_PIN].pincount--;
}

unsigned buflib_pin_count(struct buflib_context *ctx, int handle)
{
    if ((BUFLIB_PARANOIA & PARANOIA_CHECK_PINNING) && handle <= 0)
        buflib_panic(ctx, "invalid handle: %d", handle);

    union buflib_data *data = handle_to_block(ctx, handle);
    return data[BUFLIB_IDX_PIN].pincount;
}
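
/* A sketch of pinning around an operation that may yield (the names are
 * hypothetical). While the pin count is nonzero, move_block() refuses to
 * relocate the buffer, so the raw pointer stays valid across the yield:
 *
 *     buflib_pin(&ctx, h);
 *     char *p = buflib_get_data(&ctx, h);
 *     read(fd, p, len);          // may yield(); p cannot move meanwhile
 *     buflib_unpin(&ctx, h);
 */
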
#ifdef BUFLIB_DEBUG_GET_DATA
void *buflib_get_data(struct buflib_context *ctx, int handle)
{
    if (handle <= 0)
        buflib_panic(ctx, "invalid handle access: %d", handle);

    return (void*)(ctx->handle_table[-handle].alloc);
}
#endif

#ifdef BUFLIB_DEBUG_CHECK_VALID
void buflib_check_valid(struct buflib_context *ctx)
{
    for(union buflib_data *block = ctx->buf_start;
        block < ctx->alloc_end;
        block += abs(block->val))
    {
        check_block_length(ctx, block);
        if (block->val < 0)
            continue;

        check_block_handle(ctx, block);
    }
}
#endif

#ifdef BUFLIB_DEBUG_PRINT
int buflib_get_num_blocks(struct buflib_context *ctx)
{
    int i = 0;

    for(union buflib_data *block = ctx->buf_start;
        block < ctx->alloc_end;
        block += abs(block->val))
    {
        check_block_length(ctx, block);
        ++i;
    }

    return i;
}

bool buflib_print_block_at(struct buflib_context *ctx, int block_num,
                           char *buf, size_t bufsize)
{
    for(union buflib_data *block = ctx->buf_start;
        block < ctx->alloc_end;
        block += abs(block->val))
    {
        check_block_length(ctx, block);

        if (block_num-- == 0)
        {
            snprintf(buf, bufsize, "%8p: val: %4ld (%sallocated)",
                     (void*)block, (long)block->val,
                     block->val > 0 ? "" : "un");
            return true;
        }
    }

    if (bufsize > 0)
        *buf = '\0';
    return false;
}
#endif

static void check_block_length(struct buflib_context *ctx,
                               union buflib_data *block)
{
    if (BUFLIB_PARANOIA & PARANOIA_CHECK_LENGTH)
    {
        intptr_t length = block[BUFLIB_IDX_LEN].val;

        /* Check that the block length does not pass beyond the end */
        if (length == 0 || block > ctx->alloc_end - abs(length))
        {
            buflib_panic(ctx, "block len wacky [%p]=%ld",
                         (void*)&block[BUFLIB_IDX_LEN], (long)length);
        }
    }
}

static void check_block_handle(struct buflib_context *ctx,
                               union buflib_data *block)
{
    if (BUFLIB_PARANOIA & PARANOIA_CHECK_BLOCK_HANDLE)
    {
        intptr_t length = block[BUFLIB_IDX_LEN].val;
        union buflib_data *h_entry = block[BUFLIB_IDX_HANDLE].handle;

        /* Check that the handle pointer is properly aligned */
        /* TODO: Can we ensure the compiler doesn't optimize this out?
         * I dunno, maybe the compiler can assume the pointer is always
         * properly aligned due to some C standard voodoo?? */
        if (!IS_ALIGNED((uintptr_t)h_entry, alignof(*h_entry)))
        {
            buflib_panic(ctx, "handle unaligned [%p]=%p",
                         &block[BUFLIB_IDX_HANDLE], h_entry);
        }

        /* Check that the pointer is actually inside the handle table */
        if (h_entry < ctx->last_handle || h_entry >= ctx->handle_table)
        {
            buflib_panic(ctx, "handle out of bounds [%p]=%p",
                         &block[BUFLIB_IDX_HANDLE], h_entry);
        }

        /* Now check that the allocation is within the block.
         * This is stricter than check_handle(). */
        void *alloc = h_entry->alloc;
        void *alloc_begin = block;
        void *alloc_end = block + length;
        /* buflib allows zero length allocations, so alloc_end is inclusive */
        if (alloc < alloc_begin || alloc > alloc_end)
        {
            buflib_panic(ctx, "alloc outside block [%p]=%p, %p-%p",
                         h_entry, alloc, alloc_begin, alloc_end);
        }
    }
}