Buflib: Handle not having enough space for new handles (fixes FS#12265).

Handles are allocated at the end, growing downwards.
The tiny allocations from r30478 broke buflib, since it was assumed that the only shrinkable allocation (the audiobuffer) was the very last allocation.
The tiny allocations, however, fit into the reserve buffer for new handles, breaking the above assumption, and they cannot shrink to make room for handles.

Now, move any allocations before audiobuf (or shrink audiobuf as before) to make room for handles. This also unifies some duplicated code.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@30486 a1c6a512-1295-4272-9138-f99709370657
This commit is contained in:
Thomas Martitz 2011-09-09 13:33:22 +00:00
parent 7e3a3f491d
commit 0dcbc6cd5d
1 changed file with 40 additions and 27 deletions

View File

@ -89,6 +89,9 @@
#define BDEBUGF(...) do { } while(0)
#endif
static union buflib_data*
find_block_before(struct buflib_context *ctx, union buflib_data* block,
bool is_free);
/* Initialize buffer manager */
void
buflib_init(struct buflib_context *ctx, void *buf, size_t size)
@ -227,7 +230,7 @@ buflib_compact(struct buflib_context *ctx)
int shift = 0, len;
/* Store the results of attempting to shrink the handle table */
bool ret = handle_table_shrink(ctx);
for(block = ctx->first_free_block; block != ctx->alloc_end; block += len)
for(block = ctx->first_free_block; block < ctx->alloc_end; block += len)
{
len = block->val;
/* This block is free, add its length to the shift value */
@ -241,11 +244,14 @@ buflib_compact(struct buflib_context *ctx)
if (-ctx->first_free_block->val > block->val)
{
intptr_t size = ctx->first_free_block->val;
union buflib_data* next_block = block + block->val;
if (move_block(ctx, block, ctx->first_free_block - block))
{
/* moving was successful. Mark the next block as the new
* first_free_block and merge it with the free space
* that the move created */
if (ctx->alloc_end == next_block)
ctx->alloc_end = block;
ctx->first_free_block += block->val;
ctx->first_free_block->val = size + block->val;
continue;
@ -307,12 +313,16 @@ buflib_compact_and_shrink(struct buflib_context *ctx, unsigned shrink_hints)
int ret;
int handle = ctx->handle_table - this[1].handle;
char* data = this[1].handle->alloc;
bool last = (this+this->val) == ctx->alloc_end;
ret = this[2].ops->shrink_callback(handle, shrink_hints,
data, (char*)(this+this->val)-data);
result |= (ret == BUFLIB_CB_OK);
/* this might have changed in the callback (if
* it shrinked from the top), get it again */
this = handle_to_block(ctx, handle);
/* could also change with shrinking from back */
if (last)
ctx->alloc_end = this + this->val;
}
}
/* shrinking was successful at least once, try compaction again */
@ -407,26 +417,26 @@ handle_alloc:
/* If allocation has failed, and compaction has succeded, it may be
* possible to get a handle by trying again.
*/
if (!ctx->compact && buflib_compact(ctx))
goto handle_alloc;
else
{ /* first try to shrink the alloc before the handle table
* to make room for new handles */
int handle = ctx->handle_table - ctx->last_handle;
union buflib_data* last_block = handle_to_block(ctx, handle);
struct buflib_callbacks* ops = last_block[2].ops;
if (ops && ops->shrink_callback)
{
char *data = buflib_get_data(ctx, handle);
unsigned hint = BUFLIB_SHRINK_POS_BACK | 10*sizeof(union buflib_data);
if (ops->shrink_callback(handle, hint, data,
(char*)(last_block+last_block->val)-data) == BUFLIB_CB_OK)
{ /* retry one more time */
goto handle_alloc;
}
}
return -1;
union buflib_data* last_block = find_block_before(ctx,
ctx->alloc_end, false);
struct buflib_callbacks* ops = last_block[2].ops;
unsigned hints = 0;
if (!ops || !ops->shrink_callback)
{ /* the last one isn't shrinkable
* make room in front of a shrinkable and move this alloc */
hints = BUFLIB_SHRINK_POS_FRONT;
hints |= last_block->val * sizeof(union buflib_data);
}
else if (ops && ops->shrink_callback)
{ /* the last is shrinkable, make room for handles directly */
hints = BUFLIB_SHRINK_POS_BACK;
hints |= 16*sizeof(union buflib_data);
}
/* buflib_compact_and_shrink() will compact and move last_block()
* if possible */
if (buflib_compact_and_shrink(ctx, hints))
goto handle_alloc;
return -1;
}
buffer_alloc:
@ -504,9 +514,10 @@ buffer_alloc:
/* Finds the free block before block, and returns NULL if it's not free */
static union buflib_data*
find_free_block_before(struct buflib_context *ctx, union buflib_data* block)
find_block_before(struct buflib_context *ctx, union buflib_data* block,
bool is_free)
{
union buflib_data *ret = ctx->first_free_block,
union buflib_data *ret = ctx->buf_start,
*next_block = ret;
/* find the block that's before the current one */
@ -519,10 +530,12 @@ find_free_block_before(struct buflib_context *ctx, union buflib_data* block)
/* If next_block == block, the above loop didn't go anywhere. If it did,
* and the block before this one is empty, that is the wanted one
*/
if (next_block == block && ret < block && ret->val < 0)
if (next_block == block && ret < block)
{
if (is_free && ret->val >= 0) /* NULL if found block isn't free */
return NULL;
return ret;
/* otherwise, e.g. if ret > block, or if the buffer is compact,
* there's no free block before */
}
return NULL;
}
@ -536,7 +549,7 @@ buflib_free(struct buflib_context *ctx, int handle_num)
/* We need to find the block before the current one, to see if it is free
* and can be merged with this one.
*/
block = find_free_block_before(ctx, freed_block);
block = find_block_before(ctx, freed_block, true);
if (block)
{
block->val -= freed_block->val;
@ -652,7 +665,7 @@ buflib_shrink(struct buflib_context* ctx, int handle, void* new_start, size_t ne
/* mark the old block unallocated */
block->val = block - new_block;
/* find the block before in order to merge with the new free space */
union buflib_data *free_before = find_free_block_before(ctx, block);
union buflib_data *free_before = find_block_before(ctx, block, true);
if (free_before)
free_before->val += block->val;
else if (ctx->first_free_block > block)