cosmetic changes and additions:

* now we have separate private header files (the memory-* files)
  containing the private/static and the public functions.
* there is only one .c file, with the same name as the library.

Zagor: since there is now only one .c file, you could build just a .o file and use it instead of the .a library file.

* most structures and code are now private.


git-svn-id: svn://svn.rockbox.org/rockbox/trunk@113 a1c6a512-1295-4272-9138-f99709370657
Alan Korr 2002-04-17 12:20:41 +00:00
parent 08dada2df5
commit 0256fdf30b
4 changed files with 1002 additions and 0 deletions

@@ -0,0 +1,77 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id:
*
* Copyright (C) 2002 by Alan Korr
*
* All files in this archive are subject to the GNU General Public License.
* See the file COPYING in the source tree root for full license agreement.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#ifndef __LIBRARY_MEMORY_C__
# error "This header file must be included ONLY from memory.c."
#endif
#ifndef __LIBRARY_MEMORY_BLOCK_H__
# define __LIBRARY_MEMORY_BLOCK_H__
static struct memory_cache *free_block_cache[MEMORY_PAGE_MINIMAL_ORDER - 2];
///////////////////////////////////////////////////////////////////////////////
// MEMORY BLOCK :
/////////////////
//
// - memory_allocate_block : allocate a power-of-2-sized block (or a page)
// - memory_release_block : release a power-of-2-sized block (or a page)
//
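// Usage sketch (illustration only, not part of this commit) : orders below
// MEMORY_PAGE_MINIMAL_ORDER are served from a block cache, larger orders
// fall through to the page allocator ; release with the same order.
//
//     void *block = memory_allocate_block (6);
//     if (block != MEMORY_RETURN_FAILURE)
//         memory_release_block (6,block);
//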
static inline void *allocate_small_block (int order)
{
struct memory_cache *cache = free_block_cache[order - 2];
// create the cache of 2^order-byte blocks on demand, then allocate from it
do
{
if (cache)
return memory_cache_allocate (cache);
}
while ((free_block_cache[order - 2] = cache = memory_create_cache (1 << order,0,0)));
return MEMORY_RETURN_FAILURE;
}
void *memory_allocate_block (int order)
{
if (order < 2)
order = 2;
if (order < MEMORY_PAGE_MINIMAL_ORDER)
return allocate_small_block (order);
if (order < MEMORY_PAGE_MAXIMAL_ORDER)
return memory_allocate_page (order);
return MEMORY_RETURN_FAILURE;
}
static inline int release_block (int order,void *address)
{
struct memory_cache *cache = free_block_cache[order - 2];
if (cache)
return memory_cache_release (cache,address);
return MEMORY_RETURN_FAILURE;
}
int memory_release_block (int order,void *address)
{
if (order < 2)
order = 2;
if (order < MEMORY_PAGE_MINIMAL_ORDER)
return release_block (order,address);
if (order < MEMORY_PAGE_MAXIMAL_ORDER)
return memory_release_page (address);
return MEMORY_RETURN_FAILURE;
}
#endif

@@ -0,0 +1,425 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id:
*
* Copyright (C) 2002 by Alan Korr
*
* All files in this archive are subject to the GNU General Public License.
* See the file COPYING in the source tree root for full license agreement.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#ifndef __LIBRARY_MEMORY_C__
# error "This header file must be included ONLY from memory.c."
#endif
#ifndef __LIBRARY_MEMORY_PAGE_H__
# define __LIBRARY_MEMORY_PAGE_H__
struct memory_free_page
{
struct memory_free_page
*less,*more;
char
reserved[MEMORY_PAGE_MINIMAL_SIZE - 2*sizeof (struct memory_free_page *)];
};
#define LESS -1
#define MORE +1
#ifdef TEST
struct memory_free_page free_page[MEMORY_TOTAL_PAGES];
static inline unsigned int get_offset (int order)
{
return (2 << order);
}
// IA32 has no problem with shift operation
static inline unsigned int get_size (int order)
{
return (MEMORY_PAGE_MINIMAL_SIZE << order);
}
// Arghhhh! I cannot align 'free_page' on a 512-byte boundary (the maximum is 16 bytes with Cygwin)
static inline struct memory_free_page *get_neighbour (struct memory_free_page *node,unsigned int size)
{
return ((struct memory_free_page *)((unsigned)free_page + (((unsigned)node - (unsigned)free_page) ^ size)));
}
#else
extern struct memory_free_page free_page[MEMORY_TOTAL_PAGES] asm("dram");
static inline unsigned int get_offset (int order)
{
static unsigned short offset [MEMORY_TOTAL_ORDERS] =
{ 2,4,8,16,32,64,128,256,512,1024,2048,4096,8192 };
return offset[order];
}
// SH1 has very poor shift instructions (only <<1,>>1,<<2,>>2,<<8,>>8,<<16 and >>16),
// so we use a lookup table to speed this up.
static inline unsigned int get_size (int order)
{
return (get_offset (order))<<8;
}
static inline struct memory_free_page *get_neighbour (struct memory_free_page *node,unsigned int size)
{
return ((struct memory_free_page *)((unsigned)node ^ size));
}
#endif
static char free_page_order[MEMORY_TOTAL_PAGES];
static struct memory_free_page *free_page_bin[MEMORY_TOTAL_ORDERS];
static inline int get_order (struct memory_free_page *node)
{
return free_page_order[node - free_page];
}
static inline void set_order (struct memory_free_page *node,int order)
{
free_page_order[node - free_page] = order;
}
#if MEMORY_PAGE_USE_SPLAY_TREE
# include <stdio.h>
static struct memory_free_page *splay_page (struct memory_free_page *root,struct memory_free_page *node)
{
struct memory_free_page *down;
struct memory_free_page *less;
struct memory_free_page *more;
struct memory_free_page head;
head.less =
head.more = 0;
less =
more = &head;
while (1)
{
if (node < root)
{
if ((down = root->less))
{
if (node < down)
{
root->less = down->more;
down->more = root;
root = down;
if (!root->less)
break;
}
more->less = root;
more = root;
root = root->less;
continue;
}
break;
}
if (root < node)
{
if ((down = root->more))
{
if (down < node)
{
root->more = down->less;
down->less = root;
root = down;
if (!root->more)
break;
}
less->more = root;
less = root;
root = root->more;
continue;
}
}
break;
}
less->more = root->less;
more->less = root->more;
root->less = head.more;
root->more = head.less;
return root;
}
static inline void insert_page (int order,struct memory_free_page *node)
{
struct memory_free_page *root = free_page_bin[order];
if (!root)
{
node->less =
node->more = 0;
}
else if (node < (root = splay_page (root,node)))
{
node->less = root->less;
node->more = root;
root->less = 0;
}
else if (node > root)
{
node->less = root;
node->more = root->more;
root->more = 0;
}
free_page_bin[order] = node;
set_order (node,order);
return;
}
static inline struct memory_free_page *pop_page (int order,int want)
{
struct memory_free_page *root = free_page_bin[order];
if (root)
{
root = splay_page (root,free_page);
free_page_bin[order] = root->more;
set_order (root,~want);
}
return root;
}
static inline void remove_page (int order,struct memory_free_page *node)
{
struct memory_free_page *root = free_page_bin[order];
root = splay_page (root,node);
if (root->less)
{
node = splay_page (root->less,node);
node->more = root->more;
}
else
node = root->more;
free_page_bin[order] = node;
}
#else
static inline void insert_page (int order,struct memory_free_page *node)
{
struct memory_free_page *head = free_page_bin[order];
node->less = 0;
node->more = head;
if (head)
head->less = node;
free_page_bin[order] = node;
set_order (node,order);
}
static inline struct memory_free_page *pop_page (int order,int want)
{
struct memory_free_page *node = free_page_bin[order];
if (node)
{
free_page_bin[order] = node->more;
if (node->more)
node->more->less = 0;
set_order (node,~want);
}
return node;
}
static inline void remove_page (int order,struct memory_free_page *node)
{
if (node->less)
node->less->more = node->more;
else
free_page_bin[order] = node->more;
if (node->more)
node->more->less = node->less;
}
#endif
static inline void push_page (int order,struct memory_free_page *node)
{
node->less = 0;
node->more = 0;
free_page_bin[order] = node;
set_order (node,order);
}
static struct memory_free_page *allocate_page (unsigned int size,int order)
{
struct memory_free_page *node;
int min = order;
while ((unsigned)order <= (MEMORY_TOTAL_ORDERS - 1))
// order is valid ?
{
if (!(node = pop_page (order,min)))
// no free page of this order ?
{
++order; size <<= 1;
continue;
}
while (order > min)
// split our larger page in smaller pages
{
--order; size >>= 1;
push_page (order,(struct memory_free_page *)((unsigned int)node + size));
}
return node;
}
return MEMORY_RETURN_FAILURE;
}
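// Worked example (illustration only) : if the smallest free page available is
// of order 2, allocate_page (get_size (0),0) pops that order-2 page, pushes
// its upper half back as an order-1 page, pushes the upper half of what is
// left as an order-0 page, and returns the remaining lowest order-0 page.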
static inline void release_page (struct memory_free_page *node,unsigned int size,int order)
{
struct memory_free_page *neighbour;
while ((order <= (MEMORY_TOTAL_ORDERS - 1)) &&
((neighbour = get_neighbour (node,size)),
(get_order (neighbour) == order)))
// merge our released page with its contiguous page into a larger page
{
remove_page (order,neighbour);
++order; size <<= 1;
if (neighbour < node)
node = neighbour;
}
insert_page (order,node);
}
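// Worked example (illustration only) : releasing an order-0 page whose buddy
// (computed by get_neighbour) is also free at order 0 removes that buddy from
// its bin and merges both into an order-1 page, repeating upwards as long as
// each new, larger buddy is free as well.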
/*****************************************************************************/
/* PUBLIC FUNCTIONS */
/*****************************************************************************/
void *memory_allocate_page (int order)
{
if (order < 0)
return MEMORY_RETURN_FAILURE;
return allocate_page (get_size (order),order);
}
// release a page :
// when called, 'address' MUST be a valid address pointing
// to &dram[i], where i ranges from 0 to MEMORY_TOTAL_PAGES - 1.
// FAILURE if block is already freed.
int memory_release_page (void *address)
{
struct memory_free_page *node = (struct memory_free_page *)address;
int order = ~get_order (node);
if (order < 0)
return MEMORY_RETURN_FAILURE;
release_page (node,get_size (order),order);
return MEMORY_RETURN_SUCCESS;
}
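// Usage sketch (illustration only, not part of this commit) :
//
//     void *page = memory_allocate_page (0);
//     if (page != MEMORY_RETURN_FAILURE)
//     {
//         memory_set (page,0,MEMORY_PAGE_MINIMAL_SIZE);
//         memory_release_page (page);
//     }
//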
#ifdef TEST
# include <stdio.h>
# include <stdlib.h>
# if MEMORY_PAGE_USE_SPLAY_TREE
static void dump_splay_node (struct memory_free_page *node,int level)
{
if (!node)
return;
dump_splay_node (node->less,level+1);
printf ("\n%*s[%d-%d]",level,"",(node - free_page),(node - free_page) + (1 << get_order (node)) - 1);
dump_splay_node (node->more,level+1);
}
static void dump_splay_tree (struct memory_free_page *root)
{
dump_splay_node (root,2); fflush (stdout);
}
# endif
void memory_spy_page (void *address)
{
struct memory_free_page *node = (struct memory_free_page *)address;
int order,used;
if (node)
{
order = get_order (node);
used = order < 0;
if (used)
order = ~order;
printf("\n(%s,%2d,%7d)",(used ? "used" : "free"),order,get_size (order));
}
}
void memory_dump (int order)
{
struct memory_free_page *node = free_page_bin[order];
printf("\n(%s,%2d,%7d)",node ? "free" : "none",order,get_size (order));
# if MEMORY_PAGE_USE_SPLAY_TREE
dump_splay_tree (node);
# else
while (node)
{
printf("[%d-%d]",(node - free_page),(node - free_page) + (1<<order) - 1);
node = node->more;
}
# endif
}
void memory_check (int order)
{
struct memory_free_page *node[4096],*swap;
unsigned int i = 0,j = 0;
while (i <= 12)
memory_dump (i++);
i = 0;
printf ("\nallocating...\n");
while (order >= 0)
{
j = order;
while ((swap = memory_allocate_page (j)))
{
node[i++] = swap;
printf("[%d-%d]",(swap - free_page),(swap - free_page) + ((1 << j)-1));
for (j += (rand () & 15); j > (unsigned int)order; j -= order);
}
--order;
}
node[i] = 0;
while (j <= 12)
memory_dump (j++);
j = 0;
printf ("\nreleasing...");
--i;
while (i > 0)
{
unsigned int k = 0;
#if 0
printf ("\n");
#endif
swap = node[k++];
#if 0
while (swap)
{
printf("[%d-%d]",(swap - free_page),(swap - free_page) + ((1 << ~get_order (swap))-1));
swap = node[k++];
}
#endif
for (j += 1 + (rand () & 15); j >= i; j -= i);
swap = node[j];
node[j] = node[i];
memory_release_page (swap);
node[i] = 0;
--i;
}
memory_release_page (node[0]);
i = 0;
while (i <= 12)
memory_dump (i++);
printf("\n\n%s !",(get_order (free_page) == 12) ? "SUCCESS" : "FAILURE");
}
#endif
#endif

@@ -0,0 +1,450 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id:
*
* Copyright (C) 2002 by Alan Korr
*
* All files in this archive are subject to the GNU General Public License.
* See the file COPYING in the source tree root for full license agreement.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#ifndef __LIBRARY_MEMORY_C__
# error "This header file must be included ONLY from memory.c."
#endif
#ifndef __LIBRARY_MEMORY_SLAB_H__
# define __LIBRARY_MEMORY_SLAB_H__
struct memory_free_block
{
struct memory_free_block
*link;
};
///////////////////////////////////////////////////////////////////////////////
// MEMORY SLAB :
////////////////
//
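// - a slab is one or more contiguous pages carved into fixed-size blocks,
//   with the 'struct memory_slab' bookkeeping stored at the end of the page ;
//   'left' counts the free blocks remaining and 'free' heads their list.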
//
struct memory_slab
{
struct memory_slab
*less,*more;
unsigned int // left == number of free blocks left
left;
struct memory_free_block
*free;
};
static inline struct memory_slab *push_slab (struct memory_slab *head,struct memory_slab *node)
{
node->less = head;
if (head)
{
node->more = head->more;
head->more = node;
}
else
node->more = 0;
return node;
}
static inline struct memory_slab *pop_slab (struct memory_slab *head,struct memory_slab *node)
{
if (head)
head->more = node->more;
return node->more;
}
static inline struct memory_slab *move_slab (struct memory_slab **from,struct memory_slab **to)
{
struct memory_slab *head = *from;
*from = (*from)->more;
if (*from)
(*from)->less = head->less;
head->less = 0;
head->more = (*to);
if (*to)
(*to)->less = head;
*to = head;
return head;
}
//
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// MEMORY CACHE :
/////////////////
//
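// - a cache hands out blocks of one fixed size and keeps its slabs in three
//   lists : 'used' (no free block left), 'free' (partially used) and 'reap'
//   (completely unused, reclaimable) ; 'left' counts the reapable slabs and
//   all caches live in a splay tree ('cache_tree') keyed on that count.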
//
struct memory_cache
{
struct memory_cache
*less,*more,*same;
unsigned int
left; // number of free slabs
struct memory_slab
*used;
struct memory_slab
*free;
struct memory_slab
*reap;
unsigned int
size,original_size;
unsigned int
page_size;
unsigned int
blocks_per_slab;
int
page_order;
unsigned int
flags;
};
static struct memory_cache *cache_tree;
static inline int get_order (unsigned size)
{
int order = 0;
size = (size + sizeof(struct memory_free_block) - 1) & - sizeof(struct memory_free_block);
while (size > 0)
{
++order; size >>= 1;
}
return order;
}
static inline struct memory_slab *get_slab (struct memory_cache *cache,void *address)
{
#ifdef TEST
return (struct memory_slab *)((((unsigned)address + cache->page_size) & -cache->page_size) - sizeof (struct memory_slab));
#else
return (struct memory_slab *)((unsigned)free_page + ((((unsigned)address - (unsigned)free_page) + cache->page_size) & -cache->page_size) - sizeof (struct memory_slab));
#endif
}
static struct memory_cache *splay_cache (struct memory_cache *root,unsigned int left)
{
struct memory_cache *down;
struct memory_cache *less;
struct memory_cache *more;
struct memory_cache head;
head.less =
head.more = 0;
less =
more = &head;
while (1)
{
if (left < root->left)
{
if ((down = root->less))
{
if (left < down->left)
{
root->less = down->more;
down->more = root;
root = down;
if (!root->less)
break;
}
more->less = root;
more = root;
root = root->less;
continue;
}
break;
}
if (root->left < left)
{
if ((down = root->more))
{
if (down->left < left)
{
root->more = down->less;
down->less = root;
root = down;
if (!root->more)
break;
}
less->more = root;
less = root;
root = root->more;
continue;
}
}
break;
}
less->more = root->less;
more->less = root->more;
root->less = head.more;
root->more = head.less;
return root;
}
static inline struct memory_cache *insert_cache (struct memory_cache *root,struct memory_cache *node)
{
node->less =
node->more =
node->same = 0;
if (root)
{
if (node->left == ((root = splay_cache (root,node->left))->left))
{
node->less = root->less;
node->more = root->more;
node->same = root;
root->less = node;
}
else if (node->left < root->left)
{
node->less = root->less;
node->more = root;
root->less = 0;
}
else
{
node->less = root;
node->more = root->more;
root->more = 0;
}
}
return node;
}
static inline struct memory_cache *remove_cache (struct memory_cache *root,struct memory_cache *node)
{
if (root)
{
root = splay_cache (root,node->left);
if (root != node)
{
// node is not the head of its 'same' chain : just unlink it
node->less->same = node->same;
if (node->same)
node->same->less = node->less;
return root;
}
if (root->less)
{
node = splay_cache (root->less,node->left);
node->more = root->more;
}
else
node = root->more;
return node;
}
return root;
}
static inline struct memory_cache *move_cache (struct memory_cache *root,struct memory_cache *node,int delta)
{
// take the cache out of the tree, adjust its free-slab count and put it back
root = remove_cache (root,node);
node->left += delta;
return insert_cache (root,node);
}
//
/////////////////////
// PUBLIC FUNCTIONS :
/////////////////////
//
// - memory_grow_cache : allocate a new slab for a cache
// - memory_shrink_cache : release free slabs from a cache
// - memory_create_cache : create a new cache of size-fixed blocks
// - memory_destroy_cache : destroy the cache and release all the slabs
// - memory_cache_allocate : allocate a block from the cache
// - memory_cache_release : release a block in the cache
//
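// Usage sketch (illustration only, not part of this commit) : a cache of
// 28-byte objects with default alignment and no flags.
//
//     struct memory_cache *cache = memory_create_cache (28,0,0);
//     if (cache != MEMORY_RETURN_FAILURE)
//     {
//         void *object = memory_cache_allocate (cache);
//         if (object != MEMORY_RETURN_FAILURE)
//             memory_cache_release (cache,object);
//         memory_shrink_cache (cache,1 /* release all free slabs */);
//         memory_destroy_cache (cache);
//     }
//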
struct memory_slab *memory_grow_cache (struct memory_cache *cache)
{
struct memory_slab *slab;
unsigned int page;
if (cache)
{
page = (unsigned int)memory_allocate_page (cache->page_order);
if (page)
{
struct memory_free_block *block,**link;
slab = (struct memory_slab *)(page + cache->page_size - sizeof (struct memory_slab));
slab->free = 0;
slab->left = 0;
link = &slab->free;
// chain all blocks of the new slab into its free list
for (block = (struct memory_free_block *)page;
(unsigned int)block + cache->size < (unsigned int)slab;
block = (struct memory_free_block *)((unsigned int)block + cache->size))
{
*link = block;
link = &block->link;
++slab->left;
}
*link = 0;
cache->blocks_per_slab = slab->left;
cache->reap = push_slab (cache->reap,slab);
cache_tree = move_cache (cache_tree,cache,+1);
return slab;
}
}
return MEMORY_RETURN_FAILURE;
}
static int shrink_cache (struct memory_cache *cache,int all,int move)
{
struct memory_slab *slab;
int slabs = 0;
if (cache)
{
while ((slab = cache->reap))
{
++slabs;
cache->reap = pop_slab (cache->reap,slab);
// the slab descriptor sits at the end of its page(s) : release the page base
memory_release_page ((void *)((unsigned int)slab + sizeof (struct memory_slab) - cache->page_size));
if (!all)
break;
}
if (slabs)
{
if (move)
cache_tree = move_cache (cache_tree,cache,-slabs);
return MEMORY_RETURN_SUCCESS;
}
}
return MEMORY_RETURN_FAILURE;
}
int memory_shrink_cache (struct memory_cache *cache,int all)
{
return shrink_cache (cache,all,1 /* move cache in cache_tree */);
}
struct memory_cache *memory_create_cache (unsigned int size,int align,int flags)
{
struct memory_cache *cache;
unsigned int waste = 0,blocks_per_slab;
int page_order;
unsigned int page_size;
unsigned int original_size = size;
// Align size on 'align' bytes ('align' should equal 1<<n).
// If 'align' is less than 4, default to 32-bit word alignment.
size = (align > 4) ? ((size + align - 1) & -align) : ((size + sizeof (int) - 1) & -sizeof (int));
if (!(cache = memory_cache_allocate (&cache_cache)))
return MEMORY_RETURN_FAILURE;
cache->flags =
cache->left = 0;
cache->used =
cache->free =
cache->reap = 0;
cache->original_size = original_size;
cache->size = size;
page_order = 0;
page_size = MEMORY_PAGE_MINIMAL_SIZE;
// Try to determine the best number of pages per slab
for (;; ++page_order,(page_size <<= 1))
{
if (page_order >= MEMORY_MAXIMUM_PAGE_ORDER_PER_SLAB)
{
memory_cache_release (&cache_cache,cache);
return MEMORY_RETURN_FAILURE;
}
waste = page_size;
waste -= sizeof (struct memory_slab);
blocks_per_slab = waste / size;
waste -= blocks_per_slab * size;
if (blocks_per_slab < MEMORY_MINIMUM_BLOCKS_PER_SLAB)
continue;
// below 3% of lost space (1967/65536) is acceptable
if (((waste << 16) / page_size) < 1967)
break;
}
cache->page_size = page_size;
cache->page_order = page_order;
cache_tree = insert_cache (cache_tree,cache);
return cache;
}
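// Worked example (illustration only, assuming a 32-bit build where
// sizeof (struct memory_slab) == 16, MEMORY_PAGE_MINIMAL_SIZE == 512 and
// MEMORY_MINIMUM_BLOCKS_PER_SLAB <= 17) : for size == 28, the first try uses
// one 512-byte page, so waste = 512 - 16 = 496, blocks_per_slab = 496/28 = 17
// and waste = 496 - 17*28 = 20 ; (20 << 16) / 512 = 2560 is not below 1967
// (about 3%), so the loop doubles page_size ; with 1024 bytes the waste drops
// to 1008 - 36*28 = 0 and the search stops there.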
int memory_destroy_cache (struct memory_cache *cache)
{
/* FIX ME : this function shouldn't be called if there are still used blocks */
if (cache && !cache->free && !cache->used)
{
cache_tree = remove_cache (cache_tree,cache);
if (shrink_cache (cache,1 /* release all free slabs */,0 /* don't move in cache_tree */))
return memory_cache_release (&cache_cache,cache);
}
return MEMORY_RETURN_FAILURE;
}
void *memory_cache_allocate (struct memory_cache *cache)
{
if (cache)
{
do
{
struct memory_slab *slab;
struct memory_free_block *block;
if ((slab = cache->free))
{
if (slab->left > 0)
{
ok:
block = slab->free;
slab->free = block->link;
if (--slab->left == 0)
move_slab (&cache->free,&cache->used);
return block;
}
}
if (cache->reap)
{
slab = move_slab (&cache->reap,&cache->free);
cache_tree = move_cache (cache_tree,cache,-1);
goto ok;
}
}
while (memory_grow_cache (cache));
}
return MEMORY_RETURN_FAILURE;
}
int memory_cache_release (struct memory_cache *cache,void *address)
{
struct memory_slab *slab = get_slab (cache,address);
((struct memory_free_block *)address)->link = slab->free;
slab->free = (struct memory_free_block *)address;
if (slab->left++ == 0)
move_slab (&cache->used,&cache->free);
else if (slab->left == cache->blocks_per_slab)
{
move_slab (&cache->free,&cache->reap);
cache_tree = move_cache (cache_tree,cache,+1);
}
return MEMORY_RETURN_SUCCESS;
}
//
///////////////////////////////////////////////////////////////////////////////
#endif

@@ -0,0 +1,50 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id:
*
* Copyright (C) 2002 by Alan Korr
*
* All files in this archive are subject to the GNU General Public License.
* See the file COPYING in the source tree root for full license agreement.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#define __LIBRARY_MEMORY_C__
#include <memory.h>
#include "memory-page.h"
#if 0
# include "memory-slab.h"
# include "memory-block.h"
#endif
/* NOT VERY OPTIMIZED AT ALL BUT WE WILL DO IT WHEN PRIORITY COMES */
void memory_copy (void *target,void const *source,unsigned int count)
{
char *to = (char *)target;
char const *from = (char const *)source;
while (count--)
*to++ = *from++;
}
/* NOT VERY OPTIMIZED AT ALL BUT WE WILL DO IT WHEN PRIORITY COMES */
void memory_set (void *target,int byte,unsigned int count)
{
char *to = (char *)target;
while (count--)
*to++ = (char)byte;
}
void memory_setup (void)
{
#if 1
memory_set (free_page,0,MEMORY_TOTAL_BYTES);
memory_set (free_page_bin,0,MEMORY_TOTAL_ORDERS * sizeof (struct memory_free_page *));
memory_set (free_page_order + 1,0,MEMORY_TOTAL_PAGES - 1);
#endif
free_page_order[0] = MEMORY_TOTAL_ORDERS - 1;
free_page_bin[MEMORY_TOTAL_ORDERS - 1] = free_page;
}