/*******************************************************************************
copyright: Copyright (c) 2008 Kris Bell. All rights reserved
license: BSD style: $(LICENSE)
version: Apr 2008: Initial release
Jan 2009: Added GCChunk allocator
authors: Kris, schveiguy
Since: 0.99.7
*******************************************************************************/
module tango.util.container.Container;
private import tango.core.Memory;
private import tango.stdc.stdlib;
private import tango.stdc.string;
/*******************************************************************************
Utility functions and constants
*******************************************************************************/
struct Container
{
/***********************************************************************
default initial number of buckets of a non-empty hashmap
***********************************************************************/
enum size_t defaultInitialBuckets = 31;
/***********************************************************************
default load factor for a non-empty hashmap. The hash
table is resized when the proportion of elements per
bucket exceeds this limit
***********************************************************************/
enum float defaultLoadFactor = 0.75f;
/***********************************************************************
generic value reaper, which does nothing
***********************************************************************/
static void reap(V) (V v) {}
/***********************************************************************
generic key/value reaper, which does nothing
***********************************************************************/
static void reap(K, V) (K k, V v) {}
/***********************************************************************
generic hash function, using the default hashing. Thanks
to 'mwarning' for the optimization suggestion
***********************************************************************/
static size_t hash(K) (K k, size_t length)
{
static if (is(K : int) || is(K : uint) ||
is(K : long) || is(K : ulong) ||
is(K : short) || is(K : ushort) ||
is(K : byte) || is(K : ubyte) ||
is(K : char) || is(K : wchar) || is (K : dchar))
return cast(size_t) (k % length);
else
return (typeid(K).getHash(&k) & 0x7FFFFFFF) % length;
}
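/***********************************************************************
minimal usage sketch: the hash above maps any key into the
range [0 .. length), so the result can be used directly as a
bucket index. The bucket count used here is arbitrary
***********************************************************************/
unittest
{
auto bucket = hash (42, defaultInitialBuckets);
assert (bucket < defaultInitialBuckets);
}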
/***********************************************************************
GC Chunk allocator
Can save approximately 30% memory for small elements (tested
with integer elements and a chunk size of 1000), and is at
least twice as fast at adding elements when compared to the
generic allocator (approximately 50x faster with LinkedList)
Operates safely with GC managed entities
***********************************************************************/
struct ChunkGC(T)
{
static assert (T.sizeof >= (T*).sizeof, "The ChunkGC allocator can only be used for data sizes of at least " ~ ((T*).sizeof).stringof[0..$-1] ~ " bytes!");
private struct Cache {Cache* next;}
private Cache* cache;
private T[][] lists;
private size_t chunks = 256;
/***************************************************************
allocate a T-sized memory chunk
***************************************************************/
T* allocate ()
{
if (cache is null)
newlist();
auto p = cache;
cache = p.next;
return cast(T*) p;
}
/***************************************************************
allocate an array of T* sized memory chunks
***************************************************************/
T*[] allocate (size_t count)
{
auto p = (cast(T**) calloc(count, (T*).sizeof)) [0 .. count];
GC.addRange (p.ptr, count * (T*).sizeof);
return p;
}
/***************************************************************
Invoked when a specific T*[] is discarded
***************************************************************/
void collect (T*[] t)
{
if (t.ptr)
{
GC.removeRange (t.ptr);
free (t.ptr);
}
}
/***************************************************************
Invoked when a specific T is discarded
***************************************************************/
void collect (T* p)
{
assert (p);
auto d = cast(Cache*) p;
//*p = T.init;
d.next = cache;
cache = d;
}
/***************************************************************
Invoked when clear/reset is called on the host.
This is a shortcut to clear everything allocated.
Should return true if supported, or false otherwise.
False return will cause a series of discrete collect
calls
***************************************************************/
bool collect (bool all = true)
{
if (all)
{
foreach (ref list; lists)
{
GC.removeRange (list.ptr);
free (list.ptr);
list = null;
}
cache = null;
lists = null;
return true;
}
return false;
}
/***************************************************************
set the chunk size and prepopulate with nodes
***************************************************************/
void config (size_t chunks, size_t allocate=0)
{
this.chunks = chunks;
if (allocate)
for (size_t i=allocate/chunks+1; i--;)
newlist();
}
/***************************************************************
list manager
***************************************************************/
private void newlist ()
{
lists.length = lists.length + 1;
auto p = (cast(T*) calloc (chunks, T.sizeof)) [0 .. chunks];
lists[$-1] = p;
GC.addRange (p.ptr, T.sizeof * chunks);
auto head = cache;
foreach (ref node; p)
{
auto d = cast(Cache*) &node;
d.next = head;
head = d;
}
cache = head;
}
}
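/***********************************************************************
minimal usage sketch for ChunkGC, driven directly here for
illustration (containers normally do this internally via the
heap they are configured with). The element type and chunk
size below are arbitrary
***********************************************************************/
unittest
{
ChunkGC!(long) heap;
heap.config (64);               // 64 nodes per chunk
auto node = heap.allocate ();
*node = 42;
heap.collect (node);            // recycle a single node
heap.collect (true);            // release every chunk
}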
/***********************************************************************
Chunk allocator (non GC)
Can save approximately 30% memory for small elements (tested
with integer elements and a chunk size of 1000), and is at
least twice as fast at adding elements when compared to the
default allocator (approximately 50x faster with LinkedList)
Note that, due to GC behaviour, you should not configure
a custom allocator for containers holding anything managed
by the GC. For example, you cannot use a MallocAllocator
to manage a container of classes or strings where those
were allocated by the GC. Once something is owned by a GC
then its lifetime must be managed by GC-managed entities
(otherwise the GC may think there are no live references
and prematurely collect container contents).
You can explicitly manage the collection of keys and values
yourself by providing a reaper delegate. For example, if
you use a MallocAllocator to manage key/value pairs which
are themselves allocated via malloc, then you should also
provide a reaper delegate to collect those as required.
The primary benefit of this allocator is to avoid having the
GC scan the data structures involved. Use ChunkGC instead
where that saving is unwarranted, or where the contained data
is itself GC-managed
***********************************************************************/
struct Chunk(T)
{
static assert (T.sizeof >= (T*).sizeof, "The Chunk allocator can only be used for data sizes of at least " ~ ((T*).sizeof).stringof[0..$-1] ~ " bytes!");
private struct Cache {Cache* next;}
private Cache* cache;
private T[][] lists;
private size_t chunks = 256;
/***************************************************************
allocate a T-sized memory chunk
***************************************************************/
T* allocate ()
{
if (cache is null)
newlist();
auto p = cache;
cache = p.next;
return cast(T*) p;
}
/***************************************************************
allocate an array of T* sized memory chunks
***************************************************************/
T*[] allocate (size_t count)
{
return (cast(T**) calloc(count, (T*).sizeof)) [0 .. count];
}
/***************************************************************
Invoked when a specific T*[] is discarded
***************************************************************/
void collect (T*[] t)
{
if (t.ptr)
free (t.ptr);
}
/***************************************************************
Invoked when a specific T is discarded
***************************************************************/
void collect (T* p)
{
assert (p);
auto d = cast(Cache*) p;
d.next = cache;
cache = d;
}
/***************************************************************
Invoked when clear/reset is called on the host.
This is a shortcut to clear everything allocated.
Should return true if supported, or false otherwise.
False return will cause a series of discrete collect
calls
***************************************************************/
bool collect (bool all = true)
{
if (all)
{
foreach (ref list; lists)
{
free (list.ptr);
list = null;
}
cache = null;
lists = null;
return true;
}
return false;
}
/***************************************************************
set the chunk size and prepopulate with nodes
***************************************************************/
void config (size_t chunks, size_t allocate=0)
{
this.chunks = chunks;
if (allocate)
for (size_t i=allocate/chunks+1; i--;)
newlist();
}
/***************************************************************
list manager
***************************************************************/
private void newlist ()
{
lists.length = lists.length + 1;
auto p = (cast(T*) calloc (chunks, T.sizeof)) [0 .. chunks];
lists[$-1] = p;
auto head = cache;
foreach (ref node; p)
{
auto d = cast(Cache*) &node;
d.next = head;
head = d;
}
cache = head;
}
}
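/***********************************************************************
minimal usage sketch for the malloc-backed Chunk heap. The
payload holds no GC references, in line with the caveat above,
and config() prepopulates the free list up front
***********************************************************************/
unittest
{
Chunk!(double) heap;
heap.config (32, 64);           // 32 nodes per chunk, at least 64 prebuilt
auto node = heap.allocate ();
*node = 3.14;
heap.collect (node);
heap.collect (true);
}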
/***********************************************************************
generic GC allocation manager
Slow and expensive in memory costs
***********************************************************************/
struct Collect(T)
{
/***************************************************************
allocate a T sized memory chunk
***************************************************************/
T* allocate ()
{
return cast(T*) GC.calloc (T.sizeof);
}
/***************************************************************
allocate an array of T* sized memory chunks
***************************************************************/
T*[] allocate (size_t count)
{
return new T*[count];
}
/***************************************************************
Invoked when a specific T is discarded
***************************************************************/
void collect (T* p)
{
if (p)
delete p;
}
/***************************************************************
Invoked when a specific T*[] is discarded
***************************************************************/
void collect (T*[] t)
{
if (t)
delete t;
}
/***************************************************************
Invoked when clear/reset is called on the host.
This is a shortcut to clear everything allocated.
Should return true if supported, or false otherwise.
False return will cause a series of discrete collect
calls
***************************************************************/
bool collect (bool all = true)
{
return false;
}
/***************************************************************
set the chunk size and prepopulate with nodes
***************************************************************/
void config (size_t chunks, int allocate=0)
{
}
}
/***********************************************************************
Malloc allocation manager.
Note that, due to GC behaviour, you should not configure
a custom allocator for containers holding anything managed
by the GC. For example, you cannot use a MallocAllocator
to manage a container of classes or strings where those
were allocated by the GC. Once something is owned by a GC
then its lifetime must be managed by GC-managed entities
(otherwise the GC may think there are no live references
and prematurely collect container contents).
You can explicitly manage the collection of keys and values
yourself by providing a reaper delegate. For example, if
you use a MallocAllocator to manage key/value pairs which
are themselves allocated via malloc, then you should also
provide a reaper delegate to collect those as required.
***********************************************************************/
struct Malloc(T)
{
/***************************************************************
allocate a T sized memory chunk
***************************************************************/
T* allocate ()
{
return cast(T*) calloc (1, T.sizeof);
}
/***************************************************************
allocate an array of T* sized memory chunks
***************************************************************/
T*[] allocate (size_t count)
{
return (cast(T**) calloc(count, (T*).sizeof)) [0 .. count];
}
/***************************************************************
Invoked when a specific T*[] is discarded
***************************************************************/
void collect (T*[] t)
{
if (t.length)
free (t.ptr);
}
/***************************************************************
Invoked when a specific T is discarded
***************************************************************/
void collect (T* p)
{
if (p)
free (p);
}
/***************************************************************
Invoked when clear/reset is called on the host.
This is a shortcut to clear everything allocated.
Should return true if supported, or false otherwise.
False return will cause a series of discrete collect
calls
***************************************************************/
bool collect (bool all = true)
{
return false;
}
/***************************************************************
set the chunk size and prepopulate with nodes
***************************************************************/
void config (size_t chunks, int allocate=0)
{
}
}
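/***********************************************************************
minimal usage sketch for the Malloc heap. The value stored here
is itself malloc allocated, so it is released explicitly before
the node is recycled; this mirrors what a reaper would do when
the heap is used by a container
***********************************************************************/
unittest
{
Malloc!(char*) heap;
auto node = heap.allocate ();
*node = cast(char*) calloc (16, 1);
free (*node);                   // what a reaper would do for the value
heap.collect (node);            // recycle the node itself
}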
version (prior_allocator)
{
/***********************************************************************
GCChunk allocator
Like the Chunk allocator, this allocates elements in chunks,
but allows you to allocate elements that can have GC pointers.
Tests have shown about a 60% speedup when using the GC chunk
allocator for a HashMap!(int, int).
***********************************************************************/
struct GCChunk(T, uint chunkSize)
{
static if(T.sizeof < (void*).sizeof)
{
static assert(false, "Error, allocator for " ~ T.stringof ~ " failed to instantiate");
}
/**
* This is the form used to link recyclable elements together.
*/
struct element
{
element *next;
}
/**
* A chunk of elements
*/
struct chunk
{
/**
* The next chunk in the chain
*/
chunk *next;
/**
* The previous chunk in the chain. Required for O(1) removal
* from the chain.
*/
chunk *prev;
/**
* The linked list of free elements in the chunk. This list is
* amended each time an element in this chunk is freed.
*/
element *freeList;
/**
* The number of free elements in the freeList. Used to determine
* whether this chunk can be given back to the GC
*/
uint numFree;
/**
* The elements in the chunk.
*/
T[chunkSize] elems;
/**
* Allocate a T* from the free list.
*/
T *allocateFromFree()
{
element *x = freeList;
freeList = x.next;
//
// clear the pointer, this clears the element as if it was
// newly allocated
//
x.next = null;
numFree--;
return cast(T*)x;
}
/**
* deallocate a T*, send it to the free list
*
* returns true if this chunk no longer has any used elements.
*/
bool deallocate(T *t)
{
//
// clear the element so the GC does not interpret the element
// as pointing to anything else.
//
memset(t, 0, (T).sizeof);
element *x = cast(element *)t;
x.next = freeList;
freeList = x;
return (++numFree == chunkSize);
}
}
/**
* The chain of used chunks. Used chunks have had all their elements
* allocated at least once.
*/
chunk *used;
/**
* The fresh chunk. This is only used if no elements are available in
* the used chain.
*/
chunk *fresh;
/**
* The next element in the fresh chunk. Because we don't worry about
* the free list in the fresh chunk, we need to keep track of the next
* fresh element to use.
*/
uint nextFresh;
/**
* Allocate a T*
*/
T* allocate()
{
if(used !is null && used.numFree > 0)
{
//
// allocate one element of the used list
//
T* result = used.allocateFromFree();
if(used.numFree == 0)
//
// move used to the end of the list
//
used = used.next;
return result;
}
//
// no used elements are available, allocate out of the fresh
// elements
//
if(fresh is null)
{
fresh = new chunk;
nextFresh = 0;
}
T* result = &fresh.elems[nextFresh];
if(++nextFresh == chunkSize)
{
if(used is null)
{
used = fresh;
fresh.next = fresh;
fresh.prev = fresh;
}
else
{
//
// insert fresh into the used chain
//
fresh.prev = used.prev;
fresh.next = used;
fresh.prev.next = fresh;
fresh.next.prev = fresh;
if(fresh.numFree != 0)
{
//
// can recycle elements from fresh
//
used = fresh;
}
}
fresh = null;
}
return result;
}
T*[] allocate(uint count)
{
return new T*[count];
}
/**
* free a T*
*/
void collect(T* t)
{
//
// need to figure out which chunk t is in
//
chunk *cur = cast(chunk *)GC.addrOf(t);
if(cur !is fresh && cur.numFree == 0)
{
//
// move cur to the front of the used list, it has free nodes
// to be used.
//
if(cur !is used)
{
if(used.numFree != 0)
{
//
// first, unlink cur from its current location
//
cur.prev.next = cur.next;
cur.next.prev = cur.prev;
//
// now, insert cur before used.
//
cur.prev = used.prev;
cur.next = used;
used.prev = cur;
cur.prev.next = cur;
}
used = cur;
}
}
if(cur.deallocate(t))
{
//
// cur no longer has any elements in use, it can be deleted.
//
if(cur.next is cur)
{
//
// only one element, don't free it.
//
}
else
{
//
// remove cur from list
//
if(used is cur)
{
//
// update used pointer
//
used = used.next;
}
cur.next.prev = cur.prev;
cur.prev.next = cur.next;
delete cur;
}
}
}
void collect(T*[] t)
{
if(t)
delete t;
}
/**
* Deallocate all chunks used by this allocator. Depends on the GC to do
* the actual collection
*/
bool collect(bool all = true)
{
used = null;
//
// keep fresh around
//
if(fresh !is null)
{
nextFresh = 0;
fresh.freeList = null;
}
return true;
}
void config (size_t chunks, int allocate=0)
{
}
}
/***********************************************************************
aliases to the correct Default allocator depending on how big
the type is. It makes less sense to use a GCChunk allocator
if the type is going to be larger than a page (currently there
is no way to get the page size from the GC, so we assume 4096
bytes). If not more than one unit can fit into a page, then
we use the default GC allocator.
***********************************************************************/
template DefaultCollect(T)
{
static if((T).sizeof + ((void*).sizeof * 3) + uint.sizeof >= 4095 / 2)
{
alias Collect!(T) DefaultCollect;
}
else
{
alias GCChunk!(T, (4095 - ((void *).sizeof * 3) - uint.sizeof) / (T).sizeof) DefaultCollect;
}
// TODO: see if we can automatically figure out whether a type has
// any pointers in it, this would allow automatic usage of the
// Chunk allocator for added speed.
}
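/***********************************************************************
worked example of the selection above (illustrative, assuming
a 64 bit target): the per-chunk overhead is two chunk pointers,
one element pointer and a uint, i.e. 3 * 8 + 4 = 28 bytes. A
40 byte node thus gets GCChunk!(T, (4095 - 28) / 40), i.e. 101
elements per chunk, while any node whose size plus that
overhead reaches 4095 / 2 = 2047 bytes falls back to the plain
Collect heap
***********************************************************************/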
}
else
template DefaultCollect(T) {alias ChunkGC!(T) DefaultCollect;}
}
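/*******************************************************************************
minimal sketch of how the default heap resolves: unless the module is
built with -version=prior_allocator, DefaultCollect is simply an alias
for the ChunkGC heap. The element type below is arbitrary
*******************************************************************************/
version (prior_allocator) {}
else
{
unittest
{
static assert (is(Container.DefaultCollect!(long) == Container.ChunkGC!(long)));
}
}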