@@ -128,12 +128,12 @@ C_SOURCES = \
 	util/u_linkage.c \
 	util/u_network.c \
 	util/u_math.c \
-	util/u_mempool.c \
 	util/u_mm.c \
 	util/u_rect.c \
 	util/u_ringbuffer.c \
 	util/u_sampler.c \
 	util/u_simple_shaders.c \
+	util/u_slab.c \
 	util/u_snprintf.c \
 	util/u_staging.c \
 	util/u_surface.c \
@@ -175,13 +175,13 @@ source = [
     'util/u_linkage.c',
     'util/u_network.c',
     'util/u_math.c',
-    'util/u_mempool.c',
     'util/u_mm.c',
     'util/u_rect.c',
     'util/u_resource.c',
     'util/u_ringbuffer.c',
     'util/u_sampler.c',
     'util/u_simple_shaders.c',
+    'util/u_slab.c',
     'util/u_snprintf.c',
     'util/u_staging.c',
     'util/u_surface.c',
@@ -20,7 +20,7 @@
  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  * USE OR OTHER DEALINGS IN THE SOFTWARE. */
-#include "util/u_mempool.h"
+#include "util/u_slab.h"
 #include "util/u_math.h"
 #include "util/u_memory.h"
@@ -28,13 +28,13 @@
 #include <stdio.h>
-#define UTIL_MEMPOOL_MAGIC 0xcafe4321
+#define UTIL_SLAB_MAGIC 0xcafe4321
 /* The block is either allocated memory or free space. */
-struct util_mempool_block {
+struct util_slab_block {
    /* The header. */
    /* The first next free block. */
-   struct util_mempool_block *next_free;
+   struct util_slab_block *next_free;
    intptr_t magic;
@@ -42,19 +42,19 @@ struct util_mempool_block {
     * The allocated size is always larger than this structure. */
 };
-static struct util_mempool_block *
-util_mempool_get_block(struct util_mempool *pool,
-                       struct util_mempool_page *page, unsigned index)
+static struct util_slab_block *
+util_slab_get_block(struct util_slab_mempool *pool,
+                    struct util_slab_page *page, unsigned index)
 {
-   return (struct util_mempool_block*)
-          ((uint8_t*)page + sizeof(struct util_mempool_page) +
+   return (struct util_slab_block*)
+          ((uint8_t*)page + sizeof(struct util_slab_page) +
            (pool->block_size * index));
 }
-static void util_mempool_add_new_page(struct util_mempool *pool)
+static void util_slab_add_new_page(struct util_slab_mempool *pool)
 {
-   struct util_mempool_page *page;
-   struct util_mempool_block *block;
+   struct util_slab_page *page;
+   struct util_slab_block *block;
    int i;
    page = MALLOC(pool->page_size);
@@ -62,15 +62,15 @@ static void util_mempool_add_new_page(struct util_mempool *pool)
    /* Mark all blocks as free. */
    for (i = 0; i < pool->num_blocks-1; i++) {
-      block = util_mempool_get_block(pool, page, i);
-      block->next_free = util_mempool_get_block(pool, page, i+1);
-      block->magic = UTIL_MEMPOOL_MAGIC;
+      block = util_slab_get_block(pool, page, i);
+      block->next_free = util_slab_get_block(pool, page, i+1);
+      block->magic = UTIL_SLAB_MAGIC;
    }
-   block = util_mempool_get_block(pool, page, pool->num_blocks-1);
+   block = util_slab_get_block(pool, page, pool->num_blocks-1);
    block->next_free = pool->first_free;
-   block->magic = UTIL_MEMPOOL_MAGIC;
-   pool->first_free = util_mempool_get_block(pool, page, 0);
+   block->magic = UTIL_SLAB_MAGIC;
+   pool->first_free = util_slab_get_block(pool, page, 0);
    pool->num_pages++;
 #if 0
@@ -78,74 +78,74 @@ static void util_mempool_add_new_page(struct util_mempool *pool)
 #endif
 }
-static void *util_mempool_malloc_st(struct util_mempool *pool)
+static void *util_slab_alloc_st(struct util_slab_mempool *pool)
 {
-   struct util_mempool_block *block;
+   struct util_slab_block *block;
    if (!pool->first_free)
-      util_mempool_add_new_page(pool);
+      util_slab_add_new_page(pool);
    block = pool->first_free;
-   assert(block->magic == UTIL_MEMPOOL_MAGIC);
+   assert(block->magic == UTIL_SLAB_MAGIC);
    pool->first_free = block->next_free;
-   return (uint8_t*)block + sizeof(struct util_mempool_block);
+   return (uint8_t*)block + sizeof(struct util_slab_block);
 }
-static void util_mempool_free_st(struct util_mempool *pool, void *ptr)
+static void util_slab_free_st(struct util_slab_mempool *pool, void *ptr)
 {
-   struct util_mempool_block *block =
-      (struct util_mempool_block*)
-      ((uint8_t*)ptr - sizeof(struct util_mempool_block));
+   struct util_slab_block *block =
+      (struct util_slab_block*)
+      ((uint8_t*)ptr - sizeof(struct util_slab_block));
-   assert(block->magic == UTIL_MEMPOOL_MAGIC);
+   assert(block->magic == UTIL_SLAB_MAGIC);
    block->next_free = pool->first_free;
    pool->first_free = block;
 }
-static void *util_mempool_malloc_mt(struct util_mempool *pool)
+static void *util_slab_alloc_mt(struct util_slab_mempool *pool)
 {
    void *mem;
    pipe_mutex_lock(pool->mutex);
-   mem = util_mempool_malloc_st(pool);
+   mem = util_slab_alloc_st(pool);
    pipe_mutex_unlock(pool->mutex);
    return mem;
 }
-static void util_mempool_free_mt(struct util_mempool *pool, void *ptr)
+static void util_slab_free_mt(struct util_slab_mempool *pool, void *ptr)
 {
    pipe_mutex_lock(pool->mutex);
-   util_mempool_free_st(pool, ptr);
+   util_slab_free_st(pool, ptr);
    pipe_mutex_unlock(pool->mutex);
 }
-void util_mempool_set_thread_safety(struct util_mempool *pool,
-                                    enum util_mempool_threading threading)
+void util_slab_set_thread_safety(struct util_slab_mempool *pool,
+                                 enum util_slab_threading threading)
 {
    pool->threading = threading;
    if (threading) {
-      pool->malloc = util_mempool_malloc_mt;
-      pool->free = util_mempool_free_mt;
+      pool->alloc = util_slab_alloc_mt;
+      pool->free = util_slab_free_mt;
    } else {
-      pool->malloc = util_mempool_malloc_st;
-      pool->free = util_mempool_free_st;
+      pool->alloc = util_slab_alloc_st;
+      pool->free = util_slab_free_st;
    }
 }
-void util_mempool_create(struct util_mempool *pool,
-                         unsigned item_size,
-                         unsigned num_blocks,
-                         enum util_mempool_threading threading)
+void util_slab_create(struct util_slab_mempool *pool,
+                      unsigned item_size,
+                      unsigned num_blocks,
+                      enum util_slab_threading threading)
 {
    item_size = align(item_size, sizeof(intptr_t));
    pool->num_pages = 0;
    pool->num_blocks = num_blocks;
-   pool->block_size = sizeof(struct util_mempool_block) + item_size;
+   pool->block_size = sizeof(struct util_slab_block) + item_size;
    pool->block_size = align(pool->block_size, sizeof(intptr_t));
-   pool->page_size = sizeof(struct util_mempool_page) +
+   pool->page_size = sizeof(struct util_slab_page) +
                      num_blocks * pool->block_size;
    pool->first_free = NULL;
@@ -153,12 +153,12 @@ void util_mempool_create(struct util_mempool *pool,
    pipe_mutex_init(pool->mutex);
-   util_mempool_set_thread_safety(pool, threading);
+   util_slab_set_thread_safety(pool, threading);
 }
-void util_mempool_destroy(struct util_mempool *pool)
+void util_slab_destroy(struct util_slab_mempool *pool)
 {
-   struct util_mempool_page *page, *temp;
+   struct util_slab_page *page, *temp;
    foreach_s(page, temp, &pool->list) {
       remove_from_list(page);
@@ -22,66 +22,66 @@
 /**
  * @file
- * Simple memory pool for equally sized memory allocations.
- * util_mempool_malloc and util_mempool_free are in O(1).
+ * Simple slab allocator for equally sized memory allocations.
+ * util_slab_alloc and util_slab_free have time complexity in O(1).
  *
  * Good for allocations which have very low lifetime and are allocated
- * and freed very often. Use a profiler first!
+ * and freed very often. Use a profiler first to know if it's worth using it!
  *
  * Candidates: get_transfer, user_buffer_create
  *
  * @author Marek Olšák
  */
-#ifndef U_MEMPOOL_H
-#define U_MEMPOOL_H
+#ifndef U_SLAB_H
+#define U_SLAB_H
 #include "os/os_thread.h"
-enum util_mempool_threading {
-   UTIL_MEMPOOL_SINGLETHREADED = FALSE,
-   UTIL_MEMPOOL_MULTITHREADED = TRUE
+enum util_slab_threading {
+   UTIL_SLAB_SINGLETHREADED = FALSE,
+   UTIL_SLAB_MULTITHREADED = TRUE
 };
 /* The page is an array of blocks (allocations). */
-struct util_mempool_page {
+struct util_slab_page {
    /* The header (linked-list pointers). */
-   struct util_mempool_page *prev, *next;
+   struct util_slab_page *prev, *next;
    /* Memory after the last member is dedicated to the page itself.
    * The allocated size is always larger than this structure. */
 };
-struct util_mempool {
+struct util_slab_mempool {
    /* Public members. */
-   void *(*malloc)(struct util_mempool *pool);
-   void (*free)(struct util_mempool *pool, void *ptr);
+   void *(*alloc)(struct util_slab_mempool *pool);
+   void (*free)(struct util_slab_mempool *pool, void *ptr);
    /* Private members. */
-   struct util_mempool_block *first_free;
+   struct util_slab_block *first_free;
-   struct util_mempool_page list;
+   struct util_slab_page list;
    unsigned block_size;
    unsigned page_size;
    unsigned num_blocks;
    unsigned num_pages;
-   enum util_mempool_threading threading;
+   enum util_slab_threading threading;
    pipe_mutex mutex;
 };
-void util_mempool_create(struct util_mempool *pool,
-                         unsigned item_size,
-                         unsigned num_blocks,
-                         enum util_mempool_threading threading);
+void util_slab_create(struct util_slab_mempool *pool,
+                      unsigned item_size,
+                      unsigned num_blocks,
+                      enum util_slab_threading threading);
-void util_mempool_destroy(struct util_mempool *pool);
+void util_slab_destroy(struct util_slab_mempool *pool);
-void util_mempool_set_thread_safety(struct util_mempool *pool,
-                                    enum util_mempool_threading threading);
+void util_slab_set_thread_safety(struct util_slab_mempool *pool,
+                                 enum util_slab_threading threading);
-#define util_mempool_malloc(pool) (pool)->malloc(pool)
-#define util_mempool_free(pool, ptr) (pool)->free(pool, ptr)
+#define util_slab_alloc(pool) (pool)->alloc(pool)
+#define util_slab_free(pool, ptr) (pool)->free(pool, ptr)
 #endif
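
For reference, here is a minimal usage sketch of the util_slab API declared in the header above. It is not part of the patch; struct my_item and example() are hypothetical names, and the include path assumes the gallium auxiliary source layout.

/* Illustrative sketch only, not part of this change. */
#include "util/u_slab.h"

struct my_item {
   int foo;
   float bar;
};

static void example(void)
{
   struct util_slab_mempool pool;
   struct my_item *item;

   /* One pool per item type; each page, allocated on demand, holds 64 blocks. */
   util_slab_create(&pool, sizeof(struct my_item), 64,
                    UTIL_SLAB_SINGLETHREADED);

   item = util_slab_alloc(&pool);   /* O(1); expands to (&pool)->alloc(&pool) */
   item->foo = 1;
   item->bar = 2.0f;
   util_slab_free(&pool, item);     /* O(1); puts the block back on the free list */

   /* Switch to the mutex-protected paths once a second thread shows up. */
   util_slab_set_thread_safety(&pool, UTIL_SLAB_MULTITHREADED);

   util_slab_destroy(&pool);
}
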
@@ -44,14 +44,14 @@ static void r300_update_num_contexts(struct r300_screen *r300screen,
         p_atomic_inc(&r300screen->num_contexts);
         if (r300screen->num_contexts > 1)
-            util_mempool_set_thread_safety(&r300screen->pool_buffers,
-                                           UTIL_MEMPOOL_MULTITHREADED);
+            util_slab_set_thread_safety(&r300screen->pool_buffers,
+                                        UTIL_SLAB_MULTITHREADED);
     } else {
         p_atomic_dec(&r300screen->num_contexts);
         if (r300screen->num_contexts <= 1)
-            util_mempool_set_thread_safety(&r300screen->pool_buffers,
-                                           UTIL_MEMPOOL_SINGLETHREADED);
+            util_slab_set_thread_safety(&r300screen->pool_buffers,
+                                        UTIL_SLAB_SINGLETHREADED);
     }
 }
@@ -135,7 +135,7 @@ static void r300_destroy_context(struct pipe_context* context)
         r300->rws->cs_destroy(r300->cs);
     /* XXX: No way to tell if this was initialized or not? */
-    util_mempool_destroy(&r300->pool_transfers);
+    util_slab_destroy(&r300->pool_transfers);
     r300_update_num_contexts(r300->screen, -1);
@@ -421,9 +421,9 @@ struct pipe_context* r300_create_context(struct pipe_screen* screen,
     make_empty_list(&r300->query_list);
-    util_mempool_create(&r300->pool_transfers,
-                        sizeof(struct pipe_transfer), 64,
-                        UTIL_MEMPOOL_SINGLETHREADED);
+    util_slab_create(&r300->pool_transfers,
+                     sizeof(struct pipe_transfer), 64,
+                     UTIL_SLAB_SINGLETHREADED);
     r300->cs = rws->cs_create(rws);
     if (r300->cs == NULL)
@@ -599,7 +599,7 @@ struct r300_context {
     struct u_upload_mgr *upload_vb;
     struct u_upload_mgr *upload_ib;
-    struct util_mempool pool_transfers;
+    struct util_slab_mempool pool_transfers;
     /* Stat counter. */
     uint64_t flush_counter;
@@ -399,7 +399,7 @@ static void r300_destroy_screen(struct pipe_screen* pscreen)
     struct r300_screen* r300screen = r300_screen(pscreen);
     struct r300_winsys_screen *rws = r300_winsys_screen(pscreen);
-    util_mempool_destroy(&r300screen->pool_buffers);
+    util_slab_destroy(&r300screen->pool_buffers);
     if (rws)
         rws->destroy(rws);
@@ -456,9 +456,9 @@ struct pipe_screen* r300_screen_create(struct r300_winsys_screen *rws)
     r300_init_debug(r300screen);
     r300_parse_chipset(&r300screen->caps);
-    util_mempool_create(&r300screen->pool_buffers,
-                        sizeof(struct r300_buffer), 64,
-                        UTIL_MEMPOOL_SINGLETHREADED);
+    util_slab_create(&r300screen->pool_buffers,
+                     sizeof(struct r300_buffer), 64,
+                     UTIL_SLAB_SINGLETHREADED);
     r300screen->rws = rws;
     r300screen->screen.winsys = (struct pipe_winsys*)rws;
@@ -28,7 +28,7 @@
 #include "r300_chipset.h"
-#include "util/u_mempool.h"
+#include "util/u_slab.h"
 #include <stdio.h>
@@ -44,7 +44,7 @@ struct r300_screen {
     struct r300_capabilities caps;
     /* Memory pools. */
-    struct util_mempool pool_buffers;
+    struct util_slab_mempool pool_buffers;
     /** Combination of DBG_xxx flags */
     unsigned debug;
@@ -136,7 +136,7 @@ static void r300_buffer_destroy(struct pipe_screen *screen,
     if (rbuf->buf)
         rws->buffer_reference(rws, &rbuf->buf, NULL);
-    util_mempool_free(&r300screen->pool_buffers, rbuf);
+    util_slab_free(&r300screen->pool_buffers, rbuf);
 }
 static struct pipe_transfer*
@@ -148,7 +148,7 @@ r300_default_get_transfer(struct pipe_context *context,
 {
     struct r300_context *r300 = r300_context(context);
     struct pipe_transfer *transfer =
-        util_mempool_malloc(&r300->pool_transfers);
+        util_slab_alloc(&r300->pool_transfers);
     transfer->resource = resource;
     transfer->sr = sr;
@@ -168,7 +168,7 @@ static void r300_default_transfer_destroy(struct pipe_context *pipe,
                                           struct pipe_transfer *transfer)
 {
     struct r300_context *r300 = r300_context(pipe);
-    util_mempool_free(&r300->pool_transfers, transfer);
+    util_slab_free(&r300->pool_transfers, transfer);
 }
 static void *
@@ -285,7 +285,7 @@ struct pipe_resource *r300_buffer_create(struct pipe_screen *screen,
     struct r300_buffer *rbuf;
     unsigned alignment = 16;
-    rbuf = util_mempool_malloc(&r300screen->pool_buffers);
+    rbuf = util_slab_alloc(&r300screen->pool_buffers);
     rbuf->magic = R300_BUFFER_MAGIC;
@@ -312,7 +312,7 @@ struct pipe_resource *r300_buffer_create(struct pipe_screen *screen,
                                    rbuf->domain);
     if (!rbuf->buf) {
-        util_mempool_free(&r300screen->pool_buffers, rbuf);
+        util_slab_free(&r300screen->pool_buffers, rbuf);
         return NULL;
     }
@@ -327,7 +327,7 @@ struct pipe_resource *r300_user_buffer_create(struct pipe_screen *screen,
     struct r300_screen *r300screen = r300_screen(screen);
     struct r300_buffer *rbuf;
-    rbuf = util_mempool_malloc(&r300screen->pool_buffers);
+    rbuf = util_slab_alloc(&r300screen->pool_buffers);
     rbuf->magic = R300_BUFFER_MAGIC;