Properly link flecs library

2023-11-09 11:38:29 +01:00
parent dc585396c3
commit 8edcf9305c
1392 changed files with 390081 additions and 164 deletions


@@ -0,0 +1,103 @@
/**
* @file datastructures/allocator.c
* @brief Allocator for any size.
*
* Allocators create a block allocator for each requested size.
*/
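/* Usage sketch (illustrative only; flecs_alloc_n/flecs_free_n are used below
* exactly as flecs_strdup/flecs_strfree in this file use them):
*
*   ecs_allocator_t a;
*   flecs_allocator_init(&a);
*
*   // requests rounded up to the same 16-byte aligned size share one block allocator
*   int32_t *values = flecs_alloc_n(&a, int32_t, 8);
*   char *copy = flecs_strdup(&a, "hello");
*
*   flecs_free_n(&a, int32_t, 8, values);
*   flecs_strfree(&a, copy);
*   flecs_allocator_fini(&a);
*/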
#include "../private_api.h"
static
ecs_size_t flecs_allocator_size(
ecs_size_t size)
{
return ECS_ALIGN(size, 16);
}
static
ecs_size_t flecs_allocator_size_hash(
ecs_size_t size)
{
return size >> 4;
}
void flecs_allocator_init(
ecs_allocator_t *a)
{
flecs_ballocator_init_n(&a->chunks, ecs_block_allocator_t,
FLECS_SPARSE_PAGE_SIZE);
flecs_sparse_init_t(&a->sizes, NULL, &a->chunks, ecs_block_allocator_t);
}
void flecs_allocator_fini(
ecs_allocator_t *a)
{
int32_t i = 0, count = flecs_sparse_count(&a->sizes);
for (i = 0; i < count; i ++) {
ecs_block_allocator_t *ba = flecs_sparse_get_dense_t(
&a->sizes, ecs_block_allocator_t, i);
flecs_ballocator_fini(ba);
}
flecs_sparse_fini(&a->sizes);
flecs_ballocator_fini(&a->chunks);
}
ecs_block_allocator_t* flecs_allocator_get(
ecs_allocator_t *a,
ecs_size_t size)
{
ecs_assert(size >= 0, ECS_INTERNAL_ERROR, NULL);
if (!size) {
return NULL;
}
ecs_assert(a != NULL, ECS_INTERNAL_ERROR, NULL);
ecs_assert(size <= flecs_allocator_size(size), ECS_INTERNAL_ERROR, NULL);
size = flecs_allocator_size(size);
ecs_size_t hash = flecs_allocator_size_hash(size);
ecs_block_allocator_t *result = flecs_sparse_get_any_t(&a->sizes,
ecs_block_allocator_t, (uint32_t)hash);
if (!result) {
result = flecs_sparse_ensure_fast_t(&a->sizes,
ecs_block_allocator_t, (uint32_t)hash);
flecs_ballocator_init(result, size);
}
ecs_assert(result->data_size == size, ECS_INTERNAL_ERROR, NULL);
return result;
}
char* flecs_strdup(
ecs_allocator_t *a,
const char* str)
{
ecs_size_t len = ecs_os_strlen(str);
char *result = flecs_alloc_n(a, char, len + 1);
ecs_os_memcpy(result, str, len + 1);
return result;
}
void flecs_strfree(
ecs_allocator_t *a,
char* str)
{
ecs_size_t len = ecs_os_strlen(str);
flecs_free_n(a, char, len + 1, str);
}
void* flecs_dup(
ecs_allocator_t *a,
ecs_size_t size,
const void *src)
{
ecs_block_allocator_t *ba = flecs_allocator_get(a, size);
if (ba) {
void *dst = flecs_balloc(ba);
ecs_os_memcpy(dst, src, size);
return dst;
} else {
return NULL;
}
}


@@ -0,0 +1,121 @@
/**
* @file datastructures/bitset.c
* @brief Bitset data structure.
*
* Simple bitset implementation. The bitset allows for storage of arbitrary
* numbers of bits.
*/
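/* Usage sketch (illustrative):
*
*   ecs_bitset_t bs;
*   flecs_bitset_init(&bs);
*   flecs_bitset_ensure(&bs, 128);          // grow to at least 128 bits
*   flecs_bitset_set(&bs, 10, true);
*   bool v = flecs_bitset_get(&bs, 10);     // true
*   flecs_bitset_remove(&bs, 10);           // swap-remove with the last bit
*   flecs_bitset_fini(&bs);
*/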
#include "../private_api.h"
static
void ensure(
ecs_bitset_t *bs,
ecs_size_t size)
{
if (!bs->size) {
int32_t new_size = ((size - 1) / 64 + 1) * ECS_SIZEOF(uint64_t);
bs->size = ((size - 1) / 64 + 1) * 64;
bs->data = ecs_os_calloc(new_size);
} else if (size > bs->size) {
int32_t prev_size = ((bs->size - 1) / 64 + 1) * ECS_SIZEOF(uint64_t);
bs->size = ((size - 1) / 64 + 1) * 64;
int32_t new_size = ((size - 1) / 64 + 1) * ECS_SIZEOF(uint64_t);
bs->data = ecs_os_realloc(bs->data, new_size);
ecs_os_memset(ECS_OFFSET(bs->data, prev_size), 0, new_size - prev_size);
}
}
void flecs_bitset_init(
ecs_bitset_t* bs)
{
bs->size = 0;
bs->count = 0;
bs->data = NULL;
}
void flecs_bitset_ensure(
ecs_bitset_t *bs,
int32_t count)
{
if (count > bs->count) {
bs->count = count;
ensure(bs, count);
}
}
void flecs_bitset_fini(
ecs_bitset_t *bs)
{
ecs_os_free(bs->data);
bs->data = NULL;
bs->count = 0;
}
void flecs_bitset_addn(
ecs_bitset_t *bs,
int32_t count)
{
int32_t elem = bs->count += count;
ensure(bs, elem);
}
void flecs_bitset_set(
ecs_bitset_t *bs,
int32_t elem,
bool value)
{
ecs_check(elem < bs->count, ECS_INVALID_PARAMETER, NULL);
uint32_t hi = ((uint32_t)elem) >> 6;
uint32_t lo = ((uint32_t)elem) & 0x3F;
uint64_t v = bs->data[hi];
bs->data[hi] = (v & ~((uint64_t)1 << lo)) | ((uint64_t)value << lo);
error:
return;
}
bool flecs_bitset_get(
const ecs_bitset_t *bs,
int32_t elem)
{
ecs_check(elem < bs->count, ECS_INVALID_PARAMETER, NULL);
return !!(bs->data[elem >> 6] & ((uint64_t)1 << ((uint64_t)elem & 0x3F)));
error:
return false;
}
int32_t flecs_bitset_count(
const ecs_bitset_t *bs)
{
return bs->count;
}
void flecs_bitset_remove(
ecs_bitset_t *bs,
int32_t elem)
{
ecs_check(elem < bs->count, ECS_INVALID_PARAMETER, NULL);
int32_t last = bs->count - 1;
bool last_value = flecs_bitset_get(bs, last);
flecs_bitset_set(bs, elem, last_value);
flecs_bitset_set(bs, last, 0);
bs->count --;
error:
return;
}
void flecs_bitset_swap(
ecs_bitset_t *bs,
int32_t elem_a,
int32_t elem_b)
{
ecs_check(elem_a < bs->count, ECS_INVALID_PARAMETER, NULL);
ecs_check(elem_b < bs->count, ECS_INVALID_PARAMETER, NULL);
bool a = flecs_bitset_get(bs, elem_a);
bool b = flecs_bitset_get(bs, elem_b);
flecs_bitset_set(bs, elem_a, b);
flecs_bitset_set(bs, elem_b, a);
error:
return;
}


@@ -0,0 +1,242 @@
/**
* @file datastructures/block_allocator.c
* @brief Block allocator.
*
* A block allocator is a fixed-size allocator that obtains memory in blocks,
* each holding N chunks of the configured size.
*/
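/* Usage sketch (illustrative): all allocations from one block allocator have
* the same size, which is what allows it to hand out chunks from larger
* pre-allocated blocks instead of calling malloc per allocation:
*
*   ecs_block_allocator_t ba;
*   flecs_ballocator_init(&ba, ECS_SIZEOF(double));
*   double *x = flecs_balloc(&ba);      // chunk from the current block
*   double *y = flecs_bcalloc(&ba);     // zero-initialized chunk
*   flecs_bfree(&ba, x);                // chunk returns to the free list
*   flecs_bfree(&ba, y);
*   flecs_ballocator_fini(&ba);         // frees all blocks at once
*/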
#include "../private_api.h"
// #ifdef FLECS_SANITIZE
// #define FLECS_MEMSET_UNINITIALIZED
// #endif
int64_t ecs_block_allocator_alloc_count = 0;
int64_t ecs_block_allocator_free_count = 0;
static
ecs_block_allocator_chunk_header_t* flecs_balloc_block(
ecs_block_allocator_t *allocator)
{
if (!allocator->chunk_size) {
return NULL;
}
ecs_block_allocator_block_t *block =
ecs_os_malloc(ECS_SIZEOF(ecs_block_allocator_block_t) +
allocator->block_size);
ecs_block_allocator_chunk_header_t *first_chunk = ECS_OFFSET(block,
ECS_SIZEOF(ecs_block_allocator_block_t));
block->memory = first_chunk;
if (!allocator->block_tail) {
ecs_assert(!allocator->block_head, ECS_INTERNAL_ERROR, 0);
block->next = NULL;
allocator->block_head = block;
allocator->block_tail = block;
} else {
block->next = NULL;
allocator->block_tail->next = block;
allocator->block_tail = block;
}
ecs_block_allocator_chunk_header_t *chunk = first_chunk;
int32_t i, end;
for (i = 0, end = allocator->chunks_per_block - 1; i < end; ++i) {
chunk->next = ECS_OFFSET(chunk, allocator->chunk_size);
chunk = chunk->next;
}
ecs_os_linc(&ecs_block_allocator_alloc_count);
chunk->next = NULL;
return first_chunk;
}
void flecs_ballocator_init(
ecs_block_allocator_t *ba,
ecs_size_t size)
{
ecs_assert(ba != NULL, ECS_INTERNAL_ERROR, NULL);
ecs_assert(size != 0, ECS_INTERNAL_ERROR, NULL);
ba->data_size = size;
#ifdef FLECS_SANITIZE
size += ECS_SIZEOF(int64_t);
#endif
ba->chunk_size = ECS_ALIGN(size, 16);
ba->chunks_per_block = ECS_MAX(4096 / ba->chunk_size, 1);
ba->block_size = ba->chunks_per_block * ba->chunk_size;
ba->head = NULL;
ba->block_head = NULL;
ba->block_tail = NULL;
}
ecs_block_allocator_t* flecs_ballocator_new(
ecs_size_t size)
{
ecs_block_allocator_t *result = ecs_os_calloc_t(ecs_block_allocator_t);
flecs_ballocator_init(result, size);
return result;
}
void flecs_ballocator_fini(
ecs_block_allocator_t *ba)
{
ecs_assert(ba != NULL, ECS_INTERNAL_ERROR, NULL);
#ifdef FLECS_SANITIZE
ecs_assert(ba->alloc_count == 0, ECS_LEAK_DETECTED,
"(size = %u)", (uint32_t)ba->data_size);
#endif
ecs_block_allocator_block_t *block;
for (block = ba->block_head; block;) {
ecs_block_allocator_block_t *next = block->next;
ecs_os_free(block);
ecs_os_linc(&ecs_block_allocator_free_count);
block = next;
}
ba->block_head = NULL;
}
void flecs_ballocator_free(
ecs_block_allocator_t *ba)
{
flecs_ballocator_fini(ba);
ecs_os_free(ba);
}
void* flecs_balloc(
ecs_block_allocator_t *ba)
{
void *result;
#ifdef FLECS_USE_OS_ALLOC
result = ecs_os_malloc(ba->data_size);
#else
if (!ba) return NULL;
if (!ba->head) {
ba->head = flecs_balloc_block(ba);
}
result = ba->head;
ba->head = ba->head->next;
#ifdef FLECS_SANITIZE
ecs_assert(ba->alloc_count >= 0, ECS_INTERNAL_ERROR, "corrupted allocator");
ba->alloc_count ++;
*(int64_t*)result = ba->chunk_size;
result = ECS_OFFSET(result, ECS_SIZEOF(int64_t));
#endif
#endif
#ifdef FLECS_MEMSET_UNINITIALIZED
ecs_os_memset(result, 0xAA, ba->data_size);
#endif
return result;
}
void* flecs_bcalloc(
ecs_block_allocator_t *ba)
{
#ifdef FLECS_USE_OS_ALLOC
return ecs_os_calloc(ba->data_size);
#endif
if (!ba) return NULL;
void *result = flecs_balloc(ba);
ecs_os_memset(result, 0, ba->data_size);
return result;
}
void flecs_bfree(
ecs_block_allocator_t *ba,
void *memory)
{
#ifdef FLECS_USE_OS_ALLOC
ecs_os_free(memory);
return;
#endif
if (!ba) {
ecs_assert(memory == NULL, ECS_INTERNAL_ERROR, NULL);
return;
}
if (memory == NULL) {
return;
}
#ifdef FLECS_SANITIZE
memory = ECS_OFFSET(memory, -ECS_SIZEOF(int64_t));
if (*(int64_t*)memory != ba->chunk_size) {
ecs_err("chunk %p returned to wrong allocator "
"(chunk = %ub, allocator = %ub)",
memory, *(int64_t*)memory, ba->chunk_size);
ecs_abort(ECS_INTERNAL_ERROR, NULL);
}
ba->alloc_count --;
#endif
ecs_block_allocator_chunk_header_t *chunk = memory;
chunk->next = ba->head;
ba->head = chunk;
ecs_assert(ba->alloc_count >= 0, ECS_INTERNAL_ERROR, "corrupted allocator");
}
void* flecs_brealloc(
ecs_block_allocator_t *dst,
ecs_block_allocator_t *src,
void *memory)
{
void *result;
#ifdef FLECS_USE_OS_ALLOC
result = ecs_os_realloc(memory, dst->data_size);
#else
if (dst == src) {
return memory;
}
result = flecs_balloc(dst);
if (result && src) {
ecs_size_t size = src->data_size;
if (dst->data_size < size) {
size = dst->data_size;
}
ecs_os_memcpy(result, memory, size);
}
flecs_bfree(src, memory);
#endif
#ifdef FLECS_MEMSET_UNINITIALIZED
if (dst && src && (dst->data_size > src->data_size)) {
ecs_os_memset(ECS_OFFSET(result, src->data_size), 0xAA,
dst->data_size - src->data_size);
} else if (dst && !src) {
ecs_os_memset(result, 0xAA, dst->data_size);
}
#endif
return result;
}
void* flecs_bdup(
ecs_block_allocator_t *ba,
void *memory)
{
#ifdef FLECS_USE_OS_ALLOC
if (memory && ba->chunk_size) {
return ecs_os_memdup(memory, ba->data_size);
} else {
return NULL;
}
#endif
void *result = flecs_balloc(ba);
if (result) {
ecs_os_memcpy(result, memory, ba->data_size);
}
return result;
}


@@ -0,0 +1,154 @@
// This is free and unencumbered software released into the public domain under The Unlicense (http://unlicense.org/)
// main repo: https://github.com/wangyi-fudan/wyhash
// author: 王一 Wang Yi
// contributors: Reini Urban, Dietrich Epp, Joshua Haberman, Tommy Ettinger,
// Daniel Lemire, Otmar Ertl, cocowalla, leo-yuriev,
// Diego Barrios Romero, paulie-g, dumblob, Yann Collet, ivte-ms,
// hyb, James Z.M. Gao, easyaspi314 (Devin), TheOneric
/* quick example:
string s="fjsakfdsjkf";
uint64_t hash=wyhash(s.c_str(), s.size(), 0, wyp_);
*/
#include "../private_api.h"
#ifndef WYHASH_CONDOM
//protections that produce different results:
//1: normal valid behavior
//2: extra protection against entropy loss (probability=2^-63), aka. "blind multiplication"
#define WYHASH_CONDOM 1
#endif
#ifndef WYHASH_32BIT_MUM
//0: normal version, slow on 32 bit systems
//1: faster on 32 bit systems but produces different results, incompatible with wy2u0k function
#define WYHASH_32BIT_MUM 0
#endif
//includes
#include <stdint.h>
#include <string.h>
#if defined(_MSC_VER) && defined(_M_X64)
#include <intrin.h>
#pragma intrinsic(_umul128)
#endif
//likely and unlikely macros
#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
#define likely_(x) __builtin_expect(x,1)
#define unlikely_(x) __builtin_expect(x,0)
#else
#define likely_(x) (x)
#define unlikely_(x) (x)
#endif
//128bit multiply function
static inline void wymum_(uint64_t *A, uint64_t *B){
#if(WYHASH_32BIT_MUM)
uint64_t hh=(*A>>32)*(*B>>32), hl=(*A>>32)*(uint32_t)*B, lh=(uint32_t)*A*(*B>>32), ll=(uint64_t)(uint32_t)*A*(uint32_t)*B;
#if(WYHASH_CONDOM>1)
*A^=_wyrot(hl)^hh; *B^=_wyrot(lh)^ll;
#else
*A=_wyrot(hl)^hh; *B=_wyrot(lh)^ll;
#endif
#elif defined(__SIZEOF_INT128__)
__uint128_t r=*A; r*=*B;
#if(WYHASH_CONDOM>1)
*A^=(uint64_t)r; *B^=(uint64_t)(r>>64);
#else
*A=(uint64_t)r; *B=(uint64_t)(r>>64);
#endif
#elif defined(_MSC_VER) && defined(_M_X64)
#if(WYHASH_CONDOM>1)
uint64_t a, b;
a=_umul128(*A,*B,&b);
*A^=a; *B^=b;
#else
*A=_umul128(*A,*B,B);
#endif
#else
uint64_t ha=*A>>32, hb=*B>>32, la=(uint32_t)*A, lb=(uint32_t)*B, hi, lo;
uint64_t rh=ha*hb, rm0=ha*lb, rm1=hb*la, rl=la*lb, t=rl+(rm0<<32), c=t<rl;
lo=t+(rm1<<32); c+=lo<t; hi=rh+(rm0>>32)+(rm1>>32)+c;
#if(WYHASH_CONDOM>1)
*A^=lo; *B^=hi;
#else
*A=lo; *B=hi;
#endif
#endif
}
//multiply and xor mix function, aka MUM
static inline uint64_t wymix_(uint64_t A, uint64_t B){ wymum_(&A,&B); return A^B; }
//endian macros
#ifndef WYHASH_LITTLE_ENDIAN
#if defined(_WIN32) || defined(__LITTLE_ENDIAN__) || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#define WYHASH_LITTLE_ENDIAN 1
#elif defined(__BIG_ENDIAN__) || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#define WYHASH_LITTLE_ENDIAN 0
#else
#warning could not determine endianness! Falling back to little endian.
#define WYHASH_LITTLE_ENDIAN 1
#endif
#endif
//read functions
#if (WYHASH_LITTLE_ENDIAN)
static inline uint64_t wyr8_(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return v;}
static inline uint64_t wyr4_(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return v;}
#elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
static inline uint64_t wyr8_(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return __builtin_bswap64(v);}
static inline uint64_t wyr4_(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return __builtin_bswap32(v);}
#elif defined(_MSC_VER)
static inline uint64_t wyr8_(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return _byteswap_uint64(v);}
static inline uint64_t wyr4_(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return _byteswap_ulong(v);}
#else
static inline uint64_t wyr8_(const uint8_t *p) {
uint64_t v; memcpy(&v, p, 8);
return (((v >> 56) & 0xff)| ((v >> 40) & 0xff00)| ((v >> 24) & 0xff0000)| ((v >> 8) & 0xff000000)| ((v << 8) & 0xff00000000)| ((v << 24) & 0xff0000000000)| ((v << 40) & 0xff000000000000)| ((v << 56) & 0xff00000000000000));
}
static inline uint64_t wyr4_(const uint8_t *p) {
uint32_t v; memcpy(&v, p, 4);
return (((v >> 24) & 0xff)| ((v >> 8) & 0xff00)| ((v << 8) & 0xff0000)| ((v << 24) & 0xff000000));
}
#endif
static inline uint64_t wyr3_(const uint8_t *p, size_t k) { return (((uint64_t)p[0])<<16)|(((uint64_t)p[k>>1])<<8)|p[k-1];}
//wyhash main function
static inline uint64_t wyhash(const void *key, size_t len, uint64_t seed, const uint64_t *secret){
const uint8_t *p=(const uint8_t *)key; seed^=wymix_(seed^secret[0],secret[1]); uint64_t a, b;
if(likely_(len<=16)){
if(likely_(len>=4)){ a=(wyr4_(p)<<32)|wyr4_(p+((len>>3)<<2)); b=(wyr4_(p+len-4)<<32)|wyr4_(p+len-4-((len>>3)<<2)); }
else if(likely_(len>0)){ a=wyr3_(p,len); b=0;}
else a=b=0;
}
else{
size_t i=len;
if(unlikely_(i>48)){
uint64_t see1=seed, see2=seed;
do{
seed=wymix_(wyr8_(p)^secret[1],wyr8_(p+8)^seed);
see1=wymix_(wyr8_(p+16)^secret[2],wyr8_(p+24)^see1);
see2=wymix_(wyr8_(p+32)^secret[3],wyr8_(p+40)^see2);
p+=48; i-=48;
}while(likely_(i>48));
seed^=see1^see2;
}
while(unlikely_(i>16)){ seed=wymix_(wyr8_(p)^secret[1],wyr8_(p+8)^seed); i-=16; p+=16; }
a=wyr8_(p+i-16); b=wyr8_(p+i-8);
}
a^=secret[1]; b^=seed; wymum_(&a,&b);
return wymix_(a^secret[0]^len,b^secret[1]);
}
//the default secret parameters
static const uint64_t wyp_[4] = {0xa0761d6478bd642full, 0xe7037ed1a0b428dbull, 0x8ebc6af09c88c6e3ull, 0x589965cc75374cc3ull};
uint64_t flecs_hash(
const void *data,
ecs_size_t length)
{
return wyhash(data, flecs_ito(size_t, length), 0, wyp_);
}
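/* Usage sketch (illustrative): flecs_hash is the wrapper the rest of the code
* base calls (e.g. the name index); it hashes an arbitrary byte range with
* wyhash and the default secret above:
*
*   const char *name = "Position";
*   uint64_t h = flecs_hash(name, ecs_os_strlen(name));
*/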


@@ -0,0 +1,263 @@
/**
* @file datastructures/hashmap.c
* @brief Hashmap data structure.
*
* The hashmap data structure is built on top of the map data structure. Where
* the map data structure can only work with 64bit key values, the hashmap can
* hash keys of any size, and handles collisions between hashes.
*/
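/* Usage sketch (illustrative; my_key_t, my_hash and my_compare are
* placeholders for a key type plus callbacks matching ecs_hash_value_action_t
* and ecs_compare_action_t — name_index.c shows a real pair operating on
* ecs_hashed_string_t keys):
*
*   ecs_hashmap_t hm;
*   flecs_hashmap_init_(&hm, ECS_SIZEOF(my_key_t), ECS_SIZEOF(uint64_t),
*       my_hash, my_compare, NULL);
*   my_key_t key = {0};
*   flecs_hashmap_result_t r = flecs_hashmap_ensure(&hm, &key, uint64_t);
*   *(uint64_t*)r.value = 42;
*   flecs_hashmap_fini(&hm);
*/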
#include "../private_api.h"
static
int32_t flecs_hashmap_find_key(
const ecs_hashmap_t *map,
ecs_vec_t *keys,
ecs_size_t key_size,
const void *key)
{
int32_t i, count = ecs_vec_count(keys);
void *key_array = ecs_vec_first(keys);
for (i = 0; i < count; i ++) {
void *key_ptr = ECS_OFFSET(key_array, key_size * i);
if (map->compare(key_ptr, key) == 0) {
return i;
}
}
return -1;
}
void flecs_hashmap_init_(
ecs_hashmap_t *map,
ecs_size_t key_size,
ecs_size_t value_size,
ecs_hash_value_action_t hash,
ecs_compare_action_t compare,
ecs_allocator_t *allocator)
{
map->key_size = key_size;
map->value_size = value_size;
map->hash = hash;
map->compare = compare;
flecs_ballocator_init_t(&map->bucket_allocator, ecs_hm_bucket_t);
ecs_map_init(&map->impl, allocator);
}
void flecs_hashmap_fini(
ecs_hashmap_t *map)
{
ecs_allocator_t *a = map->impl.allocator;
ecs_map_iter_t it = ecs_map_iter(&map->impl);
while (ecs_map_next(&it)) {
ecs_hm_bucket_t *bucket = ecs_map_ptr(&it);
ecs_vec_fini(a, &bucket->keys, map->key_size);
ecs_vec_fini(a, &bucket->values, map->value_size);
#ifdef FLECS_SANITIZE
flecs_bfree(&map->bucket_allocator, bucket);
#endif
}
flecs_ballocator_fini(&map->bucket_allocator);
ecs_map_fini(&map->impl);
}
void flecs_hashmap_copy(
ecs_hashmap_t *dst,
const ecs_hashmap_t *src)
{
ecs_assert(dst != src, ECS_INVALID_PARAMETER, NULL);
flecs_hashmap_init_(dst, src->key_size, src->value_size, src->hash,
src->compare, src->impl.allocator);
ecs_map_copy(&dst->impl, &src->impl);
ecs_allocator_t *a = dst->impl.allocator;
ecs_map_iter_t it = ecs_map_iter(&dst->impl);
while (ecs_map_next(&it)) {
ecs_hm_bucket_t **bucket_ptr = ecs_map_ref(&it, ecs_hm_bucket_t);
ecs_hm_bucket_t *src_bucket = bucket_ptr[0];
ecs_hm_bucket_t *dst_bucket = flecs_balloc(&dst->bucket_allocator);
bucket_ptr[0] = dst_bucket;
dst_bucket->keys = ecs_vec_copy(a, &src_bucket->keys, dst->key_size);
dst_bucket->values = ecs_vec_copy(a, &src_bucket->values, dst->value_size);
}
}
void* flecs_hashmap_get_(
const ecs_hashmap_t *map,
ecs_size_t key_size,
const void *key,
ecs_size_t value_size)
{
ecs_assert(map->key_size == key_size, ECS_INVALID_PARAMETER, NULL);
ecs_assert(map->value_size == value_size, ECS_INVALID_PARAMETER, NULL);
uint64_t hash = map->hash(key);
ecs_hm_bucket_t *bucket = ecs_map_get_deref(&map->impl,
ecs_hm_bucket_t, hash);
if (!bucket) {
return NULL;
}
int32_t index = flecs_hashmap_find_key(map, &bucket->keys, key_size, key);
if (index == -1) {
return NULL;
}
return ecs_vec_get(&bucket->values, value_size, index);
}
flecs_hashmap_result_t flecs_hashmap_ensure_(
ecs_hashmap_t *map,
ecs_size_t key_size,
const void *key,
ecs_size_t value_size)
{
ecs_assert(map->key_size == key_size, ECS_INVALID_PARAMETER, NULL);
ecs_assert(map->value_size == value_size, ECS_INVALID_PARAMETER, NULL);
uint64_t hash = map->hash(key);
ecs_hm_bucket_t **r = ecs_map_ensure_ref(&map->impl, ecs_hm_bucket_t, hash);
ecs_hm_bucket_t *bucket = r[0];
if (!bucket) {
bucket = r[0] = flecs_bcalloc(&map->bucket_allocator);
}
ecs_allocator_t *a = map->impl.allocator;
void *value_ptr, *key_ptr;
ecs_vec_t *keys = &bucket->keys;
ecs_vec_t *values = &bucket->values;
if (!keys->array) {
keys = ecs_vec_init(a, &bucket->keys, key_size, 1);
values = ecs_vec_init(a, &bucket->values, value_size, 1);
key_ptr = ecs_vec_append(a, keys, key_size);
value_ptr = ecs_vec_append(a, values, value_size);
ecs_os_memcpy(key_ptr, key, key_size);
ecs_os_memset(value_ptr, 0, value_size);
} else {
int32_t index = flecs_hashmap_find_key(map, keys, key_size, key);
if (index == -1) {
key_ptr = ecs_vec_append(a, keys, key_size);
value_ptr = ecs_vec_append(a, values, value_size);
ecs_os_memcpy(key_ptr, key, key_size);
ecs_os_memset(value_ptr, 0, value_size);
} else {
key_ptr = ecs_vec_get(keys, key_size, index);
value_ptr = ecs_vec_get(values, value_size, index);
}
}
return (flecs_hashmap_result_t){
.key = key_ptr, .value = value_ptr, .hash = hash
};
}
void flecs_hashmap_set_(
ecs_hashmap_t *map,
ecs_size_t key_size,
void *key,
ecs_size_t value_size,
const void *value)
{
void *value_ptr = flecs_hashmap_ensure_(map, key_size, key, value_size).value;
ecs_assert(value_ptr != NULL, ECS_INTERNAL_ERROR, NULL);
ecs_os_memcpy(value_ptr, value, value_size);
}
ecs_hm_bucket_t* flecs_hashmap_get_bucket(
const ecs_hashmap_t *map,
uint64_t hash)
{
ecs_assert(map != NULL, ECS_INTERNAL_ERROR, NULL);
return ecs_map_get_deref(&map->impl, ecs_hm_bucket_t, hash);
}
void flecs_hm_bucket_remove(
ecs_hashmap_t *map,
ecs_hm_bucket_t *bucket,
uint64_t hash,
int32_t index)
{
ecs_vec_remove(&bucket->keys, map->key_size, index);
ecs_vec_remove(&bucket->values, map->value_size, index);
if (!ecs_vec_count(&bucket->keys)) {
ecs_allocator_t *a = map->impl.allocator;
ecs_vec_fini(a, &bucket->keys, map->key_size);
ecs_vec_fini(a, &bucket->values, map->value_size);
ecs_hm_bucket_t *b = ecs_map_remove_ptr(&map->impl, hash);
ecs_assert(bucket == b, ECS_INTERNAL_ERROR, NULL); (void)b;
flecs_bfree(&map->bucket_allocator, bucket);
}
}
void flecs_hashmap_remove_w_hash_(
ecs_hashmap_t *map,
ecs_size_t key_size,
const void *key,
ecs_size_t value_size,
uint64_t hash)
{
ecs_assert(map->key_size == key_size, ECS_INVALID_PARAMETER, NULL);
ecs_assert(map->value_size == value_size, ECS_INVALID_PARAMETER, NULL);
(void)value_size;
ecs_hm_bucket_t *bucket = ecs_map_get_deref(&map->impl,
ecs_hm_bucket_t, hash);
if (!bucket) {
return;
}
int32_t index = flecs_hashmap_find_key(map, &bucket->keys, key_size, key);
if (index == -1) {
return;
}
flecs_hm_bucket_remove(map, bucket, hash, index);
}
void flecs_hashmap_remove_(
ecs_hashmap_t *map,
ecs_size_t key_size,
const void *key,
ecs_size_t value_size)
{
ecs_assert(map->key_size == key_size, ECS_INVALID_PARAMETER, NULL);
ecs_assert(map->value_size == value_size, ECS_INVALID_PARAMETER, NULL);
uint64_t hash = map->hash(key);
flecs_hashmap_remove_w_hash_(map, key_size, key, value_size, hash);
}
flecs_hashmap_iter_t flecs_hashmap_iter(
ecs_hashmap_t *map)
{
return (flecs_hashmap_iter_t){
.it = ecs_map_iter(&map->impl)
};
}
void* flecs_hashmap_next_(
flecs_hashmap_iter_t *it,
ecs_size_t key_size,
void *key_out,
ecs_size_t value_size)
{
int32_t index = ++ it->index;
ecs_hm_bucket_t *bucket = it->bucket;
while (!bucket || it->index >= ecs_vec_count(&bucket->keys)) {
ecs_map_next(&it->it);
bucket = it->bucket = ecs_map_ptr(&it->it);
if (!bucket) {
return NULL;
}
index = it->index = 0;
}
if (key_out) {
*(void**)key_out = ecs_vec_get(&bucket->keys, key_size, index);
}
return ecs_vec_get(&bucket->values, value_size, index);
}


@@ -0,0 +1,468 @@
/**
* @file datastructures/map.c
* @brief Map data structure.
*
* Map data structure for 64bit keys and dynamic payload size.
*/
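/* Usage sketch (illustrative): keys are 64bit integers and values are 64bit
* words (ecs_map_val_t), which typically store either an integer or a pointer:
*
*   ecs_map_t m;
*   ecs_map_init(&m, NULL);                   // NULL: use the OS allocator
*   ecs_map_insert(&m, 10, 100);
*   ecs_map_val_t *v = ecs_map_get(&m, 10);   // *v == 100
*   ecs_map_iter_t it = ecs_map_iter(&m);
*   while (ecs_map_next(&it)) {
*       // ecs_map_key(&it), ecs_map_value(&it)
*   }
*   ecs_map_fini(&m);
*/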
#include "../private_api.h"
/* The load factor used to determine whether the map should rehash. If
* (element_count * ECS_LOAD_FACTOR) / 10 exceeds bucket_count, the bucket count is increased. */
#define ECS_LOAD_FACTOR (12)
#define ECS_BUCKET_END(b, c) ECS_ELEM_T(b, ecs_bucket_t, c)
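/* Compute floor(log2(v)) for v > 0 without a loop: smear the highest set bit
* into all lower bit positions, then map the result to a bit index with a
* multiply and a 32-entry lookup table. */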
static
uint8_t flecs_log2(uint32_t v) {
static const uint8_t log2table[32] =
{0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30,
8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31};
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
return log2table[(uint32_t)(v * 0x07C4ACDDU) >> 27];
}
/* Get bucket count for number of elements */
static
int32_t flecs_map_get_bucket_count(
int32_t count)
{
return flecs_next_pow_of_2((int32_t)(count * ECS_LOAD_FACTOR * 0.1));
}
/* Get bucket shift amount for a given bucket count */
static
uint8_t flecs_map_get_bucket_shift (
int32_t bucket_count)
{
return (uint8_t)(64u - flecs_log2((uint32_t)bucket_count));
}
/* Get bucket index for provided map key */
static
int32_t flecs_map_get_bucket_index(
uint16_t bucket_shift,
ecs_map_key_t key)
{
ecs_assert(bucket_shift != 0, ECS_INTERNAL_ERROR, NULL);
return (int32_t)((11400714819323198485ull * key) >> bucket_shift);
}
/* Get bucket for key */
static
ecs_bucket_t* flecs_map_get_bucket(
const ecs_map_t *map,
ecs_map_key_t key)
{
ecs_assert(map != NULL, ECS_INVALID_PARAMETER, NULL);
int32_t bucket_id = flecs_map_get_bucket_index(map->bucket_shift, key);
ecs_assert(bucket_id < map->bucket_count, ECS_INTERNAL_ERROR, NULL);
return &map->buckets[bucket_id];
}
/* Add element to bucket */
static
ecs_map_val_t* flecs_map_bucket_add(
ecs_block_allocator_t *allocator,
ecs_bucket_t *bucket,
ecs_map_key_t key)
{
ecs_bucket_entry_t *new_entry = flecs_balloc(allocator);
new_entry->key = key;
new_entry->next = bucket->first;
bucket->first = new_entry;
return &new_entry->value;
}
/* Remove element from bucket */
static
ecs_map_val_t flecs_map_bucket_remove(
ecs_map_t *map,
ecs_bucket_t *bucket,
ecs_map_key_t key)
{
ecs_bucket_entry_t *entry;
for (entry = bucket->first; entry; entry = entry->next) {
if (entry->key == key) {
ecs_map_val_t value = entry->value;
ecs_bucket_entry_t **next_holder = &bucket->first;
while(*next_holder != entry) {
next_holder = &(*next_holder)->next;
}
*next_holder = entry->next;
flecs_bfree(map->entry_allocator, entry);
map->count --;
return value;
}
}
return 0;
}
/* Free contents of bucket */
static
void flecs_map_bucket_clear(
ecs_block_allocator_t *allocator,
ecs_bucket_t *bucket)
{
ecs_bucket_entry_t *entry = bucket->first;
while(entry) {
ecs_bucket_entry_t *next = entry->next;
flecs_bfree(allocator, entry);
entry = next;
}
}
/* Get payload pointer for key from bucket */
static
ecs_map_val_t* flecs_map_bucket_get(
ecs_bucket_t *bucket,
ecs_map_key_t key)
{
ecs_bucket_entry_t *entry;
for (entry = bucket->first; entry; entry = entry->next) {
if (entry->key == key) {
return &entry->value;
}
}
return NULL;
}
/* Grow number of buckets */
static
void flecs_map_rehash(
ecs_map_t *map,
int32_t count)
{
count = flecs_next_pow_of_2(count);
if (count < 2) {
count = 2;
}
ecs_assert(count > map->bucket_count, ECS_INTERNAL_ERROR, NULL);
int32_t old_count = map->bucket_count;
ecs_bucket_t *buckets = map->buckets, *b, *end = ECS_BUCKET_END(buckets, old_count);
if (map->allocator) {
map->buckets = flecs_calloc_n(map->allocator, ecs_bucket_t, count);
} else {
map->buckets = ecs_os_calloc_n(ecs_bucket_t, count);
}
map->bucket_count = count;
map->bucket_shift = flecs_map_get_bucket_shift(count);
/* Remap old bucket entries to new buckets */
for (b = buckets; b < end; b++) {
ecs_bucket_entry_t* entry;
for (entry = b->first; entry;) {
ecs_bucket_entry_t* next = entry->next;
int32_t bucket_index = flecs_map_get_bucket_index(
map->bucket_shift, entry->key);
ecs_bucket_t *bucket = &map->buckets[bucket_index];
entry->next = bucket->first;
bucket->first = entry;
entry = next;
}
}
if (map->allocator) {
flecs_free_n(map->allocator, ecs_bucket_t, old_count, buckets);
} else {
ecs_os_free(buckets);
}
}
void ecs_map_params_init(
ecs_map_params_t *params,
ecs_allocator_t *allocator)
{
params->allocator = allocator;
flecs_ballocator_init_t(&params->entry_allocator, ecs_bucket_entry_t);
}
void ecs_map_params_fini(
ecs_map_params_t *params)
{
flecs_ballocator_fini(&params->entry_allocator);
}
void ecs_map_init_w_params(
ecs_map_t *result,
ecs_map_params_t *params)
{
ecs_os_zeromem(result);
result->allocator = params->allocator;
if (params->entry_allocator.chunk_size) {
result->entry_allocator = &params->entry_allocator;
result->shared_allocator = true;
} else {
result->entry_allocator = flecs_ballocator_new_t(ecs_bucket_entry_t);
}
flecs_map_rehash(result, 0);
}
void ecs_map_init_w_params_if(
ecs_map_t *result,
ecs_map_params_t *params)
{
if (!ecs_map_is_init(result)) {
ecs_map_init_w_params(result, params);
}
}
void ecs_map_init(
ecs_map_t *result,
ecs_allocator_t *allocator)
{
ecs_map_init_w_params(result, &(ecs_map_params_t) {
.allocator = allocator
});
}
void ecs_map_init_if(
ecs_map_t *result,
ecs_allocator_t *allocator)
{
if (!ecs_map_is_init(result)) {
ecs_map_init(result, allocator);
}
}
void ecs_map_fini(
ecs_map_t *map)
{
if (!ecs_map_is_init(map)) {
return;
}
bool sanitize = false;
#ifdef FLECS_SANITIZE
sanitize = true;
#endif
/* Free buckets in sanitized mode, so we can replace the allocator with
* regular malloc/free and use asan/valgrind to find memory errors. */
ecs_allocator_t *a = map->allocator;
ecs_block_allocator_t *ea = map->entry_allocator;
if (map->shared_allocator || sanitize) {
ecs_bucket_t *bucket = map->buckets, *end = &bucket[map->bucket_count];
while (bucket != end) {
flecs_map_bucket_clear(ea, bucket);
bucket ++;
}
}
if (ea && !map->shared_allocator) {
flecs_ballocator_free(ea);
map->entry_allocator = NULL;
}
if (a) {
flecs_free_n(a, ecs_bucket_t, map->bucket_count, map->buckets);
} else {
ecs_os_free(map->buckets);
}
map->bucket_shift = 0;
}
ecs_map_val_t* ecs_map_get(
const ecs_map_t *map,
ecs_map_key_t key)
{
return flecs_map_bucket_get(flecs_map_get_bucket(map, key), key);
}
void* ecs_map_get_deref_(
const ecs_map_t *map,
ecs_map_key_t key)
{
ecs_map_val_t* ptr = flecs_map_bucket_get(
flecs_map_get_bucket(map, key), key);
if (ptr) {
return (void*)(uintptr_t)ptr[0];
}
return NULL;
}
void ecs_map_insert(
ecs_map_t *map,
ecs_map_key_t key,
ecs_map_val_t value)
{
ecs_assert(ecs_map_get(map, key) == NULL, ECS_INVALID_PARAMETER, NULL);
int32_t map_count = ++map->count;
int32_t tgt_bucket_count = flecs_map_get_bucket_count(map_count);
int32_t bucket_count = map->bucket_count;
if (tgt_bucket_count > bucket_count) {
flecs_map_rehash(map, tgt_bucket_count);
}
ecs_bucket_t *bucket = flecs_map_get_bucket(map, key);
flecs_map_bucket_add(map->entry_allocator, bucket, key)[0] = value;
}
void* ecs_map_insert_alloc(
ecs_map_t *map,
ecs_size_t elem_size,
ecs_map_key_t key)
{
void *elem = ecs_os_calloc(elem_size);
ecs_map_insert_ptr(map, key, (uintptr_t)elem);
return elem;
}
ecs_map_val_t* ecs_map_ensure(
ecs_map_t *map,
ecs_map_key_t key)
{
ecs_bucket_t *bucket = flecs_map_get_bucket(map, key);
ecs_map_val_t *result = flecs_map_bucket_get(bucket, key);
if (result) {
return result;
}
int32_t map_count = ++map->count;
int32_t tgt_bucket_count = flecs_map_get_bucket_count(map_count);
int32_t bucket_count = map->bucket_count;
if (tgt_bucket_count > bucket_count) {
flecs_map_rehash(map, tgt_bucket_count);
bucket = flecs_map_get_bucket(map, key);
}
ecs_map_val_t* v = flecs_map_bucket_add(map->entry_allocator, bucket, key);
*v = 0;
return v;
}
void* ecs_map_ensure_alloc(
ecs_map_t *map,
ecs_size_t elem_size,
ecs_map_key_t key)
{
ecs_map_val_t *val = ecs_map_ensure(map, key);
if (!*val) {
void *elem = ecs_os_calloc(elem_size);
*val = (ecs_map_val_t)(uintptr_t)elem;
return elem;
} else {
return (void*)(uintptr_t)*val;
}
}
ecs_map_val_t ecs_map_remove(
ecs_map_t *map,
ecs_map_key_t key)
{
return flecs_map_bucket_remove(map, flecs_map_get_bucket(map, key), key);
}
void ecs_map_remove_free(
ecs_map_t *map,
ecs_map_key_t key)
{
ecs_map_val_t val = ecs_map_remove(map, key);
if (val) {
ecs_os_free((void*)(uintptr_t)val);
}
}
void ecs_map_clear(
ecs_map_t *map)
{
ecs_assert(map != NULL, ECS_INVALID_PARAMETER, NULL);
int32_t i, count = map->bucket_count;
for (i = 0; i < count; i ++) {
flecs_map_bucket_clear(map->entry_allocator, &map->buckets[i]);
}
if (map->allocator) {
flecs_free_n(map->allocator, ecs_bucket_t, count, map->buckets);
} else {
ecs_os_free(map->buckets);
}
map->buckets = NULL;
map->bucket_count = 0;
map->count = 0;
flecs_map_rehash(map, 2);
}
ecs_map_iter_t ecs_map_iter(
const ecs_map_t *map)
{
if (ecs_map_is_init(map)) {
return (ecs_map_iter_t){
.map = map,
.bucket = NULL,
.entry = NULL
};
} else {
return (ecs_map_iter_t){ 0 };
}
}
bool ecs_map_next(
ecs_map_iter_t *iter)
{
const ecs_map_t *map = iter->map;
ecs_bucket_t *end;
if (!map || (iter->bucket == (end = &map->buckets[map->bucket_count]))) {
return false;
}
ecs_bucket_entry_t *entry = NULL;
if (!iter->bucket) {
for (iter->bucket = map->buckets;
iter->bucket != end;
++iter->bucket)
{
if (iter->bucket->first) {
entry = iter->bucket->first;
break;
}
}
if (iter->bucket == end) {
return false;
}
} else if ((entry = iter->entry) == NULL) {
do {
++iter->bucket;
if (iter->bucket == end) {
return false;
}
} while(!iter->bucket->first);
entry = iter->bucket->first;
}
ecs_assert(entry != NULL, ECS_INTERNAL_ERROR, NULL);
iter->entry = entry->next;
iter->res = &entry->key;
return true;
}
void ecs_map_copy(
ecs_map_t *dst,
const ecs_map_t *src)
{
if (ecs_map_is_init(dst)) {
ecs_assert(ecs_map_count(dst) == 0, ECS_INVALID_PARAMETER, NULL);
ecs_map_fini(dst);
}
if (!ecs_map_is_init(src)) {
return;
}
ecs_map_init(dst, src->allocator);
ecs_map_iter_t it = ecs_map_iter(src);
while (ecs_map_next(&it)) {
ecs_map_insert(dst, ecs_map_key(&it), ecs_map_value(&it));
}
}


@@ -0,0 +1,238 @@
/**
* @file datastructures/name_index.c
* @brief Data structure for resolving 64bit keys by string (name).
*/
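/* Usage sketch (illustrative): length and hash may be passed as 0, in which
* case they are computed from the name (see flecs_get_hashed_string below):
*
*   ecs_hashmap_t index;
*   flecs_name_index_init(&index, NULL);
*   flecs_name_index_ensure(&index, 42, "MyEntity", 0, 0);
*   uint64_t id = flecs_name_index_find(&index, "MyEntity", 0, 0);  // 42
*   flecs_name_index_remove(&index, 42, flecs_hash("MyEntity", 8));
*   flecs_name_index_fini(&index);
*/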
#include "../private_api.h"
static
uint64_t flecs_name_index_hash(
const void *ptr)
{
const ecs_hashed_string_t *str = ptr;
ecs_assert(str->hash != 0, ECS_INTERNAL_ERROR, NULL);
return str->hash;
}
static
int flecs_name_index_compare(
const void *ptr1,
const void *ptr2)
{
const ecs_hashed_string_t *str1 = ptr1;
const ecs_hashed_string_t *str2 = ptr2;
ecs_size_t len1 = str1->length;
ecs_size_t len2 = str2->length;
if (len1 != len2) {
return (len1 > len2) - (len1 < len2);
}
return ecs_os_memcmp(str1->value, str2->value, len1);
}
void flecs_name_index_init(
ecs_hashmap_t *hm,
ecs_allocator_t *allocator)
{
flecs_hashmap_init_(hm,
ECS_SIZEOF(ecs_hashed_string_t), ECS_SIZEOF(uint64_t),
flecs_name_index_hash,
flecs_name_index_compare,
allocator);
}
void flecs_name_index_init_if(
ecs_hashmap_t *hm,
ecs_allocator_t *allocator)
{
if (!hm->compare) {
flecs_name_index_init(hm, allocator);
}
}
bool flecs_name_index_is_init(
const ecs_hashmap_t *hm)
{
return hm->compare != NULL;
}
ecs_hashmap_t* flecs_name_index_new(
ecs_world_t *world,
ecs_allocator_t *allocator)
{
ecs_hashmap_t *result = flecs_bcalloc(&world->allocators.hashmap);
flecs_name_index_init(result, allocator);
result->hashmap_allocator = &world->allocators.hashmap;
return result;
}
void flecs_name_index_fini(
ecs_hashmap_t *map)
{
flecs_hashmap_fini(map);
}
void flecs_name_index_free(
ecs_hashmap_t *map)
{
if (map) {
flecs_name_index_fini(map);
flecs_bfree(map->hashmap_allocator, map);
}
}
ecs_hashmap_t* flecs_name_index_copy(
ecs_hashmap_t *map)
{
ecs_hashmap_t *result = flecs_bcalloc(map->hashmap_allocator);
result->hashmap_allocator = map->hashmap_allocator;
flecs_hashmap_copy(result, map);
return result;
}
ecs_hashed_string_t flecs_get_hashed_string(
const char *name,
ecs_size_t length,
uint64_t hash)
{
if (!length) {
length = ecs_os_strlen(name);
} else {
ecs_assert(length == ecs_os_strlen(name), ECS_INTERNAL_ERROR, NULL);
}
if (!hash) {
hash = flecs_hash(name, length);
} else {
ecs_assert(hash == flecs_hash(name, length), ECS_INTERNAL_ERROR, NULL);
}
return (ecs_hashed_string_t) {
.value = ECS_CONST_CAST(char*, name),
.length = length,
.hash = hash
};
}
const uint64_t* flecs_name_index_find_ptr(
const ecs_hashmap_t *map,
const char *name,
ecs_size_t length,
uint64_t hash)
{
ecs_hashed_string_t hs = flecs_get_hashed_string(name, length, hash);
ecs_hm_bucket_t *b = flecs_hashmap_get_bucket(map, hs.hash);
if (!b) {
return NULL;
}
ecs_hashed_string_t *keys = ecs_vec_first(&b->keys);
int32_t i, count = ecs_vec_count(&b->keys);
for (i = 0; i < count; i ++) {
ecs_hashed_string_t *key = &keys[i];
ecs_assert(key->hash == hs.hash, ECS_INTERNAL_ERROR, NULL);
if (hs.length != key->length) {
continue;
}
if (!ecs_os_strcmp(name, key->value)) {
uint64_t *e = ecs_vec_get_t(&b->values, uint64_t, i);
ecs_assert(e != NULL, ECS_INTERNAL_ERROR, NULL);
return e;
}
}
return NULL;
}
uint64_t flecs_name_index_find(
const ecs_hashmap_t *map,
const char *name,
ecs_size_t length,
uint64_t hash)
{
const uint64_t *id = flecs_name_index_find_ptr(map, name, length, hash);
if (id) {
return id[0];
}
return 0;
}
void flecs_name_index_remove(
ecs_hashmap_t *map,
uint64_t e,
uint64_t hash)
{
ecs_hm_bucket_t *b = flecs_hashmap_get_bucket(map, hash);
if (!b) {
return;
}
uint64_t *ids = ecs_vec_first(&b->values);
int32_t i, count = ecs_vec_count(&b->values);
for (i = 0; i < count; i ++) {
if (ids[i] == e) {
flecs_hm_bucket_remove(map, b, hash, i);
break;
}
}
}
void flecs_name_index_update_name(
ecs_hashmap_t *map,
uint64_t e,
uint64_t hash,
const char *name)
{
ecs_hm_bucket_t *b = flecs_hashmap_get_bucket(map, hash);
if (!b) {
return;
}
uint64_t *ids = ecs_vec_first(&b->values);
int32_t i, count = ecs_vec_count(&b->values);
for (i = 0; i < count; i ++) {
if (ids[i] == e) {
ecs_hashed_string_t *key = ecs_vec_get_t(
&b->keys, ecs_hashed_string_t, i);
key->value = ECS_CONST_CAST(char*, name);
ecs_assert(ecs_os_strlen(name) == key->length,
ECS_INTERNAL_ERROR, NULL);
ecs_assert(flecs_hash(name, key->length) == key->hash,
ECS_INTERNAL_ERROR, NULL);
return;
}
}
/* Record must already have been in the index */
ecs_abort(ECS_INTERNAL_ERROR, NULL);
}
void flecs_name_index_ensure(
ecs_hashmap_t *map,
uint64_t id,
const char *name,
ecs_size_t length,
uint64_t hash)
{
ecs_check(name != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_hashed_string_t key = flecs_get_hashed_string(name, length, hash);
uint64_t existing = flecs_name_index_find(
map, name, key.length, key.hash);
if (existing) {
if (existing != id) {
ecs_abort(ECS_ALREADY_DEFINED,
"conflicting id registered with name '%s'", name);
}
}
flecs_hashmap_result_t hmr = flecs_hashmap_ensure(
map, &key, uint64_t);
*((uint64_t*)hmr.value) = id;
error:
return;
}


@@ -0,0 +1,68 @@
/**
* @file datastructures/name_index.h
* @brief Data structure for resolving 64bit keys by string (name).
*/
#ifndef FLECS_NAME_INDEX_H
#define FLECS_NAME_INDEX_H
void flecs_name_index_init(
ecs_hashmap_t *hm,
ecs_allocator_t *allocator);
void flecs_name_index_init_if(
ecs_hashmap_t *hm,
ecs_allocator_t *allocator);
bool flecs_name_index_is_init(
const ecs_hashmap_t *hm);
ecs_hashmap_t* flecs_name_index_new(
ecs_world_t *world,
ecs_allocator_t *allocator);
void flecs_name_index_fini(
ecs_hashmap_t *map);
void flecs_name_index_free(
ecs_hashmap_t *map);
ecs_hashmap_t* flecs_name_index_copy(
ecs_hashmap_t *dst);
ecs_hashed_string_t flecs_get_hashed_string(
const char *name,
ecs_size_t length,
uint64_t hash);
const uint64_t* flecs_name_index_find_ptr(
const ecs_hashmap_t *map,
const char *name,
ecs_size_t length,
uint64_t hash);
uint64_t flecs_name_index_find(
const ecs_hashmap_t *map,
const char *name,
ecs_size_t length,
uint64_t hash);
void flecs_name_index_ensure(
ecs_hashmap_t *map,
uint64_t id,
const char *name,
ecs_size_t length,
uint64_t hash);
void flecs_name_index_remove(
ecs_hashmap_t *map,
uint64_t id,
uint64_t hash);
void flecs_name_index_update_name(
ecs_hashmap_t *map,
uint64_t e,
uint64_t hash,
const char *name);
#endif


@@ -0,0 +1,697 @@
/**
* @file datastructures/sparse.c
* @brief Sparse set data structure.
*/
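/* Usage sketch (illustrative): ids handed out by the sparse set embed a
* generation count, so a recycled id can be distinguished from its previous
* incarnation:
*
*   ecs_sparse_t s;
*   flecs_sparse_init(&s, NULL, NULL, ECS_SIZEOF(double));
*   uint64_t id = flecs_sparse_new_id(&s);
*   double *d = flecs_sparse_ensure(&s, ECS_SIZEOF(double), id);
*   *d = 10.5;
*   flecs_sparse_remove(&s, ECS_SIZEOF(double), id);  // bumps the generation
*   bool alive = flecs_sparse_is_alive(&s, id);       // false
*   flecs_sparse_fini(&s);
*/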
#include "../private_api.h"
/** Compute the page index from an id by shifting out the low 12 bits (FLECS_SPARSE_PAGE_BITS) */
#define PAGE(index) ((int32_t)((uint32_t)index >> FLECS_SPARSE_PAGE_BITS))
/** This computes the offset of an index inside a page */
#define OFFSET(index) ((int32_t)index & (FLECS_SPARSE_PAGE_SIZE - 1))
/* Utility to get a pointer to the payload */
#define DATA(array, size, offset) (ECS_OFFSET(array, size * offset))
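/* Example: with the 12-bit pages noted above (FLECS_SPARSE_PAGE_SIZE = 4096),
* index 5000 maps to PAGE(5000) == 1 and OFFSET(5000) == 904. */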
typedef struct ecs_page_t {
int32_t *sparse; /* Sparse array with indices to dense array */
void *data; /* Store data in sparse array to reduce
* indirection and provide stable pointers. */
} ecs_page_t;
static
ecs_page_t* flecs_sparse_page_new(
ecs_sparse_t *sparse,
int32_t page_index)
{
ecs_allocator_t *a = sparse->allocator;
ecs_block_allocator_t *ca = sparse->page_allocator;
int32_t count = ecs_vec_count(&sparse->pages);
ecs_page_t *pages;
if (count <= page_index) {
ecs_vec_set_count_t(a, &sparse->pages, ecs_page_t, page_index + 1);
pages = ecs_vec_first_t(&sparse->pages, ecs_page_t);
ecs_os_memset_n(&pages[count], 0, ecs_page_t, (1 + page_index - count));
} else {
pages = ecs_vec_first_t(&sparse->pages, ecs_page_t);
}
ecs_assert(pages != NULL, ECS_INTERNAL_ERROR, NULL);
ecs_page_t *result = &pages[page_index];
ecs_assert(result->sparse == NULL, ECS_INTERNAL_ERROR, NULL);
ecs_assert(result->data == NULL, ECS_INTERNAL_ERROR, NULL);
/* Initialize sparse array with zeros, as zero is used to indicate that the
* sparse element has not been paired with a dense element. Use zero
* as this means we can take advantage of calloc, which may perform better
* than malloc + memset. */
result->sparse = ca ? flecs_bcalloc(ca)
: ecs_os_calloc_n(int32_t, FLECS_SPARSE_PAGE_SIZE);
/* Initialize the data array with zeros to guarantee that data is
* always initialized. When an entry is removed, data is reset back to
* zero. Initialize now, as this can take advantage of calloc. */
result->data = a ? flecs_calloc(a, sparse->size * FLECS_SPARSE_PAGE_SIZE)
: ecs_os_calloc(sparse->size * FLECS_SPARSE_PAGE_SIZE);
ecs_assert(result->sparse != NULL, ECS_INTERNAL_ERROR, NULL);
ecs_assert(result->data != NULL, ECS_INTERNAL_ERROR, NULL);
return result;
}
static
void flecs_sparse_page_free(
ecs_sparse_t *sparse,
ecs_page_t *page)
{
ecs_allocator_t *a = sparse->allocator;
ecs_block_allocator_t *ca = sparse->page_allocator;
if (ca) {
flecs_bfree(ca, page->sparse);
} else {
ecs_os_free(page->sparse);
}
if (a) {
flecs_free(a, sparse->size * FLECS_SPARSE_PAGE_SIZE, page->data);
} else {
ecs_os_free(page->data);
}
}
static
ecs_page_t* flecs_sparse_get_page(
const ecs_sparse_t *sparse,
int32_t page_index)
{
ecs_assert(page_index >= 0, ECS_INVALID_PARAMETER, NULL);
if (page_index >= ecs_vec_count(&sparse->pages)) {
return NULL;
}
return ecs_vec_get_t(&sparse->pages, ecs_page_t, page_index);
}
static
ecs_page_t* flecs_sparse_get_or_create_page(
ecs_sparse_t *sparse,
int32_t page_index)
{
ecs_page_t *page = flecs_sparse_get_page(sparse, page_index);
if (page && page->sparse) {
return page;
}
return flecs_sparse_page_new(sparse, page_index);
}
static
void flecs_sparse_grow_dense(
ecs_sparse_t *sparse)
{
ecs_vec_append_t(sparse->allocator, &sparse->dense, uint64_t);
}
static
uint64_t flecs_sparse_strip_generation(
uint64_t *index_out)
{
uint64_t index = *index_out;
uint64_t gen = index & ECS_GENERATION_MASK;
/* Make sure there's no junk in the id */
ecs_assert(gen == (index & (0xFFFFFFFFull << 32)),
ECS_INVALID_PARAMETER, NULL);
*index_out -= gen;
return gen;
}
static
void flecs_sparse_assign_index(
ecs_page_t * page,
uint64_t * dense_array,
uint64_t index,
int32_t dense)
{
/* Initialize sparse-dense pair. This assigns the dense index to the sparse
* array, and the sparse index to the dense array. */
page->sparse[OFFSET(index)] = dense;
dense_array[dense] = index;
}
static
uint64_t flecs_sparse_inc_gen(
uint64_t index)
{
/* When an index is deleted, its generation is increased so that we can do
* liveliness checking while recycling ids */
return ECS_GENERATION_INC(index);
}
static
uint64_t flecs_sparse_inc_id(
ecs_sparse_t *sparse)
{
/* Generate a new id. The last issued id could be stored in an external
* variable, such as is the case with the last issued entity id, which is
* stored on the world. */
return ++ sparse->max_id;
}
static
uint64_t flecs_sparse_get_id(
const ecs_sparse_t *sparse)
{
ecs_assert(sparse != NULL, ECS_INTERNAL_ERROR, NULL);
return sparse->max_id;
}
static
void flecs_sparse_set_id(
ecs_sparse_t *sparse,
uint64_t value)
{
/* Sometimes the max id needs to be assigned directly, which typically
* happens when the API calls get_or_create for an id that hasn't been
* issued before. */
sparse->max_id = value;
}
/* Pair dense id with new sparse id */
static
uint64_t flecs_sparse_create_id(
ecs_sparse_t *sparse,
int32_t dense)
{
uint64_t index = flecs_sparse_inc_id(sparse);
flecs_sparse_grow_dense(sparse);
ecs_page_t *page = flecs_sparse_get_or_create_page(sparse, PAGE(index));
ecs_assert(page->sparse[OFFSET(index)] == 0, ECS_INTERNAL_ERROR, NULL);
uint64_t *dense_array = ecs_vec_first_t(&sparse->dense, uint64_t);
flecs_sparse_assign_index(page, dense_array, index, dense);
return index;
}
/* Create new id */
static
uint64_t flecs_sparse_new_index(
ecs_sparse_t *sparse)
{
int32_t dense_count = ecs_vec_count(&sparse->dense);
int32_t count = sparse->count ++;
ecs_assert(count <= dense_count, ECS_INTERNAL_ERROR, NULL);
if (count < dense_count) {
/* If there are unused elements in the dense array, return first */
uint64_t *dense_array = ecs_vec_first_t(&sparse->dense, uint64_t);
return dense_array[count];
} else {
return flecs_sparse_create_id(sparse, count);
}
}
/* Get value from sparse set when it is guaranteed that the value exists. This
* function is used when values are obtained using a dense index */
static
void* flecs_sparse_get_sparse(
const ecs_sparse_t *sparse,
int32_t dense,
uint64_t index)
{
flecs_sparse_strip_generation(&index);
ecs_page_t *page = flecs_sparse_get_page(sparse, PAGE(index));
if (!page || !page->sparse) {
return NULL;
}
int32_t offset = OFFSET(index);
ecs_assert(page != NULL, ECS_INTERNAL_ERROR, NULL);
ecs_assert(dense == page->sparse[offset], ECS_INTERNAL_ERROR, NULL);
(void)dense;
return DATA(page->data, sparse->size, offset);
}
/* Swap dense elements. A swap occurs when an element is removed, or when a
* removed element is recycled. */
static
void flecs_sparse_swap_dense(
ecs_sparse_t * sparse,
ecs_page_t * page_a,
int32_t a,
int32_t b)
{
uint64_t *dense_array = ecs_vec_first_t(&sparse->dense, uint64_t);
uint64_t index_a = dense_array[a];
uint64_t index_b = dense_array[b];
ecs_page_t *page_b = flecs_sparse_get_or_create_page(sparse, PAGE(index_b));
flecs_sparse_assign_index(page_a, dense_array, index_a, b);
flecs_sparse_assign_index(page_b, dense_array, index_b, a);
}
void flecs_sparse_init(
ecs_sparse_t *result,
struct ecs_allocator_t *allocator,
ecs_block_allocator_t *page_allocator,
ecs_size_t size)
{
ecs_assert(result != NULL, ECS_OUT_OF_MEMORY, NULL);
result->size = size;
result->max_id = UINT64_MAX;
result->allocator = allocator;
result->page_allocator = page_allocator;
ecs_vec_init_t(allocator, &result->pages, ecs_page_t, 0);
ecs_vec_init_t(allocator, &result->dense, uint64_t, 1);
result->dense.count = 1;
/* Consume first value in dense array as 0 is used in the sparse array to
* indicate that a sparse element hasn't been paired yet. */
ecs_vec_first_t(&result->dense, uint64_t)[0] = 0;
result->count = 1;
}
void flecs_sparse_clear(
ecs_sparse_t *sparse)
{
ecs_assert(sparse != NULL, ECS_INVALID_PARAMETER, NULL);
int32_t i, count = ecs_vec_count(&sparse->pages);
ecs_page_t *pages = ecs_vec_first_t(&sparse->pages, ecs_page_t);
for (i = 0; i < count; i ++) {
int32_t *indices = pages[i].sparse;
if (indices) {
ecs_os_memset_n(indices, 0, int32_t, FLECS_SPARSE_PAGE_SIZE);
}
}
ecs_vec_set_count_t(sparse->allocator, &sparse->dense, uint64_t, 1);
sparse->count = 1;
sparse->max_id = 0;
}
void flecs_sparse_fini(
ecs_sparse_t *sparse)
{
ecs_assert(sparse != NULL, ECS_INTERNAL_ERROR, NULL);
int32_t i, count = ecs_vec_count(&sparse->pages);
ecs_page_t *pages = ecs_vec_first_t(&sparse->pages, ecs_page_t);
for (i = 0; i < count; i ++) {
flecs_sparse_page_free(sparse, &pages[i]);
}
ecs_vec_fini_t(sparse->allocator, &sparse->pages, ecs_page_t);
ecs_vec_fini_t(sparse->allocator, &sparse->dense, uint64_t);
}
uint64_t flecs_sparse_new_id(
ecs_sparse_t *sparse)
{
ecs_assert(sparse != NULL, ECS_INVALID_PARAMETER, NULL);
return flecs_sparse_new_index(sparse);
}
void* flecs_sparse_add(
ecs_sparse_t *sparse,
ecs_size_t size)
{
ecs_assert(sparse != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(!size || size == sparse->size, ECS_INVALID_PARAMETER, NULL);
uint64_t index = flecs_sparse_new_index(sparse);
ecs_page_t *page = flecs_sparse_get_page(sparse, PAGE(index));
ecs_assert(page != NULL, ECS_INTERNAL_ERROR, NULL);
return DATA(page->data, size, OFFSET(index));
}
uint64_t flecs_sparse_last_id(
const ecs_sparse_t *sparse)
{
ecs_assert(sparse != NULL, ECS_INTERNAL_ERROR, NULL);
uint64_t *dense_array = ecs_vec_first_t(&sparse->dense, uint64_t);
return dense_array[sparse->count - 1];
}
void* flecs_sparse_ensure(
ecs_sparse_t *sparse,
ecs_size_t size,
uint64_t index)
{
ecs_assert(sparse != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(!size || size == sparse->size, ECS_INVALID_PARAMETER, NULL);
ecs_assert(ecs_vec_count(&sparse->dense) > 0, ECS_INTERNAL_ERROR, NULL);
(void)size;
uint64_t gen = flecs_sparse_strip_generation(&index);
ecs_page_t *page = flecs_sparse_get_or_create_page(sparse, PAGE(index));
int32_t offset = OFFSET(index);
int32_t dense = page->sparse[offset];
if (dense) {
/* Check if element is alive. If element is not alive, update indices so
* that the first unused dense element points to the sparse element. */
int32_t count = sparse->count;
if (dense >= count) {
/* If dense is not alive, swap it with the first unused element. */
flecs_sparse_swap_dense(sparse, page, dense, count);
dense = count;
/* First unused element is now last used element */
sparse->count ++;
} else {
/* Dense is already alive, nothing to be done */
}
/* Ensure provided generation matches current. Only allow mismatching
* generations if the provided generation count is 0. This allows for
* using the ensure function in combination with ids that have their
* generation stripped. */
#ifdef FLECS_DEBUG
uint64_t *dense_array = ecs_vec_first_t(&sparse->dense, uint64_t);
ecs_assert(!gen || dense_array[dense] == (index | gen), ECS_INTERNAL_ERROR, NULL);
#endif
} else {
/* Element is not paired yet. Must add a new element to dense array */
flecs_sparse_grow_dense(sparse);
uint64_t *dense_array = ecs_vec_first_t(&sparse->dense, uint64_t);
int32_t dense_count = ecs_vec_count(&sparse->dense) - 1;
int32_t count = sparse->count ++;
/* If index is larger than max id, update max id */
if (index >= flecs_sparse_get_id(sparse)) {
flecs_sparse_set_id(sparse, index);
}
if (count < dense_count) {
/* If there are unused elements in the list, move the first unused
* element to the end of the list */
uint64_t unused = dense_array[count];
ecs_page_t *unused_page = flecs_sparse_get_or_create_page(sparse, PAGE(unused));
flecs_sparse_assign_index(unused_page, dense_array, unused, dense_count);
}
flecs_sparse_assign_index(page, dense_array, index, count);
dense_array[count] |= gen;
}
return DATA(page->data, sparse->size, offset);
}
void* flecs_sparse_ensure_fast(
ecs_sparse_t *sparse,
ecs_size_t size,
uint64_t index_long)
{
ecs_assert(sparse != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(!size || size == sparse->size, ECS_INVALID_PARAMETER, NULL);
ecs_assert(ecs_vec_count(&sparse->dense) > 0, ECS_INTERNAL_ERROR, NULL);
(void)size;
uint32_t index = (uint32_t)index_long;
ecs_page_t *page = flecs_sparse_get_or_create_page(sparse, PAGE(index));
int32_t offset = OFFSET(index);
int32_t dense = page->sparse[offset];
int32_t count = sparse->count;
if (!dense) {
/* Element is not paired yet. Must add a new element to dense array */
sparse->count = count + 1;
if (count == ecs_vec_count(&sparse->dense)) {
flecs_sparse_grow_dense(sparse);
}
uint64_t *dense_array = ecs_vec_first_t(&sparse->dense, uint64_t);
flecs_sparse_assign_index(page, dense_array, index, count);
}
return DATA(page->data, sparse->size, offset);
}
void flecs_sparse_remove(
ecs_sparse_t *sparse,
ecs_size_t size,
uint64_t index)
{
ecs_assert(sparse != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(!size || size == sparse->size, ECS_INVALID_PARAMETER, NULL);
(void)size;
ecs_page_t *page = flecs_sparse_get_page(sparse, PAGE(index));
if (!page || !page->sparse) {
return;
}
uint64_t gen = flecs_sparse_strip_generation(&index);
int32_t offset = OFFSET(index);
int32_t dense = page->sparse[offset];
if (dense) {
uint64_t *dense_array = ecs_vec_first_t(&sparse->dense, uint64_t);
uint64_t cur_gen = dense_array[dense] & ECS_GENERATION_MASK;
if (gen != cur_gen) {
/* Generation doesn't match, which means the provided entity is
* not alive. */
return;
}
/* Increase generation */
dense_array[dense] = index | flecs_sparse_inc_gen(cur_gen);
int32_t count = sparse->count;
if (dense == (count - 1)) {
/* If dense is the last used element, simply decrease count */
sparse->count --;
} else if (dense < count) {
/* If element is alive, move it to unused elements */
flecs_sparse_swap_dense(sparse, page, dense, count - 1);
sparse->count --;
} else {
/* Element is not alive, nothing to be done */
return;
}
/* Reset memory to zero on remove */
void *ptr = DATA(page->data, sparse->size, offset);
ecs_os_memset(ptr, 0, size);
} else {
/* Element is not paired and thus not alive, nothing to be done */
return;
}
}
void flecs_sparse_set_generation(
ecs_sparse_t *sparse,
uint64_t index)
{
ecs_assert(sparse != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_page_t *page = flecs_sparse_get_or_create_page(sparse, PAGE(index));
uint64_t index_w_gen = index;
flecs_sparse_strip_generation(&index);
int32_t offset = OFFSET(index);
int32_t dense = page->sparse[offset];
if (dense) {
/* Increase generation */
ecs_vec_get_t(&sparse->dense, uint64_t, dense)[0] = index_w_gen;
} else {
/* Element is not paired and thus not alive, nothing to be done */
}
}
void* flecs_sparse_get_dense(
const ecs_sparse_t *sparse,
ecs_size_t size,
int32_t dense_index)
{
ecs_assert(sparse != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(!size || size == sparse->size, ECS_INVALID_PARAMETER, NULL);
ecs_assert(dense_index < sparse->count, ECS_INVALID_PARAMETER, NULL);
(void)size;
dense_index ++;
uint64_t *dense_array = ecs_vec_first_t(&sparse->dense, uint64_t);
return flecs_sparse_get_sparse(sparse, dense_index, dense_array[dense_index]);
}
bool flecs_sparse_is_alive(
const ecs_sparse_t *sparse,
uint64_t index)
{
ecs_page_t *page = flecs_sparse_get_page(sparse, PAGE(index));
if (!page || !page->sparse) {
return false;
}
int32_t offset = OFFSET(index);
int32_t dense = page->sparse[offset];
if (!dense || (dense >= sparse->count)) {
return false;
}
uint64_t gen = flecs_sparse_strip_generation(&index);
uint64_t *dense_array = ecs_vec_first_t(&sparse->dense, uint64_t);
uint64_t cur_gen = dense_array[dense] & ECS_GENERATION_MASK;
if (cur_gen != gen) {
return false;
}
ecs_assert(dense == page->sparse[offset], ECS_INTERNAL_ERROR, NULL);
return true;
}
void* flecs_sparse_try(
const ecs_sparse_t *sparse,
ecs_size_t size,
uint64_t index)
{
ecs_assert(sparse != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(!size || size == sparse->size, ECS_INVALID_PARAMETER, NULL);
(void)size;
ecs_page_t *page = flecs_sparse_get_page(sparse, PAGE(index));
if (!page || !page->sparse) {
return NULL;
}
int32_t offset = OFFSET(index);
int32_t dense = page->sparse[offset];
if (!dense || (dense >= sparse->count)) {
return NULL;
}
uint64_t gen = flecs_sparse_strip_generation(&index);
uint64_t *dense_array = ecs_vec_first_t(&sparse->dense, uint64_t);
uint64_t cur_gen = dense_array[dense] & ECS_GENERATION_MASK;
if (cur_gen != gen) {
return NULL;
}
ecs_assert(dense == page->sparse[offset], ECS_INTERNAL_ERROR, NULL);
return DATA(page->data, sparse->size, offset);
}
void* flecs_sparse_get(
const ecs_sparse_t *sparse,
ecs_size_t size,
uint64_t index)
{
ecs_assert(sparse != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(!size || size == sparse->size, ECS_INVALID_PARAMETER, NULL);
(void)size;
ecs_page_t *page = ecs_vec_get_t(&sparse->pages, ecs_page_t, PAGE(index));
int32_t offset = OFFSET(index);
int32_t dense = page->sparse[offset];
ecs_assert(dense != 0, ECS_INTERNAL_ERROR, NULL);
uint64_t gen = flecs_sparse_strip_generation(&index);
uint64_t *dense_array = ecs_vec_first_t(&sparse->dense, uint64_t);
uint64_t cur_gen = dense_array[dense] & ECS_GENERATION_MASK;
(void)cur_gen; (void)gen;
ecs_assert(cur_gen == gen, ECS_INVALID_PARAMETER, NULL);
ecs_assert(dense == page->sparse[offset], ECS_INTERNAL_ERROR, NULL);
ecs_assert(dense < sparse->count, ECS_INTERNAL_ERROR, NULL);
return DATA(page->data, sparse->size, offset);
}
void* flecs_sparse_get_any(
const ecs_sparse_t *sparse,
ecs_size_t size,
uint64_t index)
{
ecs_assert(sparse != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(!size || size == sparse->size, ECS_INVALID_PARAMETER, NULL);
(void)size;
flecs_sparse_strip_generation(&index);
ecs_page_t *page = flecs_sparse_get_page(sparse, PAGE(index));
if (!page || !page->sparse) {
return NULL;
}
int32_t offset = OFFSET(index);
int32_t dense = page->sparse[offset];
bool in_use = dense && (dense < sparse->count);
if (!in_use) {
return NULL;
}
ecs_assert(dense == page->sparse[offset], ECS_INTERNAL_ERROR, NULL);
return DATA(page->data, sparse->size, offset);
}
int32_t flecs_sparse_count(
const ecs_sparse_t *sparse)
{
if (!sparse || !sparse->count) {
return 0;
}
return sparse->count - 1;
}
const uint64_t* flecs_sparse_ids(
const ecs_sparse_t *sparse)
{
ecs_assert(sparse != NULL, ECS_INVALID_PARAMETER, NULL);
if (sparse->dense.array) {
return &(ecs_vec_first_t(&sparse->dense, uint64_t)[1]);
} else {
return NULL;
}
}
void ecs_sparse_init(
ecs_sparse_t *sparse,
ecs_size_t elem_size)
{
flecs_sparse_init(sparse, NULL, NULL, elem_size);
}
void* ecs_sparse_add(
ecs_sparse_t *sparse,
ecs_size_t elem_size)
{
return flecs_sparse_add(sparse, elem_size);
}
uint64_t ecs_sparse_last_id(
const ecs_sparse_t *sparse)
{
return flecs_sparse_last_id(sparse);
}
int32_t ecs_sparse_count(
const ecs_sparse_t *sparse)
{
return flecs_sparse_count(sparse);
}
void* ecs_sparse_get_dense(
const ecs_sparse_t *sparse,
ecs_size_t elem_size,
int32_t index)
{
return flecs_sparse_get_dense(sparse, elem_size, index);
}
void* ecs_sparse_get(
const ecs_sparse_t *sparse,
ecs_size_t elem_size,
uint64_t id)
{
return flecs_sparse_get(sparse, elem_size, id);
}

View File

@@ -0,0 +1,191 @@
/**
* @file datastructures/stack_allocator.c
* @brief Stack allocator.
*
 * The stack allocator enables pushing and popping values to and from a stack,
 * and has lower overhead than a block allocator. A stack allocator is a good
 * fit for small temporary allocations.
 *
 * The stack allocator allocates memory in pages. If the requested size of an
 * allocation exceeds the page size, the OS allocator is used instead.
*/
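/* A minimal usage sketch (illustrative; uses the API declared in
 * stack_allocator.h). Allocations made after a cursor are released by
 * restoring that cursor:
 *
 *   ecs_stack_t stack;
 *   flecs_stack_init(&stack);
 *
 *   ecs_stack_cursor_t *cursor = flecs_stack_get_cursor(&stack);
 *   int32_t *tmp = flecs_stack_alloc_n(&stack, int32_t, 16);
 *   // ... use tmp for temporary work ...
 *   flecs_stack_restore_cursor(&stack, cursor); // releases tmp
 *
 *   flecs_stack_fini(&stack);
 */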
#include "../private_api.h"
#define FLECS_STACK_PAGE_OFFSET ECS_ALIGN(ECS_SIZEOF(ecs_stack_page_t), 16)
int64_t ecs_stack_allocator_alloc_count = 0;
int64_t ecs_stack_allocator_free_count = 0;
static
ecs_stack_page_t* flecs_stack_page_new(uint32_t page_id) {
ecs_stack_page_t *result = ecs_os_malloc(
FLECS_STACK_PAGE_OFFSET + ECS_STACK_PAGE_SIZE);
result->data = ECS_OFFSET(result, FLECS_STACK_PAGE_OFFSET);
result->next = NULL;
result->id = page_id + 1;
ecs_os_linc(&ecs_stack_allocator_alloc_count);
return result;
}
void* flecs_stack_alloc(
ecs_stack_t *stack,
ecs_size_t size,
ecs_size_t align)
{
ecs_stack_page_t *page = stack->tail_page;
if (page == &stack->first && !page->data) {
page->data = ecs_os_malloc(ECS_STACK_PAGE_SIZE);
ecs_os_linc(&ecs_stack_allocator_alloc_count);
}
int16_t sp = flecs_ito(int16_t, ECS_ALIGN(page->sp, align));
int16_t next_sp = flecs_ito(int16_t, sp + size);
void *result = NULL;
if (next_sp > ECS_STACK_PAGE_SIZE) {
if (size > ECS_STACK_PAGE_SIZE) {
result = ecs_os_malloc(size); /* Too large for page */
goto done;
}
if (page->next) {
page = page->next;
} else {
page = page->next = flecs_stack_page_new(page->id);
}
sp = 0;
next_sp = flecs_ito(int16_t, size);
stack->tail_page = page;
}
page->sp = next_sp;
result = ECS_OFFSET(page->data, sp);
done:
#ifdef FLECS_SANITIZE
ecs_os_memset(result, 0xAA, size);
#endif
return result;
}
void* flecs_stack_calloc(
ecs_stack_t *stack,
ecs_size_t size,
ecs_size_t align)
{
void *ptr = flecs_stack_alloc(stack, size, align);
ecs_os_memset(ptr, 0, size);
return ptr;
}
void flecs_stack_free(
void *ptr,
ecs_size_t size)
{
if (size > ECS_STACK_PAGE_SIZE) {
ecs_os_free(ptr);
}
}
ecs_stack_cursor_t* flecs_stack_get_cursor(
ecs_stack_t *stack)
{
ecs_stack_page_t *page = stack->tail_page;
int16_t sp = stack->tail_page->sp;
ecs_stack_cursor_t *result = flecs_stack_alloc_t(stack, ecs_stack_cursor_t);
result->page = page;
result->sp = sp;
result->is_free = false;
#ifdef FLECS_DEBUG
++ stack->cursor_count;
result->owner = stack;
#endif
result->prev = stack->tail_cursor;
stack->tail_cursor = result;
return result;
}
void flecs_stack_restore_cursor(
ecs_stack_t *stack,
ecs_stack_cursor_t *cursor)
{
if (!cursor) {
return;
}
ecs_dbg_assert(stack == cursor->owner, ECS_INVALID_OPERATION, NULL);
ecs_dbg_assert(stack->cursor_count > 0, ECS_DOUBLE_FREE, NULL);
ecs_assert(cursor->is_free == false, ECS_DOUBLE_FREE, NULL);
cursor->is_free = true;
#ifdef FLECS_DEBUG
-- stack->cursor_count;
#endif
/* If the cursor is not the last on the stack, no memory should be freed */
if (cursor != stack->tail_cursor) {
return;
}
/* Iterate freed cursors to know how much memory we can free */
do {
ecs_stack_cursor_t* prev = cursor->prev;
if (!prev || !prev->is_free) {
break; /* Found active cursor, free up until this point */
}
cursor = prev;
} while (cursor);
stack->tail_cursor = cursor->prev;
stack->tail_page = cursor->page;
stack->tail_page->sp = cursor->sp;
/* If the cursor count is zero, the stack should be empty;
 * if the cursor count is non-zero, the stack should not be empty */
ecs_dbg_assert((stack->cursor_count == 0) ==
(stack->tail_page == &stack->first && stack->tail_page->sp == 0),
ECS_LEAK_DETECTED, NULL);
}
void flecs_stack_reset(
ecs_stack_t *stack)
{
ecs_dbg_assert(stack->cursor_count == 0, ECS_LEAK_DETECTED, NULL);
stack->tail_page = &stack->first;
stack->first.sp = 0;
stack->tail_cursor = NULL;
}
void flecs_stack_init(
ecs_stack_t *stack)
{
ecs_os_zeromem(stack);
stack->tail_page = &stack->first;
stack->first.data = NULL;
}
void flecs_stack_fini(
ecs_stack_t *stack)
{
ecs_stack_page_t *next, *cur = &stack->first;
ecs_dbg_assert(stack->cursor_count == 0, ECS_LEAK_DETECTED, NULL);
ecs_assert(stack->tail_page == &stack->first, ECS_LEAK_DETECTED, NULL);
ecs_assert(stack->tail_page->sp == 0, ECS_LEAK_DETECTED, NULL);
do {
next = cur->next;
if (cur == &stack->first) {
if (cur->data) {
ecs_os_linc(&ecs_stack_allocator_free_count);
}
ecs_os_free(cur->data);
} else {
ecs_os_linc(&ecs_stack_allocator_free_count);
ecs_os_free(cur);
}
} while ((cur = next));
}

View File

@@ -0,0 +1,83 @@
/**
* @file datastructures/stack_allocator.h
* @brief Stack allocator.
*/
#ifndef FLECS_STACK_ALLOCATOR_H
#define FLECS_STACK_ALLOCATOR_H
/** Stack allocator for quick allocation of small temporary values */
#define ECS_STACK_PAGE_SIZE (4096)
typedef struct ecs_stack_page_t {
void *data;
struct ecs_stack_page_t *next;
int16_t sp;
uint32_t id;
} ecs_stack_page_t;
typedef struct ecs_stack_t {
ecs_stack_page_t first;
ecs_stack_page_t *tail_page;
ecs_stack_cursor_t *tail_cursor;
#ifdef FLECS_DEBUG
int32_t cursor_count;
#endif
} ecs_stack_t;
FLECS_DBG_API
void flecs_stack_init(
ecs_stack_t *stack);
FLECS_DBG_API
void flecs_stack_fini(
ecs_stack_t *stack);
FLECS_DBG_API
void* flecs_stack_alloc(
ecs_stack_t *stack,
ecs_size_t size,
ecs_size_t align);
#define flecs_stack_alloc_t(stack, T)\
flecs_stack_alloc(stack, ECS_SIZEOF(T), ECS_ALIGNOF(T))
#define flecs_stack_alloc_n(stack, T, count)\
flecs_stack_alloc(stack, ECS_SIZEOF(T) * count, ECS_ALIGNOF(T))
FLECS_DBG_API
void* flecs_stack_calloc(
ecs_stack_t *stack,
ecs_size_t size,
ecs_size_t align);
#define flecs_stack_calloc_t(stack, T)\
flecs_stack_calloc(stack, ECS_SIZEOF(T), ECS_ALIGNOF(T))
#define flecs_stack_calloc_n(stack, T, count)\
flecs_stack_calloc(stack, ECS_SIZEOF(T) * count, ECS_ALIGNOF(T))
FLECS_DBG_API
void flecs_stack_free(
void *ptr,
ecs_size_t size);
#define flecs_stack_free_t(ptr, T)\
flecs_stack_free(ptr, ECS_SIZEOF(T))
#define flecs_stack_free_n(ptr, T, count)\
flecs_stack_free(ptr, ECS_SIZEOF(T) * count)
void flecs_stack_reset(
ecs_stack_t *stack);
FLECS_DBG_API
ecs_stack_cursor_t* flecs_stack_get_cursor(
ecs_stack_t *stack);
FLECS_DBG_API
void flecs_stack_restore_cursor(
ecs_stack_t *stack,
ecs_stack_cursor_t *cursor);
#endif

View File

@@ -0,0 +1,836 @@
/**
* @file datastructures/strbuf.c
* @brief Utility for constructing strings.
*
 * A buffer builds up a list of elements, each of which can be up to N bytes
 * large. While appending, data is added to these elements; more elements are
 * added on the fly when needed. When an application calls ecs_strbuf_get, all
 * elements are combined into one string and the element administration is freed.
 *
 * This approach avoids reallocating, and therefore copying, large blocks of
 * memory when appending to a large buffer. A buffer preallocates some memory
 * for the element overhead, so that for small strings there is hardly any
 * overhead, while for large strings the overhead is offset by the reduced time
 * spent on copying memory.
*
* The functionality provided by strbuf is similar to std::stringstream.
*/
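/* Example usage (illustrative sketch; uses the public ecs_strbuf_* API
 * defined below):
 *
 *   ecs_strbuf_t buf = ECS_STRBUF_INIT;
 *   ecs_strbuf_appendlit(&buf, "values: ");
 *   ecs_strbuf_list_push(&buf, "[", ", ");
 *   ecs_strbuf_list_append(&buf, "%d", 10);
 *   ecs_strbuf_list_append(&buf, "%d", 20);
 *   ecs_strbuf_list_pop(&buf, "]");
 *   char *str = ecs_strbuf_get(&buf); // "values: [10, 20]", owned by caller
 *   ecs_os_free(str);
 */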
#include "../private_api.h"
#include <math.h>
/**
* stm32tpl -- STM32 C++ Template Peripheral Library
* Visit https://github.com/antongus/stm32tpl for new versions
*
* Copyright (c) 2011-2020 Anton B. Gusev aka AHTOXA
*/
#define MAX_PRECISION (10)
#define EXP_THRESHOLD (3)
#define INT64_MAX_F ((double)INT64_MAX)
static const double rounders[MAX_PRECISION + 1] =
{
0.5, // 0
0.05, // 1
0.005, // 2
0.0005, // 3
0.00005, // 4
0.000005, // 5
0.0000005, // 6
0.00000005, // 7
0.000000005, // 8
0.0000000005, // 9
0.00000000005 // 10
};
static
char* flecs_strbuf_itoa(
char *buf,
int64_t v)
{
char *ptr = buf;
char * p1;
char c;
if (!v) {
*ptr++ = '0';
} else {
if (v < 0) {
ptr[0] = '-';
ptr ++;
v *= -1;
}
char *p = ptr;
while (v) {
int64_t vdiv = v / 10;
int64_t vmod = v - (vdiv * 10);
p[0] = (char)('0' + vmod);
p ++;
v = vdiv;
}
p1 = p;
while (p > ptr) {
c = *--p;
*p = *ptr;
*ptr++ = c;
}
ptr = p1;
}
return ptr;
}
static
int flecs_strbuf_ftoa(
ecs_strbuf_t *out,
double f,
int precision,
char nan_delim)
{
char buf[64];
char * ptr = buf;
char c;
int64_t intPart;
int64_t exp = 0;
if (ecs_os_isnan(f)) {
if (nan_delim) {
ecs_strbuf_appendch(out, nan_delim);
ecs_strbuf_appendlit(out, "NaN");
return ecs_strbuf_appendch(out, nan_delim);
} else {
return ecs_strbuf_appendlit(out, "NaN");
}
}
if (ecs_os_isinf(f)) {
if (nan_delim) {
ecs_strbuf_appendch(out, nan_delim);
ecs_strbuf_appendlit(out, "Inf");
return ecs_strbuf_appendch(out, nan_delim);
} else {
return ecs_strbuf_appendlit(out, "Inf");
}
}
if (precision > MAX_PRECISION) {
precision = MAX_PRECISION;
}
if (f < 0) {
f = -f;
*ptr++ = '-';
}
if (precision < 0) {
if (f < 1.0) precision = 6;
else if (f < 10.0) precision = 5;
else if (f < 100.0) precision = 4;
else if (f < 1000.0) precision = 3;
else if (f < 10000.0) precision = 2;
else if (f < 100000.0) precision = 1;
else precision = 0;
}
if (precision) {
f += rounders[precision];
}
/* Make sure the number can be represented as a 64-bit int; increase exp to compensate */
while (f > INT64_MAX_F) {
f /= 1000 * 1000 * 1000;
exp += 9;
}
intPart = (int64_t)f;
f -= (double)intPart;
ptr = flecs_strbuf_itoa(ptr, intPart);
if (precision) {
*ptr++ = '.';
while (precision--) {
f *= 10.0;
c = (char)f;
*ptr++ = (char)('0' + c);
f -= c;
}
}
*ptr = 0;
/* Remove trailing 0s */
while ((&ptr[-1] != buf) && (ptr[-1] == '0')) {
ptr[-1] = '\0';
ptr --;
}
if (ptr != buf && ptr[-1] == '.') {
ptr[-1] = '\0';
ptr --;
}
/* If 0s before . exceed threshold, convert to exponent to save space
* without losing precision. */
char *cur = ptr;
while ((&cur[-1] != buf) && (cur[-1] == '0')) {
cur --;
}
if (exp || ((ptr - cur) > EXP_THRESHOLD)) {
cur[0] = '\0';
exp += (ptr - cur);
ptr = cur;
}
if (exp) {
char *p1 = &buf[1];
if (nan_delim) {
ecs_os_memmove(buf + 1, buf, 1 + (ptr - buf));
buf[0] = nan_delim;
p1 ++;
}
/* Make sure that exp starts after first character */
c = p1[0];
if (c) {
p1[0] = '.';
do {
char t = (++p1)[0];
if (t == '.') {
exp ++;
p1 --;
break;
}
p1[0] = c;
c = t;
exp ++;
} while (c);
ptr = p1 + 1;
} else {
ptr = p1;
}
ptr[0] = 'e';
ptr = flecs_strbuf_itoa(ptr + 1, exp);
if (nan_delim) {
ptr[0] = nan_delim;
ptr ++;
}
ptr[0] = '\0';
}
return ecs_strbuf_appendstrn(out, buf, (int32_t)(ptr - buf));
}
/* Add an extra element to the buffer */
static
void flecs_strbuf_grow(
ecs_strbuf_t *b)
{
/* Allocate new element */
ecs_strbuf_element_embedded *e = ecs_os_malloc_t(ecs_strbuf_element_embedded);
b->size += b->current->pos;
b->current->next = (ecs_strbuf_element*)e;
b->current = (ecs_strbuf_element*)e;
b->elementCount ++;
e->super.buffer_embedded = true;
e->super.buf = e->buf;
e->super.pos = 0;
e->super.next = NULL;
}
/* Add an extra dynamic element */
static
void flecs_strbuf_grow_str(
ecs_strbuf_t *b,
const char *str,
char *alloc_str,
int32_t size)
{
/* Allocate new element */
ecs_strbuf_element_str *e = ecs_os_malloc_t(ecs_strbuf_element_str);
b->size += b->current->pos;
b->current->next = (ecs_strbuf_element*)e;
b->current = (ecs_strbuf_element*)e;
b->elementCount ++;
e->super.buffer_embedded = false;
e->super.pos = size ? size : (int32_t)ecs_os_strlen(str);
e->super.next = NULL;
e->super.buf = ECS_CONST_CAST(char*, str);
e->alloc_str = alloc_str;
}
static
char* flecs_strbuf_ptr(
ecs_strbuf_t *b)
{
if (b->buf) {
return &b->buf[b->current->pos];
} else {
return &b->current->buf[b->current->pos];
}
}
/* Compute the amount of space left in the current element */
static
int32_t flecs_strbuf_memLeftInCurrentElement(
ecs_strbuf_t *b)
{
if (b->current->buffer_embedded) {
return ECS_STRBUF_ELEMENT_SIZE - b->current->pos;
} else {
return 0;
}
}
/* Compute the amount of space left */
static
int32_t flecs_strbuf_memLeft(
ecs_strbuf_t *b)
{
if (b->max) {
return b->max - b->size - b->current->pos;
} else {
return INT_MAX;
}
}
static
void flecs_strbuf_init(
ecs_strbuf_t *b)
{
/* Initialize buffer structure only once */
if (!b->elementCount) {
b->size = 0;
b->firstElement.super.next = NULL;
b->firstElement.super.pos = 0;
b->firstElement.super.buffer_embedded = true;
b->firstElement.super.buf = b->firstElement.buf;
b->elementCount ++;
b->current = (ecs_strbuf_element*)&b->firstElement;
}
}
/* Append a format string to a buffer */
static
bool flecs_strbuf_vappend(
ecs_strbuf_t *b,
const char* str,
va_list args)
{
bool result = true;
va_list arg_cpy;
if (!str) {
return result;
}
flecs_strbuf_init(b);
int32_t memLeftInElement = flecs_strbuf_memLeftInCurrentElement(b);
int32_t memLeft = flecs_strbuf_memLeft(b);
if (!memLeft) {
return false;
}
/* Compute the memory required to add the string to the buffer. If the user
 * provided a buffer, use the space left in that buffer; otherwise use the
 * space left in the current element. */
int32_t max_copy = b->buf ? memLeft : memLeftInElement;
int32_t memRequired;
va_copy(arg_cpy, args);
memRequired = vsnprintf(
flecs_strbuf_ptr(b), (size_t)(max_copy + 1), str, args);
ecs_assert(memRequired != -1, ECS_INTERNAL_ERROR, NULL);
if (memRequired <= memLeftInElement) {
/* Element was large enough to fit string */
b->current->pos += memRequired;
} else if ((memRequired - memLeftInElement) < memLeft) {
/* If string is a format string, a new buffer of size memRequired is
* needed to re-evaluate the format string and only use the part that
* wasn't already copied to the previous element */
if (memRequired <= ECS_STRBUF_ELEMENT_SIZE) {
/* Resulting string fits in standard-size buffer. Note that the
* entire string needs to fit, not just the remainder, as the
* format string cannot be partially evaluated */
flecs_strbuf_grow(b);
/* Copy entire string to new buffer */
ecs_os_vsprintf(flecs_strbuf_ptr(b), str, arg_cpy);
/* Ignore the part of the string that was copied into the
* previous buffer. The string copied into the new buffer could
* be memmoved so that only the remainder is left, but that is
* most likely more expensive than just keeping the entire
* string. */
/* Update position in buffer */
b->current->pos += memRequired;
} else {
/* Resulting string does not fit in standard-size buffer.
* Allocate a new buffer that can hold the entire string. */
char *dst = ecs_os_malloc(memRequired + 1);
ecs_os_vsprintf(dst, str, arg_cpy);
flecs_strbuf_grow_str(b, dst, dst, memRequired);
}
}
va_end(arg_cpy);
return flecs_strbuf_memLeft(b) > 0;
}
static
bool flecs_strbuf_appendstr(
ecs_strbuf_t *b,
const char* str,
int n)
{
flecs_strbuf_init(b);
int32_t memLeftInElement = flecs_strbuf_memLeftInCurrentElement(b);
int32_t memLeft = flecs_strbuf_memLeft(b);
if (memLeft <= 0) {
return false;
}
/* Never write more than what the buffer can store */
if (n > memLeft) {
n = memLeft;
}
if (n <= memLeftInElement) {
/* Element was large enough to fit string */
ecs_os_strncpy(flecs_strbuf_ptr(b), str, n);
b->current->pos += n;
} else if ((n - memLeftInElement) < memLeft) {
ecs_os_strncpy(flecs_strbuf_ptr(b), str, memLeftInElement);
/* Element was not large enough, but buffer still has space */
b->current->pos += memLeftInElement;
n -= memLeftInElement;
/* Current element was too small, copy remainder into new element */
if (n < ECS_STRBUF_ELEMENT_SIZE) {
/* A standard-size buffer is large enough for the new string */
flecs_strbuf_grow(b);
/* Copy the remainder to the new buffer */
if (n) {
/* If a max number of characters to write is set, only a
* subset of the string should be copied to the buffer */
ecs_os_strncpy(
flecs_strbuf_ptr(b),
str + memLeftInElement,
(size_t)n);
} else {
ecs_os_strcpy(flecs_strbuf_ptr(b), str + memLeftInElement);
}
/* Update to number of characters copied to new buffer */
b->current->pos += n;
} else {
/* String doesn't fit in a single element, strdup */
char *remainder = ecs_os_strdup(str + memLeftInElement);
flecs_strbuf_grow_str(b, remainder, remainder, n);
}
} else {
/* Buffer max has been reached */
return false;
}
return flecs_strbuf_memLeft(b) > 0;
}
static
bool flecs_strbuf_appendch(
ecs_strbuf_t *b,
char ch)
{
flecs_strbuf_init(b);
int32_t memLeftInElement = flecs_strbuf_memLeftInCurrentElement(b);
int32_t memLeft = flecs_strbuf_memLeft(b);
if (memLeft <= 0) {
return false;
}
if (memLeftInElement) {
/* Element was large enough to fit string */
flecs_strbuf_ptr(b)[0] = ch;
b->current->pos ++;
} else {
flecs_strbuf_grow(b);
flecs_strbuf_ptr(b)[0] = ch;
b->current->pos ++;
}
return flecs_strbuf_memLeft(b) > 0;
}
bool ecs_strbuf_vappend(
ecs_strbuf_t *b,
const char* fmt,
va_list args)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(fmt != NULL, ECS_INVALID_PARAMETER, NULL);
return flecs_strbuf_vappend(b, fmt, args);
}
bool ecs_strbuf_append(
ecs_strbuf_t *b,
const char* fmt,
...)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(fmt != NULL, ECS_INVALID_PARAMETER, NULL);
va_list args;
va_start(args, fmt);
bool result = flecs_strbuf_vappend(b, fmt, args);
va_end(args);
return result;
}
bool ecs_strbuf_appendstrn(
ecs_strbuf_t *b,
const char* str,
int32_t len)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(str != NULL, ECS_INVALID_PARAMETER, NULL);
return flecs_strbuf_appendstr(b, str, len);
}
bool ecs_strbuf_appendch(
ecs_strbuf_t *b,
char ch)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
return flecs_strbuf_appendch(b, ch);
}
bool ecs_strbuf_appendint(
ecs_strbuf_t *b,
int64_t v)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
char numbuf[32];
char *ptr = flecs_strbuf_itoa(numbuf, v);
return ecs_strbuf_appendstrn(b, numbuf, flecs_ito(int32_t, ptr - numbuf));
}
bool ecs_strbuf_appendflt(
ecs_strbuf_t *b,
double flt,
char nan_delim)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
return flecs_strbuf_ftoa(b, flt, 10, nan_delim);
}
bool ecs_strbuf_appendbool(
ecs_strbuf_t *buffer,
bool v)
{
ecs_assert(buffer != NULL, ECS_INVALID_PARAMETER, NULL);
if (v) {
return ecs_strbuf_appendlit(buffer, "true");
} else {
return ecs_strbuf_appendlit(buffer, "false");
}
}
bool ecs_strbuf_appendstr_zerocpy(
ecs_strbuf_t *b,
char* str)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(str != NULL, ECS_INVALID_PARAMETER, NULL);
flecs_strbuf_init(b);
flecs_strbuf_grow_str(b, str, str, 0);
return true;
}
bool ecs_strbuf_appendstr_zerocpyn(
ecs_strbuf_t *b,
char *str,
int32_t n)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(str != NULL, ECS_INVALID_PARAMETER, NULL);
flecs_strbuf_init(b);
flecs_strbuf_grow_str(b, str, str, n);
return true;
}
bool ecs_strbuf_appendstr_zerocpy_const(
ecs_strbuf_t *b,
const char* str)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(str != NULL, ECS_INVALID_PARAMETER, NULL);
/* Removes const modifier, but the logic prevents modifying or freeing the string */
flecs_strbuf_init(b);
flecs_strbuf_grow_str(b, str, NULL, 0);
return true;
}
bool ecs_strbuf_appendstr_zerocpyn_const(
ecs_strbuf_t *b,
const char *str,
int32_t n)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(str != NULL, ECS_INVALID_PARAMETER, NULL);
/* Removes const modifier, but the logic prevents modifying or freeing the string */
flecs_strbuf_init(b);
flecs_strbuf_grow_str(b, str, NULL, n);
return true;
}
bool ecs_strbuf_appendstr(
ecs_strbuf_t *b,
const char* str)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(str != NULL, ECS_INVALID_PARAMETER, NULL);
return flecs_strbuf_appendstr(b, str, ecs_os_strlen(str));
}
bool ecs_strbuf_mergebuff(
ecs_strbuf_t *dst_buffer,
ecs_strbuf_t *src_buffer)
{
if (src_buffer->elementCount) {
if (src_buffer->buf) {
return ecs_strbuf_appendstrn(
dst_buffer, src_buffer->buf, src_buffer->length);
} else {
ecs_strbuf_element *e = (ecs_strbuf_element*)&src_buffer->firstElement;
/* Copy first element as it is inlined in the src buffer */
ecs_strbuf_appendstrn(dst_buffer, e->buf, e->pos);
while ((e = e->next)) {
dst_buffer->current->next = ecs_os_malloc(sizeof(ecs_strbuf_element));
*dst_buffer->current->next = *e;
}
}
*src_buffer = ECS_STRBUF_INIT;
}
return true;
}
char* ecs_strbuf_get(
ecs_strbuf_t *b)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
char* result = NULL;
if (b->elementCount) {
if (b->buf) {
b->buf[b->current->pos] = '\0';
result = ecs_os_strdup(b->buf);
} else {
void *next = NULL;
int32_t len = b->size + b->current->pos + 1;
ecs_strbuf_element *e = (ecs_strbuf_element*)&b->firstElement;
result = ecs_os_malloc(len);
char* ptr = result;
do {
ecs_os_memcpy(ptr, e->buf, e->pos);
ptr += e->pos;
next = e->next;
if (e != &b->firstElement.super) {
if (!e->buffer_embedded) {
ecs_os_free(((ecs_strbuf_element_str*)e)->alloc_str);
}
ecs_os_free(e);
}
} while ((e = next));
result[len - 1] = '\0';
b->length = len;
}
} else {
result = NULL;
}
b->elementCount = 0;
b->content = result;
return result;
}
char *ecs_strbuf_get_small(
ecs_strbuf_t *b)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
int32_t written = ecs_strbuf_written(b);
ecs_assert(written <= ECS_STRBUF_ELEMENT_SIZE, ECS_INVALID_OPERATION, NULL);
char *buf = b->firstElement.buf;
buf[written] = '\0';
return buf;
}
void ecs_strbuf_reset(
ecs_strbuf_t *b)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
if (b->elementCount && !b->buf) {
void *next = NULL;
ecs_strbuf_element *e = (ecs_strbuf_element*)&b->firstElement;
do {
next = e->next;
if (e != (ecs_strbuf_element*)&b->firstElement) {
ecs_os_free(e);
}
} while ((e = next));
}
*b = ECS_STRBUF_INIT;
}
void ecs_strbuf_list_push(
ecs_strbuf_t *b,
const char *list_open,
const char *separator)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(list_open != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(separator != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(b->list_sp >= 0, ECS_INVALID_OPERATION, NULL);
b->list_sp ++;
ecs_assert(b->list_sp < ECS_STRBUF_MAX_LIST_DEPTH,
ECS_INVALID_OPERATION, NULL);
b->list_stack[b->list_sp].count = 0;
b->list_stack[b->list_sp].separator = separator;
if (list_open) {
char ch = list_open[0];
if (ch && !list_open[1]) {
ecs_strbuf_appendch(b, ch);
} else {
ecs_strbuf_appendstr(b, list_open);
}
}
}
void ecs_strbuf_list_pop(
ecs_strbuf_t *b,
const char *list_close)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(list_close != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(b->list_sp > 0, ECS_INVALID_OPERATION, NULL);
b->list_sp --;
if (list_close) {
char ch = list_close[0];
if (ch && !list_close[1]) {
ecs_strbuf_appendch(b, list_close[0]);
} else {
ecs_strbuf_appendstr(b, list_close);
}
}
}
void ecs_strbuf_list_next(
ecs_strbuf_t *b)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
int32_t list_sp = b->list_sp;
if (b->list_stack[list_sp].count != 0) {
const char *sep = b->list_stack[list_sp].separator;
if (sep && !sep[1]) {
ecs_strbuf_appendch(b, sep[0]);
} else {
ecs_strbuf_appendstr(b, sep);
}
}
b->list_stack[list_sp].count ++;
}
bool ecs_strbuf_list_appendch(
ecs_strbuf_t *b,
char ch)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_strbuf_list_next(b);
return flecs_strbuf_appendch(b, ch);
}
bool ecs_strbuf_list_append(
ecs_strbuf_t *b,
const char *fmt,
...)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(fmt != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_strbuf_list_next(b);
va_list args;
va_start(args, fmt);
bool result = flecs_strbuf_vappend(b, fmt, args);
va_end(args);
return result;
}
bool ecs_strbuf_list_appendstr(
ecs_strbuf_t *b,
const char *str)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(str != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_strbuf_list_next(b);
return ecs_strbuf_appendstr(b, str);
}
bool ecs_strbuf_list_appendstrn(
ecs_strbuf_t *b,
const char *str,
int32_t n)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(str != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_strbuf_list_next(b);
return ecs_strbuf_appendstrn(b, str, n);
}
int32_t ecs_strbuf_written(
const ecs_strbuf_t *b)
{
ecs_assert(b != NULL, ECS_INVALID_PARAMETER, NULL);
if (b->current) {
return b->size + b->current->pos;
} else {
return 0;
}
}

View File

@@ -0,0 +1,383 @@
/**
* @file datastructures/switch_list.c
* @brief Interleaved linked list for storing mutually exclusive values.
*
 * Data structure that stores N interleaved linked lists in an array. This
 * allows for efficient storage and retrieval of elements with mutually
 * exclusive values, such as enumeration values. Each linked list has a header
 * element which points to the index in the array that stores the first node
 * of the list. Each list node points to the next array element.
 *
 * The linked lists allow an application to obtain all elements for a given
 * (enumeration) value without having to search.
 *
 * While the list accepts 64-bit values, it only uses the lower 32 bits of the
 * value for selecting the correct linked list.
*
* The switch list is used to store union relationships.
*/
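/* Example usage (illustrative sketch; Walking and Running stand in for
 * application-defined non-zero case values, and 'a' for an initialized
 * ecs_allocator_t pointer):
 *
 *   ecs_allocator_t *a = ...;          // any initialized allocator
 *   ecs_switch_t sw;
 *   flecs_switch_init(&sw, a, 4);      // 4 elements, all unassigned (value 0)
 *   flecs_switch_set(&sw, 0, Walking);
 *   flecs_switch_set(&sw, 1, Running);
 *   flecs_switch_set(&sw, 2, Walking);
 *
 *   // Visit all elements assigned to Walking without searching:
 *   int32_t cur = flecs_switch_first(&sw, Walking);
 *   while (cur != -1) {
 *       // ... element 'cur' currently has value Walking ...
 *       cur = flecs_switch_next(&sw, cur);
 *   }
 *
 *   flecs_switch_fini(&sw);
 */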
#include "../private_api.h"
#ifdef FLECS_SANITIZE
static
void flecs_switch_verify_nodes(
ecs_switch_header_t *hdr,
ecs_switch_node_t *nodes)
{
if (!hdr) {
return;
}
int32_t prev = -1, elem = hdr->element, count = 0;
while (elem != -1) {
ecs_assert(prev == nodes[elem].prev, ECS_INTERNAL_ERROR, NULL);
prev = elem;
elem = nodes[elem].next;
count ++;
}
ecs_assert(count == hdr->count, ECS_INTERNAL_ERROR, NULL);
}
#else
#define flecs_switch_verify_nodes(hdr, nodes)
#endif
static
ecs_switch_header_t* flecs_switch_get_header(
const ecs_switch_t *sw,
uint64_t value)
{
if (value == 0) {
return NULL;
}
return (ecs_switch_header_t*)ecs_map_get(&sw->hdrs, value);
}
static
ecs_switch_header_t *flecs_switch_ensure_header(
ecs_switch_t *sw,
uint64_t value)
{
ecs_switch_header_t *node = flecs_switch_get_header(sw, value);
if (!node && (value != 0)) {
node = (ecs_switch_header_t*)ecs_map_ensure(&sw->hdrs, value);
node->count = 0;
node->element = -1;
}
return node;
}
static
void flecs_switch_remove_node(
ecs_switch_header_t *hdr,
ecs_switch_node_t *nodes,
ecs_switch_node_t *node,
int32_t element)
{
ecs_assert(&nodes[element] == node, ECS_INTERNAL_ERROR, NULL);
/* Update previous node/header */
if (hdr->element == element) {
ecs_assert(node->prev == -1, ECS_INVALID_PARAMETER, NULL);
/* If this is the first node, update the header */
hdr->element = node->next;
} else {
/* If this is not the first node, update the previous node to the
* removed node's next ptr */
ecs_assert(node->prev != -1, ECS_INVALID_PARAMETER, NULL);
ecs_switch_node_t *prev_node = &nodes[node->prev];
prev_node->next = node->next;
}
/* Update next node */
int32_t next = node->next;
if (next != -1) {
ecs_assert(next >= 0, ECS_INVALID_PARAMETER, NULL);
/* If this is not the last node, update the next node to point to the
* removed node's prev ptr */
ecs_switch_node_t *next_node = &nodes[next];
next_node->prev = node->prev;
}
/* Decrease count of current header */
hdr->count --;
ecs_assert(hdr->count >= 0, ECS_INTERNAL_ERROR, NULL);
}
void flecs_switch_init(
ecs_switch_t *sw,
ecs_allocator_t *allocator,
int32_t elements)
{
ecs_map_init(&sw->hdrs, allocator);
ecs_vec_init_t(allocator, &sw->nodes, ecs_switch_node_t, elements);
ecs_vec_init_t(allocator, &sw->values, uint64_t, elements);
ecs_switch_node_t *nodes = ecs_vec_first(&sw->nodes);
uint64_t *values = ecs_vec_first(&sw->values);
int i;
for (i = 0; i < elements; i ++) {
nodes[i].prev = -1;
nodes[i].next = -1;
values[i] = 0;
}
}
void flecs_switch_clear(
ecs_switch_t *sw)
{
ecs_map_clear(&sw->hdrs);
ecs_vec_fini_t(sw->hdrs.allocator, &sw->nodes, ecs_switch_node_t);
ecs_vec_fini_t(sw->hdrs.allocator, &sw->values, uint64_t);
}
void flecs_switch_fini(
ecs_switch_t *sw)
{
ecs_map_fini(&sw->hdrs);
ecs_vec_fini_t(sw->hdrs.allocator, &sw->nodes, ecs_switch_node_t);
ecs_vec_fini_t(sw->hdrs.allocator, &sw->values, uint64_t);
}
void flecs_switch_add(
ecs_switch_t *sw)
{
ecs_switch_node_t *node = ecs_vec_append_t(sw->hdrs.allocator,
&sw->nodes, ecs_switch_node_t);
uint64_t *value = ecs_vec_append_t(sw->hdrs.allocator,
&sw->values, uint64_t);
node->prev = -1;
node->next = -1;
*value = 0;
}
void flecs_switch_set_count(
ecs_switch_t *sw,
int32_t count)
{
int32_t old_count = ecs_vec_count(&sw->nodes);
if (old_count == count) {
return;
}
ecs_vec_set_count_t(sw->hdrs.allocator, &sw->nodes, ecs_switch_node_t, count);
ecs_vec_set_count_t(sw->hdrs.allocator, &sw->values, uint64_t, count);
ecs_switch_node_t *nodes = ecs_vec_first(&sw->nodes);
uint64_t *values = ecs_vec_first(&sw->values);
int32_t i;
for (i = old_count; i < count; i ++) {
ecs_switch_node_t *node = &nodes[i];
node->prev = -1;
node->next = -1;
values[i] = 0;
}
}
int32_t flecs_switch_count(
ecs_switch_t *sw)
{
ecs_assert(ecs_vec_count(&sw->values) == ecs_vec_count(&sw->nodes),
ECS_INTERNAL_ERROR, NULL);
return ecs_vec_count(&sw->values);
}
void flecs_switch_ensure(
ecs_switch_t *sw,
int32_t count)
{
int32_t old_count = ecs_vec_count(&sw->nodes);
if (old_count >= count) {
return;
}
flecs_switch_set_count(sw, count);
}
void flecs_switch_addn(
ecs_switch_t *sw,
int32_t count)
{
int32_t old_count = ecs_vec_count(&sw->nodes);
flecs_switch_set_count(sw, old_count + count);
}
void flecs_switch_set(
ecs_switch_t *sw,
int32_t element,
uint64_t value)
{
ecs_assert(sw != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(element < ecs_vec_count(&sw->nodes), ECS_INVALID_PARAMETER, NULL);
ecs_assert(element < ecs_vec_count(&sw->values), ECS_INVALID_PARAMETER, NULL);
ecs_assert(element >= 0, ECS_INVALID_PARAMETER, NULL);
uint64_t *values = ecs_vec_first(&sw->values);
uint64_t cur_value = values[element];
/* If the node is already assigned to the value, nothing to be done */
if (cur_value == value) {
return;
}
ecs_switch_node_t *nodes = ecs_vec_first(&sw->nodes);
ecs_switch_node_t *node = &nodes[element];
ecs_switch_header_t *dst_hdr = flecs_switch_ensure_header(sw, value);
ecs_switch_header_t *cur_hdr = flecs_switch_get_header(sw, cur_value);
flecs_switch_verify_nodes(cur_hdr, nodes);
flecs_switch_verify_nodes(dst_hdr, nodes);
/* If value is not 0, and dst_hdr is NULL, then this is not a valid value
* for this switch */
ecs_assert(dst_hdr != NULL || !value, ECS_INVALID_PARAMETER, NULL);
if (cur_hdr) {
flecs_switch_remove_node(cur_hdr, nodes, node, element);
}
/* Now update the node itself by adding it as the first node of dst */
node->prev = -1;
values[element] = value;
if (dst_hdr) {
node->next = dst_hdr->element;
/* Also update the dst header */
int32_t first = dst_hdr->element;
if (first != -1) {
ecs_assert(first >= 0, ECS_INTERNAL_ERROR, NULL);
ecs_switch_node_t *first_node = &nodes[first];
first_node->prev = element;
}
dst_hdr->element = element;
dst_hdr->count ++;
}
}
void flecs_switch_remove(
ecs_switch_t *sw,
int32_t elem)
{
ecs_assert(sw != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(elem < ecs_vec_count(&sw->nodes), ECS_INVALID_PARAMETER, NULL);
ecs_assert(elem >= 0, ECS_INVALID_PARAMETER, NULL);
uint64_t *values = ecs_vec_first(&sw->values);
uint64_t value = values[elem];
ecs_switch_node_t *nodes = ecs_vec_first(&sw->nodes);
ecs_switch_node_t *node = &nodes[elem];
/* If node is currently assigned to a case, remove it from the list */
if (value != 0) {
ecs_switch_header_t *hdr = flecs_switch_get_header(sw, value);
ecs_assert(hdr != NULL, ECS_INTERNAL_ERROR, NULL);
flecs_switch_verify_nodes(hdr, nodes);
flecs_switch_remove_node(hdr, nodes, node, elem);
}
int32_t last_elem = ecs_vec_count(&sw->nodes) - 1;
if (last_elem != elem) {
ecs_switch_node_t *last = ecs_vec_last_t(&sw->nodes, ecs_switch_node_t);
int32_t next = last->next, prev = last->prev;
if (next != -1) {
ecs_switch_node_t *n = &nodes[next];
n->prev = elem;
}
if (prev != -1) {
ecs_switch_node_t *n = &nodes[prev];
n->next = elem;
} else {
ecs_switch_header_t *hdr = flecs_switch_get_header(sw, values[last_elem]);
if (hdr && hdr->element != -1) {
ecs_assert(hdr->element == last_elem,
ECS_INTERNAL_ERROR, NULL);
hdr->element = elem;
}
}
}
/* Remove element from arrays */
ecs_vec_remove_t(&sw->nodes, ecs_switch_node_t, elem);
ecs_vec_remove_t(&sw->values, uint64_t, elem);
}
uint64_t flecs_switch_get(
const ecs_switch_t *sw,
int32_t element)
{
ecs_assert(sw != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(element < ecs_vec_count(&sw->nodes), ECS_INVALID_PARAMETER, NULL);
ecs_assert(element < ecs_vec_count(&sw->values), ECS_INVALID_PARAMETER, NULL);
ecs_assert(element >= 0, ECS_INVALID_PARAMETER, NULL);
uint64_t *values = ecs_vec_first(&sw->values);
return values[element];
}
ecs_vec_t* flecs_switch_values(
const ecs_switch_t *sw)
{
return ECS_CONST_CAST(ecs_vec_t*, &sw->values);
}
int32_t flecs_switch_case_count(
const ecs_switch_t *sw,
uint64_t value)
{
ecs_switch_header_t *hdr = flecs_switch_get_header(sw, value);
if (!hdr) {
return 0;
}
return hdr->count;
}
void flecs_switch_swap(
ecs_switch_t *sw,
int32_t elem_1,
int32_t elem_2)
{
uint64_t v1 = flecs_switch_get(sw, elem_1);
uint64_t v2 = flecs_switch_get(sw, elem_2);
flecs_switch_set(sw, elem_2, v1);
flecs_switch_set(sw, elem_1, v2);
}
int32_t flecs_switch_first(
const ecs_switch_t *sw,
uint64_t value)
{
ecs_assert(sw != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_switch_header_t *hdr = flecs_switch_get_header(sw, value);
if (!hdr) {
return -1;
}
return hdr->element;
}
int32_t flecs_switch_next(
const ecs_switch_t *sw,
int32_t element)
{
ecs_assert(sw != NULL, ECS_INVALID_PARAMETER, NULL);
ecs_assert(element < ecs_vec_count(&sw->nodes), ECS_INVALID_PARAMETER, NULL);
ecs_assert(element >= 0, ECS_INVALID_PARAMETER, NULL);
ecs_switch_node_t *nodes = ecs_vec_first(&sw->nodes);
return nodes[element].next;
}

View File

@@ -0,0 +1,296 @@
/**
* @file datastructures/vec.c
* @brief Vector with allocator support.
*/
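/* Example usage (illustrative sketch; passing a NULL allocator falls back to
 * the OS allocator, as the functions below show):
 *
 *   ecs_vec_t v;
 *   ecs_vec_init(NULL, &v, ECS_SIZEOF(int32_t), 0);
 *   int32_t *elem = ecs_vec_append(NULL, &v, ECS_SIZEOF(int32_t));
 *   *elem = 10;
 *   int32_t count = ecs_vec_count(&v); // 1
 *   ecs_vec_fini(NULL, &v, ECS_SIZEOF(int32_t));
 */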
#include "../private_api.h"
ecs_vec_t* ecs_vec_init(
ecs_allocator_t *allocator,
ecs_vec_t *v,
ecs_size_t size,
int32_t elem_count)
{
ecs_assert(size != 0, ECS_INVALID_PARAMETER, NULL);
v->array = NULL;
v->count = 0;
if (elem_count) {
if (allocator) {
v->array = flecs_alloc(allocator, size * elem_count);
} else {
v->array = ecs_os_malloc(size * elem_count);
}
}
v->size = elem_count;
#ifdef FLECS_SANITIZE
v->elem_size = size;
#endif
return v;
}
void ecs_vec_init_if(
ecs_vec_t *vec,
ecs_size_t size)
{
ecs_san_assert(!vec->elem_size || vec->elem_size == size, ECS_INVALID_PARAMETER, NULL);
(void)vec;
(void)size;
#ifdef FLECS_SANITIZE
if (!vec->elem_size) {
ecs_assert(vec->count == 0, ECS_INTERNAL_ERROR, NULL);
ecs_assert(vec->size == 0, ECS_INTERNAL_ERROR, NULL);
ecs_assert(vec->array == NULL, ECS_INTERNAL_ERROR, NULL);
vec->elem_size = size;
}
#endif
}
void ecs_vec_fini(
ecs_allocator_t *allocator,
ecs_vec_t *v,
ecs_size_t size)
{
if (v->array) {
ecs_san_assert(!size || size == v->elem_size, ECS_INVALID_PARAMETER, NULL);
if (allocator) {
flecs_free(allocator, size * v->size, v->array);
} else {
ecs_os_free(v->array);
}
v->array = NULL;
v->count = 0;
v->size = 0;
}
}
ecs_vec_t* ecs_vec_reset(
ecs_allocator_t *allocator,
ecs_vec_t *v,
ecs_size_t size)
{
if (!v->size) {
ecs_vec_init(allocator, v, size, 0);
} else {
ecs_san_assert(size == v->elem_size, ECS_INTERNAL_ERROR, NULL);
ecs_vec_clear(v);
}
return v;
}
void ecs_vec_clear(
ecs_vec_t *vec)
{
vec->count = 0;
}
ecs_vec_t ecs_vec_copy(
ecs_allocator_t *allocator,
const ecs_vec_t *v,
ecs_size_t size)
{
ecs_san_assert(size == v->elem_size, ECS_INVALID_PARAMETER, NULL);
void *array;
if (allocator) {
array = flecs_dup(allocator, size * v->size, v->array);
} else {
array = ecs_os_memdup(v->array, size * v->size);
}
return (ecs_vec_t) {
.count = v->count,
.size = v->size,
.array = array
#ifdef FLECS_SANITIZE
, .elem_size = size
#endif
};
}
void ecs_vec_reclaim(
ecs_allocator_t *allocator,
ecs_vec_t *v,
ecs_size_t size)
{
ecs_san_assert(size == v->elem_size, ECS_INVALID_PARAMETER, NULL);
int32_t count = v->count;
if (count < v->size) {
if (count) {
if (allocator) {
v->array = flecs_realloc(
allocator, size * count, size * v->size, v->array);
} else {
v->array = ecs_os_realloc(v->array, size * count);
}
v->size = count;
} else {
ecs_vec_fini(allocator, v, size);
}
}
}
void ecs_vec_set_size(
ecs_allocator_t *allocator,
ecs_vec_t *v,
ecs_size_t size,
int32_t elem_count)
{
ecs_san_assert(size == v->elem_size, ECS_INVALID_PARAMETER, NULL);
if (v->size != elem_count) {
if (elem_count < v->count) {
elem_count = v->count;
}
elem_count = flecs_next_pow_of_2(elem_count);
if (elem_count < 2) {
elem_count = 2;
}
if (elem_count != v->size) {
if (allocator) {
v->array = flecs_realloc(
allocator, size * elem_count, size * v->size, v->array);
} else {
v->array = ecs_os_realloc(v->array, size * elem_count);
}
v->size = elem_count;
}
}
}
void ecs_vec_set_min_size(
struct ecs_allocator_t *allocator,
ecs_vec_t *vec,
ecs_size_t size,
int32_t elem_count)
{
if (elem_count > vec->size) {
ecs_vec_set_size(allocator, vec, size, elem_count);
}
}
void ecs_vec_set_min_count(
struct ecs_allocator_t *allocator,
ecs_vec_t *vec,
ecs_size_t size,
int32_t elem_count)
{
ecs_vec_set_min_size(allocator, vec, size, elem_count);
if (vec->count < elem_count) {
vec->count = elem_count;
}
}
void ecs_vec_set_min_count_zeromem(
struct ecs_allocator_t *allocator,
ecs_vec_t *vec,
ecs_size_t size,
int32_t elem_count)
{
int32_t count = vec->count;
if (count < elem_count) {
ecs_vec_set_min_count(allocator, vec, size, elem_count);
ecs_os_memset(ECS_ELEM(vec->array, size, count), 0,
size * (elem_count - count));
}
}
void ecs_vec_set_count(
ecs_allocator_t *allocator,
ecs_vec_t *v,
ecs_size_t size,
int32_t elem_count)
{
ecs_san_assert(size == v->elem_size, ECS_INVALID_PARAMETER, NULL);
if (v->count != elem_count) {
if (v->size < elem_count) {
ecs_vec_set_size(allocator, v, size, elem_count);
}
v->count = elem_count;
}
}
void* ecs_vec_grow(
ecs_allocator_t *allocator,
ecs_vec_t *v,
ecs_size_t size,
int32_t elem_count)
{
ecs_san_assert(size == v->elem_size, ECS_INVALID_PARAMETER, NULL);
ecs_assert(elem_count > 0, ECS_INTERNAL_ERROR, NULL);
int32_t count = v->count;
ecs_vec_set_count(allocator, v, size, count + elem_count);
return ECS_ELEM(v->array, size, count);
}
void* ecs_vec_append(
ecs_allocator_t *allocator,
ecs_vec_t *v,
ecs_size_t size)
{
ecs_san_assert(size == v->elem_size, ECS_INVALID_PARAMETER, NULL);
int32_t count = v->count;
if (v->size == count) {
ecs_vec_set_size(allocator, v, size, count + 1);
}
v->count = count + 1;
return ECS_ELEM(v->array, size, count);
}
void ecs_vec_remove(
ecs_vec_t *v,
ecs_size_t size,
int32_t index)
{
ecs_san_assert(size == v->elem_size, ECS_INVALID_PARAMETER, NULL);
ecs_assert(index < v->count, ECS_OUT_OF_RANGE, NULL);
if (index == --v->count) {
return;
}
ecs_os_memcpy(
ECS_ELEM(v->array, size, index),
ECS_ELEM(v->array, size, v->count),
size);
}
void ecs_vec_remove_last(
ecs_vec_t *v)
{
v->count --;
}
int32_t ecs_vec_count(
const ecs_vec_t *v)
{
return v->count;
}
int32_t ecs_vec_size(
const ecs_vec_t *v)
{
return v->size;
}
void* ecs_vec_get(
const ecs_vec_t *v,
ecs_size_t size,
int32_t index)
{
ecs_san_assert(size == v->elem_size, ECS_INVALID_PARAMETER, NULL);
ecs_assert(index < v->count, ECS_OUT_OF_RANGE, NULL);
return ECS_ELEM(v->array, size, index);
}
void* ecs_vec_last(
const ecs_vec_t *v,
ecs_size_t size)
{
ecs_san_assert(!v->elem_size || size == v->elem_size,
ECS_INVALID_PARAMETER, NULL);
return ECS_ELEM(v->array, size, v->count - 1);
}
void* ecs_vec_first(
const ecs_vec_t *v)
{
return v->array;
}