Synchronize most shared template code with Godot 4.4

(cherry picked from commit 1edfca295b)
David Snopek
2025-02-25 09:29:22 -06:00
parent 67f6f96a9b
commit 0a39efe3d3
16 changed files with 725 additions and 369 deletions

View File

@@ -37,8 +37,10 @@
#include <godot_cpp/templates/safe_refcount.hpp>
#include <cstring>
#include <initializer_list>
#include <new>
#include <type_traits>
#include <utility>
namespace godot {
@@ -166,13 +168,25 @@ private:
return *out;
}
void _unref(void *p_data);
// Decrements the reference count. Deallocates the backing buffer if needed.
// After this function, _ptr is guaranteed to be NULL.
void _unref();
void _ref(const CowData *p_from);
void _ref(const CowData &p_from);
USize _copy_on_write();
Error _realloc(Size p_alloc_size);
public:
void operator=(const CowData<T> &p_from) { _ref(p_from); }
void operator=(CowData<T> &&p_from) {
if (_ptr == p_from._ptr) {
return;
}
_unref();
_ptr = p_from._ptr;
p_from._ptr = nullptr;
}
_FORCE_INLINE_ T *ptrw() {
_copy_on_write();
@@ -221,19 +235,22 @@ public:
T *p = ptrw();
Size len = size();
for (Size i = p_index; i < len - 1; i++) {
p[i] = p[i + 1];
p[i] = std::move(p[i + 1]);
}
resize(len - 1);
}
Error insert(Size p_pos, const T &p_val) {
ERR_FAIL_INDEX_V(p_pos, size() + 1, ERR_INVALID_PARAMETER);
resize(size() + 1);
for (Size i = (size() - 1); i > p_pos; i--) {
set(i, get(i - 1));
Size new_size = size() + 1;
ERR_FAIL_INDEX_V(p_pos, new_size, ERR_INVALID_PARAMETER);
Error err = resize(new_size);
ERR_FAIL_COND_V(err, err);
T *p = ptrw();
for (Size i = new_size - 1; i > p_pos; i--) {
p[i] = std::move(p[i - 1]);
}
set(p_pos, p_val);
p[p_pos] = p_val;
return OK;
}
@@ -243,35 +260,47 @@ public:
Size count(const T &p_val) const;
_FORCE_INLINE_ CowData() {}
_FORCE_INLINE_ ~CowData();
_FORCE_INLINE_ CowData(CowData<T> &p_from) { _ref(p_from); };
_FORCE_INLINE_ ~CowData() { _unref(); }
_FORCE_INLINE_ CowData(std::initializer_list<T> p_init);
_FORCE_INLINE_ CowData(const CowData<T> &p_from) { _ref(p_from); }
_FORCE_INLINE_ CowData(CowData<T> &&p_from) {
_ptr = p_from._ptr;
p_from._ptr = nullptr;
}
};
template <typename T>
void CowData<T>::_unref(void *p_data) {
if (!p_data) {
void CowData<T>::_unref() {
if (!_ptr) {
return;
}
SafeNumeric<USize> *refc = _get_refcount();
if (refc->decrement() > 0) {
return; // still in use
// Data is still in use elsewhere.
_ptr = nullptr;
return;
}
// clean up
// Clean up.
// First, invalidate our own reference.
// NOTE: It is required to do so immediately because it must not be observable outside of this
// function after refcount has already been reduced to 0.
// WARNING: It must be done before calling the destructors, because one of them may otherwise
// observe it through a reference to us. In this case, it may try to access the buffer,
// which is illegal after some of the elements in it have already been destructed, and
// may lead to a segmentation fault.
USize current_size = *_get_size();
T *prev_ptr = _ptr;
_ptr = nullptr;
if constexpr (!std::is_trivially_destructible_v<T>) {
USize *count = _get_size();
T *data = (T *)(count + 1);
for (USize i = 0; i < *count; ++i) {
// call destructors
data[i].~T();
for (USize i = 0; i < current_size; ++i) {
prev_ptr[i].~T();
}
}
// free mem
Memory::free_static(((uint8_t *)p_data) - DATA_OFFSET, false);
Memory::free_static((uint8_t *)prev_ptr - DATA_OFFSET, false);
}
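
As a hypothetical illustration of the hazard the comments in _unref() describe (not part of the diff; the names below are invented, and the element is assumed to be stored through the public Vector<T> wrapper over CowData):

struct Watcher {
	godot::Vector<Watcher> *owner = nullptr;
	~Watcher() {
		// With the old _unref(void *), the owner could still look non-empty
		// here, and this would touch elements that are already being
		// destructed. Because _ptr is now nulled first, the owner reports
		// size() == 0 and the guard below is safe.
		if (owner && owner->size() > 0) {
			const Watcher &first = (*owner)[0];
			(void)first;
		}
	}
};
// Scenario: a godot::Vector<Watcher> owning such elements is destroyed; the
// destructors above then run while _unref() is tearing down the buffer.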
template <typename T>
@@ -306,7 +335,7 @@ typename CowData<T>::USize CowData<T>::_copy_on_write() {
}
}
_unref(_ptr);
_unref();
_ptr = _data_ptr;
rc = 1;
@@ -326,14 +355,13 @@ Error CowData<T>::resize(Size p_size) {
}
if (p_size == 0) {
// wants to clean up
_unref(_ptr);
_ptr = nullptr;
// Wants to clean up.
_unref(); // Resets _ptr to nullptr.
return OK;
}
// possibly changing size, copy on write
USize rc = _copy_on_write();
_copy_on_write();
USize current_alloc_size = _get_alloc_size(current_size);
USize alloc_size;
@@ -354,16 +382,12 @@ Error CowData<T>::resize(Size p_size) {
*(_size_ptr) = 0; //size, currently none
_ptr = _data_ptr;
} else {
uint8_t *mem_new = (uint8_t *)Memory::realloc_static(((uint8_t *)_ptr) - DATA_OFFSET, alloc_size + DATA_OFFSET, false);
ERR_FAIL_NULL_V(mem_new, ERR_OUT_OF_MEMORY);
SafeNumeric<USize> *_refc_ptr = _get_refcount_ptr(mem_new);
T *_data_ptr = _get_data_ptr(mem_new);
new (_refc_ptr) SafeNumeric<USize>(rc); //refcount
_ptr = _data_ptr;
const Error error = _realloc(alloc_size);
if (error) {
return error;
}
}
}
@@ -389,15 +413,10 @@ Error CowData<T>::resize(Size p_size) {
}
if (alloc_size != current_alloc_size) {
uint8_t *mem_new = (uint8_t *)Memory::realloc_static(((uint8_t *)_ptr) - DATA_OFFSET, alloc_size + DATA_OFFSET, false);
ERR_FAIL_NULL_V(mem_new, ERR_OUT_OF_MEMORY);
SafeNumeric<USize> *_refc_ptr = _get_refcount_ptr(mem_new);
T *_data_ptr = _get_data_ptr(mem_new);
new (_refc_ptr) SafeNumeric<USize>(rc); //refcount
_ptr = _data_ptr;
const Error error = _realloc(alloc_size);
if (error) {
return error;
}
}
*_get_size() = p_size;
@@ -406,6 +425,21 @@ Error CowData<T>::resize(Size p_size) {
return OK;
}
template <typename T>
Error CowData<T>::_realloc(Size p_alloc_size) {
uint8_t *mem_new = (uint8_t *)Memory::realloc_static(((uint8_t *)_ptr) - DATA_OFFSET, p_alloc_size + DATA_OFFSET, false);
ERR_FAIL_NULL_V(mem_new, ERR_OUT_OF_MEMORY);
SafeNumeric<USize> *_refc_ptr = _get_refcount_ptr(mem_new);
T *_data_ptr = _get_data_ptr(mem_new);
// If we realloc, we're guaranteed to be the only reference.
new (_refc_ptr) SafeNumeric<USize>(1);
_ptr = _data_ptr;
return OK;
}
template <typename T>
typename CowData<T>::Size CowData<T>::find(const T &p_val, Size p_from) const {
Size ret = -1;
@@ -465,11 +499,10 @@ void CowData<T>::_ref(const CowData &p_from) {
return; // self assign, do nothing.
}
_unref(_ptr);
_ptr = nullptr;
_unref(); // Resets _ptr to nullptr.
if (!p_from._ptr) {
return; // nothing to do
return; //nothing to do
}
if (p_from._get_refcount()->conditional_increment() > 0) { // could reference
@@ -478,8 +511,16 @@ void CowData<T>::_ref(const CowData &p_from) {
}
template <typename T>
CowData<T>::~CowData() {
_unref(_ptr);
CowData<T>::CowData(std::initializer_list<T> p_init) {
Error err = resize(p_init.size());
if (err != OK) {
return;
}
Size i = 0;
for (const T &element : p_init) {
set(i++, element);
}
}
#if defined(__GNUC__) && !defined(__clang__)

View File

@@ -61,15 +61,17 @@ struct HashMapElement {
data(p_key, p_value) {}
};
bool _hashmap_variant_less_than(const Variant &p_left, const Variant &p_right);
template <typename TKey, typename TValue,
typename Hasher = HashMapHasherDefault,
typename Comparator = HashMapComparatorDefault<TKey>,
typename Allocator = DefaultTypedAllocator<HashMapElement<TKey, TValue>>>
class HashMap {
public:
const uint32_t MIN_CAPACITY_INDEX = 2; // Use a prime.
const float MAX_OCCUPANCY = 0.75;
const uint32_t EMPTY_HASH = 0;
static constexpr uint32_t MIN_CAPACITY_INDEX = 2; // Use a prime.
static constexpr float MAX_OCCUPANCY = 0.75;
static constexpr uint32_t EMPTY_HASH = 0;
private:
Allocator element_alloc;
@@ -91,19 +93,20 @@ private:
return hash;
}
_FORCE_INLINE_ uint32_t _get_probe_length(uint32_t p_pos, uint32_t p_hash, uint32_t p_capacity) const {
uint32_t original_pos = p_hash % p_capacity;
return (p_pos - original_pos + p_capacity) % p_capacity;
static _FORCE_INLINE_ uint32_t _get_probe_length(const uint32_t p_pos, const uint32_t p_hash, const uint32_t p_capacity, const uint64_t p_capacity_inv) {
const uint32_t original_pos = fastmod(p_hash, p_capacity_inv, p_capacity);
return fastmod(p_pos - original_pos + p_capacity, p_capacity_inv, p_capacity);
}
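
For context, the fastmod() calls introduced above apply Lemire's fast-remainder trick using the precomputed hash_table_size_primes_inv constants. A rough illustrative sketch (not godot-cpp's actual helper; it relies on the GCC/Clang unsigned __int128 extension) looks like this:

static inline uint32_t fastmod_sketch(const uint32_t n, const uint64_t c, const uint64_t d) {
	// c is the precomputed magic constant UINT64_MAX / d + 1 for divisor d,
	// which is exactly how hash_table_size_primes_inv is generated.
	const uint64_t lowbits = c * n;
	return uint32_t(((unsigned __int128)lowbits * d) >> 64); // n % d without a division.
}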
bool _lookup_pos(const TKey &p_key, uint32_t &r_pos) const {
if (elements == nullptr) {
if (elements == nullptr || num_elements == 0) {
return false; // Failed lookups, no elements
}
uint32_t capacity = hash_table_size_primes[capacity_index];
const uint32_t capacity = hash_table_size_primes[capacity_index];
const uint64_t capacity_inv = hash_table_size_primes_inv[capacity_index];
uint32_t hash = _hash(p_key);
uint32_t pos = hash % capacity;
uint32_t pos = fastmod(hash, capacity_inv, capacity);
uint32_t distance = 0;
while (true) {
@@ -111,7 +114,7 @@ private:
return false;
}
if (distance > _get_probe_length(pos, hashes[pos], capacity)) {
if (distance > _get_probe_length(pos, hashes[pos], capacity, capacity_inv)) {
return false;
}
@@ -120,17 +123,18 @@ private:
return true;
}
pos = (pos + 1) % capacity;
pos = fastmod((pos + 1), capacity_inv, capacity);
distance++;
}
}
void _insert_with_hash(uint32_t p_hash, HashMapElement<TKey, TValue> *p_value) {
uint32_t capacity = hash_table_size_primes[capacity_index];
const uint32_t capacity = hash_table_size_primes[capacity_index];
const uint64_t capacity_inv = hash_table_size_primes_inv[capacity_index];
uint32_t hash = p_hash;
HashMapElement<TKey, TValue> *value = p_value;
uint32_t distance = 0;
uint32_t pos = hash % capacity;
uint32_t pos = fastmod(hash, capacity_inv, capacity);
while (true) {
if (hashes[pos] == EMPTY_HASH) {
@@ -143,14 +147,14 @@ private:
}
// Not an empty slot, let's check the probing length of the existing one.
uint32_t existing_probe_len = _get_probe_length(pos, hashes[pos], capacity);
uint32_t existing_probe_len = _get_probe_length(pos, hashes[pos], capacity, capacity_inv);
if (existing_probe_len < distance) {
SWAP(hash, hashes[pos]);
SWAP(value, elements[pos]);
distance = existing_probe_len;
}
pos = (pos + 1) % capacity;
pos = fastmod((pos + 1), capacity_inv, capacity);
distance++;
}
}
@@ -250,7 +254,7 @@ public:
}
void clear() {
if (elements == nullptr) {
if (elements == nullptr || num_elements == 0) {
return;
}
uint32_t capacity = hash_table_size_primes[capacity_index];
@@ -269,6 +273,47 @@ public:
num_elements = 0;
}
void sort() {
if (elements == nullptr || num_elements < 2) {
return; // An empty or single element HashMap is already sorted.
}
// Use insertion sort because we want this operation to be fast for the
// common case where the input is already sorted or nearly sorted.
HashMapElement<TKey, TValue> *inserting = head_element->next;
while (inserting != nullptr) {
HashMapElement<TKey, TValue> *after = nullptr;
for (HashMapElement<TKey, TValue> *current = inserting->prev; current != nullptr; current = current->prev) {
if (_hashmap_variant_less_than(inserting->data.key, current->data.key)) {
after = current;
} else {
break;
}
}
HashMapElement<TKey, TValue> *next = inserting->next;
if (after != nullptr) {
// Modify the elements around `inserting` to remove it from its current position.
inserting->prev->next = next;
if (next == nullptr) {
tail_element = inserting->prev;
} else {
next->prev = inserting->prev;
}
// Modify `before` and `after` to insert `inserting` between them.
HashMapElement<TKey, TValue> *before = after->prev;
if (before == nullptr) {
head_element = inserting;
} else {
before->next = inserting;
}
after->prev = inserting;
// Point `inserting` to its new surroundings.
inserting->prev = before;
inserting->next = after;
}
inserting = next;
}
}
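
A hedged usage sketch of the new sort(), assuming keys that are convertible to Variant as _hashmap_variant_less_than requires:

godot::HashMap<godot::String, int> scores = { { "beta", 2 }, { "alpha", 1 } };
scores.sort();
// Iteration order now follows the keys: "alpha" first, then "beta".
for (const godot::KeyValue<godot::String, int> &kv : scores) {
	// Use kv.key / kv.value here.
}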
TValue &get(const TKey &p_key) {
uint32_t pos = 0;
bool exists = _lookup_pos(p_key, pos);
@@ -316,13 +361,14 @@ public:
return false;
}
uint32_t capacity = hash_table_size_primes[capacity_index];
uint32_t next_pos = (pos + 1) % capacity;
while (hashes[next_pos] != EMPTY_HASH && _get_probe_length(next_pos, hashes[next_pos], capacity) != 0) {
const uint32_t capacity = hash_table_size_primes[capacity_index];
const uint64_t capacity_inv = hash_table_size_primes_inv[capacity_index];
uint32_t next_pos = fastmod((pos + 1), capacity_inv, capacity);
while (hashes[next_pos] != EMPTY_HASH && _get_probe_length(next_pos, hashes[next_pos], capacity, capacity_inv) != 0) {
SWAP(hashes[next_pos], hashes[pos]);
SWAP(elements[next_pos], elements[pos]);
pos = next_pos;
next_pos = (pos + 1) % capacity;
next_pos = fastmod((pos + 1), capacity_inv, capacity);
}
hashes[pos] = EMPTY_HASH;
@@ -350,6 +396,40 @@ public:
return true;
}
// Replace the key of an entry in-place, without invalidating iterators or changing the entries position during iteration.
// p_old_key must exist in the map and p_new_key must not, unless it is equal to p_old_key.
bool replace_key(const TKey &p_old_key, const TKey &p_new_key) {
if (p_old_key == p_new_key) {
return true;
}
uint32_t pos = 0;
ERR_FAIL_COND_V(_lookup_pos(p_new_key, pos), false);
ERR_FAIL_COND_V(!_lookup_pos(p_old_key, pos), false);
HashMapElement<TKey, TValue> *element = elements[pos];
// Delete the old entries in hashes and elements.
const uint32_t capacity = hash_table_size_primes[capacity_index];
const uint64_t capacity_inv = hash_table_size_primes_inv[capacity_index];
uint32_t next_pos = fastmod((pos + 1), capacity_inv, capacity);
while (hashes[next_pos] != EMPTY_HASH && _get_probe_length(next_pos, hashes[next_pos], capacity, capacity_inv) != 0) {
SWAP(hashes[next_pos], hashes[pos]);
SWAP(elements[next_pos], elements[pos]);
pos = next_pos;
next_pos = fastmod((pos + 1), capacity_inv, capacity);
}
hashes[pos] = EMPTY_HASH;
elements[pos] = nullptr;
// _insert_with_hash will increment this again.
num_elements--;
// Update the HashMapElement with the new key and reinsert it.
const_cast<TKey &>(element->data.key) = p_new_key;
uint32_t hash = _hash(p_new_key);
_insert_with_hash(hash, element);
return true;
}
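
A minimal sketch of the replace_key() contract described above (the old key must exist, the new one must not):

godot::HashMap<godot::String, int> ids;
ids.insert("temporary", 42);
// The entry keeps its value, its position in iteration order, and any
// iterators pointing at it; only the key changes.
bool ok = ids.replace_key("temporary", "final");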
// Reserves space for a number of elements, useful to avoid many resizes and rehashes.
// If adding a known (possibly large) number of elements at once, must be larger than old capacity.
void reserve(uint32_t p_new_capacity) {
@@ -560,6 +640,13 @@ public:
capacity_index = MIN_CAPACITY_INDEX;
}
HashMap(std::initializer_list<KeyValue<TKey, TValue>> p_init) {
reserve(p_init.size());
for (const KeyValue<TKey, TValue> &E : p_init) {
insert(E.key, E.value);
}
}
uint32_t debug_get_hash(uint32_t p_index) {
if (num_elements == 0) {
return 0;

View File

@@ -75,19 +75,20 @@ private:
return hash;
}
_FORCE_INLINE_ uint32_t _get_probe_length(uint32_t p_pos, uint32_t p_hash, uint32_t p_capacity) const {
uint32_t original_pos = p_hash % p_capacity;
return (p_pos - original_pos + p_capacity) % p_capacity;
static _FORCE_INLINE_ uint32_t _get_probe_length(const uint32_t p_pos, const uint32_t p_hash, const uint32_t p_capacity, const uint64_t p_capacity_inv) {
const uint32_t original_pos = fastmod(p_hash, p_capacity_inv, p_capacity);
return fastmod(p_pos - original_pos + p_capacity, p_capacity_inv, p_capacity);
}
bool _lookup_pos(const TKey &p_key, uint32_t &r_pos) const {
if (keys == nullptr) {
if (keys == nullptr || num_elements == 0) {
return false; // Failed lookups, no elements
}
uint32_t capacity = hash_table_size_primes[capacity_index];
const uint32_t capacity = hash_table_size_primes[capacity_index];
const uint64_t capacity_inv = hash_table_size_primes_inv[capacity_index];
uint32_t hash = _hash(p_key);
uint32_t pos = hash % capacity;
uint32_t pos = fastmod(hash, capacity_inv, capacity);
uint32_t distance = 0;
while (true) {
@@ -95,7 +96,7 @@ private:
return false;
}
if (distance > _get_probe_length(pos, hashes[pos], capacity)) {
if (distance > _get_probe_length(pos, hashes[pos], capacity, capacity_inv)) {
return false;
}
@@ -104,17 +105,18 @@ private:
return true;
}
pos = (pos + 1) % capacity;
pos = fastmod(pos + 1, capacity_inv, capacity);
distance++;
}
}
uint32_t _insert_with_hash(uint32_t p_hash, uint32_t p_index) {
uint32_t capacity = hash_table_size_primes[capacity_index];
const uint32_t capacity = hash_table_size_primes[capacity_index];
const uint64_t capacity_inv = hash_table_size_primes_inv[capacity_index];
uint32_t hash = p_hash;
uint32_t index = p_index;
uint32_t distance = 0;
uint32_t pos = hash % capacity;
uint32_t pos = fastmod(hash, capacity_inv, capacity);
while (true) {
if (hashes[pos] == EMPTY_HASH) {
@@ -125,7 +127,7 @@ private:
}
// Not an empty slot, let's check the probing length of the existing one.
uint32_t existing_probe_len = _get_probe_length(pos, hashes[pos], capacity);
uint32_t existing_probe_len = _get_probe_length(pos, hashes[pos], capacity, capacity_inv);
if (existing_probe_len < distance) {
key_to_hash[index] = pos;
SWAP(hash, hashes[pos]);
@@ -133,7 +135,7 @@ private:
distance = existing_probe_len;
}
pos = (pos + 1) % capacity;
pos = fastmod(pos + 1, capacity_inv, capacity);
distance++;
}
}
@@ -236,7 +238,7 @@ public:
}
void clear() {
if (keys == nullptr) {
if (keys == nullptr || num_elements == 0) {
return;
}
uint32_t capacity = hash_table_size_primes[capacity_index];
@@ -264,11 +266,12 @@ public:
}
uint32_t key_pos = pos;
pos = key_to_hash[pos]; // make hash pos
pos = key_to_hash[pos]; //make hash pos
uint32_t capacity = hash_table_size_primes[capacity_index];
uint32_t next_pos = (pos + 1) % capacity;
while (hashes[next_pos] != EMPTY_HASH && _get_probe_length(next_pos, hashes[next_pos], capacity) != 0) {
const uint32_t capacity = hash_table_size_primes[capacity_index];
const uint64_t capacity_inv = hash_table_size_primes_inv[capacity_index];
uint32_t next_pos = fastmod(pos + 1, capacity_inv, capacity);
while (hashes[next_pos] != EMPTY_HASH && _get_probe_length(next_pos, hashes[next_pos], capacity, capacity_inv) != 0) {
uint32_t kpos = hash_to_key[pos];
uint32_t kpos_next = hash_to_key[next_pos];
SWAP(key_to_hash[kpos], key_to_hash[kpos_next]);
@@ -276,7 +279,7 @@ public:
SWAP(hash_to_key[next_pos], hash_to_key[pos]);
pos = next_pos;
next_pos = (pos + 1) % capacity;
next_pos = fastmod(pos + 1, capacity_inv, capacity);
}
hashes[pos] = EMPTY_HASH;
@@ -443,6 +446,13 @@ public:
capacity_index = MIN_CAPACITY_INDEX;
}
HashSet(std::initializer_list<TKey> p_init) {
reserve(p_init.size());
for (const TKey &E : p_init) {
insert(E);
}
}
void reset() {
clear();

View File

@@ -66,10 +66,11 @@ namespace godot {
static _FORCE_INLINE_ uint32_t hash_djb2(const char *p_cstr) {
const unsigned char *chr = (const unsigned char *)p_cstr;
uint32_t hash = 5381;
uint32_t c;
uint32_t c = *chr++;
while ((c = *chr++)) {
while (c) {
hash = ((hash << 5) + hash) ^ c; /* hash * 33 ^ c */
c = *chr++;
}
return hash;
@@ -107,6 +108,16 @@ static _FORCE_INLINE_ uint32_t hash_one_uint64(const uint64_t p_int) {
return uint32_t(v);
}
static _FORCE_INLINE_ uint64_t hash64_murmur3_64(uint64_t key, uint64_t seed) {
key ^= seed;
key ^= key >> 33;
key *= 0xff51afd7ed558ccd;
key ^= key >> 33;
key *= 0xc4ceb9fe1a85ec53;
key ^= key >> 33;
return key;
}
#define HASH_MURMUR3_SEED 0x7F07C65
// Murmurhash3 32-bit version.
// All MurmurHash versions are public domain software, and the author disclaims all copyright to their code.
@@ -227,7 +238,7 @@ static _FORCE_INLINE_ uint32_t hash_murmur3_buffer(const void *key, int length,
k1 = hash_rotl32(k1, 15);
k1 *= c2;
h1 ^= k1;
}
};
// Finalize with additional bit mixing.
h1 ^= length;
@@ -310,40 +321,41 @@ struct HashMapHasherDefault {
static _FORCE_INLINE_ uint32_t hash(const String &p_string) { return p_string.hash(); }
static _FORCE_INLINE_ uint32_t hash(const char *p_cstr) { return hash_djb2(p_cstr); }
static _FORCE_INLINE_ uint32_t hash(const wchar_t p_wchar) { return hash_fmix32(p_wchar); }
static _FORCE_INLINE_ uint32_t hash(const char16_t p_uchar) { return hash_fmix32(p_uchar); }
static _FORCE_INLINE_ uint32_t hash(const char32_t p_uchar) { return hash_fmix32(p_uchar); }
static _FORCE_INLINE_ uint32_t hash(const wchar_t p_wchar) { return hash_fmix32(uint32_t(p_wchar)); }
static _FORCE_INLINE_ uint32_t hash(const char16_t p_uchar) { return hash_fmix32(uint32_t(p_uchar)); }
static _FORCE_INLINE_ uint32_t hash(const char32_t p_uchar) { return hash_fmix32(uint32_t(p_uchar)); }
static _FORCE_INLINE_ uint32_t hash(const RID &p_rid) { return hash_one_uint64(p_rid.get_id()); }
static _FORCE_INLINE_ uint32_t hash(const CharString &p_char_string) { return hash_djb2(p_char_string.get_data()); }
static _FORCE_INLINE_ uint32_t hash(const StringName &p_string_name) { return p_string_name.hash(); }
static _FORCE_INLINE_ uint32_t hash(const NodePath &p_path) { return p_path.hash(); }
static _FORCE_INLINE_ uint32_t hash(const ObjectID &p_id) { return hash_one_uint64(p_id); }
static _FORCE_INLINE_ uint32_t hash(const uint64_t p_int) { return hash_one_uint64(p_int); }
static _FORCE_INLINE_ uint32_t hash(const int64_t p_int) { return hash_one_uint64(p_int); }
static _FORCE_INLINE_ uint32_t hash(const int64_t p_int) { return hash_one_uint64(uint64_t(p_int)); }
static _FORCE_INLINE_ uint32_t hash(const float p_float) { return hash_murmur3_one_float(p_float); }
static _FORCE_INLINE_ uint32_t hash(const double p_double) { return hash_murmur3_one_double(p_double); }
static _FORCE_INLINE_ uint32_t hash(const uint32_t p_int) { return hash_fmix32(p_int); }
static _FORCE_INLINE_ uint32_t hash(const int32_t p_int) { return hash_fmix32(p_int); }
static _FORCE_INLINE_ uint32_t hash(const uint16_t p_int) { return hash_fmix32(p_int); }
static _FORCE_INLINE_ uint32_t hash(const int16_t p_int) { return hash_fmix32(p_int); }
static _FORCE_INLINE_ uint32_t hash(const uint8_t p_int) { return hash_fmix32(p_int); }
static _FORCE_INLINE_ uint32_t hash(const int8_t p_int) { return hash_fmix32(p_int); }
static _FORCE_INLINE_ uint32_t hash(const int32_t p_int) { return hash_fmix32(uint32_t(p_int)); }
static _FORCE_INLINE_ uint32_t hash(const uint16_t p_int) { return hash_fmix32(uint32_t(p_int)); }
static _FORCE_INLINE_ uint32_t hash(const int16_t p_int) { return hash_fmix32(uint32_t(p_int)); }
static _FORCE_INLINE_ uint32_t hash(const uint8_t p_int) { return hash_fmix32(uint32_t(p_int)); }
static _FORCE_INLINE_ uint32_t hash(const int8_t p_int) { return hash_fmix32(uint32_t(p_int)); }
static _FORCE_INLINE_ uint32_t hash(const Vector2i &p_vec) {
uint32_t h = hash_murmur3_one_32(p_vec.x);
h = hash_murmur3_one_32(p_vec.y, h);
uint32_t h = hash_murmur3_one_32(uint32_t(p_vec.x));
h = hash_murmur3_one_32(uint32_t(p_vec.y), h);
return hash_fmix32(h);
}
static _FORCE_INLINE_ uint32_t hash(const Vector3i &p_vec) {
uint32_t h = hash_murmur3_one_32(p_vec.x);
h = hash_murmur3_one_32(p_vec.y, h);
h = hash_murmur3_one_32(p_vec.z, h);
uint32_t h = hash_murmur3_one_32(uint32_t(p_vec.x));
h = hash_murmur3_one_32(uint32_t(p_vec.y), h);
h = hash_murmur3_one_32(uint32_t(p_vec.z), h);
return hash_fmix32(h);
}
static _FORCE_INLINE_ uint32_t hash(const Vector4i &p_vec) {
uint32_t h = hash_murmur3_one_32(p_vec.x);
h = hash_murmur3_one_32(p_vec.y, h);
h = hash_murmur3_one_32(p_vec.z, h);
h = hash_murmur3_one_32(p_vec.w, h);
uint32_t h = hash_murmur3_one_32(uint32_t(p_vec.x));
h = hash_murmur3_one_32(uint32_t(p_vec.y), h);
h = hash_murmur3_one_32(uint32_t(p_vec.z), h);
h = hash_murmur3_one_32(uint32_t(p_vec.w), h);
return hash_fmix32(h);
}
static _FORCE_INLINE_ uint32_t hash(const Vector2 &p_vec) {
@@ -365,10 +377,10 @@ struct HashMapHasherDefault {
return hash_fmix32(h);
}
static _FORCE_INLINE_ uint32_t hash(const Rect2i &p_rect) {
uint32_t h = hash_murmur3_one_32(p_rect.position.x);
h = hash_murmur3_one_32(p_rect.position.y, h);
h = hash_murmur3_one_32(p_rect.size.x, h);
h = hash_murmur3_one_32(p_rect.size.y, h);
uint32_t h = hash_murmur3_one_32(uint32_t(p_rect.position.x));
h = hash_murmur3_one_32(uint32_t(p_rect.position.y), h);
h = hash_murmur3_one_32(uint32_t(p_rect.size.x), h);
h = hash_murmur3_one_32(uint32_t(p_rect.size.y), h);
return hash_fmix32(h);
}
static _FORCE_INLINE_ uint32_t hash(const Rect2 &p_rect) {
@@ -389,6 +401,19 @@ struct HashMapHasherDefault {
}
};
struct HashHasher {
static _FORCE_INLINE_ uint32_t hash(const int32_t hash) { return hash; }
static _FORCE_INLINE_ uint32_t hash(const uint32_t hash) { return hash; }
static _FORCE_INLINE_ uint64_t hash(const int64_t hash) { return hash; }
static _FORCE_INLINE_ uint64_t hash(const uint64_t hash) { return hash; }
};
// TODO: Fold this into HashMapHasherDefault once C++20 concepts are allowed
template <typename T>
struct HashableHasher {
static _FORCE_INLINE_ uint32_t hash(const T &hashable) { return hashable.hash(); }
};
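
A hedged sketch of what the new HashableHasher enables, assuming a user type exposing hash() and operator== (TileCoord and its fields are hypothetical):

struct TileCoord {
	int32_t x = 0, y = 0;
	uint32_t hash() const { return godot::hash_murmur3_one_32(uint32_t(y), godot::hash_murmur3_one_32(uint32_t(x))); }
	bool operator==(const TileCoord &p_other) const { return x == p_other.x && y == p_other.y; }
};

// HashableHasher forwards to TileCoord::hash(); the default comparator uses operator==.
godot::HashMap<TileCoord, float, godot::HashableHasher<TileCoord>> heights;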
template <typename T>
struct HashMapComparatorDefault {
static bool compare(const T &p_lhs, const T &p_rhs) {
@@ -410,6 +435,13 @@ struct HashMapComparatorDefault<double> {
}
};
template <>
struct HashMapComparatorDefault<Color> {
static bool compare(const Color &p_lhs, const Color &p_rhs) {
return ((p_lhs.r == p_rhs.r) || (Math::is_nan(p_lhs.r) && Math::is_nan(p_rhs.r))) && ((p_lhs.g == p_rhs.g) || (Math::is_nan(p_lhs.g) && Math::is_nan(p_rhs.g))) && ((p_lhs.b == p_rhs.b) || (Math::is_nan(p_lhs.b) && Math::is_nan(p_rhs.b))) && ((p_lhs.a == p_rhs.a) || (Math::is_nan(p_lhs.a) && Math::is_nan(p_rhs.a)));
}
};
template <>
struct HashMapComparatorDefault<Vector2> {
static bool compare(const Vector2 &p_lhs, const Vector2 &p_rhs) {
@@ -424,9 +456,90 @@ struct HashMapComparatorDefault<Vector3> {
}
};
template <>
struct HashMapComparatorDefault<Vector4> {
static bool compare(const Vector4 &p_lhs, const Vector4 &p_rhs) {
return ((p_lhs.x == p_rhs.x) || (Math::is_nan(p_lhs.x) && Math::is_nan(p_rhs.x))) && ((p_lhs.y == p_rhs.y) || (Math::is_nan(p_lhs.y) && Math::is_nan(p_rhs.y))) && ((p_lhs.z == p_rhs.z) || (Math::is_nan(p_lhs.z) && Math::is_nan(p_rhs.z))) && ((p_lhs.w == p_rhs.w) || (Math::is_nan(p_lhs.w) && Math::is_nan(p_rhs.w)));
}
};
template <>
struct HashMapComparatorDefault<Rect2> {
static bool compare(const Rect2 &p_lhs, const Rect2 &p_rhs) {
return HashMapComparatorDefault<Vector2>().compare(p_lhs.position, p_rhs.position) && HashMapComparatorDefault<Vector2>().compare(p_lhs.size, p_rhs.size);
}
};
template <>
struct HashMapComparatorDefault<AABB> {
static bool compare(const AABB &p_lhs, const AABB &p_rhs) {
return HashMapComparatorDefault<Vector3>().compare(p_lhs.position, p_rhs.position) && HashMapComparatorDefault<Vector3>().compare(p_lhs.size, p_rhs.size);
}
};
template <>
struct HashMapComparatorDefault<Plane> {
static bool compare(const Plane &p_lhs, const Plane &p_rhs) {
return HashMapComparatorDefault<Vector3>().compare(p_lhs.normal, p_rhs.normal) && ((p_lhs.d == p_rhs.d) || (Math::is_nan(p_lhs.d) && Math::is_nan(p_rhs.d)));
}
};
template <>
struct HashMapComparatorDefault<Transform2D> {
static bool compare(const Transform2D &p_lhs, const Transform2D &p_rhs) {
for (int i = 0; i < 3; ++i) {
if (!HashMapComparatorDefault<Vector2>().compare(p_lhs.columns[i], p_rhs.columns[i])) {
return false;
}
}
return true;
}
};
template <>
struct HashMapComparatorDefault<Basis> {
static bool compare(const Basis &p_lhs, const Basis &p_rhs) {
for (int i = 0; i < 3; ++i) {
if (!HashMapComparatorDefault<Vector3>().compare(p_lhs.rows[i], p_rhs.rows[i])) {
return false;
}
}
return true;
}
};
template <>
struct HashMapComparatorDefault<Transform3D> {
static bool compare(const Transform3D &p_lhs, const Transform3D &p_rhs) {
return HashMapComparatorDefault<Basis>().compare(p_lhs.basis, p_rhs.basis) && HashMapComparatorDefault<Vector3>().compare(p_lhs.origin, p_rhs.origin);
}
};
template <>
struct HashMapComparatorDefault<Projection> {
static bool compare(const Projection &p_lhs, const Projection &p_rhs) {
for (int i = 0; i < 4; ++i) {
if (!HashMapComparatorDefault<Vector4>().compare(p_lhs.columns[i], p_rhs.columns[i])) {
return false;
}
}
return true;
}
};
template <>
struct HashMapComparatorDefault<Quaternion> {
static bool compare(const Quaternion &p_lhs, const Quaternion &p_rhs) {
return ((p_lhs.x == p_rhs.x) || (Math::is_nan(p_lhs.x) && Math::is_nan(p_rhs.x))) && ((p_lhs.y == p_rhs.y) || (Math::is_nan(p_lhs.y) && Math::is_nan(p_rhs.y))) && ((p_lhs.z == p_rhs.z) || (Math::is_nan(p_lhs.z) && Math::is_nan(p_rhs.z))) && ((p_lhs.w == p_rhs.w) || (Math::is_nan(p_lhs.w) && Math::is_nan(p_rhs.w)));
}
};
constexpr uint32_t HASH_TABLE_SIZE_MAX = 29;
const uint32_t hash_table_size_primes[HASH_TABLE_SIZE_MAX] = {
inline constexpr uint32_t hash_table_size_primes[HASH_TABLE_SIZE_MAX] = {
5,
13,
23,
@@ -459,7 +572,7 @@ const uint32_t hash_table_size_primes[HASH_TABLE_SIZE_MAX] = {
};
// Computed with elem_i = UINT64_C(0xFFFFFFFFFFFFFFFF) / d_i + 1, where d_i is the i-th element of the above array.

const uint64_t hash_table_size_primes_inv[HASH_TABLE_SIZE_MAX] = {
inline constexpr uint64_t hash_table_size_primes_inv[HASH_TABLE_SIZE_MAX] = {
3689348814741910324,
1418980313362273202,
802032351030850071,

View File

@@ -34,6 +34,8 @@
#include <godot_cpp/core/memory.hpp>
#include <godot_cpp/templates/sort_array.hpp>
#include <initializer_list>
/**
* Generic Templatized Linked List Implementation.
* The implementation differs from the STL one because
@@ -133,6 +135,8 @@ public:
data->erase(this);
}
void transfer_to_back(List<T, A> *p_dst_list);
_FORCE_INLINE_ Element() {}
};
@@ -223,7 +227,7 @@ private:
Element *last = nullptr;
int size_cache = 0;
bool erase(const Element *p_I) {
bool erase(Element *p_I) {
ERR_FAIL_NULL_V(p_I, false);
ERR_FAIL_COND_V(p_I->data != this, false);
@@ -243,7 +247,7 @@ private:
p_I->next_ptr->prev_ptr = p_I->prev_ptr;
}
memdelete_allocator<Element, A>(const_cast<Element *>(p_I));
memdelete_allocator<Element, A>(p_I);
size_cache--;
return true;
@@ -429,7 +433,7 @@ public:
/**
* erase an element in the list, by iterator pointing to it. Return true if it was found/erased.
*/
bool erase(const Element *p_I) {
bool erase(Element *p_I) {
if (_data && p_I) {
bool ret = _data->erase(p_I);
@@ -521,10 +525,14 @@ public:
it = it->next();
}
}
void operator=(List &&p_list) {
if (unlikely(this == &p_list)) {
return;
}
// Index operator, kept for compatibility.
_FORCE_INLINE_ T &operator[](int p_index) {
return get(p_index);
clear();
_data = p_list._data;
p_list._data = nullptr;
}
// Random access to elements, use with care,
@@ -542,11 +550,6 @@ public:
return I->get();
}
// Index operator, kept for compatibility.
_FORCE_INLINE_ const T &operator[](int p_index) const {
return get(p_index);
}
// Random access to elements, use with care,
// do not use for iteration.
const T &get(int p_index) const {
@@ -720,8 +723,8 @@ public:
template <typename C>
void sort_custom() {
// this version uses auxiliary memory for speed.
// if you don't want to use auxiliary memory, use the in_place version
//this version uses auxiliary memory for speed.
//if you don't want to use auxiliary memory, use the in_place version
int s = size();
if (s < 2) {
@@ -769,9 +772,19 @@ public:
it = it->next();
}
}
List(List &&p_list) {
_data = p_list._data;
p_list._data = nullptr;
}
List() {}
List(std::initializer_list<T> p_init) {
for (const T &E : p_init) {
push_back(E);
}
}
~List() {
clear();
if (_data) {
@@ -781,4 +794,41 @@ public:
}
};
template <typename T, typename A>
void List<T, A>::Element::transfer_to_back(List<T, A> *p_dst_list) {
// Detach from current.
if (data->first == this) {
data->first = data->first->next_ptr;
}
if (data->last == this) {
data->last = data->last->prev_ptr;
}
if (prev_ptr) {
prev_ptr->next_ptr = next_ptr;
}
if (next_ptr) {
next_ptr->prev_ptr = prev_ptr;
}
data->size_cache--;
// Attach to the back of the new one.
if (!p_dst_list->_data) {
p_dst_list->_data = memnew_allocator(_Data, A);
p_dst_list->_data->first = this;
p_dst_list->_data->last = nullptr;
p_dst_list->_data->size_cache = 0;
prev_ptr = nullptr;
} else {
p_dst_list->_data->last->next_ptr = this;
prev_ptr = p_dst_list->_data->last;
}
p_dst_list->_data->last = this;
next_ptr = nullptr;
data = p_dst_list->_data;
p_dst_list->_data->size_cache++;
}
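
A brief usage sketch of Element::transfer_to_back(), which moves a node between lists without freeing or reallocating it:

godot::List<int> pending;
godot::List<int> done;
pending.push_back(1);
pending.push_back(2);
// Moves the node holding 1 to the back of `done`; the element itself is not
// copied or destroyed, only its list membership changes.
pending.front()->transfer_to_back(&done);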
} // namespace godot

View File

@@ -58,21 +58,18 @@ public:
return data;
}
// Must take a copy instead of a reference (see GH-31736).
_FORCE_INLINE_ void push_back(T p_elem) {
if (unlikely(count == capacity)) {
if (capacity == 0) {
capacity = 1;
} else {
capacity <<= 1;
}
capacity = tight ? (capacity + 1) : MAX((U)1, capacity << 1);
data = (T *)memrealloc(data, capacity * sizeof(T));
CRASH_COND_MSG(!data, "Out of memory");
}
if constexpr (!std::is_trivially_constructible<T>::value && !force_trivial) {
if constexpr (!std::is_trivially_constructible_v<T> && !force_trivial) {
memnew_placement(&data[count++], T(p_elem));
} else {
data[count++] = p_elem;
data[count++] = std::move(p_elem);
}
}
@@ -80,31 +77,49 @@ public:
ERR_FAIL_UNSIGNED_INDEX(p_index, count);
count--;
for (U i = p_index; i < count; i++) {
data[i] = data[i + 1];
data[i] = std::move(data[i + 1]);
}
if constexpr (!std::is_trivially_destructible<T>::value && !force_trivial) {
if constexpr (!std::is_trivially_destructible_v<T> && !force_trivial) {
data[count].~T();
}
}
/// Removes the item copying the last value into the position of the one to
/// remove. It's generally faster than `remove`.
/// remove. It's generally faster than `remove_at`.
void remove_at_unordered(U p_index) {
ERR_FAIL_INDEX(p_index, count);
count--;
if (count > p_index) {
data[p_index] = data[count];
data[p_index] = std::move(data[count]);
}
if constexpr (!std::is_trivially_destructible<T>::value && !force_trivial) {
if constexpr (!std::is_trivially_destructible_v<T> && !force_trivial) {
data[count].~T();
}
}
void erase(const T &p_val) {
_FORCE_INLINE_ bool erase(const T &p_val) {
int64_t idx = find(p_val);
if (idx >= 0) {
remove_at(idx);
return true;
}
return false;
}
U erase_multiple_unordered(const T &p_val) {
U from = 0;
U occurrences = 0;
while (true) {
int64_t idx = find(p_val, from);
if (idx == -1) {
break;
}
remove_at_unordered(idx);
from = idx;
occurrences++;
}
return occurrences;
}
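
A small sketch of the updated erase() return value and the new erase_multiple_unordered():

godot::LocalVector<int> values;
values.push_back(1);
values.push_back(2);
values.push_back(2);
bool removed = values.erase(1); // true; ordered removal via remove_at().
// Removes every 2 using the faster unordered removal; returns how many were removed.
uint32_t occurrences = values.erase_multiple_unordered(2);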
void invert() {
@@ -136,7 +151,7 @@ public:
_FORCE_INLINE_ U size() const { return count; }
void resize(U p_size) {
if (p_size < count) {
if constexpr (!std::is_trivially_destructible<T>::value && !force_trivial) {
if constexpr (!std::is_trivially_destructible_v<T> && !force_trivial) {
for (U i = p_size; i < count; i++) {
data[i].~T();
}
@@ -144,16 +159,11 @@ public:
count = p_size;
} else if (p_size > count) {
if (unlikely(p_size > capacity)) {
if (capacity == 0) {
capacity = 1;
}
while (capacity < p_size) {
capacity <<= 1;
}
capacity = tight ? p_size : nearest_power_of_2_templated(p_size);
data = (T *)memrealloc(data, capacity * sizeof(T));
CRASH_COND_MSG(!data, "Out of memory");
}
if constexpr (!std::is_trivially_constructible<T>::value && !force_trivial) {
if constexpr (!std::is_trivially_constructible_v<T> && !force_trivial) {
for (U i = count; i < p_size; i++) {
memnew_placement(&data[i], T);
}
@@ -237,13 +247,13 @@ public:
void insert(U p_pos, T p_val) {
ERR_FAIL_UNSIGNED_INDEX(p_pos, count + 1);
if (p_pos == count) {
push_back(p_val);
push_back(std::move(p_val));
} else {
resize(count + 1);
for (U i = count - 1; i > p_pos; i--) {
data[i] = data[i - 1];
data[i] = std::move(data[i - 1]);
}
data[p_pos] = p_val;
data[p_pos] = std::move(p_val);
}
}
@@ -287,9 +297,17 @@ public:
operator Vector<T>() const {
Vector<T> ret;
ret.resize(size());
ret.resize(count);
T *w = ret.ptrw();
memcpy(w, data, sizeof(T) * count);
if (w) {
if constexpr (std::is_trivially_copyable_v<T>) {
memcpy(w, data, sizeof(T) * count);
} else {
for (U i = 0; i < count; i++) {
w[i] = data[i];
}
}
}
return ret;
}
@@ -297,7 +315,9 @@ public:
Vector<uint8_t> ret;
ret.resize(count * sizeof(T));
uint8_t *w = ret.ptrw();
memcpy(w, data, sizeof(T) * count);
if (w) {
memcpy(w, data, sizeof(T) * count);
}
return ret;
}
@@ -314,6 +334,16 @@ public:
data[i] = p_from.data[i];
}
}
_FORCE_INLINE_ LocalVector(LocalVector &&p_from) {
data = p_from.data;
count = p_from.count;
capacity = p_from.capacity;
p_from.data = nullptr;
p_from.count = 0;
p_from.capacity = 0;
}
inline void operator=(const LocalVector &p_from) {
resize(p_from.size());
for (U i = 0; i < p_from.count; i++) {
@@ -326,6 +356,26 @@ public:
data[i] = p_from[i];
}
}
inline void operator=(LocalVector &&p_from) {
if (unlikely(this == &p_from)) {
return;
}
reset();
data = p_from.data;
count = p_from.count;
capacity = p_from.capacity;
p_from.data = nullptr;
p_from.count = 0;
p_from.capacity = 0;
}
inline void operator=(Vector<T> &&p_from) {
resize(p_from.size());
for (U i = 0; i < count; i++) {
data[i] = std::move(p_from[i]);
}
}
_FORCE_INLINE_ ~LocalVector() {
if (data) {

View File

@@ -68,6 +68,15 @@ struct PairSort {
}
};
template <typename F, typename S>
struct PairHash {
static uint32_t hash(const Pair<F, S> &P) {
uint64_t h1 = HashMapHasherDefault::hash(P.first);
uint64_t h2 = HashMapHasherDefault::hash(P.second);
return hash_one_uint64((h1 << 32) | h2);
}
};
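
A hedged sketch of the new PairHash, which lets a Pair be used directly as a HashMap key (Pair already provides the equality the default comparator needs):

godot::HashMap<godot::Pair<int, int>, float, godot::PairHash<int, int>> edge_weights;
edge_weights.insert(godot::Pair<int, int>(1, 2), 0.5f);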
template <typename K, typename V>
struct KeyValue {
const K key;

View File

@@ -34,6 +34,8 @@
#include <godot_cpp/core/memory.hpp>
#include <godot_cpp/templates/pair.hpp>
#include <initializer_list>
namespace godot {
// based on the very nice implementation of rb-trees by:
@@ -97,6 +99,8 @@ public:
typedef KeyValue<K, V> ValueType;
struct Iterator {
friend class RBMap<K, V, C, A>;
_FORCE_INLINE_ KeyValue<K, V> &operator*() const {
return E->key_value();
}
@@ -110,11 +114,16 @@ public:
return *this;
}
_FORCE_INLINE_ bool operator==(const Iterator &b) const { return E == b.E; }
_FORCE_INLINE_ bool operator!=(const Iterator &b) const { return E != b.E; }
_FORCE_INLINE_ bool operator==(const Iterator &p_it) const { return E == p_it.E; }
_FORCE_INLINE_ bool operator!=(const Iterator &p_it) const { return E != p_it.E; }
explicit operator bool() const {
return E != nullptr;
}
Iterator &operator=(const Iterator &p_it) {
E = p_it.E;
return *this;
}
Iterator(Element *p_E) { E = p_E; }
Iterator() {}
Iterator(const Iterator &p_it) { E = p_it.E; }
@@ -137,11 +146,16 @@ public:
return *this;
}
_FORCE_INLINE_ bool operator==(const ConstIterator &b) const { return E == b.E; }
_FORCE_INLINE_ bool operator!=(const ConstIterator &b) const { return E != b.E; }
_FORCE_INLINE_ bool operator==(const ConstIterator &p_it) const { return E == p_it.E; }
_FORCE_INLINE_ bool operator!=(const ConstIterator &p_it) const { return E != p_it.E; }
explicit operator bool() const {
return E != nullptr;
}
ConstIterator &operator=(const ConstIterator &p_it) {
E = p_it.E;
return *this;
}
ConstIterator(const Element *p_E) { E = p_E; }
ConstIterator() {}
ConstIterator(const ConstIterator &p_it) { E = p_it.E; }
@@ -418,7 +432,7 @@ private:
new_node->right = _data._nil;
new_node->left = _data._nil;
// new_node->data=_data;
//new_node->data=_data;
if (new_parent == _data._root || less(p_key, new_parent->_data.key)) {
new_parent->left = new_node;
@@ -752,6 +766,12 @@ public:
_copy_from(p_map);
}
RBMap(std::initializer_list<KeyValue<K, V>> p_init) {
for (const KeyValue<K, V> &E : p_init) {
insert(E.key, E.value);
}
}
_FORCE_INLINE_ RBMap() {}
~RBMap() {

View File

@@ -32,6 +32,8 @@
#include <godot_cpp/core/memory.hpp>
#include <initializer_list>
// based on the very nice implementation of rb-trees by:
// https://web.archive.org/web/20120507164830/https://web.mit.edu/~emin/www/source_code/red_black_tree/index.html
@@ -398,7 +400,7 @@ private:
new_node->right = _data._nil;
new_node->left = _data._nil;
new_node->value = p_value;
// new_node->data=_data;
//new_node->data=_data;
if (new_parent == _data._root || less(p_value, new_parent->value)) {
new_parent->left = new_node;
@@ -701,6 +703,12 @@ public:
_copy_from(p_set);
}
RBSet(std::initializer_list<T> p_init) {
for (const T &E : p_init) {
insert(E);
}
}
_FORCE_INLINE_ RBSet() {}
~RBSet() {

View File

@@ -51,7 +51,7 @@ namespace godot {
#define SAFE_NUMERIC_TYPE_PUN_GUARANTEES(m_type) \
static_assert(sizeof(SafeNumeric<m_type>) == sizeof(m_type)); \
static_assert(alignof(SafeNumeric<m_type>) == alignof(m_type)); \
static_assert(std::is_trivially_destructible<std::atomic<m_type>>::value);
static_assert(std::is_trivially_destructible_v<std::atomic<m_type>>);
#define SAFE_FLAG_TYPE_PUN_GUARANTEES \
static_assert(sizeof(SafeFlag) == sizeof(bool)); \
static_assert(alignof(SafeFlag) == alignof(bool));
@@ -102,6 +102,17 @@ public:
return value.fetch_sub(p_value, std::memory_order_acq_rel) - p_value;
}
_ALWAYS_INLINE_ T bit_or(T p_value) {
return value.fetch_or(p_value, std::memory_order_acq_rel);
}
_ALWAYS_INLINE_ T bit_and(T p_value) {
return value.fetch_and(p_value, std::memory_order_acq_rel);
}
_ALWAYS_INLINE_ T bit_xor(T p_value) {
return value.fetch_xor(p_value, std::memory_order_acq_rel);
}
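
A minimal sketch of the new atomic bit operations (each returns the value held before the operation):

godot::SafeNumeric<uint32_t> flags;
flags.bit_or(0x4);    // Atomically set bit 2.
flags.bit_and(~0x4u); // Atomically clear it again.
flags.bit_xor(0x1);   // Atomically toggle bit 0.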
// Returns the original value instead of the new one
_ALWAYS_INLINE_ T postsub(T p_value) {
return value.fetch_sub(p_value, std::memory_order_acq_rel);
@@ -113,7 +124,8 @@ public:
if (tmp >= p_value) {
return tmp; // already greater, or equal
}
if (value.compare_exchange_weak(tmp, p_value, std::memory_order_release)) {
if (value.compare_exchange_weak(tmp, p_value, std::memory_order_acq_rel)) {
return p_value;
}
}
@@ -125,7 +137,7 @@ public:
if (c == 0) {
return 0;
}
if (value.compare_exchange_weak(c, c + 1, std::memory_order_release)) {
if (value.compare_exchange_weak(c, c + 1, std::memory_order_acq_rel)) {
return c + 1;
}
}
@@ -166,6 +178,16 @@ public:
class SafeRefCount {
SafeNumeric<uint32_t> count;
#ifdef DEV_ENABLED
_ALWAYS_INLINE_ void _check_unref_safety() {
// This won't catch every misuse, but it's better than nothing.
CRASH_COND_MSG(count.get() == 0,
"Trying to unreference a SafeRefCount which is already zero is wrong and a symptom of it being misused.\n"
"Upon a SafeRefCount reaching zero any object whose lifetime is tied to it, as well as the ref count itself, must be destroyed.\n"
"Moreover, to guarantee that, no multiple threads should be racing to do the final unreferencing to zero.");
}
#endif
public:
_ALWAYS_INLINE_ bool ref() { // true on success
return count.conditional_increment() != 0;
@@ -176,10 +198,16 @@ public:
}
_ALWAYS_INLINE_ bool unref() { // true if must be disposed of
#ifdef DEV_ENABLED
_check_unref_safety();
#endif
return count.decrement() == 0;
}
_ALWAYS_INLINE_ uint32_t unrefval() { // 0 if must be disposed of
#ifdef DEV_ENABLED
_check_unref_safety();
#endif
return count.decrement();
}
@@ -192,141 +220,6 @@ public:
}
};
#else
template <typename T>
class SafeNumeric {
protected:
T value;
public:
_ALWAYS_INLINE_ void set(T p_value) {
value = p_value;
}
_ALWAYS_INLINE_ T get() const {
return value;
}
_ALWAYS_INLINE_ T increment() {
return ++value;
}
_ALWAYS_INLINE_ T postincrement() {
return value++;
}
_ALWAYS_INLINE_ T decrement() {
return --value;
}
_ALWAYS_INLINE_ T postdecrement() {
return value--;
}
_ALWAYS_INLINE_ T add(T p_value) {
return value += p_value;
}
_ALWAYS_INLINE_ T postadd(T p_value) {
T old = value;
value += p_value;
return old;
}
_ALWAYS_INLINE_ T sub(T p_value) {
return value -= p_value;
}
_ALWAYS_INLINE_ T postsub(T p_value) {
T old = value;
value -= p_value;
return old;
}
_ALWAYS_INLINE_ T exchange_if_greater(T p_value) {
if (value < p_value) {
value = p_value;
}
return value;
}
_ALWAYS_INLINE_ T conditional_increment() {
if (value == 0) {
return 0;
} else {
return ++value;
}
}
_ALWAYS_INLINE_ explicit SafeNumeric<T>(T p_value = static_cast<T>(0)) :
value(p_value) {
}
};
class SafeFlag {
protected:
bool flag;
public:
_ALWAYS_INLINE_ bool is_set() const {
return flag;
}
_ALWAYS_INLINE_ void set() {
flag = true;
}
_ALWAYS_INLINE_ void clear() {
flag = false;
}
_ALWAYS_INLINE_ void set_to(bool p_value) {
flag = p_value;
}
_ALWAYS_INLINE_ explicit SafeFlag(bool p_value = false) :
flag(p_value) {}
};
class SafeRefCount {
uint32_t count = 0;
public:
_ALWAYS_INLINE_ bool ref() { // true on success
if (count != 0) {
++count;
return true;
} else {
return false;
}
}
_ALWAYS_INLINE_ uint32_t refval() { // none-zero on success
if (count != 0) {
return ++count;
} else {
return 0;
}
}
_ALWAYS_INLINE_ bool unref() { // true if must be disposed of
return --count == 0;
}
_ALWAYS_INLINE_ uint32_t unrefval() { // 0 if must be disposed of
return --count;
}
_ALWAYS_INLINE_ uint32_t get() const {
return count;
}
_ALWAYS_INLINE_ void init(uint32_t p_value = 1) {
count = p_value;
}
};
#endif
} // namespace godot
#endif // !defined(NO_THREADS)

View File

@@ -39,12 +39,12 @@ class SearchArray {
public:
Comparator compare;
inline int bisect(const T *p_array, int p_len, const T &p_value, bool p_before) const {
int lo = 0;
int hi = p_len;
inline int64_t bisect(const T *p_array, int64_t p_len, const T &p_value, bool p_before) const {
int64_t lo = 0;
int64_t hi = p_len;
if (p_before) {
while (lo < hi) {
const int mid = (lo + hi) / 2;
const int64_t mid = (lo + hi) / 2;
if (compare(p_array[mid], p_value)) {
lo = mid + 1;
} else {
@@ -53,7 +53,7 @@ public:
}
} else {
while (lo < hi) {
const int mid = (lo + hi) / 2;
const int64_t mid = (lo + hi) / 2;
if (compare(p_value, p_array[mid])) {
hi = mid;
} else {

View File

@@ -100,11 +100,74 @@ public:
p_elem->_root = nullptr;
}
void clear() {
while (_first) {
remove(_first);
}
}
void sort() {
sort_custom<Comparator<T>>();
}
template <typename C>
void sort_custom() {
if (_first == _last) {
return;
}
SelfList<T> *from = _first;
SelfList<T> *current = from;
SelfList<T> *to = from;
while (current) {
SelfList<T> *next = current->_next;
if (from != current) {
current->_prev = nullptr;
current->_next = from;
SelfList<T> *find = from;
C less;
while (find && less(*find->_self, *current->_self)) {
current->_prev = find;
current->_next = find->_next;
find = find->_next;
}
if (current->_prev) {
current->_prev->_next = current;
} else {
from = current;
}
if (current->_next) {
current->_next->_prev = current;
} else {
to = current;
}
} else {
current->_prev = nullptr;
current->_next = nullptr;
}
current = next;
}
_first = from;
_last = to;
}
_FORCE_INLINE_ SelfList<T> *first() { return _first; }
_FORCE_INLINE_ const SelfList<T> *first() const { return _first; }
// Forbid copying, which has broken behavior.
void operator=(const List &) = delete;
_FORCE_INLINE_ List() {}
_FORCE_INLINE_ ~List() { ERR_FAIL_COND(_first != nullptr); }
_FORCE_INLINE_ ~List() {
// A self list must be empty on destruction.
DEV_ASSERT(_first == nullptr);
}
};
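
A hedged sketch of the new SelfList<T>::List::sort(), assuming an element type with operator< (Job and its fields are hypothetical):

struct Job {
	int priority = 0;
	godot::SelfList<Job> list_element;
	Job() : list_element(this) {}
	bool operator<(const Job &p_other) const { return priority < p_other.priority; }
};

godot::SelfList<Job>::List queue;
Job a, b;
a.priority = 2;
b.priority = 1;
queue.add(&a.list_element);
queue.add(&b.list_element);
queue.sort(); // Orders by operator<, so b now precedes a.
// The list must be emptied before it is destroyed (see the assert above).
queue.remove(&a.list_element);
queue.remove(&b.list_element);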
private:
@@ -126,6 +189,9 @@ public:
_FORCE_INLINE_ const SelfList<T> *prev() const { return _prev; }
_FORCE_INLINE_ T *self() const { return _self; }
// Forbid copying, which has broken behavior.
void operator=(const SelfList<T> &) = delete;
_FORCE_INLINE_ SelfList(T *p_self) {
_self = p_self;
}

View File

@@ -78,8 +78,8 @@ public:
}
}
inline int bitlog(int n) const {
int k;
inline int64_t bitlog(int64_t n) const {
int64_t k;
for (k = 0; n != 1; n >>= 1) {
++k;
}
@@ -88,8 +88,8 @@ public:
/* Heap / Heapsort functions */
inline void push_heap(int p_first, int p_hole_idx, int p_top_index, T p_value, T *p_array) const {
int parent = (p_hole_idx - 1) / 2;
inline void push_heap(int64_t p_first, int64_t p_hole_idx, int64_t p_top_index, T p_value, T *p_array) const {
int64_t parent = (p_hole_idx - 1) / 2;
while (p_hole_idx > p_top_index && compare(p_array[p_first + parent], p_value)) {
p_array[p_first + p_hole_idx] = p_array[p_first + parent];
p_hole_idx = parent;
@@ -98,17 +98,17 @@ public:
p_array[p_first + p_hole_idx] = p_value;
}
inline void pop_heap(int p_first, int p_last, int p_result, T p_value, T *p_array) const {
inline void pop_heap(int64_t p_first, int64_t p_last, int64_t p_result, T p_value, T *p_array) const {
p_array[p_result] = p_array[p_first];
adjust_heap(p_first, 0, p_last - p_first, p_value, p_array);
}
inline void pop_heap(int p_first, int p_last, T *p_array) const {
inline void pop_heap(int64_t p_first, int64_t p_last, T *p_array) const {
pop_heap(p_first, p_last - 1, p_last - 1, p_array[p_last - 1], p_array);
}
inline void adjust_heap(int p_first, int p_hole_idx, int p_len, T p_value, T *p_array) const {
int top_index = p_hole_idx;
int second_child = 2 * p_hole_idx + 2;
inline void adjust_heap(int64_t p_first, int64_t p_hole_idx, int64_t p_len, T p_value, T *p_array) const {
int64_t top_index = p_hole_idx;
int64_t second_child = 2 * p_hole_idx + 2;
while (second_child < p_len) {
if (compare(p_array[p_first + second_child], p_array[p_first + (second_child - 1)])) {
@@ -127,18 +127,18 @@ public:
push_heap(p_first, p_hole_idx, top_index, p_value, p_array);
}
inline void sort_heap(int p_first, int p_last, T *p_array) const {
inline void sort_heap(int64_t p_first, int64_t p_last, T *p_array) const {
while (p_last - p_first > 1) {
pop_heap(p_first, p_last--, p_array);
}
}
inline void make_heap(int p_first, int p_last, T *p_array) const {
inline void make_heap(int64_t p_first, int64_t p_last, T *p_array) const {
if (p_last - p_first < 2) {
return;
}
int len = p_last - p_first;
int parent = (len - 2) / 2;
int64_t len = p_last - p_first;
int64_t parent = (len - 2) / 2;
while (true) {
adjust_heap(p_first, parent, len, p_array[p_first + parent], p_array);
@@ -149,9 +149,9 @@ public:
}
}
inline void partial_sort(int p_first, int p_last, int p_middle, T *p_array) const {
inline void partial_sort(int64_t p_first, int64_t p_last, int64_t p_middle, T *p_array) const {
make_heap(p_first, p_middle, p_array);
for (int i = p_middle; i < p_last; i++) {
for (int64_t i = p_middle; i < p_last; i++) {
if (compare(p_array[i], p_array[p_first])) {
pop_heap(p_first, p_middle, i, p_array[i], p_array);
}
@@ -159,29 +159,29 @@ public:
sort_heap(p_first, p_middle, p_array);
}
inline void partial_select(int p_first, int p_last, int p_middle, T *p_array) const {
inline void partial_select(int64_t p_first, int64_t p_last, int64_t p_middle, T *p_array) const {
make_heap(p_first, p_middle, p_array);
for (int i = p_middle; i < p_last; i++) {
for (int64_t i = p_middle; i < p_last; i++) {
if (compare(p_array[i], p_array[p_first])) {
pop_heap(p_first, p_middle, i, p_array[i], p_array);
}
}
}
inline int partitioner(int p_first, int p_last, T p_pivot, T *p_array) const {
const int unmodified_first = p_first;
const int unmodified_last = p_last;
inline int64_t partitioner(int64_t p_first, int64_t p_last, T p_pivot, T *p_array) const {
const int64_t unmodified_first = p_first;
const int64_t unmodified_last = p_last;
while (true) {
while (compare(p_array[p_first], p_pivot)) {
if (Validate) {
if constexpr (Validate) {
ERR_BAD_COMPARE(p_first == unmodified_last - 1);
}
p_first++;
}
p_last--;
while (compare(p_pivot, p_array[p_last])) {
if (Validate) {
if constexpr (Validate) {
ERR_BAD_COMPARE(p_last == unmodified_first);
}
p_last--;
@@ -196,7 +196,7 @@ public:
}
}
inline void introsort(int p_first, int p_last, T *p_array, int p_max_depth) const {
inline void introsort(int64_t p_first, int64_t p_last, T *p_array, int64_t p_max_depth) const {
while (p_last - p_first > INTROSORT_THRESHOLD) {
if (p_max_depth == 0) {
partial_sort(p_first, p_last, p_last, p_array);
@@ -205,7 +205,7 @@ public:
p_max_depth--;
int cut = partitioner(
int64_t cut = partitioner(
p_first,
p_last,
median_of_3(
@@ -219,7 +219,7 @@ public:
}
}
inline void introselect(int p_first, int p_nth, int p_last, T *p_array, int p_max_depth) const {
inline void introselect(int64_t p_first, int64_t p_nth, int64_t p_last, T *p_array, int64_t p_max_depth) const {
while (p_last - p_first > 3) {
if (p_max_depth == 0) {
partial_select(p_first, p_nth + 1, p_last, p_array);
@@ -229,7 +229,7 @@ public:
p_max_depth--;
int cut = partitioner(
int64_t cut = partitioner(
p_first,
p_last,
median_of_3(
@@ -248,10 +248,10 @@ public:
insertion_sort(p_first, p_last, p_array);
}
inline void unguarded_linear_insert(int p_last, T p_value, T *p_array) const {
int next = p_last - 1;
inline void unguarded_linear_insert(int64_t p_last, T p_value, T *p_array) const {
int64_t next = p_last - 1;
while (compare(p_value, p_array[next])) {
if (Validate) {
if constexpr (Validate) {
ERR_BAD_COMPARE(next == 0);
}
p_array[p_last] = p_array[next];
@@ -261,10 +261,10 @@ public:
p_array[p_last] = p_value;
}
inline void linear_insert(int p_first, int p_last, T *p_array) const {
inline void linear_insert(int64_t p_first, int64_t p_last, T *p_array) const {
T val = p_array[p_last];
if (compare(val, p_array[p_first])) {
for (int i = p_last; i > p_first; i--) {
for (int64_t i = p_last; i > p_first; i--) {
p_array[i] = p_array[i - 1];
}
@@ -274,22 +274,22 @@ public:
}
}
inline void insertion_sort(int p_first, int p_last, T *p_array) const {
inline void insertion_sort(int64_t p_first, int64_t p_last, T *p_array) const {
if (p_first == p_last) {
return;
}
for (int i = p_first + 1; i != p_last; i++) {
for (int64_t i = p_first + 1; i != p_last; i++) {
linear_insert(p_first, i, p_array);
}
}
inline void unguarded_insertion_sort(int p_first, int p_last, T *p_array) const {
for (int i = p_first; i != p_last; i++) {
inline void unguarded_insertion_sort(int64_t p_first, int64_t p_last, T *p_array) const {
for (int64_t i = p_first; i != p_last; i++) {
unguarded_linear_insert(i, p_array[i], p_array);
}
}
inline void final_insertion_sort(int p_first, int p_last, T *p_array) const {
inline void final_insertion_sort(int64_t p_first, int64_t p_last, T *p_array) const {
if (p_last - p_first > INTROSORT_THRESHOLD) {
insertion_sort(p_first, p_first + INTROSORT_THRESHOLD, p_array);
unguarded_insertion_sort(p_first + INTROSORT_THRESHOLD, p_last, p_array);
@@ -298,18 +298,18 @@ public:
}
}
inline void sort_range(int p_first, int p_last, T *p_array) const {
inline void sort_range(int64_t p_first, int64_t p_last, T *p_array) const {
if (p_first != p_last) {
introsort(p_first, p_last, p_array, bitlog(p_last - p_first) * 2);
final_insertion_sort(p_first, p_last, p_array);
}
}
inline void sort(T *p_array, int p_len) const {
inline void sort(T *p_array, int64_t p_len) const {
sort_range(0, p_len, p_array);
}
inline void nth_element(int p_first, int p_last, int p_nth, T *p_array) const {
inline void nth_element(int64_t p_first, int64_t p_last, int64_t p_nth, T *p_array) const {
if (p_first == p_last || p_nth == p_last) {
return;
}

View File

@@ -68,6 +68,7 @@ private:
CowData<T> _cowdata;
public:
// Must take a copy instead of a reference (see GH-31736).
bool push_back(T p_elem);
_FORCE_INLINE_ bool append(const T &p_elem) { return push_back(p_elem); } //alias
void fill(T p_elem);
@@ -96,11 +97,13 @@ public:
Error resize(Size p_size) { return _cowdata.resize(p_size); }
Error resize_zeroed(Size p_size) { return _cowdata.template resize<true>(p_size); }
_FORCE_INLINE_ const T &operator[](Size p_index) const { return _cowdata.get(p_index); }
// Must take a copy instead of a reference (see GH-31736).
Error insert(Size p_pos, T p_val) { return _cowdata.insert(p_pos, p_val); }
Size find(const T &p_val, Size p_from = 0) const { return _cowdata.find(p_val, p_from); }
Size rfind(const T &p_val, Size p_from = -1) const { return _cowdata.rfind(p_val, p_from); }
Size count(const T &p_val) const { return _cowdata.count(p_val); }
// Must take a copy instead of a reference (see GH-31736).
void append_array(Vector<T> p_other);
_FORCE_INLINE_ bool has(const T &p_val) const { return find(p_val) != -1; }
@@ -145,17 +148,19 @@ public:
insert(i, p_val);
}
inline void operator=(const Vector &p_from) {
_cowdata._ref(p_from._cowdata);
}
void operator=(const Vector &p_from) { _cowdata._ref(p_from._cowdata); }
void operator=(Vector &&p_from) { _cowdata = std::move(p_from._cowdata); }
Vector<uint8_t> to_byte_array() const {
Vector<uint8_t> ret;
if (is_empty()) {
return ret;
}
ret.resize(size() * sizeof(T));
memcpy(ret.ptrw(), ptr(), sizeof(T) * size());
size_t alloc_size = size() * sizeof(T);
ret.resize(alloc_size);
if (alloc_size) {
memcpy(ret.ptrw(), ptr(), alloc_size);
}
return ret;
}
@@ -278,16 +283,11 @@ public:
}
_FORCE_INLINE_ Vector() {}
_FORCE_INLINE_ Vector(std::initializer_list<T> p_init) {
Error err = _cowdata.resize(p_init.size());
ERR_FAIL_COND(err);
Size i = 0;
for (const T &element : p_init) {
_cowdata.set(i++, element);
}
}
_FORCE_INLINE_ Vector(std::initializer_list<T> p_init) :
_cowdata(p_init) {}
_FORCE_INLINE_ Vector(const Vector &p_from) { _cowdata._ref(p_from._cowdata); }
_FORCE_INLINE_ Vector(Vector &&p_from) :
_cowdata(std::move(p_from._cowdata)) {}
_FORCE_INLINE_ ~Vector() {}
};
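
A short sketch of the Vector changes above (brace initialization now delegates to CowData, moves steal the COW buffer, and to_byte_array() guards its memcpy for empty vectors):

godot::Vector<int> a = { 1, 2, 3 };  // Uses the new initializer_list constructor.
godot::Vector<int> b = std::move(a); // Takes over the buffer; `a` is left empty.
godot::Vector<uint8_t> bytes = b.to_byte_array();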

View File

@@ -72,16 +72,16 @@ private:
middle = (low + high) / 2;
if (p_val < a[middle].key) {
high = middle - 1; // search low end of array
high = middle - 1; //search low end of array
} else if (a[middle].key < p_val) {
low = middle + 1; // search high end of array
low = middle + 1; //search high end of array
} else {
r_exact = true;
return middle;
}
}
// return the position where this would be inserted
//return the position where this would be inserted
if (a[middle].key < p_val) {
middle++;
}
@@ -102,9 +102,9 @@ private:
middle = (low + high) / 2;
if (p_val < a[middle].key) {
high = middle - 1; // search low end of array
high = middle - 1; //search low end of array
} else if (a[middle].key < p_val) {
low = middle + 1; // search high end of array
low = middle + 1; //search high end of array
} else {
return middle;
}
@@ -142,6 +142,9 @@ public:
}
int find_nearest(const T &p_val) const {
if (_cowdata.is_empty()) {
return -1;
}
bool exact;
return _find(p_val, exact);
}
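
A small sketch of the effect of the new empty-map guard in find_nearest():

godot::VMap<godot::String, int> cache;
// Returns -1 on an empty map instead of an index into an empty backing array.
int nearest = cache.find_nearest("missing");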
@@ -191,6 +194,8 @@ public:
}
_FORCE_INLINE_ VMap() {}
_FORCE_INLINE_ VMap(std::initializer_list<T> p_init) :
_cowdata(p_init) {}
_FORCE_INLINE_ VMap(const VMap &p_from) { _cowdata._ref(p_from._cowdata); }
inline void operator=(const VMap &p_from) {

View File

@@ -59,16 +59,16 @@ class VSet {
middle = (low + high) / 2;
if (p_val < a[middle]) {
high = middle - 1; // search low end of array
high = middle - 1; //search low end of array
} else if (a[middle] < p_val) {
low = middle + 1; // search high end of array
low = middle + 1; //search high end of array
} else {
r_exact = true;
return middle;
}
}
// return the position where this would be inserted
//return the position where this would be inserted
if (a[middle] < p_val) {
middle++;
}
@@ -89,9 +89,9 @@ class VSet {
middle = (low + high) / 2;
if (p_val < a[middle]) {
high = middle - 1; // search low end of array
high = middle - 1; //search low end of array
} else if (a[middle] < p_val) {
low = middle + 1; // search high end of array
low = middle + 1; //search high end of array
} else {
return middle;
}
@@ -137,6 +137,10 @@ public:
inline const T &operator[](int p_index) const {
return _data[p_index];
}
_FORCE_INLINE_ VSet() {}
_FORCE_INLINE_ VSet(std::initializer_list<T> p_init) :
_data(p_init) {}
};
} // namespace godot