17 namespace concurrent {
// ---------------------------------------------------------------------------
// NOTE(review): this chunk is a partial extraction.  The bare integers that
// start many lines ("25", "26", ...) are the original file's line numbers
// fused into the text, and the jumps between them show that several source
// lines are missing.  Code is therefore left byte-identical; only comments
// are added.
// ---------------------------------------------------------------------------
// Bidirectional iterator over a concurrent hash_map.  State:
//   _Map     - the map being walked (non-owning pointer; nullptr for a
//              default-constructed/end iterator)
//   _Current - element currently pointed at (nullptr means end)
//   _Key     - nibble path to _Current inside the map's bucket trie,
//              one int8_t per nibble, -1 used as the "unset" sentinel
25 template<
typename _HashMapT>
26 class hash_map_iterator {
// friend declaration (target elided): lets hash_map<K,V,NibblePos>
// reach the private positioning constructor below
27 template<
typename,
typename,
int>
friend
// number of nibbles in a key: two per byte of key_type
30 static constexpr
int key_nibbles =
sizeof(
typename _HashMapT::key_type) * 2;
31 const _HashMapT *_Map;
32 typename _HashMapT::value_type *_Current;
33 std::vector<int8_t> _Key;
// private ctor used by hash_map::begin()/end()/back(): the map supplies
// the cursor together with the nibble path that leads to it
35 hash_map_iterator(
const _HashMapT *pMap,
typename _HashMapT::value_type *pCurrent,
36 const std::vector<int8_t> &oKey) : _Map(pMap), _Current(pCurrent), _Key(oKey) {}
39 using value_type =
typename _HashMapT::value_type;
// copy ctor: shares the map pointer, copies cursor and key path
41 hash_map_iterator(
const hash_map_iterator &src) : _Map(src._Map), _Current(src._Current), _Key(src._Key) {}
// move ctor: steals the key vector; src keeps its map/cursor pointers
43 hash_map_iterator(hash_map_iterator &&src) : _Map(src._Map), _Current(src._Current), _Key(std::move(src._Key)) {}
// default ctor behaves as end(): null cursor, key path all -1 sentinels
45 hash_map_iterator() : _Map(nullptr), _Current(nullptr), _Key(key_nibbles, -1) {}
// copy assignment -- body truncated in this extract (original lines 48-51
// are missing and presumably copy _Map and _Key as well -- TODO confirm)
47 hash_map_iterator &operator=(
const hash_map_iterator &src) {
52 _Current = src._Current;
56 hash_map_iterator &operator==(hash_map_iterator &&src) {
61 _Current = src._Current;
62 _Key = std::move(src._Key);
// Equality compares only the element cursor; _Map and _Key do not
// participate (so any two end iterators compare equal).
65 bool operator==(
const hash_map_iterator &rhs)
const {
66 return (_Current == rhs._Current);
69 bool operator!=(
const hash_map_iterator &rhs)
const {
70 return (_Current != rhs._Current);
// raw access to the current element; nullptr at end
73 value_type *
get() {
return _Current; }
75 const value_type *
get()
const {
return _Current; }
77 value_type *operator->() {
return _Current; }
79 const value_type *operator->()
const {
return _Current; }
// dereference -- bodies elided in this extract (presumably `return
// *_Current;`, undefined at end -- TODO confirm)
81 value_type &operator*() {
86 const value_type &operator*()
const {
// pre-increment: asks the map for the next element in nibble-key order;
// _next mutates the _Key path in place (return statement elided)
91 hash_map_iterator &operator++() {
93 _Current = _Map->_next(&_Key[0]);
// post-increment: snapshots *this first; the advance and `return oRet;`
// fall on elided lines
97 hash_map_iterator operator++(
int) {
98 hash_map_iterator oRet(*
this);
// pre-decrement: symmetric with operator++ via _prev
103 hash_map_iterator &operator--() {
105 _Current = _Map->_prev(&_Key[0]);
// post-decrement: snapshot-then-step, remainder elided
109 hash_map_iterator operator--(
int) {
110 hash_map_iterator oRet(*
this);
// ---------------------------------------------------------------------------
// Primary hash_map template: one interior node of a nibble trie.  Each node
// owns 16 atomically-published child buckets (one per nibble value); the
// child type peels one nibble off (_NibblePos - 1) until the <.,.,1> leaf
// specialization stores the values themselves.  Copy is deleted; children
// are allocated lazily with a compare-exchange race (loser presumably
// deletes its allocation on the elided lines -- TODO confirm).
// NOTE(review): extraction holes -- `x` in the bodies below is presumably
// `intrinsic_cast(Key)` computed on elided lines; confirm in the full file.
// ---------------------------------------------------------------------------
125 template<
typename _KeyT,
typename _ValueT,
int _NibblePos = sizeof(_KeyT) * 2>
127 using _my_t = hash_map<_KeyT, _ValueT, _NibblePos>;
128 using child_bucket_type = hash_map<_KeyT, _ValueT, _NibblePos - 1>;
129 static constexpr
int nibble_pos = _NibblePos;
// iterator needs access to the private _begin/_back/_next/_prev walkers
130 template<
typename>
friend
131 class hash_map_iterator;
// friend declaration (target elided): sibling hash_map instantiations
133 template<
typename,
typename,
int>
friend
136 static constexpr int8_t nibble_count = 16;
// one atomically-swapped child pointer per nibble value 0..15
137 std::atomic<child_bucket_type *> _Buckets[nibble_count];
140 using value_type = _ValueT;
141 using key_type = _KeyT;
142 using iterator_type = hash_map_iterator<_my_t>;
// ctor fragment: start with every child slot empty
145 for (
auto &oItem : _Buckets) {
146 oItem.store(
nullptr);
// dtor fragment: walk the children (the delete presumably sits on the
// elided lines -- TODO confirm)
151 for (int8_t i=0 ; i<nibble_count ; ++i){
152 auto pItem = _Buckets[i].load();
// non-copyable: buckets own heap children and are shared across threads
159 hash_map(
const hash_map &) =
delete;
161 hash_map &operator=(
const hash_map &) =
delete;
// insert: route on the low nibble of x, lazily CAS-publish the child
// bucket, then recurse with the remaining nibbles
168 bool insert(
const key_type &Key, value_type &&Value) {
170 int Index = (x & 0xf);
171 auto pChild = _Buckets[Index].load();
173 pChild =
new child_bucket_type;
174 child_bucket_type *pNullBucket =
nullptr;
// CAS failed => another thread published first (cleanup branch elided)
175 if (!_Buckets[Index].compare_exchange_strong(pNullBucket, pChild)) {
180 return pChild->insert(
intrinsic_cast(x), std::forward<value_type>(Value));
// exists: route on low nibble, recurse (remainder elided)
187 bool exists(
const key_type &Key)
const {
189 int Index = (x & 0xf);
190 auto pChild = _Buckets[Index].load();
// remove: route on low nibble, recurse (remainder elided)
199 bool remove(
const key_type &Key) {
201 int Index = (x & 0xf);
202 auto pChild = _Buckets[Index].load();
// operator[]: default-insert when the child slot is empty, then re-load
// and delegate (remainder elided)
215 value_type &operator[](
const key_type &Key) {
217 int Index = (x & 0xf);
218 auto pChild = _Buckets[Index].load();
220 insert(Key, value_type());
221 pChild = _Buckets[Index].load();
// begin: key path buffer starts as all -1; _begin fills it while locating
// the first element
228 iterator_type begin()
const {
229 std::vector<int8_t> oKey(iterator_type::key_nibbles, -1);
230 return iterator_type(
this, _begin(&oKey[0]), oKey);
// end: null cursor + sentinel key path
234 iterator_type end()
const {
235 return iterator_type(
this,
nullptr, std::vector<int8_t>(iterator_type::key_nibbles, -1));
// back: last element, symmetric with begin() via _back
239 iterator_type back()
const {
240 std::vector<int8_t> oKey(iterator_type::key_nibbles, -1);
241 return iterator_type(
this, _back(&oKey[0]), oKey);
// _begin: scan nibbles 0..15, record the nibble in *pKey, recurse into the
// first non-empty child (pRet declaration elided)
246 value_type *_begin(int8_t *pKey)
const {
247 child_bucket_type *pChildBucket;
248 for (*pKey = 0; *pKey < nibble_count; ++*pKey) {
250 if ((pChildBucket = _Buckets[*pKey].load()) && (pRet = pChildBucket->_begin(1 + pKey))) {
// _back: same scan in reverse, 15..0
257 value_type *_back(int8_t *pKey)
const {
258 child_bucket_type *pChildBucket;
259 for (*pKey = nibble_count - 1; *pKey >= 0; --*pKey) {
261 if ((pChildBucket = _Buckets[*pKey].load()) && (pRet = pChildBucket->_back(1 + pKey))) {
// _next: resume from the nibble stored in *pKey (out-of-range resets --
// branch body elided), advancing forward until a child yields an element
268 value_type *_next(int8_t *pKey)
const {
269 child_bucket_type *pChildBucket;
270 if (*pKey < 0 || *pKey >= nibble_count) {
273 for (; *pKey < nibble_count; ++*pKey) {
275 if ((pChildBucket = _Buckets[*pKey].load()) && (pRet = pChildBucket->_next(1 + pKey))) {
// _prev: mirror of _next, walking nibbles downward
283 value_type *_prev(int8_t *pKey)
const {
284 child_bucket_type *pChildBucket;
285 if (*pKey < 0 || *pKey >= nibble_count) {
286 *pKey = nibble_count - 1;
288 for (; *pKey >= 0; --*pKey) {
290 if ((pChildBucket = _Buckets[*pKey].load()) && (pRet = pChildBucket->_prev(1 + pKey))) {
// ---------------------------------------------------------------------------
// Leaf specialization (_NibblePos == 1): the last nibble indexes an array of
// 16 atomically-published value pointers instead of child buckets.  Values
// are heap-allocated and swapped in with compare-exchange so concurrent
// inserts race safely (loser's cleanup sits on elided lines -- TODO confirm).
// ---------------------------------------------------------------------------
302 template<
typename _KeyT,
typename _ValueT>
303 class hash_map<_KeyT, _ValueT, 1> {
// friend declaration (target elided): interior hash_map nodes call the
// private _begin/_back/_next/_prev walkers
304 template<
typename,
typename,
int>
friend
307 static constexpr
int nibble_pos = 1;
308 static constexpr int8_t nibble_count = 16;
// one atomically-swapped owned value pointer per final nibble 0..15
309 std::atomic<_ValueT *> _Values[nibble_count];
311 using _my_t = hash_map<_KeyT, _ValueT, 1>;
312 using value_type = _ValueT;
313 using key_type = _KeyT;
// ctor fragment: all slots start empty
317 for (
auto &oItem : _Values) {
318 oItem.store(
nullptr);
// dtor fragment: walk the slots (delete presumably on elided lines)
323 for (int8_t i=0 ; i<nibble_count ; ++i){
324 auto pItem = _Values[i].load();
// insert: allocate the value, then CAS it into the empty slot; CAS failure
// means another thread won (cleanup branch elided)
331 bool insert(
const key_type &Key, value_type &&Value) {
333 int Index = (x & 0xf);
334 auto pValue = _Values[Index].load();
338 pValue =
new value_type(std::forward<value_type>(Value));
339 value_type *pNullValue =
nullptr;
340 if (!_Values[Index].compare_exchange_strong(pNullValue, pValue)) {
// remove: detach the value with a CAS to nullptr so only one thread frees
// it (the delete presumably follows on elided lines)
347 bool remove(
const key_type &Key) {
349 int Index = (x & 0xf);
350 auto pVal = _Values[Index].load();
351 value_type *pNullValue =
nullptr;
352 if (pVal && _Values[Index].compare_exchange_strong(pVal, pNullValue)) {
// exists: non-null slot == present
359 bool exists(
const key_type &Key)
const {
361 int Index = (x & 0xf);
362 auto pVal = _Values[Index].load();
363 return (pVal ?
true :
false);
// operator[]: default-construct on miss and CAS-publish; if another thread
// beat us, re-load the winner (our allocation's cleanup elided)
366 value_type &operator[](
const key_type &Key) {
368 int Index = (x & 0xf);
369 auto pRet = _Values[Index].load();
371 pRet =
new value_type;
372 value_type *pNull =
nullptr;
373 if (!_Values[Index].compare_exchange_strong(pNull, pRet)) {
375 pRet = _Values[Index].load();
// _begin: first occupied slot, recording its nibble in *pKey
384 value_type *_begin(int8_t *pKey)
const {
385 for (*pKey = 0; *pKey < nibble_count; ++*pKey) {
387 if ((pRet = _Values[*pKey].load())) {
// _back: last occupied slot
394 value_type *_back(int8_t *pKey)
const {
395 for (*pKey = nibble_count - 1; *pKey >= 0; --*pKey) {
397 if ((pRet = _Values[*pKey].load())) {
// _next: resume forward scan from *pKey (reset branch body elided)
404 value_type *_next(int8_t *pKey)
const {
405 if (*pKey < 0 || *pKey >= nibble_count) {
408 for (; *pKey < nibble_count; ++*pKey) {
410 if ((pRet = _Values[*pKey].load())) {
// _prev: mirror of _next, scanning downward
418 value_type *_prev(int8_t *pKey)
const {
419 if (*pKey < 0 || *pKey >= nibble_count) {
420 *pKey = nibble_count - 1;
422 for (; *pKey >= 0; --*pKey) {
424 if ((pRet = _Values[*pKey].load())) {
Shared declarations for the `concurrent` namespace:
`constexpr processor_intrinsic<_Ty>::type intrinsic_cast(_Ty src)` —
casts a pointer to the processor-intrinsic storage type.