// -*- C++ -*-
//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef _LIBCPP__HASH_TABLE
#define _LIBCPP__HASH_TABLE
#include <__config>
#include <initializer_list>
#include <memory>
#include <iterator>
#include <algorithm>
#include <cmath>
#include <__undef_min_max>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
#endif
_LIBCPP_BEGIN_NAMESPACE_STD
_LIBCPP_VISIBLE
size_t __next_prime(size_t __n);
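// __hash_node_base is the base class of every hash node; via the __first_node
// typedef it also serves as the before-begin sentinel from which the
// container's single forward list of elements hangs.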
template <class _NodePtr>
struct __hash_node_base
{
typedef __hash_node_base __first_node;
// typedef _NodePtr pointer;
_NodePtr __next_;
_LIBCPP_INLINE_VISIBILITY __hash_node_base() _NOEXCEPT : __next_(nullptr) {}
};
template <class _Tp, class _VoidPtr>
struct __hash_node
: public __hash_node_base
<
typename pointer_traits<_VoidPtr>::template
#ifndef _LIBCPP_HAS_NO_TEMPLATE_ALIASES
rebind<__hash_node<_Tp, _VoidPtr> >
#else
rebind<__hash_node<_Tp, _VoidPtr> >::other
#endif
>
{
typedef _Tp value_type;
size_t __hash_;
value_type __value_;
};
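// Bucket count policy (as described by the commit that introduced the helpers
// below):
//  - A client-requested bucket count is honored, including power-of-2 counts.
//    When the bucket count is a power of 2, a hash is constrained to a bucket
//    with & instead of %.
//  - If no count is requested, a prime number of buckets is selected, and
//    growth roughly doubles the count while preserving whichever strategy
//    (prime or power of 2) is already in effect.
// Note that __is_power2 reports false for counts of 2 or less, so very small
// tables grow along the prime path.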
inline _LIBCPP_INLINE_VISIBILITY
bool
__is_power2(size_t __bc)
{
return __bc > 2 && !(__bc & (__bc - 1));
}
inline _LIBCPP_INLINE_VISIBILITY
size_t
__constrain_hash(size_t __h, size_t __bc)
{
return !(__bc & (__bc - 1)) ? __h & (__bc - 1) : __h % __bc;
}
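// Illustrative example (values chosen arbitrarily, not part of the library):
//   __constrain_hash(29, 8) == (29 & 7) == 5   // 8 is a power of 2: mask
//   __constrain_hash(29, 7) == (29 % 7) == 1   // 7 is not: modulus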
inline _LIBCPP_INLINE_VISIBILITY
size_t
__next_pow2(size_t __n)
{
return size_t(1) << (std::numeric_limits<size_t>::digits - __clz(__n-1));
}
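// __next_pow2 rounds up to the next power of 2; e.g. it returns 8 for any
// __n in [5, 8] and 16 for __n == 9 (this assumes __n >= 2, since __clz(0)
// is not meaningful).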
template <class _Tp, class _Hash, class _Equal, class _Alloc> class __hash_table;
template <class _ConstNodePtr> class _LIBCPP_VISIBLE __hash_const_iterator;
template <class _HashIterator> class _LIBCPP_VISIBLE __hash_map_iterator;
template <class _HashIterator> class _LIBCPP_VISIBLE __hash_map_const_iterator;
template <class _Key, class _Tp, class _Hash, class _Pred, class _Alloc>
class _LIBCPP_VISIBLE unordered_map;
template <class _NodePtr>
class _LIBCPP_VISIBLE __hash_iterator
{
typedef _NodePtr __node_pointer;
__node_pointer __node_;
public:
typedef forward_iterator_tag iterator_category;
typedef typename pointer_traits<__node_pointer>::element_type::value_type value_type;
typedef typename pointer_traits<__node_pointer>::difference_type difference_type;
typedef value_type& reference;
typedef typename pointer_traits<__node_pointer>::template
#ifndef _LIBCPP_HAS_NO_TEMPLATE_ALIASES
rebind<value_type>
#else
rebind<value_type>::other
#endif
pointer;
_LIBCPP_INLINE_VISIBILITY __hash_iterator() _NOEXCEPT {}
_LIBCPP_INLINE_VISIBILITY
reference operator*() const {return __node_->__value_;}
_LIBCPP_INLINE_VISIBILITY
pointer operator->() const {return _VSTD::addressof(__node_->__value_);}
_LIBCPP_INLINE_VISIBILITY
__hash_iterator& operator++()
{
__node_ = __node_->__next_;
return *this;
}
_LIBCPP_INLINE_VISIBILITY
__hash_iterator operator++(int)
{
__hash_iterator __t(*this);
++(*this);
return __t;
}
friend _LIBCPP_INLINE_VISIBILITY
bool operator==(const __hash_iterator& __x, const __hash_iterator& __y)
{return __x.__node_ == __y.__node_;}
friend _LIBCPP_INLINE_VISIBILITY
bool operator!=(const __hash_iterator& __x, const __hash_iterator& __y)
{return __x.__node_ != __y.__node_;}
private:
_LIBCPP_INLINE_VISIBILITY
__hash_iterator(__node_pointer __node) _NOEXCEPT
: __node_(__node)
{}
template <class, class, class, class> friend class __hash_table;
template <class> friend class _LIBCPP_VISIBLE __hash_const_iterator;
template <class> friend class _LIBCPP_VISIBLE __hash_map_iterator;
template <class, class, class, class, class> friend class _LIBCPP_VISIBLE unordered_map;
template <class, class, class, class, class> friend class _LIBCPP_VISIBLE unordered_multimap;
};
template <class _ConstNodePtr>
class _LIBCPP_VISIBLE __hash_const_iterator
{
typedef _ConstNodePtr __node_pointer;
__node_pointer __node_;
typedef typename remove_const<
typename pointer_traits<__node_pointer>::element_type
>::type __node;
public:
typedef forward_iterator_tag iterator_category;
typedef typename __node::value_type value_type;
typedef typename pointer_traits<__node_pointer>::difference_type difference_type;
typedef const value_type& reference;
typedef typename pointer_traits<__node_pointer>::template
#ifndef _LIBCPP_HAS_NO_TEMPLATE_ALIASES
rebind<const value_type>
#else
rebind<const value_type>::other
#endif
pointer;
typedef typename pointer_traits<__node_pointer>::template
#ifndef _LIBCPP_HAS_NO_TEMPLATE_ALIASES
rebind<__node>
#else
rebind<__node>::other
#endif
__non_const_node_pointer;
typedef __hash_iterator<__non_const_node_pointer> __non_const_iterator;
_LIBCPP_INLINE_VISIBILITY __hash_const_iterator() _NOEXCEPT {}
_LIBCPP_INLINE_VISIBILITY
__hash_const_iterator(const __non_const_iterator& __x) _NOEXCEPT
: __node_(__x.__node_)
{}
_LIBCPP_INLINE_VISIBILITY
reference operator*() const {return __node_->__value_;}
_LIBCPP_INLINE_VISIBILITY
pointer operator->() const {return _VSTD::addressof(__node_->__value_);}
_LIBCPP_INLINE_VISIBILITY
__hash_const_iterator& operator++()
{
__node_ = __node_->__next_;
return *this;
}
_LIBCPP_INLINE_VISIBILITY
__hash_const_iterator operator++(int)
{
__hash_const_iterator __t(*this);
++(*this);
return __t;
}
friend _LIBCPP_INLINE_VISIBILITY
bool operator==(const __hash_const_iterator& __x, const __hash_const_iterator& __y)
{return __x.__node_ == __y.__node_;}
friend _LIBCPP_INLINE_VISIBILITY
bool operator!=(const __hash_const_iterator& __x, const __hash_const_iterator& __y)
{return __x.__node_ != __y.__node_;}
private:
_LIBCPP_INLINE_VISIBILITY
__hash_const_iterator(__node_pointer __node) _NOEXCEPT
: __node_(__node)
{}
template <class, class, class, class> friend class __hash_table;
template <class> friend class _LIBCPP_VISIBLE __hash_map_const_iterator;
template <class, class, class, class, class> friend class _LIBCPP_VISIBLE unordered_map;
template <class, class, class, class, class> friend class _LIBCPP_VISIBLE unordered_multimap;
};
template <class _ConstNodePtr> class _LIBCPP_VISIBLE __hash_const_local_iterator;
template <class _NodePtr>
class _LIBCPP_VISIBLE __hash_local_iterator
{
typedef _NodePtr __node_pointer;
__node_pointer __node_;
size_t __bucket_;
size_t __bucket_count_;
typedef pointer_traits<__node_pointer> __pointer_traits;
public:
typedef forward_iterator_tag iterator_category;
typedef typename __pointer_traits::element_type::value_type value_type;
typedef typename __pointer_traits::difference_type difference_type;
typedef value_type& reference;
typedef typename __pointer_traits::template
#ifndef _LIBCPP_HAS_NO_TEMPLATE_ALIASES
rebind<value_type>
#else
rebind<value_type>::other
#endif
pointer;
_LIBCPP_INLINE_VISIBILITY __hash_local_iterator() _NOEXCEPT {}
_LIBCPP_INLINE_VISIBILITY
reference operator*() const {return __node_->__value_;}
_LIBCPP_INLINE_VISIBILITY
pointer operator->() const {return &__node_->__value_;}
_LIBCPP_INLINE_VISIBILITY
__hash_local_iterator& operator++()
{
__node_ = __node_->__next_;
if (__node_ != nullptr && __constrain_hash(__node_->__hash_, __bucket_count_) != __bucket_)
__node_ = nullptr;
return *this;
}
_LIBCPP_INLINE_VISIBILITY
__hash_local_iterator operator++(int)
{
__hash_local_iterator __t(*this);
++(*this);
return __t;
}
friend _LIBCPP_INLINE_VISIBILITY
bool operator==(const __hash_local_iterator& __x, const __hash_local_iterator& __y)
{return __x.__node_ == __y.__node_;}
friend _LIBCPP_INLINE_VISIBILITY
bool operator!=(const __hash_local_iterator& __x, const __hash_local_iterator& __y)
{return __x.__node_ != __y.__node_;}
private:
_LIBCPP_INLINE_VISIBILITY
__hash_local_iterator(__node_pointer __node, size_t __bucket,
size_t __bucket_count) _NOEXCEPT
: __node_(__node),
__bucket_(__bucket),
__bucket_count_(__bucket_count)
{
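// The bucket list stores the node preceding the first node of each bucket,
// so step forward once to land on the bucket's first element.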
if (__node_ != nullptr)
__node_ = __node_->__next_;
}
template <class, class, class, class> friend class __hash_table;
template <class> friend class _LIBCPP_VISIBLE __hash_const_local_iterator;
template <class> friend class _LIBCPP_VISIBLE __hash_map_iterator;
};
template <class _ConstNodePtr>
class _LIBCPP_VISIBLE __hash_const_local_iterator
{
typedef _ConstNodePtr __node_pointer;
__node_pointer __node_;
size_t __bucket_;
size_t __bucket_count_;
typedef pointer_traits<__node_pointer> __pointer_traits;
typedef typename __pointer_traits::element_type __node;
typedef typename remove_const<__node>::type __non_const_node;
typedef typename pointer_traits<__node_pointer>::template
#ifndef _LIBCPP_HAS_NO_TEMPLATE_ALIASES
rebind<__non_const_node>
#else
rebind<__non_const_node>::other
#endif
__non_const_node_pointer;
typedef __hash_local_iterator<__non_const_node_pointer>
__non_const_iterator;
public:
typedef forward_iterator_tag iterator_category;
typedef typename remove_const<
typename __pointer_traits::element_type::value_type
>::type value_type;
typedef typename __pointer_traits::difference_type difference_type;
typedef const value_type& reference;
typedef typename __pointer_traits::template
#ifndef _LIBCPP_HAS_NO_TEMPLATE_ALIASES
rebind<const value_type>
#else
rebind<const value_type>::other
#endif
pointer;
_LIBCPP_INLINE_VISIBILITY __hash_const_local_iterator() _NOEXCEPT {}
_LIBCPP_INLINE_VISIBILITY
__hash_const_local_iterator(const __non_const_iterator& __x) _NOEXCEPT
: __node_(__x.__node_),
__bucket_(__x.__bucket_),
__bucket_count_(__x.__bucket_count_)
{}
_LIBCPP_INLINE_VISIBILITY
reference operator*() const {return __node_->__value_;}
_LIBCPP_INLINE_VISIBILITY
pointer operator->() const {return &__node_->__value_;}
_LIBCPP_INLINE_VISIBILITY
__hash_const_local_iterator& operator++()
{
__node_ = __node_->__next_;
if (__node_ != nullptr && __constrain_hash(__node_->__hash_, __bucket_count_) != __bucket_)
__node_ = nullptr;
return *this;
}
_LIBCPP_INLINE_VISIBILITY
__hash_const_local_iterator operator++(int)
{
__hash_const_local_iterator __t(*this);
++(*this);
return __t;
}
friend _LIBCPP_INLINE_VISIBILITY
bool operator==(const __hash_const_local_iterator& __x, const __hash_const_local_iterator& __y)
{return __x.__node_ == __y.__node_;}
friend _LIBCPP_INLINE_VISIBILITY
bool operator!=(const __hash_const_local_iterator& __x, const __hash_const_local_iterator& __y)
{return __x.__node_ != __y.__node_;}
private:
_LIBCPP_INLINE_VISIBILITY
__hash_const_local_iterator(__node_pointer __node, size_t __bucket,
size_t __bucket_count) _NOEXCEPT
: __node_(__node),
__bucket_(__bucket),
__bucket_count_(__bucket_count)
{
if (__node_ != nullptr)
__node_ = __node_->__next_;
}
template <class, class, class, class> friend class __hash_table;
template <class> friend class _LIBCPP_VISIBLE __hash_map_const_iterator;
};
template <class _Alloc>
class __bucket_list_deallocator
{
typedef _Alloc allocator_type;
typedef allocator_traits<allocator_type> __alloc_traits;
typedef typename __alloc_traits::size_type size_type;
__compressed_pair<size_type, allocator_type> __data_;
public:
typedef typename __alloc_traits::pointer pointer;
_LIBCPP_INLINE_VISIBILITY
__bucket_list_deallocator()
_NOEXCEPT_(is_nothrow_default_constructible<allocator_type>::value)
: __data_(0) {}
_LIBCPP_INLINE_VISIBILITY
__bucket_list_deallocator(const allocator_type& __a, size_type __size)
_NOEXCEPT_(is_nothrow_copy_constructible<allocator_type>::value)
: __data_(__size, __a) {}
#ifndef _LIBCPP_HAS_NO_RVALUE_REFERENCES
_LIBCPP_INLINE_VISIBILITY
__bucket_list_deallocator(__bucket_list_deallocator&& __x)
_NOEXCEPT_(is_nothrow_move_constructible<allocator_type>::value)
: __data_(_VSTD::move(__x.__data_))
{
__x.size() = 0;
}
#endif // _LIBCPP_HAS_NO_RVALUE_REFERENCES
_LIBCPP_INLINE_VISIBILITY
size_type& size() _NOEXCEPT {return __data_.first();}
_LIBCPP_INLINE_VISIBILITY
size_type size() const _NOEXCEPT {return __data_.first();}
_LIBCPP_INLINE_VISIBILITY
allocator_type& __alloc() _NOEXCEPT {return __data_.second();}
_LIBCPP_INLINE_VISIBILITY
const allocator_type& __alloc() const _NOEXCEPT {return __data_.second();}
_LIBCPP_INLINE_VISIBILITY
void operator()(pointer __p) _NOEXCEPT
{
__alloc_traits::deallocate(__alloc(), __p, size());
}
};
template <class _Alloc> class __hash_map_node_destructor;
template <class _Alloc>
class __hash_node_destructor
{
typedef _Alloc allocator_type;
typedef allocator_traits<allocator_type> __alloc_traits;
typedef typename __alloc_traits::value_type::value_type value_type;
public:
typedef typename __alloc_traits::pointer pointer;
private:
allocator_type& __na_;
__hash_node_destructor& operator=(const __hash_node_destructor&);
public:
bool __value_constructed;
_LIBCPP_INLINE_VISIBILITY
explicit __hash_node_destructor(allocator_type& __na,
bool __constructed = false) _NOEXCEPT
: __na_(__na),
__value_constructed(__constructed)
{}
_LIBCPP_INLINE_VISIBILITY
void operator()(pointer __p) _NOEXCEPT
{
if (__value_constructed)
__alloc_traits::destroy(__na_, _VSTD::addressof(__p->__value_));
if (__p)
__alloc_traits::deallocate(__na_, __p, 1);
}
template <class> friend class __hash_map_node_destructor;
};
template <class _Tp, class _Hash, class _Equal, class _Alloc>
class __hash_table
{
public:
typedef _Tp value_type;
typedef _Hash hasher;
typedef _Equal key_equal;
typedef _Alloc allocator_type;
private:
typedef allocator_traits<allocator_type> __alloc_traits;
public:
typedef value_type& reference;
typedef const value_type& const_reference;
typedef typename __alloc_traits::pointer pointer;
typedef typename __alloc_traits::const_pointer const_pointer;
typedef typename __alloc_traits::size_type size_type;
typedef typename __alloc_traits::difference_type difference_type;
public:
// Create __node
typedef __hash_node<value_type, typename __alloc_traits::void_pointer> __node;
typedef typename __alloc_traits::template
#ifndef _LIBCPP_HAS_NO_TEMPLATE_ALIASES
rebind_alloc<__node>
#else
rebind_alloc<__node>::other
#endif
__node_allocator;
typedef allocator_traits<__node_allocator> __node_traits;
typedef typename __node_traits::pointer __node_pointer;
typedef typename __node_traits::const_pointer __node_const_pointer;
typedef __hash_node_base<__node_pointer> __first_node;
private:
typedef typename __node_traits::template
#ifndef _LIBCPP_HAS_NO_TEMPLATE_ALIASES
rebind_alloc<__node_pointer>
#else
rebind_alloc<__node_pointer>::other
#endif
__pointer_allocator;
typedef __bucket_list_deallocator<__pointer_allocator> __bucket_list_deleter;
typedef unique_ptr<__node_pointer[], __bucket_list_deleter> __bucket_list;
typedef allocator_traits<__pointer_allocator> __pointer_alloc_traits;
typedef typename __bucket_list_deleter::pointer __node_pointer_pointer;
// --- Member data begin ---
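// __bucket_list_ owns the array of bucket pointers; its deleter also carries
//   the bucket count and the pointer allocator.  A non-null entry points at
//   the node immediately preceding the first node of that bucket (possibly
//   the before-begin sentinel).
// __p1_ pairs the before-begin sentinel node with the node allocator.
// __p2_ pairs the element count with the hasher.
// __p3_ pairs the max load factor with the key-equality predicate.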
__bucket_list __bucket_list_;
__compressed_pair<__first_node, __node_allocator> __p1_;
__compressed_pair<size_type, hasher> __p2_;
__compressed_pair<float, key_equal> __p3_;
// --- Member data end ---
_LIBCPP_INLINE_VISIBILITY
size_type& size() _NOEXCEPT {return __p2_.first();}
public:
_LIBCPP_INLINE_VISIBILITY
size_type size() const _NOEXCEPT {return __p2_.first();}
_LIBCPP_INLINE_VISIBILITY
hasher& hash_function() _NOEXCEPT {return __p2_.second();}
_LIBCPP_INLINE_VISIBILITY
const hasher& hash_function() const _NOEXCEPT {return __p2_.second();}
_LIBCPP_INLINE_VISIBILITY
float& max_load_factor() _NOEXCEPT {return __p3_.first();}
_LIBCPP_INLINE_VISIBILITY
float max_load_factor() const _NOEXCEPT {return __p3_.first();}
_LIBCPP_INLINE_VISIBILITY
key_equal& key_eq() _NOEXCEPT {return __p3_.second();}
_LIBCPP_INLINE_VISIBILITY
const key_equal& key_eq() const _NOEXCEPT {return __p3_.second();}
_LIBCPP_INLINE_VISIBILITY
__node_allocator& __node_alloc() _NOEXCEPT {return __p1_.second();}
_LIBCPP_INLINE_VISIBILITY
const __node_allocator& __node_alloc() const _NOEXCEPT
{return __p1_.second();}
public:
typedef __hash_iterator<__node_pointer> iterator;
typedef __hash_const_iterator<__node_const_pointer> const_iterator;
typedef __hash_local_iterator<__node_pointer> local_iterator;
typedef __hash_const_local_iterator<__node_const_pointer> const_local_iterator;
__hash_table()
_NOEXCEPT_(
is_nothrow_default_constructible<__bucket_list>::value &&
is_nothrow_default_constructible<__first_node>::value &&
is_nothrow_default_constructible<__node_allocator>::value &&
is_nothrow_default_constructible<hasher>::value &&
is_nothrow_default_constructible<key_equal>::value);
__hash_table(const hasher& __hf, const key_equal& __eql);
__hash_table(const hasher& __hf, const key_equal& __eql,
const allocator_type& __a);
explicit __hash_table(const allocator_type& __a);
__hash_table(const __hash_table& __u);
__hash_table(const __hash_table& __u, const allocator_type& __a);
#ifndef _LIBCPP_HAS_NO_RVALUE_REFERENCES
__hash_table(__hash_table&& __u)
_NOEXCEPT_(
is_nothrow_move_constructible<__bucket_list>::value &&
is_nothrow_move_constructible<__first_node>::value &&
is_nothrow_move_constructible<__node_allocator>::value &&
is_nothrow_move_constructible<hasher>::value &&
is_nothrow_move_constructible<key_equal>::value);
__hash_table(__hash_table&& __u, const allocator_type& __a);
#endif // _LIBCPP_HAS_NO_RVALUE_REFERENCES
~__hash_table();
__hash_table& operator=(const __hash_table& __u);
#ifndef _LIBCPP_HAS_NO_RVALUE_REFERENCES
__hash_table& operator=(__hash_table&& __u)
_NOEXCEPT_(
__node_traits::propagate_on_container_move_assignment::value &&
is_nothrow_move_assignable<__node_allocator>::value &&
is_nothrow_move_assignable<hasher>::value &&
is_nothrow_move_assignable<key_equal>::value);
#endif
template <class _InputIterator>
void __assign_unique(_InputIterator __first, _InputIterator __last);
template <class _InputIterator>
void __assign_multi(_InputIterator __first, _InputIterator __last);
_LIBCPP_INLINE_VISIBILITY
size_type max_size() const _NOEXCEPT
{
return allocator_traits<__pointer_allocator>::max_size(
__bucket_list_.get_deleter().__alloc());
}
pair<iterator, bool> __node_insert_unique(__node_pointer __nd);
iterator __node_insert_multi(__node_pointer __nd);
iterator __node_insert_multi(const_iterator __p,
__node_pointer __nd);
#if !defined(_LIBCPP_HAS_NO_RVALUE_REFERENCES) && !defined(_LIBCPP_HAS_NO_VARIADICS)
template <class... _Args>
pair<iterator, bool> __emplace_unique(_Args&&... __args);
template <class... _Args>
iterator __emplace_multi(_Args&&... __args);
template <class... _Args>
iterator __emplace_hint_multi(const_iterator __p, _Args&&... __args);
#endif // !defined(_LIBCPP_HAS_NO_RVALUE_REFERENCES) && !defined(_LIBCPP_HAS_NO_VARIADICS)
pair<iterator, bool> __insert_unique(const value_type& __x);
#ifndef _LIBCPP_HAS_NO_RVALUE_REFERENCES
template <class _Pp>
pair<iterator, bool> __insert_unique(_Pp&& __x);
#endif // _LIBCPP_HAS_NO_RVALUE_REFERENCES
#ifndef _LIBCPP_HAS_NO_RVALUE_REFERENCES
template <class _Pp>
iterator __insert_multi(_Pp&& __x);
template <class _Pp>
iterator __insert_multi(const_iterator __p, _Pp&& __x);
#else // _LIBCPP_HAS_NO_RVALUE_REFERENCES
iterator __insert_multi(const value_type& __x);
iterator __insert_multi(const_iterator __p, const value_type& __x);
#endif // _LIBCPP_HAS_NO_RVALUE_REFERENCES
void clear() _NOEXCEPT;
void rehash(size_type __n);
_LIBCPP_INLINE_VISIBILITY void reserve(size_type __n)
{rehash(static_cast<size_type>(ceil(__n / max_load_factor())));}
_LIBCPP_INLINE_VISIBILITY
size_type bucket_count() const _NOEXCEPT
{
return __bucket_list_.get_deleter().size();
}
iterator begin() _NOEXCEPT;
iterator end() _NOEXCEPT;
const_iterator begin() const _NOEXCEPT;
const_iterator end() const _NOEXCEPT;
template <class _Key>
_LIBCPP_INLINE_VISIBILITY
size_type bucket(const _Key& __k) const
{return __constrain_hash(hash_function()(__k), bucket_count());}
template <class _Key>
iterator find(const _Key& __x);
template <class _Key>
const_iterator find(const _Key& __x) const;
typedef __hash_node_destructor<__node_allocator> _Dp;
typedef unique_ptr<__node, _Dp> __node_holder;
iterator erase(const_iterator __p);
iterator erase(const_iterator __first, const_iterator __last);
template <class _Key>
size_type __erase_unique(const _Key& __k);
template <class _Key>
size_type __erase_multi(const _Key& __k);
__node_holder remove(const_iterator __p) _NOEXCEPT;
template <class _Key>
size_type __count_unique(const _Key& __k) const;
template <class _Key>
size_type __count_multi(const _Key& __k) const;
template <class _Key>
pair<iterator, iterator>
__equal_range_unique(const _Key& __k);
template <class _Key>
pair<const_iterator, const_iterator>
__equal_range_unique(const _Key& __k) const;
template <class _Key>
pair<iterator, iterator>
__equal_range_multi(const _Key& __k);
template <class _Key>
pair<const_iterator, const_iterator>
__equal_range_multi(const _Key& __k) const;
void swap(__hash_table& __u)
_NOEXCEPT_(
(!allocator_traits<__pointer_allocator>::propagate_on_container_swap::value ||
__is_nothrow_swappable<__pointer_allocator>::value) &&
(!__node_traits::propagate_on_container_swap::value ||
__is_nothrow_swappable<__node_allocator>::value) &&
__is_nothrow_swappable<hasher>::value &&
__is_nothrow_swappable<key_equal>::value);
_LIBCPP_INLINE_VISIBILITY
size_type max_bucket_count() const _NOEXCEPT
{return __bucket_list_.get_deleter().__alloc().max_size();}
size_type bucket_size(size_type __n) const;
_LIBCPP_INLINE_VISIBILITY float load_factor() const _NOEXCEPT
{
size_type __bc = bucket_count();
return __bc != 0 ? (float)size() / __bc : 0.f;
}
_LIBCPP_INLINE_VISIBILITY void max_load_factor(float __mlf) _NOEXCEPT
{max_load_factor() = _VSTD::max(__mlf, load_factor());}
_LIBCPP_INLINE_VISIBILITY local_iterator begin(size_type __n)
{return local_iterator(__bucket_list_[__n], __n, bucket_count());}
_LIBCPP_INLINE_VISIBILITY local_iterator end(size_type __n)
{return local_iterator(nullptr, __n, bucket_count());}
_LIBCPP_INLINE_VISIBILITY const_local_iterator cbegin(size_type __n) const
{return const_local_iterator(__bucket_list_[__n], __n, bucket_count());}
_LIBCPP_INLINE_VISIBILITY const_local_iterator cend(size_type __n) const
{return const_local_iterator(nullptr, __n, bucket_count());}
private:
void __rehash(size_type __n);
#ifndef _LIBCPP_HAS_NO_RVALUE_REFERENCES
#ifndef _LIBCPP_HAS_NO_VARIADICS
template <class ..._Args>
__node_holder __construct_node(_Args&& ...__args);
#endif // _LIBCPP_HAS_NO_VARIADICS
__node_holder __construct_node(value_type&& __v, size_t __hash);
#else // _LIBCPP_HAS_NO_RVALUE_REFERENCES
__node_holder __construct_node(const value_type& __v);
#endif
__node_holder __construct_node(const value_type& __v, size_t __hash);
_LIBCPP_INLINE_VISIBILITY
void __copy_assign_alloc(const __hash_table& __u)
{__copy_assign_alloc(__u, integral_constant<bool,
__node_traits::propagate_on_container_copy_assignment::value>());}
void __copy_assign_alloc(const __hash_table& __u, true_type);
_LIBCPP_INLINE_VISIBILITY
void __copy_assign_alloc(const __hash_table&, false_type) {}
void __move_assign(__hash_table& __u, false_type);
void __move_assign(__hash_table& __u, true_type)
_NOEXCEPT_(
is_nothrow_move_assignable<__node_allocator>::value &&
is_nothrow_move_assignable<hasher>::value &&
is_nothrow_move_assignable<key_equal>::value);
_LIBCPP_INLINE_VISIBILITY
void __move_assign_alloc(__hash_table& __u)
_NOEXCEPT_(
!__node_traits::propagate_on_container_move_assignment::value ||
(is_nothrow_move_assignable<__pointer_allocator>::value &&
is_nothrow_move_assignable<__node_allocator>::value))
{__move_assign_alloc(__u, integral_constant<bool,
__node_traits::propagate_on_container_move_assignment::value>());}
_LIBCPP_INLINE_VISIBILITY
void __move_assign_alloc(__hash_table& __u, true_type)
_NOEXCEPT_(
is_nothrow_move_assignable<__pointer_allocator>::value &&
is_nothrow_move_assignable<__node_allocator>::value)
{
__bucket_list_.get_deleter().__alloc() =
_VSTD::move(__u.__bucket_list_.get_deleter().__alloc());
__node_alloc() = _VSTD::move(__u.__node_alloc());
}
_LIBCPP_INLINE_VISIBILITY
void __move_assign_alloc(__hash_table&, false_type) _NOEXCEPT {}
template <class _Ap>
_LIBCPP_INLINE_VISIBILITY
static
void
__swap_alloc(_Ap& __x, _Ap& __y)
_NOEXCEPT_(
!allocator_traits<_Ap>::propagate_on_container_swap::value ||
__is_nothrow_swappable<_Ap>::value)
{
__swap_alloc(__x, __y,
integral_constant<bool,
allocator_traits<_Ap>::propagate_on_container_swap::value
>());
}
template <class _Ap>
_LIBCPP_INLINE_VISIBILITY
static
void
__swap_alloc(_Ap& __x, _Ap& __y, true_type)
_NOEXCEPT_(__is_nothrow_swappable<_Ap>::value)
{
using _VSTD::swap;
swap(__x, __y);
}
template <class _Ap>
_LIBCPP_INLINE_VISIBILITY
static
void
__swap_alloc(_Ap&, _Ap&, false_type) _NOEXCEPT {}
void __deallocate(__node_pointer __np) _NOEXCEPT;
__node_pointer __detach() _NOEXCEPT;
};
template <class _Tp, class _Hash, class _Equal, class _Alloc>
inline _LIBCPP_INLINE_VISIBILITY
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table()
_NOEXCEPT_(
is_nothrow_default_constructible<__bucket_list>::value &&
is_nothrow_default_constructible<__first_node>::value &&
is_nothrow_default_constructible<__node_allocator>::value &&
is_nothrow_default_constructible<hasher>::value &&
is_nothrow_default_constructible<key_equal>::value)
: __p2_(0),
__p3_(1.0f)
{
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
inline _LIBCPP_INLINE_VISIBILITY
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(const hasher& __hf,
const key_equal& __eql)
: __bucket_list_(nullptr, __bucket_list_deleter()),
__p1_(),
__p2_(0, __hf),
__p3_(1.0f, __eql)
{
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(const hasher& __hf,
const key_equal& __eql,
const allocator_type& __a)
: __bucket_list_(nullptr, __bucket_list_deleter(__pointer_allocator(__a), 0)),
__p1_(__node_allocator(__a)),
__p2_(0, __hf),
__p3_(1.0f, __eql)
{
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(const allocator_type& __a)
: __bucket_list_(nullptr, __bucket_list_deleter(__pointer_allocator(__a), 0)),
__p1_(__node_allocator(__a)),
__p2_(0),
__p3_(1.0f)
{
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(const __hash_table& __u)
: __bucket_list_(nullptr,
__bucket_list_deleter(allocator_traits<__pointer_allocator>::
select_on_container_copy_construction(
__u.__bucket_list_.get_deleter().__alloc()), 0)),
__p1_(allocator_traits<__node_allocator>::
select_on_container_copy_construction(__u.__node_alloc())),
__p2_(0, __u.hash_function()),
__p3_(__u.__p3_)
{
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(const __hash_table& __u,
const allocator_type& __a)
: __bucket_list_(nullptr, __bucket_list_deleter(__pointer_allocator(__a), 0)),
__p1_(__node_allocator(__a)),
__p2_(0, __u.hash_function()),
__p3_(__u.__p3_)
{
}
#ifndef _LIBCPP_HAS_NO_RVALUE_REFERENCES
template <class _Tp, class _Hash, class _Equal, class _Alloc>
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(__hash_table&& __u)
_NOEXCEPT_(
is_nothrow_move_constructible<__bucket_list>::value &&
is_nothrow_move_constructible<__first_node>::value &&
is_nothrow_move_constructible<__node_allocator>::value &&
is_nothrow_move_constructible<hasher>::value &&
is_nothrow_move_constructible<key_equal>::value)
: __bucket_list_(_VSTD::move(__u.__bucket_list_)),
__p1_(_VSTD::move(__u.__p1_)),
__p2_(_VSTD::move(__u.__p2_)),
__p3_(_VSTD::move(__u.__p3_))
{
if (size() > 0)
{
__bucket_list_[__constrain_hash(__p1_.first().__next_->__hash_, bucket_count())] =
static_cast<__node_pointer>(_VSTD::addressof(__p1_.first()));
__u.__p1_.first().__next_ = nullptr;
__u.size() = 0;
}
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(__hash_table&& __u,
const allocator_type& __a)
: __bucket_list_(nullptr, __bucket_list_deleter(__pointer_allocator(__a), 0)),
__p1_(__node_allocator(__a)),
__p2_(0, _VSTD::move(__u.hash_function())),
__p3_(_VSTD::move(__u.__p3_))
{
if (__a == allocator_type(__u.__node_alloc()))
{
__bucket_list_.reset(__u.__bucket_list_.release());
__bucket_list_.get_deleter().size() = __u.__bucket_list_.get_deleter().size();
__u.__bucket_list_.get_deleter().size() = 0;
if (__u.size() > 0)
{
__p1_.first().__next_ = __u.__p1_.first().__next_;
__u.__p1_.first().__next_ = nullptr;
__bucket_list_[__constrain_hash(__p1_.first().__next_->__hash_, bucket_count())] =
static_cast<__node_pointer>(_VSTD::addressof(__p1_.first()));
size() = __u.size();
__u.size() = 0;
}
}
}
#endif // _LIBCPP_HAS_NO_RVALUE_REFERENCES
template <class _Tp, class _Hash, class _Equal, class _Alloc>
__hash_table<_Tp, _Hash, _Equal, _Alloc>::~__hash_table()
{
__deallocate(__p1_.first().__next_);
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
void
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__copy_assign_alloc(
const __hash_table& __u, true_type)
{
if (__node_alloc() != __u.__node_alloc())
{
clear();
__bucket_list_.reset();
__bucket_list_.get_deleter().size() = 0;
}
__bucket_list_.get_deleter().__alloc() = __u.__bucket_list_.get_deleter().__alloc();
__node_alloc() = __u.__node_alloc();
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
__hash_table<_Tp, _Hash, _Equal, _Alloc>&
__hash_table<_Tp, _Hash, _Equal, _Alloc>::operator=(const __hash_table& __u)
{
if (this != &__u)
{
__copy_assign_alloc(__u);
hash_function() = __u.hash_function();
key_eq() = __u.key_eq();
max_load_factor() = __u.max_load_factor();
__assign_multi(__u.begin(), __u.end());
}
return *this;
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
void
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__deallocate(__node_pointer __np)
_NOEXCEPT
{
__node_allocator& __na = __node_alloc();
while (__np != nullptr)
{
__node_pointer __next = __np->__next_;
__node_traits::destroy(__na, _VSTD::addressof(__np->__value_));
__node_traits::deallocate(__na, __np, 1);
__np = __next;
}
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_pointer
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__detach() _NOEXCEPT
{
size_type __bc = bucket_count();
for (size_type __i = 0; __i < __bc; ++__i)
__bucket_list_[__i] = nullptr;
size() = 0;
__node_pointer __cache = __p1_.first().__next_;
__p1_.first().__next_ = nullptr;
return __cache;
}
#ifndef _LIBCPP_HAS_NO_RVALUE_REFERENCES
template <class _Tp, class _Hash, class _Equal, class _Alloc>
void
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__move_assign(
__hash_table& __u, true_type)
_NOEXCEPT_(
is_nothrow_move_assignable<__node_allocator>::value &&
is_nothrow_move_assignable<hasher>::value &&
is_nothrow_move_assignable<key_equal>::value)
{
clear();
__bucket_list_.reset(__u.__bucket_list_.release());
__bucket_list_.get_deleter().size() = __u.__bucket_list_.get_deleter().size();
__u.__bucket_list_.get_deleter().size() = 0;
__move_assign_alloc(__u);
size() = __u.size();
hash_function() = _VSTD::move(__u.hash_function());
max_load_factor() = __u.max_load_factor();
key_eq() = _VSTD::move(__u.key_eq());
__p1_.first().__next_ = __u.__p1_.first().__next_;
if (size() > 0)
{
__bucket_list_[__constrain_hash(__p1_.first().__next_->__hash_, bucket_count())] =
static_cast<__node_pointer>(_VSTD::addressof(__p1_.first()));
__u.__p1_.first().__next_ = nullptr;
__u.size() = 0;
}
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
void
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__move_assign(
__hash_table& __u, false_type)
{
if (__node_alloc() == __u.__node_alloc())
__move_assign(__u, true_type());
else
{
hash_function() = _VSTD::move(__u.hash_function());
key_eq() = _VSTD::move(__u.key_eq());
max_load_factor() = __u.max_load_factor();
if (bucket_count() != 0)
{
__node_pointer __cache = __detach();
#ifndef _LIBCPP_NO_EXCEPTIONS
try
{
#endif // _LIBCPP_NO_EXCEPTIONS
const_iterator __i = __u.begin();
while (__cache != nullptr && __u.size() != 0)
{
__cache->__value_ = _VSTD::move(__u.remove(__i++)->__value_);
__node_pointer __next = __cache->__next_;
__node_insert_multi(__cache);
__cache = __next;
}
#ifndef _LIBCPP_NO_EXCEPTIONS
}
catch (...)
{
__deallocate(__cache);
throw;
}
#endif // _LIBCPP_NO_EXCEPTIONS
__deallocate(__cache);
}
const_iterator __i = __u.begin();
while (__u.size() != 0)
{
__node_holder __h =
__construct_node(_VSTD::move(__u.remove(__i++)->__value_));
__node_insert_multi(__h.get());
__h.release();
}
}
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
inline _LIBCPP_INLINE_VISIBILITY
__hash_table<_Tp, _Hash, _Equal, _Alloc>&
__hash_table<_Tp, _Hash, _Equal, _Alloc>::operator=(__hash_table&& __u)
_NOEXCEPT_(
__node_traits::propagate_on_container_move_assignment::value &&
is_nothrow_move_assignable<__node_allocator>::value &&
is_nothrow_move_assignable<hasher>::value &&
is_nothrow_move_assignable<key_equal>::value)
{
__move_assign(__u, integral_constant<bool,
__node_traits::propagate_on_container_move_assignment::value>());
return *this;
}
#endif // _LIBCPP_HAS_NO_RVALUE_REFERENCES
template <class _Tp, class _Hash, class _Equal, class _Alloc>
template <class _InputIterator>
void
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__assign_unique(_InputIterator __first,
_InputIterator __last)
{
if (bucket_count() != 0)
{
__node_pointer __cache = __detach();
#ifndef _LIBCPP_NO_EXCEPTIONS
try
{
#endif // _LIBCPP_NO_EXCEPTIONS
for (; __cache != nullptr && __first != __last; ++__first)
{
__cache->__value_ = *__first;
__node_pointer __next = __cache->__next_;
__node_insert_unique(__cache);
__cache = __next;
}
#ifndef _LIBCPP_NO_EXCEPTIONS
}
catch (...)
{
__deallocate(__cache);
throw;
}
#endif // _LIBCPP_NO_EXCEPTIONS
__deallocate(__cache);
}
for (; __first != __last; ++__first)
__insert_unique(*__first);
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
template <class _InputIterator>
void
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__assign_multi(_InputIterator __first,
_InputIterator __last)
{
if (bucket_count() != 0)
{
__node_pointer __cache = __detach();
#ifndef _LIBCPP_NO_EXCEPTIONS
try
{
#endif // _LIBCPP_NO_EXCEPTIONS
for (; __cache != nullptr && __first != __last; ++__first)
{
__cache->__value_ = *__first;
__node_pointer __next = __cache->__next_;
__node_insert_multi(__cache);
__cache = __next;
}
#ifndef _LIBCPP_NO_EXCEPTIONS
}
catch (...)
{
__deallocate(__cache);
throw;
}
#endif // _LIBCPP_NO_EXCEPTIONS
__deallocate(__cache);
}
for (; __first != __last; ++__first)
__insert_multi(*__first);
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
inline _LIBCPP_INLINE_VISIBILITY
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
__hash_table<_Tp, _Hash, _Equal, _Alloc>::begin() _NOEXCEPT
{
return iterator(__p1_.first().__next_);
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
inline _LIBCPP_INLINE_VISIBILITY
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
__hash_table<_Tp, _Hash, _Equal, _Alloc>::end() _NOEXCEPT
{
return iterator(nullptr);
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
inline _LIBCPP_INLINE_VISIBILITY
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::const_iterator
__hash_table<_Tp, _Hash, _Equal, _Alloc>::begin() const _NOEXCEPT
{
return const_iterator(__p1_.first().__next_);
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
inline _LIBCPP_INLINE_VISIBILITY
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::const_iterator
__hash_table<_Tp, _Hash, _Equal, _Alloc>::end() const _NOEXCEPT
{
return const_iterator(nullptr);
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
void
__hash_table<_Tp, _Hash, _Equal, _Alloc>::clear() _NOEXCEPT
{
if (size() > 0)
{
__deallocate(__p1_.first().__next_);
__p1_.first().__next_ = nullptr;
size_type __bc = bucket_count();
for (size_type __i = 0; __i < __bc; ++__i)
__bucket_list_[__i] = nullptr;
size() = 0;
}
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
pair<typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator, bool>
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_insert_unique(__node_pointer __nd)
{
__nd->__hash_ = hash_function()(__nd->__value_);
size_type __bc = bucket_count();
bool __inserted = false;
__node_pointer __ndptr;
size_t __chash;
if (__bc != 0)
{
__chash = __constrain_hash(__nd->__hash_, __bc);
__ndptr = __bucket_list_[__chash];
if (__ndptr != nullptr)
{
for (__ndptr = __ndptr->__next_; __ndptr != nullptr &&
__constrain_hash(__ndptr->__hash_, __bc) == __chash;
__ndptr = __ndptr->__next_)
{
if (key_eq()(__ndptr->__value_, __nd->__value_))
goto __done;
}
}
}
{
if (size()+1 > __bc * max_load_factor() || __bc == 0)
{
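// Roughly double the bucket count: 2*__bc stays a power of 2 when __bc is
// one, and the +1 otherwise keeps the request odd so rehash() continues
// with the prime-number policy (see the bucket count policy note above).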
rehash(_VSTD::max<size_type>(2 * __bc + !__is_power2(__bc),
size_type(ceil(float(size() + 1) / max_load_factor()))));
__bc = bucket_count();
__chash = __constrain_hash(__nd->__hash_, __bc);
}
// insert_after __bucket_list_[__chash], or __first_node if bucket is null
__node_pointer __pn = __bucket_list_[__chash];
if (__pn == nullptr)
{
__pn = static_cast<__node_pointer>(_VSTD::addressof(__p1_.first()));
__nd->__next_ = __pn->__next_;
__pn->__next_ = __nd;
// fix up __bucket_list_
__bucket_list_[__chash] = __pn;
if (__nd->__next_ != nullptr)
__bucket_list_[__constrain_hash(__nd->__next_->__hash_, __bc)] = __nd;
}
else
{
__nd->__next_ = __pn->__next_;
__pn->__next_ = __nd;
}
__ndptr = __nd;
// increment size
++size();
__inserted = true;
}
__done:
return pair<iterator, bool>(iterator(__ndptr), __inserted);
}
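
// __node_insert_multi: link the already-constructed node __cp into the table,
// allowing duplicate keys.  Its hash is computed here, and the table is
// rehashed first if the insertion would push the load factor past
// max_load_factor().  Within its bucket the node is placed immediately after
// any existing elements with an equivalent key, so equal elements stay adjacent.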
template <class _Tp, class _Hash, class _Equal, class _Alloc>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_insert_multi(__node_pointer __cp)
{
__cp->__hash_ = hash_function()(__cp->__value_);
size_type __bc = bucket_count();
if (size()+1 > __bc * max_load_factor() || __bc == 0)
{
rehash(_VSTD::max<size_type>(2 * __bc + !__is_power2(__bc),
size_type(ceil(float(size() + 1) / max_load_factor()))));
__bc = bucket_count();
}
size_t __chash = __constrain_hash(__cp->__hash_, __bc);
__node_pointer __pn = __bucket_list_[__chash];
if (__pn == nullptr)
{
__pn = static_cast<__node_pointer>(_VSTD::addressof(__p1_.first()));
__cp->__next_ = __pn->__next_;
__pn->__next_ = __cp;
// fix up __bucket_list_
__bucket_list_[__chash] = __pn;
if (__cp->__next_ != nullptr)
__bucket_list_[__constrain_hash(__cp->__next_->__hash_, __bc)] = __cp;
}
else
{
for (bool __found = false; __pn->__next_ != nullptr &&
__constrain_hash(__pn->__next_->__hash_, __bc) == __chash;
__pn = __pn->__next_)
{
// __found key_eq() action
// false false loop
// true true loop
// false true set __found to true
// true false break
if (__found != (__pn->__next_->__hash_ == __cp->__hash_ &&
key_eq()(__pn->__next_->__value_, __cp->__value_)))
{
if (!__found)
__found = true;
else
break;
}
}
__cp->__next_ = __pn->__next_;
__pn->__next_ = __cp;
if (__cp->__next_ != nullptr)
{
size_t __nhash = __constrain_hash(__cp->__next_->__hash_, __bc);
if (__nhash != __chash)
__bucket_list_[__nhash] = __cp;
}
}
++size();
return iterator(__cp);
}
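
// Hinted multi-insert: if the element at __p has an equivalent key, reuse its
// cached hash and splice __cp directly in front of __p (rehashing first if
// needed); otherwise fall back to the un-hinted overload above.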
template <class _Tp, class _Hash, class _Equal, class _Alloc>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_insert_multi(
const_iterator __p, __node_pointer __cp)
{
if (__p != end() && key_eq()(*__p, __cp->__value_))
{
__node_pointer __np = const_cast<__node_pointer>(__p.__node_);
__cp->__hash_ = __np->__hash_;
size_type __bc = bucket_count();
if (size()+1 > __bc * max_load_factor() || __bc == 0)
{
rehash(_VSTD::max<size_type>(2 * __bc + !__is_power2(__bc),
size_type(ceil(float(size() + 1) / max_load_factor()))));
__bc = bucket_count();
}
size_t __chash = __constrain_hash(__cp->__hash_, __bc);
__node_pointer __pp = __bucket_list_[__chash];
while (__pp->__next_ != __np)
__pp = __pp->__next_;
__cp->__next_ = __np;
__pp->__next_ = __cp;
++size();
return iterator(__cp);
}
return __node_insert_multi(__cp);
}
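
// __insert_unique: insert a copy of __x only if no equivalent key is present.
// The target bucket is scanned first; on a match the existing element is
// returned with 'false'.  Otherwise a node is constructed, the table is
// rehashed if the load factor would be exceeded, and the node is linked in as
// the first element of its bucket.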
template <class _Tp, class _Hash, class _Equal, class _Alloc>
pair<typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator, bool>
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__insert_unique(const value_type& __x)
{
size_t __hash = hash_function()(__x);
size_type __bc = bucket_count();
bool __inserted = false;
__node_pointer __nd;
size_t __chash;
if (__bc != 0)
{
__chash = __constrain_hash(__hash, __bc);
__nd = __bucket_list_[__chash];
if (__nd != nullptr)
{
for (__nd = __nd->__next_; __nd != nullptr &&
__constrain_hash(__nd->__hash_, __bc) == __chash;
__nd = __nd->__next_)
{
if (key_eq()(__nd->__value_, __x))
goto __done;
}
}
}
{
__node_holder __h = __construct_node(__x, __hash);
if (size()+1 > __bc * max_load_factor() || __bc == 0)
{
rehash(_VSTD::max<size_type>(2 * __bc + !__is_power2(__bc),
size_type(ceil(float(size() + 1) / max_load_factor()))));
__bc = bucket_count();
__chash = __constrain_hash(__hash, __bc);
}
// insert_after __bucket_list_[__chash], or __first_node if bucket is null
__node_pointer __pn = __bucket_list_[__chash];
if (__pn == nullptr)
{
__pn = static_cast<__node_pointer>(_VSTD::addressof(__p1_.first()));
__h->__next_ = __pn->__next_;
__pn->__next_ = __h.get();
// fix up __bucket_list_
__bucket_list_[__chash] = __pn;
if (__h->__next_ != nullptr)
__bucket_list_[__constrain_hash(__h->__next_->__hash_, __bc)] = __h.get();
}
else
{
__h->__next_ = __pn->__next_;
__pn->__next_ = __h.get();
}
__nd = __h.release();
// increment size
++size();
__inserted = true;
}
__done:
return pair<iterator, bool>(iterator(__nd), __inserted);
}
#ifndef _LIBCPP_HAS_NO_RVALUE_REFERENCES
#ifndef _LIBCPP_HAS_NO_VARIADICS
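
// __emplace_unique / __emplace_multi: the node is constructed first (which
// also computes and caches its hash), then handed to the matching
// __node_insert_* routine.  Ownership is released from the __node_holder only
// once the node is actually linked into the table.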
template <class _Tp, class _Hash, class _Equal, class _Alloc>
template <class... _Args>
pair<typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator, bool>
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__emplace_unique(_Args&&... __args)
{
__node_holder __h = __construct_node(_VSTD::forward<_Args>(__args)...);
pair<iterator, bool> __r = __node_insert_unique(__h.get());
if (__r.second)
__h.release();
return __r;
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
template <class... _Args>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__emplace_multi(_Args&&... __args)
{
__node_holder __h = __construct_node(_VSTD::forward<_Args>(__args)...);
iterator __r = __node_insert_multi(__h.get());
__h.release();
return __r;
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
template <class... _Args>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__emplace_hint_multi(
const_iterator __p, _Args&&... __args)
{
__node_holder __h = __construct_node(_VSTD::forward<_Args>(__args)...);
iterator __r = __node_insert_multi(__p, __h.get());
__h.release();
return __r;
}
#endif // _LIBCPP_HAS_NO_VARIADICS
template <class _Tp, class _Hash, class _Equal, class _Alloc>
template <class _Pp>
pair<typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator, bool>
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__insert_unique(_Pp&& __x)
{
__node_holder __h = __construct_node(_VSTD::forward<_Pp>(__x));
pair<iterator, bool> __r = __node_insert_unique(__h.get());
if (__r.second)
__h.release();
return __r;
}
#endif // _LIBCPP_HAS_NO_RVALUE_REFERENCES
#ifndef _LIBCPP_HAS_NO_RVALUE_REFERENCES
template <class _Tp, class _Hash, class _Equal, class _Alloc>
template <class _Pp>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__insert_multi(_Pp&& __x)
{
__node_holder __h = __construct_node(_VSTD::forward<_Pp>(__x));
iterator __r = __node_insert_multi(__h.get());
__h.release();
return __r;
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
template <class _Pp>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__insert_multi(const_iterator __p,
_Pp&& __x)
{
__node_holder __h = __construct_node(_VSTD::forward<_Pp>(__x));
iterator __r = __node_insert_multi(__p, __h.get());
__h.release();
return __r;
}
#else // _LIBCPP_HAS_NO_RVALUE_REFERENCES
template <class _Tp, class _Hash, class _Equal, class _Alloc>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__insert_multi(const value_type& __x)
{
__node_holder __h = __construct_node(__x);
iterator __r = __node_insert_multi(__h.get());
__h.release();
return __r;
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__insert_multi(const_iterator __p,
const value_type& __x)
{
__node_holder __h = __construct_node(__x);
iterator __r = __node_insert_multi(__p, __h.get());
__h.release();
return __r;
}
#endif // _LIBCPP_HAS_NO_RVALUE_REFERENCES
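
// rehash(__n): a request of 1 bucket is bumped to 2, and a request that is not
// an exact power of two is rounded up to the next prime, so only power-of-two
// counts are honored as given.  The bucket array grows when the result exceeds
// the current count; a smaller request may shrink it, but never below the
// count required by size() and max_load_factor() (that floor is computed in
// the same family, prime or power of two, as the current bucket count).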
template <class _Tp, class _Hash, class _Equal, class _Alloc>
void
__hash_table<_Tp, _Hash, _Equal, _Alloc>::rehash(size_type __n)
{
if (__n == 1)
__n = 2;
else if (__n & (__n - 1))
__n = __next_prime(__n);
size_type __bc = bucket_count();
if (__n > __bc)
__rehash(__n);
else if (__n < __bc)
{
__n = _VSTD::max<size_type>
(
__n,
__is_power2(__bc) ? __next_pow2(size_t(ceil(float(size()) / max_load_factor()))) :
__next_prime(size_t(ceil(float(size()) / max_load_factor())))
);
if (__n < __bc)
__rehash(__n);
}
}
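
// __rehash(__nbc): allocate and zero a new bucket array of __nbc entries, then
// walk the single linked list of nodes once, re-bucketing each node.  Runs of
// equivalent keys are moved as a unit, and the before-begin pointer stored in
// __bucket_list_ for each affected bucket is kept up to date.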
template <class _Tp, class _Hash, class _Equal, class _Alloc>
void
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__rehash(size_type __nbc)
{
__pointer_allocator& __npa = __bucket_list_.get_deleter().__alloc();
__bucket_list_.reset(__nbc > 0 ?
__pointer_alloc_traits::allocate(__npa, __nbc) : nullptr);
__bucket_list_.get_deleter().size() = __nbc;
if (__nbc > 0)
{
for (size_type __i = 0; __i < __nbc; ++__i)
__bucket_list_[__i] = nullptr;
__node_pointer __pp(static_cast<__node_pointer>(_VSTD::addressof(__p1_.first())));
__node_pointer __cp = __pp->__next_;
if (__cp != nullptr)
{
size_type __chash = __constrain_hash(__cp->__hash_, __nbc);
__bucket_list_[__chash] = __pp;
size_type __phash = __chash;
for (__pp = __cp, __cp = __cp->__next_; __cp != nullptr;
__cp = __pp->__next_)
{
__chash = __constrain_hash(__cp->__hash_, __nbc);
if (__chash == __phash)
__pp = __cp;
else
{
if (__bucket_list_[__chash] == nullptr)
{
__bucket_list_[__chash] = __pp;
__pp = __cp;
__phash = __chash;
}
else
{
__node_pointer __np = __cp;
for (; __np->__next_ != nullptr &&
key_eq()(__cp->__value_, __np->__next_->__value_);
__np = __np->__next_)
;
__pp->__next_ = __np->__next_;
__np->__next_ = __bucket_list_[__chash]->__next_;
__bucket_list_[__chash]->__next_ = __cp;
}
}
}
}
}
}
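
// find: hash the key, constrain the hash to a bucket, and scan only that
// bucket; the walk stops as soon as it reaches a node whose hash maps to a
// different bucket.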
template <class _Tp, class _Hash, class _Equal, class _Alloc>
template <class _Key>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
__hash_table<_Tp, _Hash, _Equal, _Alloc>::find(const _Key& __k)
{
size_t __hash = hash_function()(__k);
size_type __bc = bucket_count();
if (__bc != 0)
{
size_t __chash = __constrain_hash(__hash, __bc);
__node_pointer __nd = __bucket_list_[__chash];
if (__nd != nullptr)
{
for (__nd = __nd->__next_; __nd != nullptr &&
__constrain_hash(__nd->__hash_, __bc) == __chash;
__nd = __nd->__next_)
{
if (key_eq()(__nd->__value_, __k))
return iterator(__nd);
}
}
}
return end();
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
template <class _Key>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::const_iterator
__hash_table<_Tp, _Hash, _Equal, _Alloc>::find(const _Key& __k) const
{
size_t __hash = hash_function()(__k);
size_type __bc = bucket_count();
if (__bc != 0)
{
size_t __chash = __constrain_hash(__hash, __bc);
__node_const_pointer __nd = __bucket_list_[__chash];
if (__nd != nullptr)
{
for (__nd = __nd->__next_; __nd != nullptr &&
__constrain_hash(__nd->__hash_, __bc) == __chash;
__nd = __nd->__next_)
{
if (key_eq()(__nd->__value_, __k))
return const_iterator(__nd);
}
}
}
return end();
}
#ifndef _LIBCPP_HAS_NO_RVALUE_REFERENCES
#ifndef _LIBCPP_HAS_NO_VARIADICS
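
// __construct_node: allocate a node from the node allocator and construct the
// value in place.  The node comes back in a __node_holder whose deleter
// destroys the value and deallocates the storage unless the holder is
// released to the table.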
template <class _Tp, class _Hash, class _Equal, class _Alloc>
template <class ..._Args>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_holder
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__construct_node(_Args&& ...__args)
{
__node_allocator& __na = __node_alloc();
__node_holder __h(__node_traits::allocate(__na, 1), _Dp(__na));
__node_traits::construct(__na, _VSTD::addressof(__h->__value_), _VSTD::forward<_Args>(__args)...);
__h.get_deleter().__value_constructed = true;
__h->__hash_ = hash_function()(__h->__value_);
__h->__next_ = nullptr;
return __h;
}
#endif // _LIBCPP_HAS_NO_VARIADICS
template <class _Tp, class _Hash, class _Equal, class _Alloc>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_holder
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__construct_node(value_type&& __v,
size_t __hash)
{
__node_allocator& __na = __node_alloc();
__node_holder __h(__node_traits::allocate(__na, 1), _Dp(__na));
__node_traits::construct(__na, _VSTD::addressof(__h->__value_), _VSTD::move(__v));
__h.get_deleter().__value_constructed = true;
__h->__hash_ = __hash;
__h->__next_ = nullptr;
return _VSTD::move(__h);
}
#else // _LIBCPP_HAS_NO_RVALUE_REFERENCES
template <class _Tp, class _Hash, class _Equal, class _Alloc>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_holder
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__construct_node(const value_type& __v)
{
__node_allocator& __na = __node_alloc();
__node_holder __h(__node_traits::allocate(__na, 1), _Dp(__na));
__node_traits::construct(__na, _VSTD::addressof(__h->__value_), __v);
__h.get_deleter().__value_constructed = true;
__h->__hash_ = hash_function()(__h->__value_);
__h->__next_ = nullptr;
return _VSTD::move(__h);
}
#endif // _LIBCPP_HAS_NO_RVALUE_REFERENCES
template <class _Tp, class _Hash, class _Equal, class _Alloc>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_holder
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__construct_node(const value_type& __v,
size_t __hash)
{
__node_allocator& __na = __node_alloc();
__node_holder __h(__node_traits::allocate(__na, 1), _Dp(__na));
__node_traits::construct(__na, _VSTD::addressof(__h->__value_), __v);
__h.get_deleter().__value_constructed = true;
__h->__hash_ = __hash;
__h->__next_ = nullptr;
return _VSTD::move(__h);
}
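
// erase(__p): save an iterator to the next element, then unlink the node at
// __p via remove(); the __node_holder returned by remove() destroys and
// deallocates it.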
template <class _Tp, class _Hash, class _Equal, class _Alloc>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
__hash_table<_Tp, _Hash, _Equal, _Alloc>::erase(const_iterator __p)
{
__node_pointer __np = const_cast<__node_pointer>(__p.__node_);
iterator __r(__np);
++__r;
remove(__p);
return __r;
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
__hash_table<_Tp, _Hash, _Equal, _Alloc>::erase(const_iterator __first,
const_iterator __last)
{
for (const_iterator __p = __first; __first != __last; __p = __first)
{
++__first;
erase(__p);
}
__node_pointer __np = const_cast<__node_pointer>(__last.__node_);
return iterator (__np);
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
template <class _Key>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::size_type
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__erase_unique(const _Key& __k)
{
iterator __i = find(__k);
if (__i == end())
return 0;
erase(__i);
return 1;
}
template <class _Tp, class _Hash, class _Equal, class _Alloc>
template <class _Key>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::size_type
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__erase_multi(const _Key& __k)
{
size_type __r = 0;
iterator __i = find(__k);
if (__i != end())
{
iterator __e = end();
do
{
erase(__i++);
++__r;
} while (__i != __e && key_eq()(*__i, __k));
}
return __r;
}
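
// remove(__p): detach the node at __p and hand ownership back to the caller
// in a __node_holder.  The before-begin pointers in __bucket_list_ are
// repaired for both the node's own bucket and the bucket of the node that
// followed it, and size() is decremented.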
template <class _Tp, class _Hash, class _Equal, class _Alloc>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_holder
__hash_table<_Tp, _Hash, _Equal, _Alloc>::remove(const_iterator __p) _NOEXCEPT
{
// current node
__node_pointer __cn = const_cast<__node_pointer>(__p.__node_);
size_type __bc = bucket_count();
size_t __chash = __constrain_hash(__cn->__hash_, __bc);
// find previous node
__node_pointer __pn = __bucket_list_[__chash];
for (; __pn->__next_ != __cn; __pn = __pn->__next_)
;
// Fix up __bucket_list_
// if __pn is not in same bucket (before begin is not in same bucket) &&
// if __cn->__next_ is not in same bucket (nullptr is not in same bucket)
if (__pn == _VSTD::addressof(__p1_.first()) || __constrain_hash(__pn->__hash_, __bc) != __chash)
{
        if (__cn->__next_ == nullptr || __constrain_hash(__cn->__next_->__hash_, __bc) != __chash)
            __bucket_list_[__chash] = nullptr;
    }
        // if __cn->__next_ is not in same bucket (nullptr is in same bucket)
    if (__cn->__next_ != nullptr)
    {
        size_t __nhash = __constrain_hash(__cn->__next_->__hash_, __bc);
        if (__nhash != __chash)
            __bucket_list_[__nhash] = __pn;
    }
    // remove __cn
    __pn->__next_ = __cn->__next_;
    __cn->__next_ = nullptr;
    --size();
    return __node_holder(__cn, _Dp(__node_alloc(), true));
}
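
// __count_unique: containers with unique keys hold at most one element per key,
// so the count is simply whether find() succeeds.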
template <class _Tp, class _Hash, class _Equal, class _Alloc>
template <class _Key>
inline _LIBCPP_INLINE_VISIBILITY
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::size_type
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__count_unique(const _Key& __k) const
{
    return static_cast<size_type>(find(__k) != end());
}
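
// __count_multi: equivalent keys are stored adjacently, so count the run of
// elements equal to __k starting at find(__k).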
template <class _Tp, class _Hash, class _Equal, class _Alloc>
template <class _Key>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::size_type
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__count_multi(const _Key& __k) const
{
    size_type __r = 0;
    const_iterator __i = find(__k);
    if (__i != end())
    {
        const_iterator __e = end();
        do
        {
            ++__i;
            ++__r;
        } while (__i != __e && key_eq()(*__i, __k));
    }
    return __r;
}
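
// __equal_range_unique: with unique keys the range holds at most one element,
// so it is [__i, next(__i)) when __i = find(__k) succeeds, and empty otherwise.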
template <class _Tp, class _Hash, class _Equal, class _Alloc>
template <class _Key>
pair<typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator,
     typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator>
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__equal_range_unique(
        const _Key& __k)
{
    iterator __i = find(__k);
    iterator __j = __i;
    if (__i != end())
        ++__j;
    return pair<iterator, iterator>(__i, __j);
}

template <class _Tp, class _Hash, class _Equal, class _Alloc>
template <class _Key>
pair<typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::const_iterator,
     typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::const_iterator>
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__equal_range_unique(
        const _Key& __k) const
{
    const_iterator __i = find(__k);
    const_iterator __j = __i;
    if (__i != end())
        ++__j;
    return pair<const_iterator, const_iterator>(__i, __j);
}
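
// __equal_range_multi: the range starts at find(__k) and extends over the run
// of adjacent elements whose keys compare equal to __k.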
template <class _Tp, class _Hash, class _Equal, class _Alloc>
template <class _Key>
pair<typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator,
     typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator>
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__equal_range_multi(
        const _Key& __k)
{
    iterator __i = find(__k);
    iterator __j = __i;
    if (__i != end())
    {
        iterator __e = end();
        do
        {
            ++__j;
        } while (__j != __e && key_eq()(*__j, __k));
    }
    return pair<iterator, iterator>(__i, __j);
}

template <class _Tp, class _Hash, class _Equal, class _Alloc>
template <class _Key>
pair<typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::const_iterator,
     typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::const_iterator>
__hash_table<_Tp, _Hash, _Equal, _Alloc>::__equal_range_multi(
        const _Key& __k) const
{
    const_iterator __i = find(__k);
    const_iterator __j = __i;
    if (__i != end())
    {
        const_iterator __e = end();
        do
        {
            ++__j;
        } while (__j != __e && key_eq()(*__j, __k));
    }
    return pair<const_iterator, const_iterator>(__i, __j);
}
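
// Swap all state with __u: the bucket array, the allocators (per their
// propagation traits), the before-begin pointer, and the remaining members,
// then fix up the bucket entries that point at each list's before-begin node.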
template <class _Tp, class _Hash, class _Equal, class _Alloc>
void
__hash_table<_Tp, _Hash, _Equal, _Alloc>::swap(__hash_table& __u)
    _NOEXCEPT_(
        (!allocator_traits<__pointer_allocator>::propagate_on_container_swap::value ||
         __is_nothrow_swappable<__pointer_allocator>::value) &&
        (!__node_traits::propagate_on_container_swap::value ||
         __is_nothrow_swappable<__node_allocator>::value) &&
        __is_nothrow_swappable<hasher>::value &&
        __is_nothrow_swappable<key_equal>::value)
{
    {
        __node_pointer_pointer __npp = __bucket_list_.release();
        __bucket_list_.reset(__u.__bucket_list_.release());
        __u.__bucket_list_.reset(__npp);
    }
    _VSTD::swap(__bucket_list_.get_deleter().size(), __u.__bucket_list_.get_deleter().size());
    __swap_alloc(__bucket_list_.get_deleter().__alloc(),
                 __u.__bucket_list_.get_deleter().__alloc());
    __swap_alloc(__node_alloc(), __u.__node_alloc());
    _VSTD::swap(__p1_.first().__next_, __u.__p1_.first().__next_);
    __p2_.swap(__u.__p2_);
    __p3_.swap(__u.__p3_);
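    // After the swap, each non-empty container's bucket entry for its first
    // element still points at the other container's before-begin node;
    // re-point it at this container's own before-begin node.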
    if (size() > 0)
        __bucket_list_[__constrain_hash(__p1_.first().__next_->__hash_, bucket_count())] =
            static_cast<__node_pointer>(_VSTD::addressof(__p1_.first()));
    if (__u.size() > 0)
        __u.__bucket_list_[__constrain_hash(__u.__p1_.first().__next_->__hash_, __u.bucket_count())] =
            static_cast<__node_pointer>(_VSTD::addressof(__u.__p1_.first()));
}
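
// bucket_size: count the nodes in bucket __n by walking the chain from the
// node stored in __bucket_list_[__n] (the node *before* the bucket's first
// element) until the constrained hash no longer equals __n.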
template <class _Tp, class _Hash, class _Equal, class _Alloc>
typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::size_type
__hash_table<_Tp, _Hash, _Equal, _Alloc>::bucket_size(size_type __n) const
{
    __node_const_pointer __np = __bucket_list_[__n];
    size_type __bc = bucket_count();
    size_type __r = 0;
    if (__np != nullptr)
    {
        for (__np = __np->__next_; __np != nullptr &&
                                   __constrain_hash(__np->__hash_, __bc) == __n;
                                   __np = __np->__next_, ++__r)
            ;
    }
    return __r;
}
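
// Non-member swap for __hash_table; simply forwards to the member swap.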
template <class _Tp, class _Hash, class _Equal, class _Alloc>
inline _LIBCPP_INLINE_VISIBILITY
void
swap(__hash_table<_Tp, _Hash, _Equal, _Alloc>& __x,
     __hash_table<_Tp, _Hash, _Equal, _Alloc>& __y)
    _NOEXCEPT_(_NOEXCEPT_(__x.swap(__y)))
{
    __x.swap(__y);
}

_LIBCPP_END_NAMESPACE_STD

#endif  // _LIBCPP__HASH_TABLE